This patch collection contains:

* A couple of fixes for i386 host vector support.

* Some random cleanups cherry-picked from some inactive branches.

* A reposting (with fix) of my "better handling of constants" set:

  https://lists.nongnu.org/archive/html/qemu-devel/2020-05/msg02152.html

* A couple of patches that centralize the set of host constraints.
  This, I believe, is slightly cleaner than the current state of
  affairs, even before the ultimate goal of pre-validating the
  contents as well.  (A rough sketch of the idea follows below.)
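
For illustration only, here is one possible shape such a scheme could
take; the macro and variable names below are invented for this sketch
and are not taken from the series itself:

    /* Hypothetical sketch: turn each set listed in a backend's
     * tcg-target-constr.h into one singleton TCGTargetOpDef, so that
     * tcg_target_op_def() can simply "return C_O1_I2(r, r, ri);". */
    typedef struct { const char *args_ct_str[6]; } TCGTargetOpDef;

    #define C_GLUE(a, b)          a##_##b
    #define C_NAME3(pfx, a, b, c) C_GLUE(C_GLUE(C_GLUE(pfx, a), b), c)

    /* Definition side, expanded once per set named in the header: */
    #define C_O1_I2_DEF(o1, i1, i2)                                  \
        static const TCGTargetOpDef C_NAME3(c_o1_i2, o1, i1, i2) =   \
            { .args_ct_str = { #o1, #i1, #i2 } };

    /* Use side, expanded at each return statement in the backend: */
    #define C_O1_I2(o1, i1, i2)   (&C_NAME3(c_o1_i2, o1, i1, i2))

    C_O1_I2_DEF(r, r, ri)  /* one "r" output; inputs in "r" and "ri" */

The point of naming every set once, in one header per backend, is that
the contents can later be parsed and validated in a single place.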

r~


Richard Henderson (43):
  tcg: Adjust simd_desc size encoding
  tcg: Drop union from TCGArgConstraint
  tcg: Move sorted_args into TCGArgConstraint.sort_index
  tcg: Remove TCG_CT_REG
  tcg: Move some TCG_CT_* bits to TCGArgConstraint bitfields
  tcg: Remove TCGOpDef.used
  tcg/i386: Fix dupi for avx2 32-bit hosts
  tcg: Fix generation of dupi_vec for 32-bit host
  tcg/optimize: Fold dup2_vec
  tcg: Remove TCG_TARGET_HAS_cmp_vec
  tcg: Use tcg_out_dupi_vec from temp_load
  tcg: Increase tcg_out_dupi_vec immediate to int64_t
  tcg: Consolidate 3 bits into enum TCGTempKind
  tcg: Add temp_readonly
  tcg: Expand TCGTemp.val to 64-bits
  tcg: Rename struct tcg_temp_info to TempOptInfo
  tcg: Expand TempOptInfo to 64-bits
  tcg: Introduce TYPE_CONST temporaries
  tcg/optimize: Improve find_better_copy
  tcg/optimize: Adjust TempOptInfo allocation
  tcg/optimize: Use tcg_constant_internal with constant folding
  tcg: Convert tcg_gen_dupi_vec to TCG_CONST
  tcg: Use tcg_constant_i32 with icount expander
  tcg: Use tcg_constant_{i32,i64} with tcg int expanders
  tcg: Use tcg_constant_{i32,i64} with tcg plugins
  tcg: Use tcg_constant_{i32,i64,vec} with gvec expanders
  tcg/tci: Add special tci_movi_{i32,i64} opcodes
  tcg: Remove movi and dupi opcodes
  tcg: Add tcg_reg_alloc_dup2
  tcg/i386: Use tcg_constant_vec with tcg vec expanders
  tcg: Remove tcg_gen_dup{8,16,32,64}i_vec
  tcg/ppc: Use tcg_constant_vec with tcg vec expanders
  tcg/aarch64: Use tcg_constant_vec with tcg vec expanders
  tcg: Add tcg-constr.c.inc
  tcg/i386: Convert to tcg-constr.c.inc
  tcg/aarch64: Convert to tcg-constr.c.inc
  tcg/arm: Convert to tcg-constr.c.inc
  tcg/mips: Convert to tcg-constr.c.inc
  tcg/ppc: Convert to tcg-constr.c.inc
  tcg/riscv: Convert to tcg-constr.c.inc
  tcg/s390: Convert to tcg-constr.c.inc
  tcg/sparc: Convert to tcg-constr.c.inc
  tcg/tci: Convert to tcg-constr.c.inc

 include/exec/gen-icount.h       |  25 +-
 include/tcg/tcg-gvec-desc.h     |  38 ++-
 include/tcg/tcg-op.h            |  17 +-
 include/tcg/tcg-opc.h           |  11 +-
 include/tcg/tcg.h               |  72 +++--
 tcg/aarch64/tcg-target-constr.h |  31 ++
 tcg/aarch64/tcg-target.h        |   1 -
 tcg/arm/tcg-target-constr.h     |  30 ++
 tcg/i386/tcg-target-constr.h    |  55 ++++
 tcg/i386/tcg-target.h           |   1 -
 tcg/mips/tcg-target-constr.h    |  31 ++
 tcg/ppc/tcg-target-constr.h     |  37 +++
 tcg/ppc/tcg-target.h            |   1 -
 tcg/riscv/tcg-target-constr.h   |  25 ++
 tcg/s390/tcg-target-constr.h    |  24 ++
 tcg/sparc/tcg-target-constr.h   |  27 ++
 tcg/tci/tcg-target-constr.h     |  28 ++
 accel/tcg/plugin-gen.c          |  49 ++-
 tcg/optimize.c                  | 254 ++++++++-------
 tcg/tcg-op-gvec.c               | 160 +++++-----
 tcg/tcg-op-vec.c                |  48 +--
 tcg/tcg-op.c                    | 227 +++++++------
 tcg/tcg.c                       | 549 +++++++++++++++++++++++---------
 tcg/tci.c                       |   4 +-
 tcg/aarch64/tcg-target.c.inc    | 134 +++-----
 tcg/arm/tcg-target.c.inc        | 123 +++----
 tcg/i386/tcg-target.c.inc       | 336 +++++++++----------
 tcg/mips/tcg-target.c.inc       | 118 +++----
 tcg/ppc/tcg-target.c.inc        | 254 +++++++--------
 tcg/riscv/tcg-target.c.inc      | 100 ++----
 tcg/s390/tcg-target.c.inc       | 143 ++++-----
 tcg/sparc/tcg-target.c.inc      |  97 ++----
 tcg/tcg-constr.c.inc            | 108 +++++++
 tcg/tci/tcg-target.c.inc        | 369 ++++++++-------------
 34 files changed, 1893 insertions(+), 1634 deletions(-)
 create mode 100644 tcg/aarch64/tcg-target-constr.h
 create mode 100644 tcg/arm/tcg-target-constr.h
 create mode 100644 tcg/i386/tcg-target-constr.h
 create mode 100644 tcg/mips/tcg-target-constr.h
 create mode 100644 tcg/ppc/tcg-target-constr.h
 create mode 100644 tcg/riscv/tcg-target-constr.h
 create mode 100644 tcg/s390/tcg-target-constr.h
 create mode 100644 tcg/sparc/tcg-target-constr.h
 create mode 100644 tcg/tci/tcg-target-constr.h
 create mode 100644 tcg/tcg-constr.c.inc
--
2.25.1
v3: Rebase and add a few more patches.

r~

The following changes since commit 384dbdda94c0bba55bf186cccd3714bbb9b737e9:

  Merge tag 'migration-20231020-pull-request' of https://gitlab.com/juan.quintela/qemu into staging (2023-10-20 06:46:53 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20231023

for you to fetch changes up to e40df3522b384d3b7dd38187d735bd6228b20e47:

  target/xtensa: Use tcg_gen_sextract_i32 (2023-10-22 16:44:49 -0700)

----------------------------------------------------------------
tcg: Drop unused tcg_temp_free define
tcg: Introduce tcg_use_softmmu
tcg: Optimize past conditional branches
tcg: Use constant zero when expanding with divu2
tcg: Add negsetcondi
tcg: Define MO_TL
tcg: Export tcg_gen_ext_{i32,i64,tl}
target/*: Use tcg_gen_ext_*
tcg/ppc: Enable direct branching tcg_out_goto_tb with TCG_REG_TB
tcg/ppc: Use ADDPCIS for power9
tcg/ppc: Use prefixed instructions for power10
tcg/ppc: Disable TCG_REG_TB for Power9/Power10

----------------------------------------------------------------
Jordan Niethe (1):
      tcg/ppc: Enable direct branching tcg_out_goto_tb with TCG_REG_TB

Mike Frysinger (1):
      tcg: drop unused tcg_temp_free define

Paolo Bonzini (2):
      tcg: add negsetcondi
      tcg: Define MO_TL

Richard Henderson (34):
      tcg/ppc: Untabify tcg-target.c.inc
      tcg/ppc: Reinterpret tb-relative to TB+4
      tcg/ppc: Use ADDPCIS in tcg_out_tb_start
      tcg/ppc: Use ADDPCIS in tcg_out_movi_int
      tcg/ppc: Use ADDPCIS for the constant pool
      tcg/ppc: Use ADDPCIS in tcg_out_goto_tb
      tcg/ppc: Use PADDI in tcg_out_movi
      tcg/ppc: Use prefixed instructions in tcg_out_mem_long
      tcg/ppc: Use PLD in tcg_out_movi for constant pool
      tcg/ppc: Use prefixed instructions in tcg_out_dupi_vec
      tcg/ppc: Use PLD in tcg_out_goto_tb
      tcg/ppc: Disable TCG_REG_TB for Power9/Power10
      tcg: Introduce tcg_use_softmmu
      tcg: Provide guest_base fallback for system mode
      tcg/arm: Use tcg_use_softmmu
      tcg/aarch64: Use tcg_use_softmmu
      tcg/i386: Use tcg_use_softmmu
      tcg/loongarch64: Use tcg_use_softmmu
      tcg/mips: Use tcg_use_softmmu
      tcg/ppc: Use tcg_use_softmmu
      tcg/riscv: Do not reserve TCG_GUEST_BASE_REG for guest_base zero
      tcg/riscv: Use tcg_use_softmmu
      tcg/s390x: Use tcg_use_softmmu
      tcg: Use constant zero when expanding with divu2
      tcg: Optimize past conditional branches
      tcg: Add tcg_gen_{ld,st}_i128
      target/i386: Use i128 for 128 and 256-bit loads and stores
      tcg: Export tcg_gen_ext_{i32,i64,tl}
      target/arm: Use tcg_gen_ext_i64
      target/i386: Use tcg_gen_ext_tl
      target/m68k: Use tcg_gen_ext_i32
      target/rx: Use tcg_gen_ext_i32
      target/tricore: Use tcg_gen_*extract_tl
      target/xtensa: Use tcg_gen_sextract_i32

 include/exec/target_long.h       |   2 +
 include/tcg/tcg-op-common.h      |   9 +
 include/tcg/tcg-op.h             |   6 +-
 include/tcg/tcg.h                |   8 +-
 target/arm/tcg/translate-a64.c   |  37 +--
 target/i386/tcg/translate.c      |  91 +++----
 target/m68k/translate.c          |  23 +-
 target/rx/translate.c            |  11 +-
 target/tricore/translate.c       |  20 +-
 target/xtensa/translate.c        |  12 +-
 tcg/optimize.c                   |   8 +-
 tcg/tcg-op-ldst.c                |  28 +-
 tcg/tcg-op.c                     |  50 +++-
 tcg/tcg.c                        |  13 +-
 tcg/aarch64/tcg-target.c.inc     | 177 ++++++------
 tcg/arm/tcg-target.c.inc         | 203 +++++++-------
 tcg/i386/tcg-target.c.inc        | 198 +++++++-------
 tcg/loongarch64/tcg-target.c.inc | 126 +++++----
 tcg/mips/tcg-target.c.inc        | 231 ++++++++--------
 tcg/ppc/tcg-target.c.inc         | 561 ++++++++++++++++++++++++-------------
 tcg/riscv/tcg-target.c.inc       | 189 ++++++-------
 tcg/s390x/tcg-target.c.inc       | 161 ++++++-----
 22 files changed, 1152 insertions(+), 1012 deletions(-)
Improve expand_vec_shi to use sign-extraction for MO_32.
This allows a single VSPLTISB instruction to load all of
the valid shift constants.
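
As an aside, and purely for illustration (this snippet is not part of
the patch), the trick can be checked in isolation: every valid MO_32
shift count 0..31 folds into the -16..15 range that VSPLTISB encodes,
while the low five bits still name the original shift.  sextract32()
is re-declared here with the same semantics as QEMU's helper:

    #include <assert.h>
    #include <stdint.h>

    static int32_t sextract32(uint32_t value, int start, int length)
    {
        /* Extract and sign-extend, as QEMU's sextract32 does. */
        return ((int32_t)(value << (32 - length - start))) >> (32 - length);
    }

    int main(void)
    {
        for (uint32_t imm = 0; imm < 32; imm++) {
            int32_t splat = sextract32(imm, 0, 5);
            assert(splat >= -16 && splat <= 15);    /* fits VSPLTISB */
            assert((uint32_t)(splat & 31) == imm);  /* low 5 bits intact */
        }
        return 0;
    }

MO_8 and MO_16 keep the plain mask, since 0..7 and 0..15 already fit
the immediate range on their own.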
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 44 ++++++++++++++++++++++++----------------
 1 file changed, 27 insertions(+), 17 deletions(-)

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
 static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
                            TCGv_vec v1, TCGArg imm, TCGOpcode opci)
 {
-    TCGv_vec t1 = tcg_temp_new_vec(type);
+    TCGv_vec t1;
 
-    /* Splat w/bytes for xxspltib. */
-    tcg_gen_dupi_vec(MO_8, t1, imm & ((8 << vece) - 1));
+    if (vece == MO_32) {
+        /*
+         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
+         * So using negative numbers gets us the 4th bit easily.
+         */
+        imm = sextract32(imm, 0, 5);
+    } else {
+        imm &= (8 << vece) - 1;
+    }
+
+    /* Splat w/bytes for xxspltib when 2.07 allows MO_64. */
+    t1 = tcg_constant_vec(type, MO_8, imm);
     vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
               tcgv_vec_arg(v1), tcgv_vec_arg(t1));
-    tcg_temp_free_vec(t1);
 }
 
 static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
@@ -XXX,XX +XXX,XX @@ static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
 {
     TCGv_vec t1 = tcg_temp_new_vec(type);
     TCGv_vec t2 = tcg_temp_new_vec(type);
-    TCGv_vec t3, t4;
+    TCGv_vec c0, c16;
 
     switch (vece) {
     case MO_8:
@@ -XXX,XX +XXX,XX @@ static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
 
     case MO_32:
         tcg_debug_assert(!have_isa_2_07);
-        t3 = tcg_temp_new_vec(type);
-        t4 = tcg_temp_new_vec(type);
-        tcg_gen_dupi_vec(MO_8, t4, -16);
+        /*
+         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
+         * So using -16 is a quick way to represent 16.
+         */
+        c16 = tcg_constant_vec(type, MO_8, -16);
+        c0 = tcg_constant_vec(type, MO_8, 0);
+
         vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
-                  tcgv_vec_arg(v2), tcgv_vec_arg(t4));
+                  tcgv_vec_arg(v2), tcgv_vec_arg(c16));
         vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
                   tcgv_vec_arg(v1), tcgv_vec_arg(v2));
-        tcg_gen_dupi_vec(MO_8, t3, 0);
-        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t3),
-                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(t3));
-        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t3),
-                  tcgv_vec_arg(t3), tcgv_vec_arg(t4));
-        tcg_gen_add_vec(MO_32, v0, t2, t3);
-        tcg_temp_free_vec(t3);
-        tcg_temp_free_vec(t4);
+        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
+                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
+        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
+                  tcgv_vec_arg(t1), tcgv_vec_arg(c16));
+        tcg_gen_add_vec(MO_32, v0, t1, t2);
         break;
 
     default:
--
2.25.1

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline bool in_range_b(tcg_target_long target)
 }
 
 static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
-                              const tcg_insn_unit *target)
+                               const tcg_insn_unit *target)
 {
     ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
     tcg_debug_assert(in_range_b(disp));
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
 }
 
 static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
-                              const tcg_insn_unit *target)
+                               const tcg_insn_unit *target)
 {
     ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
     tcg_debug_assert(disp == (int16_t) disp);
@@ -XXX,XX +XXX,XX @@ static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
                   tcgv_vec_arg(t1), tcgv_vec_arg(t2));
         vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
                   tcgv_vec_arg(v0), tcgv_vec_arg(t1));
-	break;
+        break;
 
     case MO_32:
         tcg_debug_assert(!have_isa_2_07);
--
2.34.1
From: Jordan Niethe <jniethe5@gmail.com>

Direct branch patching was disabled when using TCG_REG_TB in commit
736a1588c1 ("tcg/ppc: Fix race in goto_tb implementation").

The issue with direct branch patching with TCG_REG_TB is the lack of
synchronization between the new TCG_REG_TB being established and the
direct branch being patched in.

If each translation block is responsible for establishing its own
TCG_REG_TB then there can be no synchronization issue.

Make each translation block begin by setting up its own TCG_REG_TB.
Use the preferred 'bcl 20,31,$+4' sequence.
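
As a purely illustrative aside (not part of the patch, and assuming a
POWER host with GCC-style inline asm), the sequence can be written as
a standalone helper.  'bcl 20,31,$+4' is a branch-always-and-link to
the very next instruction, so its only architectural effect is to
deposit that instruction's address in LR; this particular form is the
preferred one because it does not disturb the link-stack branch
predictor:

    static inline void *current_nia(void)
    {
        void *nia;
        asm volatile("bcl  20, 31, 0f\n"  /* LR <- address of label 0 */
                     "0:  mflr %0"        /* copy LR into a GPR */
                     : "=r"(nia) : : "lr");
        return nia;
    }

In tcg_out_tb_start() the value that lands in TCG_REG_TB this way is
the address of the mflr itself, i.e. TB+4, which the trailing addi in
the hunk below corrects back to TB.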

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
[rth: Split out tcg_out_tb_start, power9 addpcis]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 48 ++++++++++++++--------------------------
 1 file changed, 17 insertions(+), 31 deletions(-)

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
 
     tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
     tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
-    if (USE_REG_TB) {
-        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
-    }
     tcg_out32(s, BCCTR | BO_ALWAYS);
 
     /* Epilogue */
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
 
 static void tcg_out_tb_start(TCGContext *s)
 {
-    /* nothing to do */
+    /* Load TCG_REG_TB. */
+    if (USE_REG_TB) {
+        /* bcl 20,31,$+4 (preferred form for getting nia) */
+        tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK);
+        tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR);
+        tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, -4));
+    }
 }
 
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
 {
     uintptr_t ptr = get_jmp_target_addr(s, which);
 
+    /* Direct branch will be patched by tb_target_set_jmp_target. */
+    set_jmp_insn_offset(s, which);
+    tcg_out32(s, NOP);
+
+    /* When branch is out of range, fall through to indirect. */
     if (USE_REG_TB) {
         ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
-        tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
-
-        /* TODO: Use direct branches when possible. */
-        set_jmp_insn_offset(s, which);
-        tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
-
-        tcg_out32(s, BCCTR | BO_ALWAYS);
-
-        /* For the unlinked case, need to reset TCG_REG_TB. */
-        set_jmp_reset_offset(s, which);
-        tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
-                         -tcg_current_code_size(s));
+        tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset);
     } else {
-        /* Direct branch will be patched by tb_target_set_jmp_target. */
-        set_jmp_insn_offset(s, which);
-        tcg_out32(s, NOP);
-
-        /* When branch is out of range, fall through to indirect. */
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
         tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
-        tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
-        tcg_out32(s, BCCTR | BO_ALWAYS);
-        set_jmp_reset_offset(s, which);
     }
+
+    tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
+    tcg_out32(s, BCCTR | BO_ALWAYS);
+    set_jmp_reset_offset(s, which);
 }
 
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
     intptr_t diff = addr - jmp_rx;
     tcg_insn_unit insn;
 
-    if (USE_REG_TB) {
-        return;
-    }
-
     if (in_range_b(diff)) {
         insn = B | (diff & 0x3fffffc);
     } else {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch (opc) {
     case INDEX_op_goto_ptr:
         tcg_out32(s, MTSPR | RS(args[0]) | CTR);
-        if (USE_REG_TB) {
-            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
-        }
         tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
         tcg_out32(s, BCCTR | BO_ALWAYS);
         break;
--
2.34.1

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/s390/tcg-target-constr.h |  24 +++++++
 tcg/s390/tcg-target.c.inc    | 119 +++++++++++++++--------------------
 2 files changed, 76 insertions(+), 67 deletions(-)
 create mode 100644 tcg/s390/tcg-target-constr.h

diff --git a/tcg/s390/tcg-target-constr.h b/tcg/s390/tcg-target-constr.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/s390/tcg-target-constr.h
@@ -XXX,XX +XXX,XX @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * S390 target-specific operand constraints.
+ * Copyright (c) 2020 Linaro
+ */
+
+C_O0_I1(r)
+C_O0_I2(L, L)
+C_O0_I2(r, r)
+C_O0_I2(r, ri)
+C_O1_I1(r, L)
+C_O1_I1(r, r)
+C_O1_I2(r, 0, ri)
+C_O1_I2(r, 0, rI)
+C_O1_I2(r, 0, rJ)
+C_O1_I2(r, r, ri)
+C_O1_I2(r, rZ, r)
+C_O1_I4(r, r, ri, r, 0)
+C_O1_I4(r, r, ri, rI, 0)
+C_O2_I2(b, a, 0, r)
+C_O2_I3(b, a, 0, 1, r)
+C_O2_I4(r, r, 0, 1, rA, r)
+C_O2_I4(r, r, 0, 1, ri, r)
+C_O2_I4(r, r, 0, 1, r, r)
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390/tcg-target.c.inc
+++ b/tcg/s390/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     }
 }
 
+/* Define all constraint sets. */
+#include "../tcg-constr.c.inc"
+
 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
 {
-    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
-    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
-    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
-    static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
-    static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
-    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
-    static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } };
-    static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } };
-    static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } };
-    static const TCGTargetOpDef a2_r
-        = { .args_ct_str = { "r", "r", "0", "1", "r", "r" } };
-    static const TCGTargetOpDef a2_ri
-        = { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } };
-    static const TCGTargetOpDef a2_rA
-        = { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } };
-
     switch (op) {
     case INDEX_op_goto_ptr:
-        return &r;
+        return C_O0_I1(r);
 
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     case INDEX_op_ld32u_i64:
     case INDEX_op_ld32s_i64:
     case INDEX_op_ld_i64:
+        return C_O1_I1(r, r);
+
     case INDEX_op_st8_i32:
     case INDEX_op_st8_i64:
     case INDEX_op_st16_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     case INDEX_op_st_i32:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-        return &r_r;
+        return C_O0_I2(r, r);
 
     case INDEX_op_add_i32:
     case INDEX_op_add_i64:
-        return &r_r_ri;
+    case INDEX_op_shl_i64:
+    case INDEX_op_shr_i64:
+    case INDEX_op_sar_i64:
+    case INDEX_op_rotl_i32:
+    case INDEX_op_rotl_i64:
+    case INDEX_op_rotr_i32:
+    case INDEX_op_rotr_i64:
+    case INDEX_op_clz_i64:
+    case INDEX_op_setcond_i32:
+    case INDEX_op_setcond_i64:
+        return C_O1_I2(r, r, ri);
+
     case INDEX_op_sub_i32:
     case INDEX_op_sub_i64:
     case INDEX_op_and_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     case INDEX_op_or_i64:
     case INDEX_op_xor_i32:
     case INDEX_op_xor_i64:
-        return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
+        return (s390_facilities & FACILITY_DISTINCT_OPS
+                ? C_O1_I2(r, r, ri)
+                : C_O1_I2(r, 0, ri));
 
     case INDEX_op_mul_i32:
         /* If we have the general-instruction-extensions, then we have
            MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we
            have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
-        return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI);
+        return (s390_facilities & FACILITY_GEN_INST_EXT
+                ? C_O1_I2(r, 0, ri)
+                : C_O1_I2(r, 0, rI));
+
     case INDEX_op_mul_i64:
-        return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI);
+        return (s390_facilities & FACILITY_GEN_INST_EXT
+                ? C_O1_I2(r, 0, rJ)
+                : C_O1_I2(r, 0, rI));
 
     case INDEX_op_shl_i32:
     case INDEX_op_shr_i32:
     case INDEX_op_sar_i32:
-        return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
-
-    case INDEX_op_shl_i64:
-    case INDEX_op_shr_i64:
-    case INDEX_op_sar_i64:
-        return &r_r_ri;
-
-    case INDEX_op_rotl_i32:
-    case INDEX_op_rotl_i64:
-    case INDEX_op_rotr_i32:
-    case INDEX_op_rotr_i64:
-        return &r_r_ri;
+        return (s390_facilities & FACILITY_DISTINCT_OPS
+                ? C_O1_I2(r, r, ri)
+                : C_O1_I2(r, 0, ri));
 
     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
-        return &r_ri;
+        return C_O0_I2(r, ri);
 
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap16_i64:
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     case INDEX_op_extu_i32_i64:
     case INDEX_op_extract_i32:
     case INDEX_op_extract_i64:
-        return &r_r;
-
-    case INDEX_op_clz_i64:
-    case INDEX_op_setcond_i32:
-    case INDEX_op_setcond_i64:
-        return &r_r_ri;
+        return C_O1_I1(r, r);
 
     case INDEX_op_qemu_ld_i32:
     case INDEX_op_qemu_ld_i64:
-        return &r_L;
+        return C_O1_I1(r, L);
     case INDEX_op_qemu_st_i64:
     case INDEX_op_qemu_st_i32:
-        return &L_L;
+        return C_O0_I2(L, L);
 
     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64:
-        {
-            static const TCGTargetOpDef dep
-                = { .args_ct_str = { "r", "rZ", "r" } };
-            return &dep;
-        }
+        return C_O1_I2(r, rZ, r);
+
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
-        {
-            static const TCGTargetOpDef movc
-                = { .args_ct_str = { "r", "r", "ri", "r", "0" } };
-            static const TCGTargetOpDef movc_l
-                = { .args_ct_str = { "r", "r", "ri", "rI", "0" } };
-            return (s390_facilities & FACILITY_LOAD_ON_COND2 ? &movc_l : &movc);
-        }
+        return (s390_facilities & FACILITY_LOAD_ON_COND2
+                ? C_O1_I4(r, r, ri, rI, 0)
+                : C_O1_I4(r, r, ri, r, 0));
+
     case INDEX_op_div2_i32:
     case INDEX_op_div2_i64:
     case INDEX_op_divu2_i32:
     case INDEX_op_divu2_i64:
-        {
-            static const TCGTargetOpDef div2
-                = { .args_ct_str = { "b", "a", "0", "1", "r" } };
-            return &div2;
-        }
+        return C_O2_I3(b, a, 0, 1, r);
+
     case INDEX_op_mulu2_i64:
-        {
-            static const TCGTargetOpDef mul2
-                = { .args_ct_str = { "b", "a", "0", "r" } };
-            return &mul2;
-        }
+        return C_O2_I2(b, a, 0, r);
 
     case INDEX_op_add2_i32:
     case INDEX_op_sub2_i32:
-        return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r);
+        return (s390_facilities & FACILITY_EXT_IMM
+                ? C_O2_I4(r, r, 0, 1, ri, r)
+                : C_O2_I4(r, r, 0, 1, r, r));
+
     case INDEX_op_add2_i64:
     case INDEX_op_sub2_i64:
-        return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r);
+        return (s390_facilities & FACILITY_EXT_IMM
+                ? C_O2_I4(r, r, 0, 1, rA, r)
+                : C_O2_I4(r, r, 0, 1, r, r));
 
     default:
         break;
--
2.25.1
239 | diff view generated by jsdifflib |
1 | In most, but not all, places that we check for TEMP_FIXED, | 1 | It saves one insn to load the address of TB+4 instead of TB. |
---|---|---|---|
2 | we are really testing that we do not modify the temporary. | 2 | Adjust all of the indexing to match. |
3 | 3 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 5 | --- |
8 | include/tcg/tcg.h | 5 +++++ | 6 | tcg/ppc/tcg-target.c.inc | 15 ++++++++++----- |
9 | tcg/tcg.c | 21 ++++++++++----------- | 7 | 1 file changed, 10 insertions(+), 5 deletions(-) |
10 | 2 files changed, 15 insertions(+), 11 deletions(-) | ||
11 | 8 | ||
12 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | 9 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
13 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/include/tcg/tcg.h | 11 | --- a/tcg/ppc/tcg-target.c.inc |
15 | +++ b/include/tcg/tcg.h | 12 | +++ b/tcg/ppc/tcg-target.c.inc |
16 | @@ -XXX,XX +XXX,XX @@ struct TCGContext { | 13 | @@ -XXX,XX +XXX,XX @@ static const int tcg_target_callee_save_regs[] = { |
17 | target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS]; | 14 | TCG_REG_R31 |
18 | }; | 15 | }; |
19 | 16 | ||
20 | +static inline bool temp_readonly(TCGTemp *ts) | 17 | +/* For PPC, we use TB+4 instead of TB as the base. */ |
18 | +static inline ptrdiff_t ppc_tbrel_diff(TCGContext *s, const void *target) | ||
21 | +{ | 19 | +{ |
22 | + return ts->kind == TEMP_FIXED; | 20 | + return tcg_tbrel_diff(s, target) - 4; |
23 | +} | 21 | +} |
24 | + | 22 | + |
25 | extern TCGContext tcg_init_ctx; | 23 | static inline bool in_range_b(tcg_target_long target) |
26 | extern __thread TCGContext *tcg_ctx; | ||
27 | extern TCGv_env cpu_env; | ||
28 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
29 | index XXXXXXX..XXXXXXX 100644 | ||
30 | --- a/tcg/tcg.c | ||
31 | +++ b/tcg/tcg.c | ||
32 | @@ -XXX,XX +XXX,XX @@ static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet); | ||
33 | mark it free; otherwise mark it dead. */ | ||
34 | static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead) | ||
35 | { | 24 | { |
36 | - if (ts->kind == TEMP_FIXED) { | 25 | return target == sextract64(target, 0, 26); |
37 | + if (temp_readonly(ts)) { | 26 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, |
27 | } | ||
28 | |||
29 | /* Load addresses within the TB with one insn. */ | ||
30 | - tb_diff = tcg_tbrel_diff(s, (void *)arg); | ||
31 | + tb_diff = ppc_tbrel_diff(s, (void *)arg); | ||
32 | if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) { | ||
33 | tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff)); | ||
34 | return; | ||
35 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, | ||
36 | /* Use the constant pool, if possible. */ | ||
37 | if (!in_prologue && USE_REG_TB) { | ||
38 | new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr, | ||
39 | - tcg_tbrel_diff(s, NULL)); | ||
40 | + ppc_tbrel_diff(s, NULL)); | ||
41 | tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0)); | ||
38 | return; | 42 | return; |
39 | } | 43 | } |
40 | if (ts->val_type == TEMP_VAL_REG) { | 44 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, |
41 | @@ -XXX,XX +XXX,XX @@ static inline void temp_dead(TCGContext *s, TCGTemp *ts) | 45 | */ |
42 | static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs, | 46 | if (USE_REG_TB) { |
43 | TCGRegSet preferred_regs, int free_or_dead) | 47 | rel = R_PPC_ADDR16; |
44 | { | 48 | - add = tcg_tbrel_diff(s, NULL); |
45 | - if (ts->kind == TEMP_FIXED) { | 49 | + add = ppc_tbrel_diff(s, NULL); |
46 | + if (temp_readonly(ts)) { | 50 | } else { |
47 | return; | 51 | rel = R_PPC_ADDR32; |
52 | add = 0; | ||
53 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tb_start(TCGContext *s) | ||
54 | /* bcl 20,31,$+4 (preferred form for getting nia) */ | ||
55 | tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK); | ||
56 | tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR); | ||
57 | - tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, -4)); | ||
48 | } | 58 | } |
49 | if (!ts->mem_coherent) { | ||
50 | @@ -XXX,XX +XXX,XX @@ static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs) | ||
51 | { | ||
52 | /* The liveness analysis already ensures that globals are back | ||
53 | in memory. Keep an tcg_debug_assert for safety. */ | ||
54 | - tcg_debug_assert(ts->val_type == TEMP_VAL_MEM | ||
55 | - || ts->kind == TEMP_FIXED); | ||
56 | + tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts)); | ||
57 | } | 59 | } |
58 | 60 | ||
59 | /* save globals to their canonical location and assume they can be | 61 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which) |
60 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots, | 62 | |
61 | TCGRegSet preferred_regs) | 63 | /* When branch is out of range, fall through to indirect. */ |
62 | { | 64 | if (USE_REG_TB) { |
63 | /* ENV should not be modified. */ | 65 | - ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr); |
64 | - tcg_debug_assert(ots->kind != TEMP_FIXED); | 66 | + ptrdiff_t offset = ppc_tbrel_diff(s, (void *)ptr); |
65 | + tcg_debug_assert(!temp_readonly(ots)); | 67 | tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset); |
66 | 68 | } else { | |
67 | /* The movi is not explicitly generated here. */ | 69 | tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr); |
68 | if (ots->val_type == TEMP_VAL_REG) { | ||
69 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op) | ||
70 | ts = arg_temp(op->args[1]); | ||
71 | |||
72 | /* ENV should not be modified. */ | ||
73 | - tcg_debug_assert(ots->kind != TEMP_FIXED); | ||
74 | + tcg_debug_assert(!temp_readonly(ots)); | ||
75 | |||
76 | /* Note that otype != itype for no-op truncation. */ | ||
77 | otype = ots->type; | ||
78 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op) | ||
79 | * Store the source register into the destination slot | ||
80 | * and leave the destination temp as TEMP_VAL_MEM. | ||
81 | */ | ||
82 | - assert(ots->kind != TEMP_FIXED); | ||
83 | + assert(!temp_readonly(ots)); | ||
84 | if (!ts->mem_allocated) { | ||
85 | temp_allocate_frame(s, ots); | ||
86 | } | ||
87 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op) | ||
88 | its = arg_temp(op->args[1]); | ||
89 | |||
90 | /* ENV should not be modified. */ | ||
91 | - tcg_debug_assert(ots->kind != TEMP_FIXED); | ||
92 | + tcg_debug_assert(!temp_readonly(ots)); | ||
93 | |||
94 | itype = its->type; | ||
95 | vece = TCGOP_VECE(op); | ||
96 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
97 | ts = arg_temp(arg); | ||
98 | |||
99 | /* ENV should not be modified. */ | ||
100 | - tcg_debug_assert(ts->kind != TEMP_FIXED); | ||
101 | + tcg_debug_assert(!temp_readonly(ts)); | ||
102 | |||
103 | if (arg_ct->oalias && !const_args[arg_ct->alias_index]) { | ||
104 | reg = new_args[arg_ct->alias_index]; | ||
105 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
106 | ts = arg_temp(op->args[i]); | ||
107 | |||
108 | /* ENV should not be modified. */ | ||
109 | - tcg_debug_assert(ts->kind != TEMP_FIXED); | ||
110 | + tcg_debug_assert(!temp_readonly(ts)); | ||
111 | |||
112 | if (NEED_SYNC_ARG(i)) { | ||
113 | temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i)); | ||
114 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) | ||
115 | ts = arg_temp(arg); | ||
116 | |||
117 | /* ENV should not be modified. */ | ||
118 | - tcg_debug_assert(ts->kind != TEMP_FIXED); | ||
119 | + tcg_debug_assert(!temp_readonly(ts)); | ||
120 | |||
121 | reg = tcg_target_call_oarg_regs[i]; | ||
122 | tcg_debug_assert(s->reg_to_temp[reg] == NULL); | ||
123 | -- | 70 | -- |
124 | 2.25.1 | 71 | 2.34.1 |
125 | |||
126 | diff view generated by jsdifflib |
1 | This does require finishing the conversion to tcg_target_op_def. | 1 | With ISA v3.0, we can use ADDPCIS instead of BCL+MFLR to load NIA. |
---|---|---|---|
2 | Remove quite a lot of ifdefs, since we can reference opcodes | ||
3 | even if they are not implemented. | ||
4 | 2 | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 4 | --- |
7 | tcg/tci/tcg-target-constr.h | 28 +++ | 5 | tcg/ppc/tcg-target.c.inc | 25 ++++++++++++++++++++++--- |
8 | tcg/tci/tcg-target.c.inc | 360 ++++++++++++++---------------------- | 6 | 1 file changed, 22 insertions(+), 3 deletions(-) |
9 | 2 files changed, 163 insertions(+), 225 deletions(-) | ||
10 | create mode 100644 tcg/tci/tcg-target-constr.h | ||
11 | 7 | ||
12 | diff --git a/tcg/tci/tcg-target-constr.h b/tcg/tci/tcg-target-constr.h | 8 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
13 | new file mode 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
14 | index XXXXXXX..XXXXXXX | 10 | --- a/tcg/ppc/tcg-target.c.inc |
15 | --- /dev/null | 11 | +++ b/tcg/ppc/tcg-target.c.inc |
16 | +++ b/tcg/tci/tcg-target-constr.h | 12 | @@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) |
17 | @@ -XXX,XX +XXX,XX @@ | 13 | #define CRNAND XO19(225) |
18 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | 14 | #define CROR XO19(449) |
19 | +/* | 15 | #define CRNOR XO19( 33) |
20 | + * TCI target-specific operand constaints. | 16 | +#define ADDPCIS XO19( 2) |
21 | + * Copyright (c) 2020 Linaro | 17 | |
22 | + */ | 18 | #define EXTSB XO31(954) |
19 | #define EXTSH XO31(922) | ||
20 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c) | ||
21 | tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2)); | ||
22 | } | ||
23 | |||
24 | +static void tcg_out_addpcis(TCGContext *s, TCGReg dst, intptr_t imm) | ||
25 | +{ | ||
26 | + uint32_t d0, d1, d2; | ||
23 | + | 27 | + |
24 | +C_O0_I2(r, r) | 28 | + tcg_debug_assert((imm & 0xffff) == 0); |
25 | +C_O0_I2(r, ri) | 29 | + tcg_debug_assert(imm == (int32_t)imm); |
26 | +C_O0_I2(r, S) | ||
27 | +C_O0_I3(r, r, S) | ||
28 | +C_O0_I3(r, S, S) | ||
29 | +C_O0_I4(r, r, S, S) | ||
30 | +C_O1_I1(r, L) | ||
31 | +C_O1_I1(r, r) | ||
32 | +C_O1_I2(r, 0, r) | ||
33 | +C_O1_I2(r, L, L) | ||
34 | +C_O1_I2(r, ri, ri) | ||
35 | +C_O1_I2(r, r, r) | ||
36 | +C_O1_I2(r, r, ri) | ||
37 | +C_O2_I1(r, r, L) | ||
38 | +C_O2_I2(r, r, L, L) | ||
39 | + | 30 | + |
40 | +#if TCG_TARGET_REG_BITS == 32 | 31 | + d2 = extract32(imm, 16, 1); |
41 | +C_O0_I4(r, r, ri, ri) | 32 | + d1 = extract32(imm, 17, 5); |
42 | +C_O1_I4(r, r, r, ri, ri) | 33 | + d0 = extract32(imm, 22, 10); |
43 | +C_O2_I2(r, r, r, r) | 34 | + tcg_out32(s, ADDPCIS | RT(dst) | (d1 << 16) | (d0 << 6) | d2); |
44 | +C_O2_I4(r, r, r, r, r, r) | 35 | +} |
45 | +#endif | 36 | + |
46 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | 37 | static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags) |
47 | index XXXXXXX..XXXXXXX 100644 | ||
48 | --- a/tcg/tci/tcg-target.c.inc | ||
49 | +++ b/tcg/tci/tcg-target.c.inc | ||
50 | @@ -XXX,XX +XXX,XX @@ | ||
51 | /* Bitfield n...m (in 32 bit value). */ | ||
52 | #define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m) | ||
53 | |||
54 | -/* Macros used in tcg_target_op_defs. */ | ||
55 | -#define R "r" | ||
56 | -#define RI "ri" | ||
57 | -#if TCG_TARGET_REG_BITS == 32 | ||
58 | -# define R64 "r", "r" | ||
59 | -#else | ||
60 | -# define R64 "r" | ||
61 | -#endif | ||
62 | -#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS | ||
63 | -# define L "L", "L" | ||
64 | -# define S "S", "S" | ||
65 | -#else | ||
66 | -# define L "L" | ||
67 | -# define S "S" | ||
68 | -#endif | ||
69 | - | ||
70 | -/* TODO: documentation. */ | ||
71 | -static const TCGTargetOpDef tcg_target_op_defs[] = { | ||
72 | - { INDEX_op_exit_tb, { NULL } }, | ||
73 | - { INDEX_op_goto_tb, { NULL } }, | ||
74 | - { INDEX_op_br, { NULL } }, | ||
75 | - | ||
76 | - { INDEX_op_ld8u_i32, { R, R } }, | ||
77 | - { INDEX_op_ld8s_i32, { R, R } }, | ||
78 | - { INDEX_op_ld16u_i32, { R, R } }, | ||
79 | - { INDEX_op_ld16s_i32, { R, R } }, | ||
80 | - { INDEX_op_ld_i32, { R, R } }, | ||
81 | - { INDEX_op_st8_i32, { R, R } }, | ||
82 | - { INDEX_op_st16_i32, { R, R } }, | ||
83 | - { INDEX_op_st_i32, { R, R } }, | ||
84 | - | ||
85 | - { INDEX_op_add_i32, { R, RI, RI } }, | ||
86 | - { INDEX_op_sub_i32, { R, RI, RI } }, | ||
87 | - { INDEX_op_mul_i32, { R, RI, RI } }, | ||
88 | -#if TCG_TARGET_HAS_div_i32 | ||
89 | - { INDEX_op_div_i32, { R, R, R } }, | ||
90 | - { INDEX_op_divu_i32, { R, R, R } }, | ||
91 | - { INDEX_op_rem_i32, { R, R, R } }, | ||
92 | - { INDEX_op_remu_i32, { R, R, R } }, | ||
93 | -#elif TCG_TARGET_HAS_div2_i32 | ||
94 | - { INDEX_op_div2_i32, { R, R, "0", "1", R } }, | ||
95 | - { INDEX_op_divu2_i32, { R, R, "0", "1", R } }, | ||
96 | -#endif | ||
97 | - /* TODO: Does R, RI, RI result in faster code than R, R, RI? | ||
98 | - If both operands are constants, we can optimize. */ | ||
99 | - { INDEX_op_and_i32, { R, RI, RI } }, | ||
100 | -#if TCG_TARGET_HAS_andc_i32 | ||
101 | - { INDEX_op_andc_i32, { R, RI, RI } }, | ||
102 | -#endif | ||
103 | -#if TCG_TARGET_HAS_eqv_i32 | ||
104 | - { INDEX_op_eqv_i32, { R, RI, RI } }, | ||
105 | -#endif | ||
106 | -#if TCG_TARGET_HAS_nand_i32 | ||
107 | - { INDEX_op_nand_i32, { R, RI, RI } }, | ||
108 | -#endif | ||
109 | -#if TCG_TARGET_HAS_nor_i32 | ||
110 | - { INDEX_op_nor_i32, { R, RI, RI } }, | ||
111 | -#endif | ||
112 | - { INDEX_op_or_i32, { R, RI, RI } }, | ||
113 | -#if TCG_TARGET_HAS_orc_i32 | ||
114 | - { INDEX_op_orc_i32, { R, RI, RI } }, | ||
115 | -#endif | ||
116 | - { INDEX_op_xor_i32, { R, RI, RI } }, | ||
117 | - { INDEX_op_shl_i32, { R, RI, RI } }, | ||
118 | - { INDEX_op_shr_i32, { R, RI, RI } }, | ||
119 | - { INDEX_op_sar_i32, { R, RI, RI } }, | ||
120 | -#if TCG_TARGET_HAS_rot_i32 | ||
121 | - { INDEX_op_rotl_i32, { R, RI, RI } }, | ||
122 | - { INDEX_op_rotr_i32, { R, RI, RI } }, | ||
123 | -#endif | ||
124 | -#if TCG_TARGET_HAS_deposit_i32 | ||
125 | - { INDEX_op_deposit_i32, { R, "0", R } }, | ||
126 | -#endif | ||
127 | - | ||
128 | - { INDEX_op_brcond_i32, { R, RI } }, | ||
129 | - | ||
130 | - { INDEX_op_setcond_i32, { R, R, RI } }, | ||
131 | -#if TCG_TARGET_REG_BITS == 64 | ||
132 | - { INDEX_op_setcond_i64, { R, R, RI } }, | ||
133 | -#endif /* TCG_TARGET_REG_BITS == 64 */ | ||
134 | - | ||
135 | -#if TCG_TARGET_REG_BITS == 32 | ||
136 | - /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */ | ||
137 | - { INDEX_op_add2_i32, { R, R, R, R, R, R } }, | ||
138 | - { INDEX_op_sub2_i32, { R, R, R, R, R, R } }, | ||
139 | - { INDEX_op_brcond2_i32, { R, R, RI, RI } }, | ||
140 | - { INDEX_op_mulu2_i32, { R, R, R, R } }, | ||
141 | - { INDEX_op_setcond2_i32, { R, R, R, RI, RI } }, | ||
142 | -#endif | ||
143 | - | ||
144 | -#if TCG_TARGET_HAS_not_i32 | ||
145 | - { INDEX_op_not_i32, { R, R } }, | ||
146 | -#endif | ||
147 | -#if TCG_TARGET_HAS_neg_i32 | ||
148 | - { INDEX_op_neg_i32, { R, R } }, | ||
149 | -#endif | ||
150 | - | ||
151 | -#if TCG_TARGET_REG_BITS == 64 | ||
152 | - { INDEX_op_ld8u_i64, { R, R } }, | ||
153 | - { INDEX_op_ld8s_i64, { R, R } }, | ||
154 | - { INDEX_op_ld16u_i64, { R, R } }, | ||
155 | - { INDEX_op_ld16s_i64, { R, R } }, | ||
156 | - { INDEX_op_ld32u_i64, { R, R } }, | ||
157 | - { INDEX_op_ld32s_i64, { R, R } }, | ||
158 | - { INDEX_op_ld_i64, { R, R } }, | ||
159 | - | ||
160 | - { INDEX_op_st8_i64, { R, R } }, | ||
161 | - { INDEX_op_st16_i64, { R, R } }, | ||
162 | - { INDEX_op_st32_i64, { R, R } }, | ||
163 | - { INDEX_op_st_i64, { R, R } }, | ||
164 | - | ||
165 | - { INDEX_op_add_i64, { R, RI, RI } }, | ||
166 | - { INDEX_op_sub_i64, { R, RI, RI } }, | ||
167 | - { INDEX_op_mul_i64, { R, RI, RI } }, | ||
168 | -#if TCG_TARGET_HAS_div_i64 | ||
169 | - { INDEX_op_div_i64, { R, R, R } }, | ||
170 | - { INDEX_op_divu_i64, { R, R, R } }, | ||
171 | - { INDEX_op_rem_i64, { R, R, R } }, | ||
172 | - { INDEX_op_remu_i64, { R, R, R } }, | ||
173 | -#elif TCG_TARGET_HAS_div2_i64 | ||
174 | - { INDEX_op_div2_i64, { R, R, "0", "1", R } }, | ||
175 | - { INDEX_op_divu2_i64, { R, R, "0", "1", R } }, | ||
176 | -#endif | ||
177 | - { INDEX_op_and_i64, { R, RI, RI } }, | ||
178 | -#if TCG_TARGET_HAS_andc_i64 | ||
179 | - { INDEX_op_andc_i64, { R, RI, RI } }, | ||
180 | -#endif | ||
181 | -#if TCG_TARGET_HAS_eqv_i64 | ||
182 | - { INDEX_op_eqv_i64, { R, RI, RI } }, | ||
183 | -#endif | ||
184 | -#if TCG_TARGET_HAS_nand_i64 | ||
185 | - { INDEX_op_nand_i64, { R, RI, RI } }, | ||
186 | -#endif | ||
187 | -#if TCG_TARGET_HAS_nor_i64 | ||
188 | - { INDEX_op_nor_i64, { R, RI, RI } }, | ||
189 | -#endif | ||
190 | - { INDEX_op_or_i64, { R, RI, RI } }, | ||
191 | -#if TCG_TARGET_HAS_orc_i64 | ||
192 | - { INDEX_op_orc_i64, { R, RI, RI } }, | ||
193 | -#endif | ||
194 | - { INDEX_op_xor_i64, { R, RI, RI } }, | ||
195 | - { INDEX_op_shl_i64, { R, RI, RI } }, | ||
196 | - { INDEX_op_shr_i64, { R, RI, RI } }, | ||
197 | - { INDEX_op_sar_i64, { R, RI, RI } }, | ||
198 | -#if TCG_TARGET_HAS_rot_i64 | ||
199 | - { INDEX_op_rotl_i64, { R, RI, RI } }, | ||
200 | - { INDEX_op_rotr_i64, { R, RI, RI } }, | ||
201 | -#endif | ||
202 | -#if TCG_TARGET_HAS_deposit_i64 | ||
203 | - { INDEX_op_deposit_i64, { R, "0", R } }, | ||
204 | -#endif | ||
205 | - { INDEX_op_brcond_i64, { R, RI } }, | ||
206 | - | ||
207 | -#if TCG_TARGET_HAS_ext8s_i64 | ||
208 | - { INDEX_op_ext8s_i64, { R, R } }, | ||
209 | -#endif | ||
210 | -#if TCG_TARGET_HAS_ext16s_i64 | ||
211 | - { INDEX_op_ext16s_i64, { R, R } }, | ||
212 | -#endif | ||
213 | -#if TCG_TARGET_HAS_ext32s_i64 | ||
214 | - { INDEX_op_ext32s_i64, { R, R } }, | ||
215 | -#endif | ||
216 | -#if TCG_TARGET_HAS_ext8u_i64 | ||
217 | - { INDEX_op_ext8u_i64, { R, R } }, | ||
218 | -#endif | ||
219 | -#if TCG_TARGET_HAS_ext16u_i64 | ||
220 | - { INDEX_op_ext16u_i64, { R, R } }, | ||
221 | -#endif | ||
222 | -#if TCG_TARGET_HAS_ext32u_i64 | ||
223 | - { INDEX_op_ext32u_i64, { R, R } }, | ||
224 | -#endif | ||
225 | - { INDEX_op_ext_i32_i64, { R, R } }, | ||
226 | - { INDEX_op_extu_i32_i64, { R, R } }, | ||
227 | -#if TCG_TARGET_HAS_bswap16_i64 | ||
228 | - { INDEX_op_bswap16_i64, { R, R } }, | ||
229 | -#endif | ||
230 | -#if TCG_TARGET_HAS_bswap32_i64 | ||
231 | - { INDEX_op_bswap32_i64, { R, R } }, | ||
232 | -#endif | ||
233 | -#if TCG_TARGET_HAS_bswap64_i64 | ||
234 | - { INDEX_op_bswap64_i64, { R, R } }, | ||
235 | -#endif | ||
236 | -#if TCG_TARGET_HAS_not_i64 | ||
237 | - { INDEX_op_not_i64, { R, R } }, | ||
238 | -#endif | ||
239 | -#if TCG_TARGET_HAS_neg_i64 | ||
240 | - { INDEX_op_neg_i64, { R, R } }, | ||
241 | -#endif | ||
242 | -#endif /* TCG_TARGET_REG_BITS == 64 */ | ||
243 | - | ||
244 | - { INDEX_op_qemu_ld_i32, { R, L } }, | ||
245 | - { INDEX_op_qemu_ld_i64, { R64, L } }, | ||
246 | - | ||
247 | - { INDEX_op_qemu_st_i32, { R, S } }, | ||
248 | - { INDEX_op_qemu_st_i64, { R64, S } }, | ||
249 | - | ||
250 | -#if TCG_TARGET_HAS_ext8s_i32 | ||
251 | - { INDEX_op_ext8s_i32, { R, R } }, | ||
252 | -#endif | ||
253 | -#if TCG_TARGET_HAS_ext16s_i32 | ||
254 | - { INDEX_op_ext16s_i32, { R, R } }, | ||
255 | -#endif | ||
256 | -#if TCG_TARGET_HAS_ext8u_i32 | ||
257 | - { INDEX_op_ext8u_i32, { R, R } }, | ||
258 | -#endif | ||
259 | -#if TCG_TARGET_HAS_ext16u_i32 | ||
260 | - { INDEX_op_ext16u_i32, { R, R } }, | ||
261 | -#endif | ||
262 | - | ||
263 | -#if TCG_TARGET_HAS_bswap16_i32 | ||
264 | - { INDEX_op_bswap16_i32, { R, R } }, | ||
265 | -#endif | ||
266 | -#if TCG_TARGET_HAS_bswap32_i32 | ||
267 | - { INDEX_op_bswap32_i32, { R, R } }, | ||
268 | -#endif | ||
269 | - | ||
270 | - { INDEX_op_mb, { } }, | ||
271 | - { -1 }, | ||
272 | -}; | ||
273 | +/* Define all constraint sets. */ | ||
274 | +#include "../tcg-constr.c.inc" | ||
275 | |||
276 | static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
277 | { | 38 | { |
278 | - int i, n = ARRAY_SIZE(tcg_target_op_defs); | 39 | TCGReg tmp = dst == src ? TCG_REG_R0 : dst; |
279 | + switch (op) { | 40 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tb_start(TCGContext *s) |
280 | + case INDEX_op_ld8u_i32: | 41 | { |
281 | + case INDEX_op_ld8s_i32: | 42 | /* Load TCG_REG_TB. */ |
282 | + case INDEX_op_ld16u_i32: | 43 | if (USE_REG_TB) { |
283 | + case INDEX_op_ld16s_i32: | 44 | - /* bcl 20,31,$+4 (preferred form for getting nia) */ |
284 | + case INDEX_op_ld_i32: | 45 | - tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK); |
285 | + case INDEX_op_ld8u_i64: | 46 | - tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR); |
286 | + case INDEX_op_ld8s_i64: | 47 | + if (have_isa_3_00) { |
287 | + case INDEX_op_ld16u_i64: | 48 | + /* lnia REG_TB */ |
288 | + case INDEX_op_ld16s_i64: | 49 | + tcg_out_addpcis(s, TCG_REG_TB, 0); |
289 | + case INDEX_op_ld32u_i64: | 50 | + } else { |
290 | + case INDEX_op_ld32s_i64: | 51 | + /* bcl 20,31,$+4 (preferred form for getting nia) */ |
291 | + case INDEX_op_ld_i64: | 52 | + tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK); |
292 | + case INDEX_op_not_i32: | 53 | + tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR); |
293 | + case INDEX_op_not_i64: | 54 | + } |
294 | + case INDEX_op_neg_i32: | ||
295 | + case INDEX_op_neg_i64: | ||
296 | + case INDEX_op_ext8s_i32: | ||
297 | + case INDEX_op_ext8s_i64: | ||
298 | + case INDEX_op_ext16s_i32: | ||
299 | + case INDEX_op_ext16s_i64: | ||
300 | + case INDEX_op_ext8u_i32: | ||
301 | + case INDEX_op_ext8u_i64: | ||
302 | + case INDEX_op_ext16u_i32: | ||
303 | + case INDEX_op_ext16u_i64: | ||
304 | + case INDEX_op_ext32s_i64: | ||
305 | + case INDEX_op_ext32u_i64: | ||
306 | + case INDEX_op_ext_i32_i64: | ||
307 | + case INDEX_op_extu_i32_i64: | ||
308 | + case INDEX_op_bswap16_i32: | ||
309 | + case INDEX_op_bswap16_i64: | ||
310 | + case INDEX_op_bswap32_i32: | ||
311 | + case INDEX_op_bswap32_i64: | ||
312 | + case INDEX_op_bswap64_i64: | ||
313 | + return C_O1_I1(r, r); | ||
314 | |||
315 | - for (i = 0; i < n; ++i) { | ||
316 | - if (tcg_target_op_defs[i].op == op) { | ||
317 | - return &tcg_target_op_defs[i]; | ||
318 | - } | ||
319 | + case INDEX_op_st8_i32: | ||
320 | + case INDEX_op_st16_i32: | ||
321 | + case INDEX_op_st_i32: | ||
322 | + case INDEX_op_st8_i64: | ||
323 | + case INDEX_op_st16_i64: | ||
324 | + case INDEX_op_st32_i64: | ||
325 | + case INDEX_op_st_i64: | ||
326 | + return C_O0_I2(r, r); | ||
327 | + | ||
328 | + case INDEX_op_div_i32: | ||
329 | + case INDEX_op_div_i64: | ||
330 | + case INDEX_op_divu_i32: | ||
331 | + case INDEX_op_divu_i64: | ||
332 | + case INDEX_op_rem_i32: | ||
333 | + case INDEX_op_rem_i64: | ||
334 | + case INDEX_op_remu_i32: | ||
335 | + case INDEX_op_remu_i64: | ||
336 | + return C_O1_I2(r, r, r); | ||
337 | + | ||
338 | + case INDEX_op_add_i32: | ||
339 | + case INDEX_op_add_i64: | ||
340 | + case INDEX_op_sub_i32: | ||
341 | + case INDEX_op_sub_i64: | ||
342 | + case INDEX_op_mul_i32: | ||
343 | + case INDEX_op_mul_i64: | ||
344 | + case INDEX_op_and_i32: | ||
345 | + case INDEX_op_and_i64: | ||
346 | + case INDEX_op_andc_i32: | ||
347 | + case INDEX_op_andc_i64: | ||
348 | + case INDEX_op_eqv_i32: | ||
349 | + case INDEX_op_eqv_i64: | ||
350 | + case INDEX_op_nand_i32: | ||
351 | + case INDEX_op_nand_i64: | ||
352 | + case INDEX_op_nor_i32: | ||
353 | + case INDEX_op_nor_i64: | ||
354 | + case INDEX_op_or_i32: | ||
355 | + case INDEX_op_or_i64: | ||
356 | + case INDEX_op_orc_i32: | ||
357 | + case INDEX_op_orc_i64: | ||
358 | + case INDEX_op_xor_i32: | ||
359 | + case INDEX_op_xor_i64: | ||
360 | + case INDEX_op_shl_i32: | ||
361 | + case INDEX_op_shl_i64: | ||
362 | + case INDEX_op_shr_i32: | ||
363 | + case INDEX_op_shr_i64: | ||
364 | + case INDEX_op_sar_i32: | ||
365 | + case INDEX_op_sar_i64: | ||
366 | + case INDEX_op_rotl_i32: | ||
367 | + case INDEX_op_rotl_i64: | ||
368 | + case INDEX_op_rotr_i32: | ||
369 | + case INDEX_op_rotr_i64: | ||
370 | + /* TODO: Does R, RI, RI result in faster code than R, R, RI? */ | ||
371 | + return C_O1_I2(r, ri, ri); | ||
372 | + | ||
373 | + case INDEX_op_deposit_i32: | ||
374 | + case INDEX_op_deposit_i64: | ||
375 | + return C_O1_I2(r, 0, r); | ||
376 | + | ||
377 | + case INDEX_op_brcond_i32: | ||
378 | + case INDEX_op_brcond_i64: | ||
379 | + return C_O0_I2(r, ri); | ||
380 | + | ||
381 | + case INDEX_op_setcond_i32: | ||
382 | + case INDEX_op_setcond_i64: | ||
383 | + return C_O1_I2(r, r, ri); | ||
384 | + | ||
385 | +#if TCG_TARGET_REG_BITS == 32 | ||
386 | + /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */ | ||
387 | + case INDEX_op_add2_i32: | ||
388 | + case INDEX_op_sub2_i32: | ||
389 | + return C_O2_I4(r, r, r, r, r, r); | ||
390 | + case INDEX_op_brcond2_i32: | ||
391 | + return C_O0_I4(r, r, ri, ri); | ||
392 | + case INDEX_op_mulu2_i32: | ||
393 | + return C_O2_I2(r, r, r, r); | ||
394 | + case INDEX_op_setcond2_i32 | ||
395 | + return C_O1_I4(r, r, r, ri, ri); | ||
396 | +#endif | ||
397 | + | ||
398 | + case INDEX_op_qemu_ld_i32: | ||
399 | + return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS | ||
400 | + ? C_O1_I1(r, L) | ||
401 | + : C_O1_I2(r, L, L)); | ||
402 | + case INDEX_op_qemu_ld_i64: | ||
403 | + return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) | ||
404 | + : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L) | ||
405 | + : C_O2_I2(r, r, L, L)); | ||
406 | + case INDEX_op_qemu_st_i32: | ||
407 | + return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS | ||
408 | + ? C_O0_I2(r, S) | ||
409 | + : C_O0_I3(r, S, S)); | ||
410 | + case INDEX_op_qemu_st_i64: | ||
411 | + return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, S) | ||
412 | + : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(r, r, S) | ||
413 | + : C_O0_I4(r, r, S, S)); | ||
414 | + | ||
415 | + default: | ||
416 | + return NULL; | ||
417 | } | 55 | } |
418 | - return NULL; | ||
419 | } | 56 | } |
420 | 57 | ||
421 | static const int tcg_target_reg_alloc_order[] = { | ||
422 | -- | 58 | -- |
423 | 2.25.1 | 59 | 2.34.1 |
424 | |||
425 | diff view generated by jsdifflib |
1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | --- | 2 | --- |
3 | tcg/optimize.c | 108 ++++++++++++++++++++++--------------------------- | 3 | tcg/ppc/tcg-target.c.inc | 13 +++++++++++++ |
4 | 1 file changed, 49 insertions(+), 59 deletions(-) | 4 | 1 file changed, 13 insertions(+) |
5 | 5 | ||
6 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 6 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
7 | index XXXXXXX..XXXXXXX 100644 | 7 | index XXXXXXX..XXXXXXX 100644 |
8 | --- a/tcg/optimize.c | 8 | --- a/tcg/ppc/tcg-target.c.inc |
9 | +++ b/tcg/optimize.c | 9 | +++ b/tcg/ppc/tcg-target.c.inc |
10 | @@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2) | 10 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, |
11 | return ts_are_copies(arg_temp(arg1), arg_temp(arg2)); | 11 | return; |
12 | } | ||
13 | |||
14 | -static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, uint64_t val) | ||
15 | -{ | ||
16 | - const TCGOpDef *def; | ||
17 | - TCGOpcode new_op; | ||
18 | - uint64_t mask; | ||
19 | - TempOptInfo *di = arg_info(dst); | ||
20 | - | ||
21 | - def = &tcg_op_defs[op->opc]; | ||
22 | - if (def->flags & TCG_OPF_VECTOR) { | ||
23 | - new_op = INDEX_op_dupi_vec; | ||
24 | - } else if (def->flags & TCG_OPF_64BIT) { | ||
25 | - new_op = INDEX_op_movi_i64; | ||
26 | - } else { | ||
27 | - new_op = INDEX_op_movi_i32; | ||
28 | - } | ||
29 | - op->opc = new_op; | ||
30 | - /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ | ||
31 | - op->args[0] = dst; | ||
32 | - op->args[1] = val; | ||
33 | - | ||
34 | - reset_temp(dst); | ||
35 | - di->is_const = true; | ||
36 | - di->val = val; | ||
37 | - mask = val; | ||
38 | - if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) { | ||
39 | - /* High bits of the destination are now garbage. */ | ||
40 | - mask |= ~0xffffffffull; | ||
41 | - } | ||
42 | - di->mask = mask; | ||
43 | -} | ||
44 | - | ||
45 | static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | ||
46 | { | ||
47 | TCGTemp *dst_ts = arg_temp(dst); | ||
48 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | ||
49 | } | 12 | } |
50 | } | 13 | |
51 | 14 | + /* Load addresses within 2GB with 2 insns. */ | |
52 | +static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used, | 15 | + if (have_isa_3_00) { |
53 | + TCGOp *op, TCGArg dst, uint64_t val) | 16 | + intptr_t hi = tcg_pcrel_diff(s, (void *)arg) - 4; |
54 | +{ | 17 | + int16_t lo = hi; |
55 | + const TCGOpDef *def = &tcg_op_defs[op->opc]; | ||
56 | + TCGType type; | ||
57 | + TCGTemp *tv; | ||
58 | + | 18 | + |
59 | + if (def->flags & TCG_OPF_VECTOR) { | 19 | + hi -= lo; |
60 | + type = TCGOP_VECL(op) + TCG_TYPE_V64; | 20 | + if (hi == (int32_t)hi) { |
61 | + } else if (def->flags & TCG_OPF_64BIT) { | 21 | + tcg_out_addpcis(s, TCG_REG_TMP2, hi); |
62 | + type = TCG_TYPE_I64; | 22 | + tcg_out32(s, ADDI | TAI(ret, TCG_REG_TMP2, lo)); |
63 | + } else { | 23 | + return; |
64 | + type = TCG_TYPE_I32; | 24 | + } |
65 | + } | 25 | + } |
66 | + | 26 | + |
67 | + /* Convert movi to mov with constant temp. */ | 27 | /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */ |
68 | + tv = tcg_constant_internal(type, val); | 28 | if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) { |
69 | + init_ts_info(temps_used, tv); | 29 | tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff); |
70 | + tcg_opt_gen_mov(s, op, dst, temp_arg(tv)); | ||
71 | +} | ||
72 | + | ||
73 | static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) | ||
74 | { | ||
75 | uint64_t l64, h64; | ||
76 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
77 | nb_temps = s->nb_temps; | ||
78 | nb_globals = s->nb_globals; | ||
79 | |||
80 | - bitmap_zero(temps_used.l, nb_temps); | ||
81 | + memset(&temps_used, 0, sizeof(temps_used)); | ||
82 | for (i = 0; i < nb_temps; ++i) { | ||
83 | s->temps[i].state_ptr = NULL; | ||
84 | } | ||
85 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
86 | CASE_OP_32_64(rotr): | ||
87 | if (arg_is_const(op->args[1]) | ||
88 | && arg_info(op->args[1])->val == 0) { | ||
89 | - tcg_opt_gen_movi(s, op, op->args[0], 0); | ||
90 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); | ||
91 | continue; | ||
92 | } | ||
93 | break; | ||
94 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
95 | |||
96 | if (partmask == 0) { | ||
97 | tcg_debug_assert(nb_oargs == 1); | ||
98 | - tcg_opt_gen_movi(s, op, op->args[0], 0); | ||
99 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); | ||
100 | continue; | ||
101 | } | ||
102 | if (affected == 0) { | ||
103 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
104 | CASE_OP_32_64(mulsh): | ||
105 | if (arg_is_const(op->args[2]) | ||
106 | && arg_info(op->args[2])->val == 0) { | ||
107 | - tcg_opt_gen_movi(s, op, op->args[0], 0); | ||
108 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); | ||
109 | continue; | ||
110 | } | ||
111 | break; | ||
112 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
113 | CASE_OP_32_64_VEC(sub): | ||
114 | CASE_OP_32_64_VEC(xor): | ||
115 | if (args_are_copies(op->args[1], op->args[2])) { | ||
116 | - tcg_opt_gen_movi(s, op, op->args[0], 0); | ||
117 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); | ||
118 | continue; | ||
119 | } | ||
120 | break; | ||
121 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
122 | break; | ||
123 | CASE_OP_32_64(movi): | ||
124 | case INDEX_op_dupi_vec: | ||
125 | - tcg_opt_gen_movi(s, op, op->args[0], op->args[1]); | ||
126 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], op->args[1]); | ||
127 | break; | ||
128 | |||
129 | case INDEX_op_dup_vec: | ||
130 | if (arg_is_const(op->args[1])) { | ||
131 | tmp = arg_info(op->args[1])->val; | ||
132 | tmp = dup_const(TCGOP_VECE(op), tmp); | ||
133 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
134 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
135 | break; | ||
136 | } | ||
137 | goto do_default; | ||
138 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
139 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
140 | tmp = arg_info(op->args[1])->val; | ||
141 | if (tmp == arg_info(op->args[2])->val) { | ||
142 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
143 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
144 | break; | ||
145 | } | ||
146 | } else if (args_are_copies(op->args[1], op->args[2])) { | ||
147 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
148 | case INDEX_op_extrh_i64_i32: | ||
149 | if (arg_is_const(op->args[1])) { | ||
150 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0); | ||
151 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
152 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
153 | break; | ||
154 | } | ||
155 | goto do_default; | ||
156 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
157 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
158 | tmp = do_constant_folding(opc, arg_info(op->args[1])->val, | ||
159 | arg_info(op->args[2])->val); | ||
160 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
161 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
162 | break; | ||
163 | } | ||
164 | goto do_default; | ||
165 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
166 | TCGArg v = arg_info(op->args[1])->val; | ||
167 | if (v != 0) { | ||
168 | tmp = do_constant_folding(opc, v, 0); | ||
169 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
170 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
171 | } else { | ||
172 | tcg_opt_gen_mov(s, op, op->args[0], op->args[2]); | ||
173 | } | ||
174 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
175 | tmp = deposit64(arg_info(op->args[1])->val, | ||
176 | op->args[3], op->args[4], | ||
177 | arg_info(op->args[2])->val); | ||
178 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
179 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
180 | break; | ||
181 | } | ||
182 | goto do_default; | ||
183 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
184 | if (arg_is_const(op->args[1])) { | ||
185 | tmp = extract64(arg_info(op->args[1])->val, | ||
186 | op->args[2], op->args[3]); | ||
187 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
188 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
189 | break; | ||
190 | } | ||
191 | goto do_default; | ||
192 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
193 | if (arg_is_const(op->args[1])) { | ||
194 | tmp = sextract64(arg_info(op->args[1])->val, | ||
195 | op->args[2], op->args[3]); | ||
196 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
197 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
198 | break; | ||
199 | } | ||
200 | goto do_default; | ||
201 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
202 | tmp = (int32_t)(((uint32_t)v1 >> shr) | | ||
203 | ((uint32_t)v2 << (32 - shr))); | ||
204 | } | ||
205 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
206 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
207 | break; | ||
208 | } | ||
209 | goto do_default; | ||
210 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
211 | tmp = do_constant_folding_cond(opc, op->args[1], | ||
212 | op->args[2], op->args[3]); | ||
213 | if (tmp != 2) { | ||
214 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
215 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
216 | break; | ||
217 | } | ||
218 | goto do_default; | ||
219 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
220 | op->args[1], op->args[2]); | ||
221 | if (tmp != 2) { | ||
222 | if (tmp) { | ||
223 | - bitmap_zero(temps_used.l, nb_temps); | ||
224 | + memset(&temps_used, 0, sizeof(temps_used)); | ||
225 | op->opc = INDEX_op_br; | ||
226 | op->args[0] = op->args[3]; | ||
227 | } else { | ||
228 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
229 | uint64_t a = ((uint64_t)ah << 32) | al; | ||
230 | uint64_t b = ((uint64_t)bh << 32) | bl; | ||
231 | TCGArg rl, rh; | ||
232 | - TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32); | ||
233 | + TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32); | ||
234 | |||
235 | if (opc == INDEX_op_add2_i32) { | ||
236 | a += b; | ||
237 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
238 | |||
239 | rl = op->args[0]; | ||
240 | rh = op->args[1]; | ||
241 | - tcg_opt_gen_movi(s, op, rl, (int32_t)a); | ||
242 | - tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32)); | ||
243 | + tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a); | ||
244 | + tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32)); | ||
245 | break; | ||
246 | } | ||
247 | goto do_default; | ||
248 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
249 | uint32_t b = arg_info(op->args[3])->val; | ||
250 | uint64_t r = (uint64_t)a * b; | ||
251 | TCGArg rl, rh; | ||
252 | - TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32); | ||
253 | + TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32); | ||
254 | |||
255 | rl = op->args[0]; | ||
256 | rh = op->args[1]; | ||
257 | - tcg_opt_gen_movi(s, op, rl, (int32_t)r); | ||
258 | - tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32)); | ||
259 | + tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r); | ||
260 | + tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32)); | ||
261 | break; | ||
262 | } | ||
263 | goto do_default; | ||
264 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
265 | if (tmp != 2) { | ||
266 | if (tmp) { | ||
267 | do_brcond_true: | ||
268 | - bitmap_zero(temps_used.l, nb_temps); | ||
269 | + memset(&temps_used, 0, sizeof(temps_used)); | ||
270 | op->opc = INDEX_op_br; | ||
271 | op->args[0] = op->args[5]; | ||
272 | } else { | ||
273 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
274 | /* Simplify LT/GE comparisons vs zero to a single compare | ||
275 | vs the high word of the input. */ | ||
276 | do_brcond_high: | ||
277 | - bitmap_zero(temps_used.l, nb_temps); | ||
278 | + memset(&temps_used, 0, sizeof(temps_used)); | ||
279 | op->opc = INDEX_op_brcond_i32; | ||
280 | op->args[0] = op->args[1]; | ||
281 | op->args[1] = op->args[3]; | ||
282 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
283 | goto do_default; | ||
284 | } | ||
285 | do_brcond_low: | ||
286 | - bitmap_zero(temps_used.l, nb_temps); | ||
287 | + memset(&temps_used, 0, sizeof(temps_used)); | ||
288 | op->opc = INDEX_op_brcond_i32; | ||
289 | op->args[1] = op->args[2]; | ||
290 | op->args[2] = op->args[4]; | ||
291 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
292 | op->args[5]); | ||
293 | if (tmp != 2) { | ||
294 | do_setcond_const: | ||
295 | - tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
296 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | ||
297 | } else if ((op->args[5] == TCG_COND_LT | ||
298 | || op->args[5] == TCG_COND_GE) | ||
299 | && arg_is_const(op->args[3]) | ||
300 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
301 | block, otherwise we only trash the output args. "mask" is | ||
302 | the non-zero bits mask for the first output arg. */ | ||
303 | if (def->flags & TCG_OPF_BB_END) { | ||
304 | - bitmap_zero(temps_used.l, nb_temps); | ||
305 | + memset(&temps_used, 0, sizeof(temps_used)); | ||
306 | } else { | ||
307 | do_reset_output: | ||
308 | for (i = 0; i < nb_oargs; i++) { | ||
309 | -- | 30 | -- |
310 | 2.25.1 | 31 | 2.34.1 |
311 | |||
312 | diff view generated by jsdifflib |
1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | --- | 2 | --- |
3 | tcg/sparc/tcg-target-constr.h | 27 +++++++++++++ | 3 | tcg/ppc/tcg-target.c.inc | 12 ++++++++++++ |
4 | tcg/sparc/tcg-target.c.inc | 74 ++++++++++++----------------------- | 4 | 1 file changed, 12 insertions(+) |
5 | 2 files changed, 51 insertions(+), 50 deletions(-) | ||
6 | create mode 100644 tcg/sparc/tcg-target-constr.h | ||
7 | 5 | ||
8 | diff --git a/tcg/sparc/tcg-target-constr.h b/tcg/sparc/tcg-target-constr.h | 6 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
9 | new file mode 100644 | ||
10 | index XXXXXXX..XXXXXXX | ||
11 | --- /dev/null | ||
12 | +++ b/tcg/sparc/tcg-target-constr.h | ||
13 | @@ -XXX,XX +XXX,XX @@ | ||
14 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
15 | +/* | ||
16 | + * Sparc target-specific operand constaints. | ||
17 | + * Copyright (c) 2020 Linaro | ||
18 | + */ | ||
19 | + | ||
20 | +C_O0_I1(r) | ||
21 | +C_O0_I2(rZ, r) | ||
22 | +C_O0_I2(RZ, r) | ||
23 | +C_O0_I2(rZ, rJ) | ||
24 | +C_O0_I2(RZ, RJ) | ||
25 | +C_O0_I2(sZ, A) | ||
26 | +C_O0_I2(SZ, A) | ||
27 | +C_O1_I1(r, A) | ||
28 | +C_O1_I1(R, A) | ||
29 | +C_O1_I1(r, r) | ||
30 | +C_O1_I1(r, R) | ||
31 | +C_O1_I1(R, r) | ||
32 | +C_O1_I1(R, R) | ||
33 | +C_O1_I2(R, R, R) | ||
34 | +C_O1_I2(r, rZ, rJ) | ||
35 | +C_O1_I2(R, RZ, RJ) | ||
36 | +C_O1_I4(r, rZ, rJ, rI, 0) | ||
37 | +C_O1_I4(R, RZ, RJ, RI, 0) | ||
38 | +C_O2_I2(r, r, rZ, rJ) | ||
39 | +C_O2_I4(R, R, RZ, RZ, RJ, RI) | ||
40 | +C_O2_I4(r, r, rZ, rZ, rJ, rJ) | ||
41 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc | ||
42 | index XXXXXXX..XXXXXXX 100644 | 7 | index XXXXXXX..XXXXXXX 100644 |
43 | --- a/tcg/sparc/tcg-target.c.inc | 8 | --- a/tcg/ppc/tcg-target.c.inc |
44 | +++ b/tcg/sparc/tcg-target.c.inc | 9 | +++ b/tcg/ppc/tcg-target.c.inc |
45 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, | 10 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, |
11 | tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0)); | ||
12 | return; | ||
46 | } | 13 | } |
47 | } | 14 | + if (have_isa_3_00) { |
48 | 15 | + tcg_out_addpcis(s, TCG_REG_TMP2, 0); | |
49 | +/* Define all constraint sets. */ | 16 | + new_pool_label(s, arg, R_PPC_REL14, s->code_ptr, 0); |
50 | +#include "../tcg-constr.c.inc" | 17 | + tcg_out32(s, LD | TAI(ret, TCG_REG_TMP2, 0)); |
51 | + | 18 | + return; |
52 | static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | 19 | + } |
53 | { | 20 | |
54 | - static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; | 21 | tmp = arg >> 31 >> 1; |
55 | - static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; | 22 | tcg_out_movi(s, TCG_TYPE_I32, ret, tmp); |
56 | - static const TCGTargetOpDef R_r = { .args_ct_str = { "R", "r" } }; | 23 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, |
57 | - static const TCGTargetOpDef r_R = { .args_ct_str = { "r", "R" } }; | 24 | if (USE_REG_TB) { |
58 | - static const TCGTargetOpDef R_R = { .args_ct_str = { "R", "R" } }; | 25 | rel = R_PPC_ADDR16; |
59 | - static const TCGTargetOpDef r_A = { .args_ct_str = { "r", "A" } }; | 26 | add = ppc_tbrel_diff(s, NULL); |
60 | - static const TCGTargetOpDef R_A = { .args_ct_str = { "R", "A" } }; | 27 | + } else if (have_isa_3_00) { |
61 | - static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; | 28 | + tcg_out_addpcis(s, TCG_REG_TMP1, 0); |
62 | - static const TCGTargetOpDef RZ_r = { .args_ct_str = { "RZ", "r" } }; | 29 | + rel = R_PPC_REL14; |
63 | - static const TCGTargetOpDef sZ_A = { .args_ct_str = { "sZ", "A" } }; | 30 | + add = 0; |
64 | - static const TCGTargetOpDef SZ_A = { .args_ct_str = { "SZ", "A" } }; | 31 | } else { |
65 | - static const TCGTargetOpDef rZ_rJ = { .args_ct_str = { "rZ", "rJ" } }; | 32 | rel = R_PPC_ADDR32; |
66 | - static const TCGTargetOpDef RZ_RJ = { .args_ct_str = { "RZ", "RJ" } }; | 33 | add = 0; |
67 | - static const TCGTargetOpDef R_R_R = { .args_ct_str = { "R", "R", "R" } }; | 34 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, |
68 | - static const TCGTargetOpDef r_rZ_rJ | 35 | if (USE_REG_TB) { |
69 | - = { .args_ct_str = { "r", "rZ", "rJ" } }; | 36 | tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0)); |
70 | - static const TCGTargetOpDef R_RZ_RJ | 37 | load_insn |= RA(TCG_REG_TB); |
71 | - = { .args_ct_str = { "R", "RZ", "RJ" } }; | 38 | + } else if (have_isa_3_00) { |
72 | - static const TCGTargetOpDef r_r_rZ_rJ | 39 | + tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0)); |
73 | - = { .args_ct_str = { "r", "r", "rZ", "rJ" } }; | 40 | } else { |
74 | - static const TCGTargetOpDef movc_32 | 41 | tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0)); |
75 | - = { .args_ct_str = { "r", "rZ", "rJ", "rI", "0" } }; | 42 | tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0)); |
76 | - static const TCGTargetOpDef movc_64 | ||
77 | - = { .args_ct_str = { "R", "RZ", "RJ", "RI", "0" } }; | ||
78 | - static const TCGTargetOpDef add2_32 | ||
79 | - = { .args_ct_str = { "r", "r", "rZ", "rZ", "rJ", "rJ" } }; | ||
80 | - static const TCGTargetOpDef add2_64 | ||
81 | - = { .args_ct_str = { "R", "R", "RZ", "RZ", "RJ", "RI" } }; | ||
82 | - | ||
83 | switch (op) { | ||
84 | case INDEX_op_goto_ptr: | ||
85 | - return &r; | ||
86 | + return C_O0_I1(r); | ||
87 | |||
88 | case INDEX_op_ld8u_i32: | ||
89 | case INDEX_op_ld8s_i32: | ||
90 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
91 | case INDEX_op_ld_i32: | ||
92 | case INDEX_op_neg_i32: | ||
93 | case INDEX_op_not_i32: | ||
94 | - return &r_r; | ||
95 | + return C_O1_I1(r, r); | ||
96 | |||
97 | case INDEX_op_st8_i32: | ||
98 | case INDEX_op_st16_i32: | ||
99 | case INDEX_op_st_i32: | ||
100 | - return &rZ_r; | ||
101 | + return C_O0_I2(rZ, r); | ||
102 | |||
103 | case INDEX_op_add_i32: | ||
104 | case INDEX_op_mul_i32: | ||
105 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
106 | case INDEX_op_shr_i32: | ||
107 | case INDEX_op_sar_i32: | ||
108 | case INDEX_op_setcond_i32: | ||
109 | - return &r_rZ_rJ; | ||
110 | + return C_O1_I2(r, rZ, rJ); | ||
111 | |||
112 | case INDEX_op_brcond_i32: | ||
113 | - return &rZ_rJ; | ||
114 | + return C_O0_I2(rZ, rJ); | ||
115 | case INDEX_op_movcond_i32: | ||
116 | - return &movc_32; | ||
117 | + return C_O1_I4(r, rZ, rJ, rI, 0); | ||
118 | case INDEX_op_add2_i32: | ||
119 | case INDEX_op_sub2_i32: | ||
120 | - return &add2_32; | ||
121 | + return C_O2_I4(r, r, rZ, rZ, rJ, rJ); | ||
122 | case INDEX_op_mulu2_i32: | ||
123 | case INDEX_op_muls2_i32: | ||
124 | - return &r_r_rZ_rJ; | ||
125 | + return C_O2_I2(r, r, rZ, rJ); | ||
126 | |||
127 | case INDEX_op_ld8u_i64: | ||
128 | case INDEX_op_ld8s_i64: | ||
129 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
130 | case INDEX_op_ld_i64: | ||
131 | case INDEX_op_ext_i32_i64: | ||
132 | case INDEX_op_extu_i32_i64: | ||
133 | - return &R_r; | ||
134 | + return C_O1_I1(R, r); | ||
135 | |||
136 | case INDEX_op_st8_i64: | ||
137 | case INDEX_op_st16_i64: | ||
138 | case INDEX_op_st32_i64: | ||
139 | case INDEX_op_st_i64: | ||
140 | - return &RZ_r; | ||
141 | + return C_O0_I2(RZ, r); | ||
142 | |||
143 | case INDEX_op_add_i64: | ||
144 | case INDEX_op_mul_i64: | ||
145 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
146 | case INDEX_op_shr_i64: | ||
147 | case INDEX_op_sar_i64: | ||
148 | case INDEX_op_setcond_i64: | ||
149 | - return &R_RZ_RJ; | ||
150 | + return C_O1_I2(R, RZ, RJ); | ||
151 | |||
152 | case INDEX_op_neg_i64: | ||
153 | case INDEX_op_not_i64: | ||
154 | case INDEX_op_ext32s_i64: | ||
155 | case INDEX_op_ext32u_i64: | ||
156 | - return &R_R; | ||
157 | + return C_O1_I1(R, R); | ||
158 | |||
159 | case INDEX_op_extrl_i64_i32: | ||
160 | case INDEX_op_extrh_i64_i32: | ||
161 | - return &r_R; | ||
162 | + return C_O1_I1(r, R); | ||
163 | |||
164 | case INDEX_op_brcond_i64: | ||
165 | - return &RZ_RJ; | ||
166 | + return C_O0_I2(RZ, RJ); | ||
167 | case INDEX_op_movcond_i64: | ||
168 | - return &movc_64; | ||
169 | + return C_O1_I4(R, RZ, RJ, RI, 0); | ||
170 | case INDEX_op_add2_i64: | ||
171 | case INDEX_op_sub2_i64: | ||
172 | - return &add2_64; | ||
173 | + return C_O2_I4(R, R, RZ, RZ, RJ, RI); | ||
174 | case INDEX_op_muluh_i64: | ||
175 | - return &R_R_R; | ||
176 | + return C_O1_I2(R, R, R); | ||
177 | |||
178 | case INDEX_op_qemu_ld_i32: | ||
179 | - return &r_A; | ||
180 | + return C_O1_I1(r, A); | ||
181 | case INDEX_op_qemu_ld_i64: | ||
182 | - return &R_A; | ||
183 | + return C_O1_I1(R, A); | ||
184 | case INDEX_op_qemu_st_i32: | ||
185 | - return &sZ_A; | ||
186 | + return C_O0_I2(sZ, A); | ||
187 | case INDEX_op_qemu_st_i64: | ||
188 | - return &SZ_A; | ||
189 | + return C_O0_I2(SZ, A); | ||
190 | |||
191 | default: | ||
192 | return NULL; | ||
193 | -- | 43 | -- |
194 | 2.25.1 | 44 | 2.34.1 |
195 | |||
1 | This uses an existing hole in the TCGArgConstraint structure | ||
---|---|---|---|
2 | and will be convenient for keeping the data in one place. | ||
3 | |||
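
The hole in question is the padding byte the compiler inserts between alias_index and the aligned regs member, so the new uint8_t costs nothing. A quick layout probe (a sketch only: TCGRegSet is stood in by uint32_t, and the exact padding is ABI-dependent):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TCGRegSet;  /* stand-in; real size varies by target */

    typedef struct {
        uint16_t ct;
        uint8_t alias_index;
        uint8_t sort_index;      /* fills the former padding byte */
        TCGRegSet regs;
    } TCGArgConstraint;

    int main(void)
    {
        printf("sizeof=%zu offsetof(regs)=%zu\n",
               sizeof(TCGArgConstraint), offsetof(TCGArgConstraint, regs));
        /* With or without sort_index this prints 8 and 4 here:
           the byte was already being spent on padding. */
        return 0;
    }
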
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | --- | 2 | --- |
6 | include/tcg/tcg.h | 2 +- | 3 | tcg/ppc/tcg-target.c.inc | 11 +++++++++-- |
7 | tcg/tcg.c | 35 +++++++++++++++++------------------ | 4 | 1 file changed, 9 insertions(+), 2 deletions(-) |
8 | 2 files changed, 18 insertions(+), 19 deletions(-) | ||
9 | 5 | ||
10 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | 6 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
11 | index XXXXXXX..XXXXXXX 100644 | 7 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/include/tcg/tcg.h | 8 | --- a/tcg/ppc/tcg-target.c.inc |
13 | +++ b/include/tcg/tcg.h | 9 | +++ b/tcg/ppc/tcg-target.c.inc |
14 | @@ -XXX,XX +XXX,XX @@ void tcg_dump_op_count(void); | 10 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg) |
15 | typedef struct TCGArgConstraint { | 11 | static void tcg_out_goto_tb(TCGContext *s, int which) |
16 | uint16_t ct; | 12 | { |
17 | uint8_t alias_index; | 13 | uintptr_t ptr = get_jmp_target_addr(s, which); |
18 | + uint8_t sort_index; | 14 | + int16_t lo; |
19 | TCGRegSet regs; | 15 | |
20 | } TCGArgConstraint; | 16 | /* Direct branch will be patched by tb_target_set_jmp_target. */ |
21 | 17 | set_jmp_insn_offset(s, which); | |
22 | @@ -XXX,XX +XXX,XX @@ typedef struct TCGOpDef { | 18 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which) |
23 | uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args; | 19 | if (USE_REG_TB) { |
24 | uint8_t flags; | 20 | ptrdiff_t offset = ppc_tbrel_diff(s, (void *)ptr); |
25 | TCGArgConstraint *args_ct; | 21 | tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset); |
26 | - int *sorted_args; | 22 | + } else if (have_isa_3_00) { |
27 | #if defined(CONFIG_DEBUG_TCG) | 23 | + ptrdiff_t offset = tcg_pcrel_diff(s, (void *)ptr) - 4; |
28 | int used; | 24 | + lo = offset; |
29 | #endif | 25 | + tcg_out_addpcis(s, TCG_REG_TMP1, offset - lo); |
30 | diff --git a/tcg/tcg.c b/tcg/tcg.c | 26 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, lo); |
31 | index XXXXXXX..XXXXXXX 100644 | 27 | } else { |
32 | --- a/tcg/tcg.c | 28 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr); |
33 | +++ b/tcg/tcg.c | 29 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr); |
34 | @@ -XXX,XX +XXX,XX @@ void tcg_context_init(TCGContext *s) | 30 | + lo = ptr; |
35 | int op, total_args, n, i; | 31 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - lo); |
36 | TCGOpDef *def; | 32 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, lo); |
37 | TCGArgConstraint *args_ct; | ||
38 | - int *sorted_args; | ||
39 | TCGTemp *ts; | ||
40 | |||
41 | memset(s, 0, sizeof(*s)); | ||
42 | @@ -XXX,XX +XXX,XX @@ void tcg_context_init(TCGContext *s) | ||
43 | } | 33 | } |
44 | 34 | ||
45 | args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args); | 35 | tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR); |
46 | - sorted_args = g_malloc(sizeof(int) * total_args); | ||
47 | |||
48 | for(op = 0; op < NB_OPS; op++) { | ||
49 | def = &tcg_op_defs[op]; | ||
50 | def->args_ct = args_ct; | ||
51 | - def->sorted_args = sorted_args; | ||
52 | n = def->nb_iargs + def->nb_oargs; | ||
53 | - sorted_args += n; | ||
54 | args_ct += n; | ||
55 | } | ||
56 | |||
57 | @@ -XXX,XX +XXX,XX @@ static int get_constraint_priority(const TCGOpDef *def, int k) | ||
58 | /* sort from highest priority to lowest */ | ||
59 | static void sort_constraints(TCGOpDef *def, int start, int n) | ||
60 | { | ||
61 | - int i, j, p1, p2, tmp; | ||
62 | + int i, j; | ||
63 | + TCGArgConstraint *a = def->args_ct; | ||
64 | |||
65 | - for(i = 0; i < n; i++) | ||
66 | - def->sorted_args[start + i] = start + i; | ||
67 | - if (n <= 1) | ||
68 | + for (i = 0; i < n; i++) { | ||
69 | + a[start + i].sort_index = start + i; | ||
70 | + } | ||
71 | + if (n <= 1) { | ||
72 | return; | ||
73 | - for(i = 0; i < n - 1; i++) { | ||
74 | - for(j = i + 1; j < n; j++) { | ||
75 | - p1 = get_constraint_priority(def, def->sorted_args[start + i]); | ||
76 | - p2 = get_constraint_priority(def, def->sorted_args[start + j]); | ||
77 | + } | ||
78 | + for (i = 0; i < n - 1; i++) { | ||
79 | + for (j = i + 1; j < n; j++) { | ||
80 | + int p1 = get_constraint_priority(def, a[start + i].sort_index); | ||
81 | + int p2 = get_constraint_priority(def, a[start + j].sort_index); | ||
82 | if (p1 < p2) { | ||
83 | - tmp = def->sorted_args[start + i]; | ||
84 | - def->sorted_args[start + i] = def->sorted_args[start + j]; | ||
85 | - def->sorted_args[start + j] = tmp; | ||
86 | + int tmp = a[start + i].sort_index; | ||
87 | + a[start + i].sort_index = a[start + j].sort_index; | ||
88 | + a[start + j].sort_index = tmp; | ||
89 | } | ||
90 | } | ||
91 | } | ||
92 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
93 | for (k = 0; k < nb_iargs; k++) { | ||
94 | TCGRegSet i_preferred_regs, o_preferred_regs; | ||
95 | |||
96 | - i = def->sorted_args[nb_oargs + k]; | ||
97 | + i = def->args_ct[nb_oargs + k].sort_index; | ||
98 | arg = op->args[i]; | ||
99 | arg_ct = &def->args_ct[i]; | ||
100 | ts = arg_temp(arg); | ||
101 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
102 | int k2, i2; | ||
103 | reg = ts->reg; | ||
104 | for (k2 = 0 ; k2 < k ; k2++) { | ||
105 | - i2 = def->sorted_args[nb_oargs + k2]; | ||
106 | + i2 = def->args_ct[nb_oargs + k2].sort_index; | ||
107 | if ((def->args_ct[i2].ct & TCG_CT_IALIAS) && | ||
108 | reg == new_args[i2]) { | ||
109 | goto allocate_in_reg; | ||
110 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
111 | |||
112 | /* satisfy the output constraints */ | ||
113 | for(k = 0; k < nb_oargs; k++) { | ||
114 | - i = def->sorted_args[k]; | ||
115 | + i = def->args_ct[k].sort_index; | ||
116 | arg = op->args[i]; | ||
117 | arg_ct = &def->args_ct[i]; | ||
118 | ts = arg_temp(arg); | ||
119 | -- | 36 | -- |
120 | 2.25.1 | 37 | 2.34.1 |
121 | |||
1 | These will hold a single constant for the duration of the TB. | 1 | PADDI can load 34-bit immediates and 34-bit pc-relative addresses. |
---|---|---|---|
2 | They are hashed, so that each value has one temp across the TB. | ||
3 | 2 | ||
4 | Not used yet, this is all infrastructure. | 3 | Reviewed-by: Jordan Niethe <jniethe5@gmail.com> |
5 | |||
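
A cut-down sketch of the interning pattern introduced below: the hash table is keyed by a pointer to the temp's own val field, so the key stays valid for exactly as long as the temp does. Temp and intern_const are illustrative stand-ins, not the real TCGTemp and tcg_constant_internal; this builds against glib-2.0:

    #include <glib.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        int64_t val;
        /* ... other TCGTemp fields elided ... */
    } Temp;

    static GHashTable *consts;

    static Temp *intern_const(int64_t val)
    {
        Temp *t;

        if (!consts) {
            consts = g_hash_table_new(g_int64_hash, g_int64_equal);
        }
        t = g_hash_table_lookup(consts, &val);
        if (!t) {
            t = g_new0(Temp, 1);
            t->val = val;
            /* Key points at t->val, which lives as long as the temp. */
            g_hash_table_insert(consts, &t->val, t);
        }
        return t;
    }

    int main(void)
    {
        Temp *a = intern_const(42), *b = intern_const(42);
        printf("same temp: %d\n", a == b);  /* prints 1 */
        return 0;
    }
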
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 5 | --- |
8 | include/tcg/tcg.h | 24 +++++- | 6 | tcg/ppc/tcg-target.c.inc | 51 ++++++++++++++++++++++++++++++++++++++++ |
9 | tcg/optimize.c | 13 +++- | 7 | 1 file changed, 51 insertions(+) |
10 | tcg/tcg.c | 195 ++++++++++++++++++++++++++++++++++++---------- | ||
11 | 3 files changed, 188 insertions(+), 44 deletions(-) | ||
12 | 8 | ||
13 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | 9 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
14 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/include/tcg/tcg.h | 11 | --- a/tcg/ppc/tcg-target.c.inc |
16 | +++ b/include/tcg/tcg.h | 12 | +++ b/tcg/ppc/tcg-target.c.inc |
17 | @@ -XXX,XX +XXX,XX @@ typedef enum TCGTempKind { | 13 | @@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, |
18 | TEMP_GLOBAL, | 14 | return true; |
19 | /* Temp is in a fixed register. */ | ||
20 | TEMP_FIXED, | ||
21 | + /* Temp is a fixed constant. */ | ||
22 | + TEMP_CONST, | ||
23 | } TCGTempKind; | ||
24 | |||
25 | typedef struct TCGTemp { | ||
26 | @@ -XXX,XX +XXX,XX @@ struct TCGContext { | ||
27 | QSIMPLEQ_HEAD(, TCGOp) plugin_ops; | ||
28 | #endif | ||
29 | |||
30 | + GHashTable *const_table[TCG_TYPE_COUNT]; | ||
31 | TCGTempSet free_temps[TCG_TYPE_COUNT * 2]; | ||
32 | TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ | ||
33 | |||
34 | @@ -XXX,XX +XXX,XX @@ struct TCGContext { | ||
35 | |||
36 | static inline bool temp_readonly(TCGTemp *ts) | ||
37 | { | ||
38 | - return ts->kind == TEMP_FIXED; | ||
39 | + return ts->kind >= TEMP_FIXED; | ||
40 | } | 15 | } |
41 | 16 | ||
42 | extern TCGContext tcg_init_ctx; | 17 | +/* Ensure that the prefixed instruction does not cross a 64-byte boundary. */ |
43 | @@ -XXX,XX +XXX,XX @@ TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc); | 18 | +static bool tcg_out_need_prefix_align(TCGContext *s) |
44 | |||
45 | void tcg_optimize(TCGContext *s); | ||
46 | |||
47 | +/* Allocate a new temporary and initialize it with a constant. */ | ||
48 | TCGv_i32 tcg_const_i32(int32_t val); | ||
49 | TCGv_i64 tcg_const_i64(int64_t val); | ||
50 | TCGv_i32 tcg_const_local_i32(int32_t val); | ||
51 | @@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_const_ones_vec(TCGType); | ||
52 | TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec); | ||
53 | TCGv_vec tcg_const_ones_vec_matching(TCGv_vec); | ||
54 | |||
55 | +/* | ||
56 | + * Locate or create a read-only temporary that is a constant. | ||
57 | + * This kind of temporary need not and should not be freed. | ||
58 | + */ | ||
59 | +TCGTemp *tcg_constant_internal(TCGType type, int64_t val); | ||
60 | + | ||
61 | +static inline TCGv_i32 tcg_constant_i32(int32_t val) | ||
62 | +{ | 19 | +{ |
63 | + return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val)); | 20 | + return ((uintptr_t)s->code_ptr & 0x3f) == 0x3c; |
64 | +} | 21 | +} |
65 | + | 22 | + |
66 | +static inline TCGv_i64 tcg_constant_i64(int64_t val) | 23 | +static void tcg_out_prefix_align(TCGContext *s) |
67 | +{ | 24 | +{ |
68 | + return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val)); | 25 | + if (tcg_out_need_prefix_align(s)) { |
26 | + tcg_out32(s, NOP); | ||
27 | + } | ||
69 | +} | 28 | +} |
70 | + | 29 | + |
71 | +TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val); | 30 | +static ptrdiff_t tcg_pcrel_diff_for_prefix(TCGContext *s, const void *target) |
72 | + | ||
73 | #if UINTPTR_MAX == UINT32_MAX | ||
74 | # define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x))) | ||
75 | # define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x))) | ||
76 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
77 | index XXXXXXX..XXXXXXX 100644 | ||
78 | --- a/tcg/optimize.c | ||
79 | +++ b/tcg/optimize.c | ||
80 | @@ -XXX,XX +XXX,XX @@ static void init_ts_info(TempOptInfo *infos, | ||
81 | ts->state_ptr = ti; | ||
82 | ti->next_copy = ts; | ||
83 | ti->prev_copy = ts; | ||
84 | - ti->is_const = false; | ||
85 | - ti->mask = -1; | ||
86 | + if (ts->kind == TEMP_CONST) { | ||
87 | + ti->is_const = true; | ||
88 | + ti->val = ti->mask = ts->val; | ||
89 | + if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) { | ||
90 | + /* High bits of a 32-bit quantity are garbage. */ | ||
91 | + ti->mask |= ~0xffffffffull; | ||
92 | + } | ||
93 | + } else { | ||
94 | + ti->is_const = false; | ||
95 | + ti->mask = -1; | ||
96 | + } | ||
97 | set_bit(idx, temps_used->l); | ||
98 | } | ||
99 | } | ||
100 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
101 | index XXXXXXX..XXXXXXX 100644 | ||
102 | --- a/tcg/tcg.c | ||
103 | +++ b/tcg/tcg.c | ||
104 | @@ -XXX,XX +XXX,XX @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base, | ||
105 | bigendian = 1; | ||
106 | #endif | ||
107 | |||
108 | - if (base_ts->kind != TEMP_FIXED) { | ||
109 | + switch (base_ts->kind) { | ||
110 | + case TEMP_FIXED: | ||
111 | + break; | ||
112 | + case TEMP_GLOBAL: | ||
113 | /* We do not support double-indirect registers. */ | ||
114 | tcg_debug_assert(!base_ts->indirect_reg); | ||
115 | base_ts->indirect_base = 1; | ||
116 | s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64 | ||
117 | ? 2 : 1); | ||
118 | indirect_reg = 1; | ||
119 | + break; | ||
120 | + default: | ||
121 | + g_assert_not_reached(); | ||
122 | } | ||
123 | |||
124 | if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) { | ||
125 | @@ -XXX,XX +XXX,XX @@ void tcg_temp_free_internal(TCGTemp *ts) | ||
126 | TCGContext *s = tcg_ctx; | ||
127 | int k, idx; | ||
128 | |||
129 | + /* In order to simplify users of tcg_constant_*, silently ignore free. */ | ||
130 | + if (ts->kind == TEMP_CONST) { | ||
131 | + return; | ||
132 | + } | ||
133 | + | ||
134 | #if defined(CONFIG_DEBUG_TCG) | ||
135 | s->temps_in_use--; | ||
136 | if (s->temps_in_use < 0) { | ||
137 | @@ -XXX,XX +XXX,XX @@ void tcg_temp_free_internal(TCGTemp *ts) | ||
138 | set_bit(idx, s->free_temps[k].l); | ||
139 | } | ||
140 | |||
141 | +TCGTemp *tcg_constant_internal(TCGType type, int64_t val) | ||
142 | +{ | 31 | +{ |
143 | + TCGContext *s = tcg_ctx; | 32 | + return tcg_pcrel_diff(s, target) - (tcg_out_need_prefix_align(s) ? 4 : 0); |
144 | + GHashTable *h = s->const_table[type]; | ||
145 | + TCGTemp *ts; | ||
146 | + | ||
147 | + if (h == NULL) { | ||
148 | + h = g_hash_table_new(g_int64_hash, g_int64_equal); | ||
149 | + s->const_table[type] = h; | ||
150 | + } | ||
151 | + | ||
152 | + ts = g_hash_table_lookup(h, &val); | ||
153 | + if (ts == NULL) { | ||
154 | + ts = tcg_temp_alloc(s); | ||
155 | + | ||
156 | + if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) { | ||
157 | + TCGTemp *ts2 = tcg_temp_alloc(s); | ||
158 | + | ||
159 | + ts->base_type = TCG_TYPE_I64; | ||
160 | + ts->type = TCG_TYPE_I32; | ||
161 | + ts->kind = TEMP_CONST; | ||
162 | + ts->temp_allocated = 1; | ||
163 | + /* | ||
164 | + * Retain the full value of the 64-bit constant in the low | ||
165 | + * part, so that the hash table works. Actual uses will | ||
166 | + * truncate the value to the low part. | ||
167 | + */ | ||
168 | + ts->val = val; | ||
169 | + | ||
170 | + tcg_debug_assert(ts2 == ts + 1); | ||
171 | + ts2->base_type = TCG_TYPE_I64; | ||
172 | + ts2->type = TCG_TYPE_I32; | ||
173 | + ts2->kind = TEMP_CONST; | ||
174 | + ts2->temp_allocated = 1; | ||
175 | + ts2->val = val >> 32; | ||
176 | + } else { | ||
177 | + ts->base_type = type; | ||
178 | + ts->type = type; | ||
179 | + ts->kind = TEMP_CONST; | ||
180 | + ts->temp_allocated = 1; | ||
181 | + ts->val = val; | ||
182 | + } | ||
183 | + g_hash_table_insert(h, &ts->val, ts); | ||
184 | + } | ||
185 | + | ||
186 | + return ts; | ||
187 | +} | 33 | +} |
188 | + | 34 | + |
189 | +TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val) | 35 | +/* Output Type 10 Prefix - Modified Load/Store Form (MLS:D) */ |
36 | +static void tcg_out_mls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt, | ||
37 | + unsigned ra, tcg_target_long imm, bool r) | ||
190 | +{ | 38 | +{ |
191 | + val = dup_const(vece, val); | 39 | + tcg_insn_unit p, i; |
192 | + return temp_tcgv_vec(tcg_constant_internal(type, val)); | 40 | + |
41 | + p = OPCD(1) | (2 << 24) | (r << 20) | ((imm >> 16) & 0x3ffff); | ||
42 | + i = opc | TAI(rt, ra, imm); | ||
43 | + | ||
44 | + tcg_out_prefix_align(s); | ||
45 | + tcg_out32(s, p); | ||
46 | + tcg_out32(s, i); | ||
193 | +} | 47 | +} |
194 | + | 48 | + |
195 | TCGv_i32 tcg_const_i32(int32_t val) | 49 | static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, |
196 | { | 50 | TCGReg base, tcg_target_long offset); |
197 | TCGv_i32 t0; | 51 | |
198 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_start(TCGContext *s) | 52 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, |
199 | TCGTempVal val = TEMP_VAL_MEM; | 53 | return; |
200 | 54 | } | |
201 | switch (ts->kind) { | 55 | |
202 | + case TEMP_CONST: | 56 | + /* |
203 | + val = TEMP_VAL_CONST; | 57 | + * Load values up to 34 bits, and pc-relative addresses, |
204 | + break; | 58 | + * with one prefixed insn. |
205 | case TEMP_FIXED: | 59 | + */ |
206 | val = TEMP_VAL_REG; | 60 | + if (have_isa_3_10) { |
207 | break; | 61 | + if (arg == sextract64(arg, 0, 34)) { |
208 | @@ -XXX,XX +XXX,XX @@ static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size, | 62 | + /* pli ret,value = paddi ret,0,value,0 */ |
209 | case TEMP_NORMAL: | 63 | + tcg_out_mls_d(s, ADDI, ret, 0, arg, 0); |
210 | snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals); | 64 | + return; |
211 | break; | ||
212 | + case TEMP_CONST: | ||
213 | + switch (ts->type) { | ||
214 | + case TCG_TYPE_I32: | ||
215 | + snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val); | ||
216 | + break; | ||
217 | +#if TCG_TARGET_REG_BITS > 32 | ||
218 | + case TCG_TYPE_I64: | ||
219 | + snprintf(buf, buf_size, "$0x%" PRIx64, ts->val); | ||
220 | + break; | ||
221 | +#endif | ||
222 | + case TCG_TYPE_V64: | ||
223 | + case TCG_TYPE_V128: | ||
224 | + case TCG_TYPE_V256: | ||
225 | + snprintf(buf, buf_size, "v%d$0x%" PRIx64, | ||
226 | + 64 << (ts->type - TCG_TYPE_V64), ts->val); | ||
227 | + break; | ||
228 | + default: | ||
229 | + g_assert_not_reached(); | ||
230 | + } | 65 | + } |
231 | + break; | ||
232 | } | ||
233 | return buf; | ||
234 | } | ||
235 | @@ -XXX,XX +XXX,XX @@ static void la_bb_end(TCGContext *s, int ng, int nt) | ||
236 | state = TS_DEAD | TS_MEM; | ||
237 | break; | ||
238 | case TEMP_NORMAL: | ||
239 | + case TEMP_CONST: | ||
240 | state = TS_DEAD; | ||
241 | break; | ||
242 | default: | ||
243 | @@ -XXX,XX +XXX,XX @@ static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet); | ||
244 | mark it free; otherwise mark it dead. */ | ||
245 | static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead) | ||
246 | { | ||
247 | - if (temp_readonly(ts)) { | ||
248 | + TCGTempVal new_type; | ||
249 | + | 66 | + |
250 | + switch (ts->kind) { | 67 | + tmp = tcg_pcrel_diff_for_prefix(s, (void *)arg); |
251 | + case TEMP_FIXED: | 68 | + if (tmp == sextract64(tmp, 0, 34)) { |
252 | return; | 69 | + /* pla ret,value = paddi ret,0,value,1 */ |
253 | + case TEMP_GLOBAL: | 70 | + tcg_out_mls_d(s, ADDI, ret, 0, tmp, 1); |
254 | + case TEMP_LOCAL: | 71 | + return; |
255 | + new_type = TEMP_VAL_MEM; | ||
256 | + break; | ||
257 | + case TEMP_NORMAL: | ||
258 | + new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD; | ||
259 | + break; | ||
260 | + case TEMP_CONST: | ||
261 | + new_type = TEMP_VAL_CONST; | ||
262 | + break; | ||
263 | + default: | ||
264 | + g_assert_not_reached(); | ||
265 | } | ||
266 | if (ts->val_type == TEMP_VAL_REG) { | ||
267 | s->reg_to_temp[ts->reg] = NULL; | ||
268 | } | ||
269 | - ts->val_type = (free_or_dead < 0 | ||
270 | - || ts->kind != TEMP_NORMAL | ||
271 | - ? TEMP_VAL_MEM : TEMP_VAL_DEAD); | ||
272 | + ts->val_type = new_type; | ||
273 | } | ||
274 | |||
275 | /* Mark a temporary as dead. */ | ||
276 | @@ -XXX,XX +XXX,XX @@ static inline void temp_dead(TCGContext *s, TCGTemp *ts) | ||
277 | static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs, | ||
278 | TCGRegSet preferred_regs, int free_or_dead) | ||
279 | { | ||
280 | - if (temp_readonly(ts)) { | ||
281 | - return; | ||
282 | - } | ||
283 | - if (!ts->mem_coherent) { | ||
284 | + if (!temp_readonly(ts) && !ts->mem_coherent) { | ||
285 | if (!ts->mem_allocated) { | ||
286 | temp_allocate_frame(s, ts); | ||
287 | } | ||
288 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) | ||
289 | |||
290 | for (i = s->nb_globals; i < s->nb_temps; i++) { | ||
291 | TCGTemp *ts = &s->temps[i]; | ||
292 | - if (ts->kind == TEMP_LOCAL) { | ||
293 | + | ||
294 | + switch (ts->kind) { | ||
295 | + case TEMP_LOCAL: | ||
296 | temp_save(s, ts, allocated_regs); | ||
297 | - } else { | ||
298 | + break; | ||
299 | + case TEMP_NORMAL: | ||
300 | /* The liveness analysis already ensures that temps are dead. | ||
301 | Keep a tcg_debug_assert for safety. */ | ||
302 | tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD); | ||
303 | + break; | ||
304 | + case TEMP_CONST: | ||
305 | + /* Similarly, we should have freed any allocated register. */ | ||
306 | + tcg_debug_assert(ts->val_type == TEMP_VAL_CONST); | ||
307 | + break; | ||
308 | + default: | ||
309 | + g_assert_not_reached(); | ||
310 | } | ||
311 | } | ||
312 | |||
313 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
314 | i_preferred_regs = o_preferred_regs = 0; | ||
315 | if (arg_ct->ialias) { | ||
316 | o_preferred_regs = op->output_pref[arg_ct->alias_index]; | ||
317 | - if (ts->kind == TEMP_FIXED) { | ||
318 | - /* if fixed register, we must allocate a new register | ||
319 | - if the alias is not the same register */ | ||
320 | - if (arg != op->args[arg_ct->alias_index]) { | ||
321 | - goto allocate_in_reg; | ||
322 | - } | ||
323 | - } else { | ||
324 | - /* if the input is aliased to an output and if it is | ||
325 | - not dead after the instruction, we must allocate | ||
326 | - a new register and move it */ | ||
327 | - if (!IS_DEAD_ARG(i)) { | ||
328 | - goto allocate_in_reg; | ||
329 | - } | ||
330 | |||
331 | - /* check if the current register has already been allocated | ||
332 | - for another input aliased to an output */ | ||
333 | - if (ts->val_type == TEMP_VAL_REG) { | ||
334 | - int k2, i2; | ||
335 | - reg = ts->reg; | ||
336 | - for (k2 = 0 ; k2 < k ; k2++) { | ||
337 | - i2 = def->args_ct[nb_oargs + k2].sort_index; | ||
338 | - if (def->args_ct[i2].ialias && reg == new_args[i2]) { | ||
339 | - goto allocate_in_reg; | ||
340 | - } | ||
341 | + /* | ||
342 | + * If the input is readonly, then it cannot also be an | ||
343 | + * output and aliased to itself. If the input is not | ||
344 | + * dead after the instruction, we must allocate a new | ||
345 | + * register and move it. | ||
346 | + */ | ||
347 | + if (temp_readonly(ts) || !IS_DEAD_ARG(i)) { | ||
348 | + goto allocate_in_reg; | ||
349 | + } | ||
350 | + | ||
351 | + /* | ||
352 | + * Check if the current register has already been allocated | ||
353 | + * for another input aliased to an output. | ||
354 | + */ | ||
355 | + if (ts->val_type == TEMP_VAL_REG) { | ||
356 | + reg = ts->reg; | ||
357 | + for (int k2 = 0; k2 < k; k2++) { | ||
358 | + int i2 = def->args_ct[nb_oargs + k2].sort_index; | ||
359 | + if (def->args_ct[i2].ialias && reg == new_args[i2]) { | ||
360 | + goto allocate_in_reg; | ||
361 | } | ||
362 | } | ||
363 | - i_preferred_regs = o_preferred_regs; | ||
364 | } | ||
365 | + i_preferred_regs = o_preferred_regs; | ||
366 | } | ||
367 | |||
368 | temp_load(s, ts, arg_ct->regs, i_allocated_regs, i_preferred_regs); | ||
369 | reg = ts->reg; | ||
370 | |||
371 | - if (tcg_regset_test_reg(arg_ct->regs, reg)) { | ||
372 | - /* nothing to do : the constraint is satisfied */ | ||
373 | - } else { | ||
374 | - allocate_in_reg: | ||
375 | - /* allocate a new register matching the constraint | ||
376 | - and move the temporary register into it */ | ||
377 | + if (!tcg_regset_test_reg(arg_ct->regs, reg)) { | ||
378 | + allocate_in_reg: | ||
379 | + /* | ||
380 | + * Allocate a new register matching the constraint | ||
381 | + * and move the temporary register into it. | ||
382 | + */ | ||
383 | temp_load(s, ts, tcg_target_available_regs[ts->type], | ||
384 | i_allocated_regs, 0); | ||
385 | reg = tcg_reg_alloc(s, arg_ct->regs, i_allocated_regs, | ||
386 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | ||
387 | } | ||
388 | #endif | ||
389 | |||
390 | + for (i = 0; i < TCG_TYPE_COUNT; ++i) { | ||
391 | + if (s->const_table[i]) { | ||
392 | + g_hash_table_destroy(s->const_table[i]); | ||
393 | + s->const_table[i] = NULL; | ||
394 | + } | 72 | + } |
395 | + } | 73 | + } |
396 | + | 74 | + |
397 | tcg_reg_alloc_start(s); | 75 | /* Load 32-bit immediates with two insns. Note that we've already |
398 | 76 | eliminated bare ADDIS, so we know both insns are required. */ | |
399 | s->code_buf = tb->tc.ptr; | 77 | if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) { |
400 | -- | 78 | -- |
401 | 2.25.1 | 79 | 2.34.1 |
402 | |||
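
The tcg_out_mls_d/tcg_out_8ls_d helpers in the PPC patch above spread a 34-bit immediate across the instruction pair: the prefix word carries imm[33:16] in its low 18 bits and the suffix (an ordinary D-form insn) carries imm[15:0]. A self-contained sketch of the split and reassembly, with all opcode plumbing omitted:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend the low BITS bits of V. */
    static int64_t sext(uint64_t v, unsigned bits)
    {
        uint64_t m = 1ull << (bits - 1);
        return (int64_t)(((v & ((m << 1) - 1)) ^ m) - m);
    }

    int main(void)
    {
        int64_t imm = -0x123456789ll;          /* fits 34 signed bits */
        uint32_t pfx = (imm >> 16) & 0x3ffff;  /* mask as in tcg_out_mls_d */
        uint32_t sfx = imm & 0xffff;
        int64_t back = sext(((uint64_t)pfx << 16) | sfx, 34);

        assert(imm == sext(imm, 34));          /* in range */
        assert(back == imm);
        printf("pfx=%#x sfx=%#x back=%#llx\n", pfx, sfx, (long long)back);
        return 0;
    }
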
1 | With larger vector sizes, it turns out oprsz == maxsz, and we only | 1 | When the offset is out of range of the non-prefixed insn, but |
---|---|---|---|
2 | need to represent mismatch for oprsz <= 32. We do, however, need | 2 | fits the 34-bit immediate of the prefixed insn, use that. |
3 | to represent larger oprsz and do so without reducing SIMD_DATA_BITS. | ||
4 | 3 | ||
5 | Reduce the size of the oprsz field and increase the maxsz field. | 4 | Reviewed-by: Jordan Niethe <jniethe5@gmail.com> |
6 | Steal the oprsz value of 24 to indicate equality with maxsz. | ||
7 | |||
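
Concretely: the 2-bit field stores oprsz/8 - 1 for the sizes 8, 16 and 32 (encodings 0, 1 and 3), and the stolen encoding 2 means "same as maxsz". A sketch mirroring the encode/decode hunks below, with the data field left out:

    #include <assert.h>
    #include <stdint.h>

    /* maxsz in bits [7:0], oprsz in bits [9:8], as in the new layout. */
    static uint32_t encode(uint32_t oprsz, uint32_t maxsz)
    {
        uint32_t f = oprsz / 8 - 1;        /* 8,16,32 -> 0,1,3 */

        if (oprsz == maxsz) {
            f = 2;                         /* anything else must equal maxsz */
        } else {
            assert(oprsz == 8 || oprsz == 16 || oprsz == 32);
        }
        return (f << 8) | (maxsz / 8 - 1);
    }

    static uint32_t decode_oprsz(uint32_t desc)
    {
        uint32_t f = (desc >> 8) & 3;
        uint32_t maxsz = (desc & 0xff) * 8 + 8;

        return f == 2 ? maxsz : f * 8 + 8;
    }

    int main(void)
    {
        assert(decode_oprsz(encode(16, 32)) == 16);     /* f = 1 */
        assert(decode_oprsz(encode(256, 256)) == 256);  /* f = 2, from maxsz */
        return 0;
    }
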
8 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 6 | --- |
11 | include/tcg/tcg-gvec-desc.h | 38 ++++++++++++++++++++++++------------- | 7 | tcg/ppc/tcg-target.c.inc | 66 ++++++++++++++++++++++++++++++++++++++++ |
12 | tcg/tcg-op-gvec.c | 35 ++++++++++++++++++++++++++-------- | 8 | 1 file changed, 66 insertions(+) |
13 | 2 files changed, 52 insertions(+), 21 deletions(-) | ||
14 | 9 | ||
15 | diff --git a/include/tcg/tcg-gvec-desc.h b/include/tcg/tcg-gvec-desc.h | 10 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
16 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/include/tcg/tcg-gvec-desc.h | 12 | --- a/tcg/ppc/tcg-target.c.inc |
18 | +++ b/include/tcg/tcg-gvec-desc.h | 13 | +++ b/tcg/ppc/tcg-target.c.inc |
19 | @@ -XXX,XX +XXX,XX @@ | 14 | @@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) |
20 | #ifndef TCG_TCG_GVEC_DESC_H | 15 | #define STDX XO31(149) |
21 | #define TCG_TCG_GVEC_DESC_H | 16 | #define STQ XO62( 2) |
22 | 17 | ||
23 | -/* ??? These bit widths are set for ARM SVE, maxing out at 256 byte vectors. */ | 18 | +#define PLWA OPCD( 41) |
24 | -#define SIMD_OPRSZ_SHIFT 0 | 19 | +#define PLD OPCD( 57) |
25 | -#define SIMD_OPRSZ_BITS 5 | 20 | +#define PLXSD OPCD( 42) |
26 | +/* | 21 | +#define PLXV OPCD(25 * 2 + 1) /* force tx=1 */ |
27 | + * This configuration allows MAXSZ to represent 2048 bytes, and | 22 | + |
28 | + * OPRSZ to match MAXSZ, or represent the smaller values 8, 16, or 32. | 23 | +#define PSTD OPCD( 61) |
29 | + * | 24 | +#define PSTXSD OPCD( 46) |
30 | + * Encode this with: | 25 | +#define PSTXV OPCD(27 * 2 + 1) /* force sx=1 */ |
31 | + * 0, 1, 3 -> 8, 16, 32 | 26 | + |
32 | + * 2 -> maxsz | 27 | #define ADDIC OPCD( 12) |
33 | + * | 28 | #define ADDI OPCD( 14) |
34 | + * This steals the input that would otherwise map to 24 to match maxsz. | 29 | #define ADDIS OPCD( 15) |
35 | + */ | 30 | @@ -XXX,XX +XXX,XX @@ static ptrdiff_t tcg_pcrel_diff_for_prefix(TCGContext *s, const void *target) |
36 | +#define SIMD_MAXSZ_SHIFT 0 | 31 | return tcg_pcrel_diff(s, target) - (tcg_out_need_prefix_align(s) ? 4 : 0); |
37 | +#define SIMD_MAXSZ_BITS 8 | 32 | } |
38 | 33 | ||
39 | -#define SIMD_MAXSZ_SHIFT (SIMD_OPRSZ_SHIFT + SIMD_OPRSZ_BITS) | 34 | +/* Output Type 00 Prefix - 8-Byte Load/Store Form (8LS:D) */ |
40 | -#define SIMD_MAXSZ_BITS 5 | 35 | +static void tcg_out_8ls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt, |
41 | +#define SIMD_OPRSZ_SHIFT (SIMD_MAXSZ_SHIFT + SIMD_MAXSZ_BITS) | 36 | + unsigned ra, tcg_target_long imm, bool r) |
42 | +#define SIMD_OPRSZ_BITS 2 | 37 | +{ |
43 | 38 | + tcg_insn_unit p, i; | |
44 | -#define SIMD_DATA_SHIFT (SIMD_MAXSZ_SHIFT + SIMD_MAXSZ_BITS) | 39 | + |
45 | +#define SIMD_DATA_SHIFT (SIMD_OPRSZ_SHIFT + SIMD_OPRSZ_BITS) | 40 | + p = OPCD(1) | (r << 20) | ((imm >> 16) & 0x3ffff); |
46 | #define SIMD_DATA_BITS (32 - SIMD_DATA_SHIFT) | 41 | + i = opc | TAI(rt, ra, imm); |
47 | 42 | + | |
48 | /* Create a descriptor from components. */ | 43 | + tcg_out_prefix_align(s); |
49 | uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data); | 44 | + tcg_out32(s, p); |
50 | 45 | + tcg_out32(s, i); | |
51 | -/* Extract the operation size from a descriptor. */ | ||
52 | -static inline intptr_t simd_oprsz(uint32_t desc) | ||
53 | -{ | ||
54 | - return (extract32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS) + 1) * 8; | ||
55 | -} | ||
56 | - | ||
57 | /* Extract the max vector size from a descriptor. */ | ||
58 | static inline intptr_t simd_maxsz(uint32_t desc) | ||
59 | { | ||
60 | - return (extract32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS) + 1) * 8; | ||
61 | + return extract32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS) * 8 + 8; | ||
62 | +} | 46 | +} |
63 | + | 47 | + |
64 | +/* Extract the operation size from a descriptor. */ | 48 | /* Output Type 10 Prefix - Modified Load/Store Form (MLS:D) */ |
65 | +static inline intptr_t simd_oprsz(uint32_t desc) | 49 | static void tcg_out_mls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt, |
66 | +{ | 50 | unsigned ra, tcg_target_long imm, bool r) |
67 | + uint32_t f = extract32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS); | 51 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, |
68 | + intptr_t o = f * 8 + 8; | 52 | break; |
69 | + intptr_t m = simd_maxsz(desc); | 53 | } |
70 | + return f == 2 ? m : o; | 54 | |
71 | } | 55 | + /* For unaligned or large offsets, use the prefixed form. */ |
72 | 56 | + if (have_isa_3_10 | |
73 | /* Extract the operation-specific data from a descriptor. */ | 57 | + && (offset != (int16_t)offset || (offset & align)) |
74 | diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c | 58 | + && offset == sextract64(offset, 0, 34)) { |
75 | index XXXXXXX..XXXXXXX 100644 | 59 | + /* |
76 | --- a/tcg/tcg-op-gvec.c | 60 | + * Note that the MLS:D insns retain their un-prefixed opcode, |
77 | +++ b/tcg/tcg-op-gvec.c | 61 | + * while the 8LS:D insns use a different opcode space. |
78 | @@ -XXX,XX +XXX,XX @@ static const TCGOpcode vecop_list_empty[1] = { 0 }; | 62 | + */ |
79 | of the operand offsets so that we can check them all at once. */ | 63 | + switch (opi) { |
80 | static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs) | 64 | + case LBZ: |
81 | { | 65 | + case LHZ: |
82 | - uint32_t opr_align = oprsz >= 16 ? 15 : 7; | 66 | + case LHA: |
83 | - uint32_t max_align = maxsz >= 16 || oprsz >= 16 ? 15 : 7; | 67 | + case LWZ: |
84 | - tcg_debug_assert(oprsz > 0); | 68 | + case STB: |
85 | - tcg_debug_assert(oprsz <= maxsz); | 69 | + case STH: |
86 | - tcg_debug_assert((oprsz & opr_align) == 0); | 70 | + case STW: |
87 | + uint32_t max_align; | 71 | + case ADDI: |
88 | + | 72 | + tcg_out_mls_d(s, opi, rt, base, offset, 0); |
89 | + switch (oprsz) { | 73 | + return; |
90 | + case 8: | 74 | + case LWA: |
91 | + case 16: | 75 | + tcg_out_8ls_d(s, PLWA, rt, base, offset, 0); |
92 | + case 32: | 76 | + return; |
93 | + tcg_debug_assert(oprsz <= maxsz); | 77 | + case LD: |
94 | + break; | 78 | + tcg_out_8ls_d(s, PLD, rt, base, offset, 0); |
95 | + default: | 79 | + return; |
96 | + tcg_debug_assert(oprsz == maxsz); | 80 | + case STD: |
97 | + break; | 81 | + tcg_out_8ls_d(s, PSTD, rt, base, offset, 0); |
98 | + } | 82 | + return; |
99 | + tcg_debug_assert(maxsz <= (8 << SIMD_MAXSZ_BITS)); | 83 | + case LXSD: |
100 | + | 84 | + tcg_out_8ls_d(s, PLXSD, rt & 31, base, offset, 0); |
101 | + max_align = maxsz >= 16 ? 15 : 7; | 85 | + return; |
102 | tcg_debug_assert((maxsz & max_align) == 0); | 86 | + case STXSD: |
103 | tcg_debug_assert((ofs & max_align) == 0); | 87 | + tcg_out_8ls_d(s, PSTXSD, rt & 31, base, offset, 0); |
104 | } | 88 | + return; |
105 | @@ -XXX,XX +XXX,XX @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data) | 89 | + case LXV: |
106 | { | 90 | + tcg_out_8ls_d(s, PLXV, rt & 31, base, offset, 0); |
107 | uint32_t desc = 0; | 91 | + return; |
108 | 92 | + case STXV: | |
109 | - assert(oprsz % 8 == 0 && oprsz <= (8 << SIMD_OPRSZ_BITS)); | 93 | + tcg_out_8ls_d(s, PSTXV, rt & 31, base, offset, 0); |
110 | - assert(maxsz % 8 == 0 && maxsz <= (8 << SIMD_MAXSZ_BITS)); | 94 | + return; |
111 | - assert(data == sextract32(data, 0, SIMD_DATA_BITS)); | 95 | + } |
112 | + check_size_align(oprsz, maxsz, 0); | ||
113 | + tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS)); | ||
114 | |||
115 | oprsz = (oprsz / 8) - 1; | ||
116 | maxsz = (maxsz / 8) - 1; | ||
117 | + | ||
118 | + /* | ||
119 | + * We have just asserted in check_size_align that either | ||
120 | + * oprsz is {8,16,32} or matches maxsz. Encode the final | ||
121 | + * case with '2', as that would otherwise map to 24. | ||
122 | + */ | ||
123 | + if (oprsz == maxsz) { | ||
124 | + oprsz = 2; | ||
125 | + } | 96 | + } |
126 | + | 97 | + |
127 | desc = deposit32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS, oprsz); | 98 | /* For unaligned, or very large offsets, use the indexed form. */ |
128 | desc = deposit32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS, maxsz); | 99 | if (offset & align || offset != (int32_t)offset || opi == 0) { |
129 | desc = deposit32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS, data); | 100 | if (rs == base) { |
130 | -- | 101 | -- |
131 | 2.25.1 | 102 | 2.34.1 |
132 | |||
1 | There are several ways we can expand a vector dup of a 64-bit | 1 | The prefixed instruction has a pc-relative form to use here. |
---|---|---|---|
2 | element on a 32-bit host. | ||
3 | 2 | ||
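
The dupi promotion below narrows vece by testing whether the 64-bit value is already its own replication at a smaller element size; dup_const is what builds such replications. An illustrative sketch (the multiply-by-ones implementation shown here is the usual trick, assumed rather than quoted from QEMU):

    #include <assert.h>
    #include <stdint.h>

    /* Replicate the low 1 << vece bytes of VAL across 64 bits. */
    static uint64_t dup_const_sketch(unsigned vece, uint64_t val)
    {
        switch (vece) {
        case 0: return 0x0101010101010101ull * (uint8_t)val;   /* MO_8 */
        case 1: return 0x0001000100010001ull * (uint16_t)val;  /* MO_16 */
        case 2: return 0x0000000100000001ull * (uint32_t)val;  /* MO_32 */
        default: return val;                                   /* MO_64 */
        }
    }

    int main(void)
    {
        uint64_t v = 0xababababababababull;

        /* 0xab..ab is its own MO_8 replication, so vece can drop to MO_8. */
        assert(v == dup_const_sketch(0, v));
        /* 0x12345678_12345678 replicates at MO_32 but not at MO_16. */
        assert(0x1234567812345678ull == dup_const_sketch(2, 0x12345678));
        return 0;
    }
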
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | --- | 4 | --- |
6 | tcg/tcg.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ | 5 | tcg/ppc/tcg-target.c.inc | 24 ++++++++++++++++++++++++ |
7 | 1 file changed, 97 insertions(+) | 6 | 1 file changed, 24 insertions(+) |
8 | 7 | ||
9 | diff --git a/tcg/tcg.c b/tcg/tcg.c | 8 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
10 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
11 | --- a/tcg/tcg.c | 10 | --- a/tcg/ppc/tcg-target.c.inc |
12 | +++ b/tcg/tcg.c | 11 | +++ b/tcg/ppc/tcg-target.c.inc |
13 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | 12 | @@ -XXX,XX +XXX,XX @@ |
14 | } | 13 | #define ALL_GENERAL_REGS 0xffffffffu |
14 | #define ALL_VECTOR_REGS 0xffffffff00000000ull | ||
15 | |||
16 | +#ifndef R_PPC64_PCREL34 | ||
17 | +#define R_PPC64_PCREL34 132 | ||
18 | +#endif | ||
19 | + | ||
20 | #define have_isel (cpuinfo & CPUINFO_ISEL) | ||
21 | |||
22 | #ifndef CONFIG_SOFTMMU | ||
23 | @@ -XXX,XX +XXX,XX @@ static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target) | ||
24 | return false; | ||
15 | } | 25 | } |
16 | 26 | ||
17 | +static void tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op) | 27 | +static bool reloc_pc34(tcg_insn_unit *src_rw, const tcg_insn_unit *target) |
18 | +{ | 28 | +{ |
19 | + const TCGLifeData arg_life = op->life; | 29 | + const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); |
20 | + TCGTemp *ots, *itsl, *itsh; | 30 | + ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx); |
21 | + TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64; | ||
22 | + | 31 | + |
23 | + /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */ | 32 | + if (disp == sextract64(disp, 0, 34)) { |
24 | + tcg_debug_assert(TCG_TARGET_REG_BITS == 32); | 33 | + src_rw[0] = (src_rw[0] & ~0x3ffff) | ((disp >> 16) & 0x3ffff); |
25 | + tcg_debug_assert(TCGOP_VECE(op) == MO_64); | 34 | + src_rw[1] = (src_rw[1] & ~0xffff) | (disp & 0xffff); |
26 | + | 35 | + return true; |
27 | + ots = arg_temp(op->args[0]); | ||
28 | + itsl = arg_temp(op->args[1]); | ||
29 | + itsh = arg_temp(op->args[2]); | ||
30 | + | ||
31 | + /* ENV should not be modified. */ | ||
32 | + tcg_debug_assert(!temp_readonly(ots)); | ||
33 | + | ||
34 | + /* Allocate the output register now. */ | ||
35 | + if (ots->val_type != TEMP_VAL_REG) { | ||
36 | + TCGRegSet allocated_regs = s->reserved_regs; | ||
37 | + TCGRegSet dup_out_regs = | ||
38 | + tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs; | ||
39 | + | ||
40 | + /* Make sure to not spill the input registers. */ | ||
41 | + if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) { | ||
42 | + tcg_regset_set_reg(allocated_regs, itsl->reg); | ||
43 | + } | ||
44 | + if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) { | ||
45 | + tcg_regset_set_reg(allocated_regs, itsh->reg); | ||
46 | + } | ||
47 | + | ||
48 | + ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs, | ||
49 | + op->output_pref[0], ots->indirect_base); | ||
50 | + ots->val_type = TEMP_VAL_REG; | ||
51 | + ots->mem_coherent = 0; | ||
52 | + s->reg_to_temp[ots->reg] = ots; | ||
53 | + } | 36 | + } |
54 | + | 37 | + return false; |
55 | + /* Promote dup2 of immediates to dupi_vec. */ | ||
56 | + if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) { | ||
57 | + uint64_t val = deposit64(itsl->val, 32, 32, itsh->val); | ||
58 | + MemOp vece = MO_64; | ||
59 | + | ||
60 | + if (val == dup_const(MO_8, val)) { | ||
61 | + vece = MO_8; | ||
62 | + } else if (val == dup_const(MO_16, val)) { | ||
63 | + vece = MO_16; | ||
64 | + } else if (val == dup_const(MO_32, val)) { | ||
65 | + vece = MO_32; | ||
66 | + } | ||
67 | + | ||
68 | + tcg_out_dupi_vec(s, vtype, vece, ots->reg, val); | ||
69 | + goto done; | ||
70 | + } | ||
71 | + | ||
72 | + /* If the two inputs form one 64-bit value, try dupm_vec. */ | ||
73 | + if (itsl + 1 == itsh && | ||
74 | + itsl->base_type == TCG_TYPE_I64 && | ||
75 | + itsh->base_type == TCG_TYPE_I64) { | ||
76 | + if (!itsl->mem_coherent) { | ||
77 | + temp_sync(s, itsl, s->reserved_regs, 0, 0); | ||
78 | + } | ||
79 | + if (!itsh->mem_coherent) { | ||
80 | + temp_sync(s, itsh, s->reserved_regs, 0, 0); | ||
81 | + } | ||
82 | +#ifdef HOST_WORDS_BIGENDIAN | ||
83 | + TCGTemp *its = itsh; | ||
84 | +#else | ||
85 | + TCGTemp *its = itsl; | ||
86 | +#endif | ||
87 | + if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg, | ||
88 | + its->mem_base->reg, its->mem_offset)) { | ||
89 | + goto done; | ||
90 | + } | ||
91 | + } | ||
92 | + | ||
93 | + /* Fall back to generic expansion. */ | ||
94 | + tcg_reg_alloc_op(s, op); | ||
95 | + return; | ||
96 | + | ||
97 | + done: | ||
98 | + if (IS_DEAD_ARG(1)) { | ||
99 | + temp_dead(s, itsl); | ||
100 | + } | ||
101 | + if (IS_DEAD_ARG(2)) { | ||
102 | + temp_dead(s, itsh); | ||
103 | + } | ||
104 | + if (NEED_SYNC_ARG(0)) { | ||
105 | + temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0)); | ||
106 | + } else if (IS_DEAD_ARG(0)) { | ||
107 | + temp_dead(s, ots); | ||
108 | + } | ||
109 | +} | 38 | +} |
110 | + | 39 | + |
111 | #ifdef TCG_TARGET_STACK_GROWSUP | 40 | /* test if a constant matches the constraint */ |
112 | #define STACK_DIR(x) (-(x)) | 41 | static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) |
113 | #else | 42 | { |
114 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | 43 | @@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, |
115 | case INDEX_op_dup_vec: | 44 | return reloc_pc14(code_ptr, target); |
116 | tcg_reg_alloc_dup(s, op); | 45 | case R_PPC_REL24: |
117 | break; | 46 | return reloc_pc24(code_ptr, target); |
118 | + case INDEX_op_dup2_vec: | 47 | + case R_PPC64_PCREL34: |
119 | + tcg_reg_alloc_dup2(s, op); | 48 | + return reloc_pc34(code_ptr, target); |
120 | + break; | 49 | case R_PPC_ADDR16: |
121 | case INDEX_op_insn_start: | 50 | /* |
122 | if (num_insns >= 0) { | 51 | * We are (slightly) abusing this relocation type. In particular, |
123 | size_t off = tcg_current_code_size(s); | 52 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, |
53 | tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0)); | ||
54 | return; | ||
55 | } | ||
56 | + if (have_isa_3_10) { | ||
57 | + tcg_out_8ls_d(s, PLD, ret, 0, 0, 1); | ||
58 | + new_pool_label(s, arg, R_PPC64_PCREL34, s->code_ptr - 2, 0); | ||
59 | + return; | ||
60 | + } | ||
61 | if (have_isa_3_00) { | ||
62 | tcg_out_addpcis(s, TCG_REG_TMP2, 0); | ||
63 | new_pool_label(s, arg, R_PPC_REL14, s->code_ptr, 0); | ||
124 | -- | 64 | -- |
125 | 2.25.1 | 65 | 2.34.1 |
126 | |||
127 | diff view generated by jsdifflib |
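The constant-promotion step in tcg_reg_alloc_dup2 above narrows a 64-bit dup constant to the smallest element size whose replication reproduces it, so the backend can use a cheaper splat. A minimal sketch of that test, taking the element width in bits rather than QEMU's MemOp log2 encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Replicate the low 'bits' of v across all 64 bits. */
    static uint64_t dup_const_bits(unsigned bits, uint64_t v)
    {
        uint64_t r = (bits == 64) ? v : (v & ((1ull << bits) - 1));
        for (unsigned i = bits; i < 64; i *= 2) {
            r |= r << i;
        }
        return r;
    }

    int main(void)
    {
        uint64_t val = 0x1212121212121212ull;
        unsigned vece = 64;

        if (val == dup_const_bits(8, val)) {
            vece = 8;
        } else if (val == dup_const_bits(16, val)) {
            vece = 16;
        } else if (val == dup_const_bits(32, val)) {
            vece = 32;
        }
        printf("dup as %u-bit elements\n", vece);  /* prints 8 */
        return 0;
    }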
1 | This wasn't actually used for anything, really. All variable | 1 | The prefixed instructions have a pc-relative form to use here. |
---|---|---|---|
2 | operands must accept registers, which are indicated by the | ||
3 | set in TCGArgConstraint.regs. | ||
4 | 2 | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 4 | --- |
7 | include/tcg/tcg.h | 1 - | 5 | tcg/ppc/tcg-target.c.inc | 9 +++++++++ |
8 | tcg/tcg.c | 15 ++++----------- | 6 | 1 file changed, 9 insertions(+) |
9 | tcg/aarch64/tcg-target.c.inc | 3 --- | ||
10 | tcg/arm/tcg-target.c.inc | 3 --- | ||
11 | tcg/i386/tcg-target.c.inc | 11 ----------- | ||
12 | tcg/mips/tcg-target.c.inc | 3 --- | ||
13 | tcg/ppc/tcg-target.c.inc | 5 ----- | ||
14 | tcg/riscv/tcg-target.c.inc | 2 -- | ||
15 | tcg/s390/tcg-target.c.inc | 4 ---- | ||
16 | tcg/sparc/tcg-target.c.inc | 5 ----- | ||
17 | tcg/tci/tcg-target.c.inc | 1 - | ||
18 | 11 files changed, 4 insertions(+), 49 deletions(-) | ||
19 | 7 | ||
20 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/include/tcg/tcg.h | ||
23 | +++ b/include/tcg/tcg.h | ||
24 | @@ -XXX,XX +XXX,XX @@ void tcg_dump_op_count(void); | ||
25 | #define TCG_CT_ALIAS 0x80 | ||
26 | #define TCG_CT_IALIAS 0x40 | ||
27 | #define TCG_CT_NEWREG 0x20 /* output requires a new register */ | ||
28 | -#define TCG_CT_REG 0x01 | ||
29 | #define TCG_CT_CONST 0x02 /* any constant of register size */ | ||
30 | |||
31 | typedef struct TCGArgConstraint { | ||
32 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
33 | index XXXXXXX..XXXXXXX 100644 | ||
34 | --- a/tcg/tcg.c | ||
35 | +++ b/tcg/tcg.c | ||
36 | @@ -XXX,XX +XXX,XX @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) | ||
37 | /* we give more priority to constraints with less registers */ | ||
38 | static int get_constraint_priority(const TCGOpDef *def, int k) | ||
39 | { | ||
40 | - const TCGArgConstraint *arg_ct; | ||
41 | + const TCGArgConstraint *arg_ct = &def->args_ct[k]; | ||
42 | + int n; | ||
43 | |||
44 | - int i, n; | ||
45 | - arg_ct = &def->args_ct[k]; | ||
46 | if (arg_ct->ct & TCG_CT_ALIAS) { | ||
47 | /* an alias is equivalent to a single register */ | ||
48 | n = 1; | ||
49 | } else { | ||
50 | - if (!(arg_ct->ct & TCG_CT_REG)) | ||
51 | - return 0; | ||
52 | - n = 0; | ||
53 | - for(i = 0; i < TCG_TARGET_NB_REGS; i++) { | ||
54 | - if (tcg_regset_test_reg(arg_ct->regs, i)) | ||
55 | - n++; | ||
56 | - } | ||
57 | + n = ctpop64(arg_ct->regs); | ||
58 | } | ||
59 | return TCG_TARGET_NB_REGS - n + 1; | ||
60 | } | ||
61 | @@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s) | ||
62 | int oarg = *ct_str - '0'; | ||
63 | tcg_debug_assert(ct_str == tdefs->args_ct_str[i]); | ||
64 | tcg_debug_assert(oarg < def->nb_oargs); | ||
65 | - tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG); | ||
66 | + tcg_debug_assert(def->args_ct[oarg].regs != 0); | ||
67 | /* TCG_CT_ALIAS is for the output arguments. | ||
68 | The input is tagged with TCG_CT_IALIAS. */ | ||
69 | def->args_ct[i] = def->args_ct[oarg]; | ||
70 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | ||
71 | index XXXXXXX..XXXXXXX 100644 | ||
72 | --- a/tcg/aarch64/tcg-target.c.inc | ||
73 | +++ b/tcg/aarch64/tcg-target.c.inc | ||
74 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
75 | { | ||
76 | switch (*ct_str++) { | ||
77 | case 'r': /* general registers */ | ||
78 | - ct->ct |= TCG_CT_REG; | ||
79 | ct->regs |= 0xffffffffu; | ||
80 | break; | ||
81 | case 'w': /* advsimd registers */ | ||
82 | - ct->ct |= TCG_CT_REG; | ||
83 | ct->regs |= 0xffffffff00000000ull; | ||
84 | break; | ||
85 | case 'l': /* qemu_ld / qemu_st address, data_reg */ | ||
86 | - ct->ct |= TCG_CT_REG; | ||
87 | ct->regs = 0xffffffffu; | ||
88 | #ifdef CONFIG_SOFTMMU | ||
89 | /* x0 and x1 will be overwritten when reading the tlb entry, | ||
90 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
91 | index XXXXXXX..XXXXXXX 100644 | ||
92 | --- a/tcg/arm/tcg-target.c.inc | ||
93 | +++ b/tcg/arm/tcg-target.c.inc | ||
94 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
95 | break; | ||
96 | |||
97 | case 'r': | ||
98 | - ct->ct |= TCG_CT_REG; | ||
99 | ct->regs = 0xffff; | ||
100 | break; | ||
101 | |||
102 | /* qemu_ld address */ | ||
103 | case 'l': | ||
104 | - ct->ct |= TCG_CT_REG; | ||
105 | ct->regs = 0xffff; | ||
106 | #ifdef CONFIG_SOFTMMU | ||
107 | /* r0-r2,lr will be overwritten when reading the tlb entry, | ||
108 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
109 | |||
110 | /* qemu_st address & data */ | ||
111 | case 's': | ||
112 | - ct->ct |= TCG_CT_REG; | ||
113 | ct->regs = 0xffff; | ||
114 | /* r0-r2 will be overwritten when reading the tlb entry (softmmu only) | ||
115 | and r0-r1 doing the byte swapping, so don't use these. */ | ||
116 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | ||
117 | index XXXXXXX..XXXXXXX 100644 | ||
118 | --- a/tcg/i386/tcg-target.c.inc | ||
119 | +++ b/tcg/i386/tcg-target.c.inc | ||
120 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
121 | { | ||
122 | switch(*ct_str++) { | ||
123 | case 'a': | ||
124 | - ct->ct |= TCG_CT_REG; | ||
125 | tcg_regset_set_reg(ct->regs, TCG_REG_EAX); | ||
126 | break; | ||
127 | case 'b': | ||
128 | - ct->ct |= TCG_CT_REG; | ||
129 | tcg_regset_set_reg(ct->regs, TCG_REG_EBX); | ||
130 | break; | ||
131 | case 'c': | ||
132 | - ct->ct |= TCG_CT_REG; | ||
133 | tcg_regset_set_reg(ct->regs, TCG_REG_ECX); | ||
134 | break; | ||
135 | case 'd': | ||
136 | - ct->ct |= TCG_CT_REG; | ||
137 | tcg_regset_set_reg(ct->regs, TCG_REG_EDX); | ||
138 | break; | ||
139 | case 'S': | ||
140 | - ct->ct |= TCG_CT_REG; | ||
141 | tcg_regset_set_reg(ct->regs, TCG_REG_ESI); | ||
142 | break; | ||
143 | case 'D': | ||
144 | - ct->ct |= TCG_CT_REG; | ||
145 | tcg_regset_set_reg(ct->regs, TCG_REG_EDI); | ||
146 | break; | ||
147 | case 'q': | ||
148 | /* A register that can be used as a byte operand. */ | ||
149 | - ct->ct |= TCG_CT_REG; | ||
150 | ct->regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf; | ||
151 | break; | ||
152 | case 'Q': | ||
153 | /* A register with an addressable second byte (e.g. %ah). */ | ||
154 | - ct->ct |= TCG_CT_REG; | ||
155 | ct->regs = 0xf; | ||
156 | break; | ||
157 | case 'r': | ||
158 | /* A general register. */ | ||
159 | - ct->ct |= TCG_CT_REG; | ||
160 | ct->regs |= ALL_GENERAL_REGS; | ||
161 | break; | ||
162 | case 'W': | ||
163 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
164 | break; | ||
165 | case 'x': | ||
166 | /* A vector register. */ | ||
167 | - ct->ct |= TCG_CT_REG; | ||
168 | ct->regs |= ALL_VECTOR_REGS; | ||
169 | break; | ||
170 | |||
171 | /* qemu_ld/st address constraint */ | ||
172 | case 'L': | ||
173 | - ct->ct |= TCG_CT_REG; | ||
174 | ct->regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff; | ||
175 | tcg_regset_reset_reg(ct->regs, TCG_REG_L0); | ||
176 | tcg_regset_reset_reg(ct->regs, TCG_REG_L1); | ||
177 | diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc | ||
178 | index XXXXXXX..XXXXXXX 100644 | ||
179 | --- a/tcg/mips/tcg-target.c.inc | ||
180 | +++ b/tcg/mips/tcg-target.c.inc | ||
181 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
182 | { | ||
183 | switch(*ct_str++) { | ||
184 | case 'r': | ||
185 | - ct->ct |= TCG_CT_REG; | ||
186 | ct->regs = 0xffffffff; | ||
187 | break; | ||
188 | case 'L': /* qemu_ld input arg constraint */ | ||
189 | - ct->ct |= TCG_CT_REG; | ||
190 | ct->regs = 0xffffffff; | ||
191 | tcg_regset_reset_reg(ct->regs, TCG_REG_A0); | ||
192 | #if defined(CONFIG_SOFTMMU) | ||
193 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
194 | #endif | ||
195 | break; | ||
196 | case 'S': /* qemu_st constraint */ | ||
197 | - ct->ct |= TCG_CT_REG; | ||
198 | ct->regs = 0xffffffff; | ||
199 | tcg_regset_reset_reg(ct->regs, TCG_REG_A0); | ||
200 | #if defined(CONFIG_SOFTMMU) | ||
201 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc | 8 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
202 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
203 | --- a/tcg/ppc/tcg-target.c.inc | 10 | --- a/tcg/ppc/tcg-target.c.inc |
204 | +++ b/tcg/ppc/tcg-target.c.inc | 11 | +++ b/tcg/ppc/tcg-target.c.inc |
205 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | 12 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, |
206 | { | 13 | if (USE_REG_TB) { |
207 | switch (*ct_str++) { | 14 | rel = R_PPC_ADDR16; |
208 | case 'A': case 'B': case 'C': case 'D': | 15 | add = ppc_tbrel_diff(s, NULL); |
209 | - ct->ct |= TCG_CT_REG; | 16 | + } else if (have_isa_3_10) { |
210 | tcg_regset_set_reg(ct->regs, 3 + ct_str[0] - 'A'); | 17 | + if (type == TCG_TYPE_V64) { |
211 | break; | 18 | + tcg_out_8ls_d(s, PLXSD, ret & 31, 0, 0, 1); |
212 | case 'r': | 19 | + new_pool_label(s, val, R_PPC64_PCREL34, s->code_ptr - 2, 0); |
213 | - ct->ct |= TCG_CT_REG; | 20 | + } else { |
214 | ct->regs = 0xffffffff; | 21 | + tcg_out_8ls_d(s, PLXV, ret & 31, 0, 0, 1); |
215 | break; | 22 | + new_pool_l2(s, R_PPC64_PCREL34, s->code_ptr - 2, 0, val, val); |
216 | case 'v': | 23 | + } |
217 | - ct->ct |= TCG_CT_REG; | 24 | + return; |
218 | ct->regs = 0xffffffff00000000ull; | 25 | } else if (have_isa_3_00) { |
219 | break; | 26 | tcg_out_addpcis(s, TCG_REG_TMP1, 0); |
220 | case 'L': /* qemu_ld constraint */ | 27 | rel = R_PPC_REL14; |
221 | - ct->ct |= TCG_CT_REG; | ||
222 | ct->regs = 0xffffffff; | ||
223 | tcg_regset_reset_reg(ct->regs, TCG_REG_R3); | ||
224 | #ifdef CONFIG_SOFTMMU | ||
225 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
226 | #endif | ||
227 | break; | ||
228 | case 'S': /* qemu_st constraint */ | ||
229 | - ct->ct |= TCG_CT_REG; | ||
230 | ct->regs = 0xffffffff; | ||
231 | tcg_regset_reset_reg(ct->regs, TCG_REG_R3); | ||
232 | #ifdef CONFIG_SOFTMMU | ||
233 | diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc | ||
234 | index XXXXXXX..XXXXXXX 100644 | ||
235 | --- a/tcg/riscv/tcg-target.c.inc | ||
236 | +++ b/tcg/riscv/tcg-target.c.inc | ||
237 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
238 | { | ||
239 | switch (*ct_str++) { | ||
240 | case 'r': | ||
241 | - ct->ct |= TCG_CT_REG; | ||
242 | ct->regs = 0xffffffff; | ||
243 | break; | ||
244 | case 'L': | ||
245 | /* qemu_ld/qemu_st constraint */ | ||
246 | - ct->ct |= TCG_CT_REG; | ||
247 | ct->regs = 0xffffffff; | ||
248 | /* qemu_ld/qemu_st uses TCG_REG_TMP0 */ | ||
249 | #if defined(CONFIG_SOFTMMU) | ||
250 | diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc | ||
251 | index XXXXXXX..XXXXXXX 100644 | ||
252 | --- a/tcg/s390/tcg-target.c.inc | ||
253 | +++ b/tcg/s390/tcg-target.c.inc | ||
254 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
255 | { | ||
256 | switch (*ct_str++) { | ||
257 | case 'r': /* all registers */ | ||
258 | - ct->ct |= TCG_CT_REG; | ||
259 | ct->regs = 0xffff; | ||
260 | break; | ||
261 | case 'L': /* qemu_ld/st constraint */ | ||
262 | - ct->ct |= TCG_CT_REG; | ||
263 | ct->regs = 0xffff; | ||
264 | tcg_regset_reset_reg(ct->regs, TCG_REG_R2); | ||
265 | tcg_regset_reset_reg(ct->regs, TCG_REG_R3); | ||
266 | tcg_regset_reset_reg(ct->regs, TCG_REG_R4); | ||
267 | break; | ||
268 | case 'a': /* force R2 for division */ | ||
269 | - ct->ct |= TCG_CT_REG; | ||
270 | ct->regs = 0; | ||
271 | tcg_regset_set_reg(ct->regs, TCG_REG_R2); | ||
272 | break; | ||
273 | case 'b': /* force R3 for division */ | ||
274 | - ct->ct |= TCG_CT_REG; | ||
275 | ct->regs = 0; | ||
276 | tcg_regset_set_reg(ct->regs, TCG_REG_R3); | ||
277 | break; | ||
278 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc | ||
279 | index XXXXXXX..XXXXXXX 100644 | ||
280 | --- a/tcg/sparc/tcg-target.c.inc | ||
281 | +++ b/tcg/sparc/tcg-target.c.inc | ||
282 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
283 | { | ||
284 | switch (*ct_str++) { | ||
285 | case 'r': | ||
286 | - ct->ct |= TCG_CT_REG; | ||
287 | ct->regs = 0xffffffff; | ||
288 | break; | ||
289 | case 'R': | ||
290 | - ct->ct |= TCG_CT_REG; | ||
291 | ct->regs = ALL_64; | ||
292 | break; | ||
293 | case 'A': /* qemu_ld/st address constraint */ | ||
294 | - ct->ct |= TCG_CT_REG; | ||
295 | ct->regs = TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff; | ||
296 | reserve_helpers: | ||
297 | tcg_regset_reset_reg(ct->regs, TCG_REG_O0); | ||
298 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
299 | tcg_regset_reset_reg(ct->regs, TCG_REG_O2); | ||
300 | break; | ||
301 | case 's': /* qemu_st data 32-bit constraint */ | ||
302 | - ct->ct |= TCG_CT_REG; | ||
303 | ct->regs = 0xffffffff; | ||
304 | goto reserve_helpers; | ||
305 | case 'S': /* qemu_st data 64-bit constraint */ | ||
306 | - ct->ct |= TCG_CT_REG; | ||
307 | ct->regs = ALL_64; | ||
308 | goto reserve_helpers; | ||
309 | case 'I': | ||
310 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
311 | index XXXXXXX..XXXXXXX 100644 | ||
312 | --- a/tcg/tci/tcg-target.c.inc | ||
313 | +++ b/tcg/tci/tcg-target.c.inc | ||
314 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
315 | case 'r': | ||
316 | case 'L': /* qemu_ld constraint */ | ||
317 | case 'S': /* qemu_st constraint */ | ||
318 | - ct->ct |= TCG_CT_REG; | ||
319 | ct->regs = BIT(TCG_TARGET_NB_REGS) - 1; | ||
320 | break; | ||
321 | default: | ||
322 | -- | 28 | -- |
323 | 2.25.1 | 29 | 2.34.1 |
324 | |||
325 | diff view generated by jsdifflib |
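In the "Remove TCG_CT_REG" patch above, get_constraint_priority no longer needs the flag: a constraint accepts registers exactly when its mask is non-empty, and the register count is a population count of that mask. A standalone equivalent of the new ctpop64 computation, assuming a 64-bit TCGRegSet:

    #include <assert.h>
    #include <stdint.h>

    /* Count registers in a register-set mask, as ctpop64() does. */
    static int count_regs(uint64_t regs)
    {
        int n = 0;
        for (; regs; regs &= regs - 1) {  /* clear the lowest set bit */
            n++;
        }
        return n;
    }

    int main(void)
    {
        assert(count_regs(0xffff) == 16);   /* e.g. a 16-register 'r' set */
        assert(count_regs(0) == 0);
        return 0;
    }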
1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | --- | 2 | --- |
3 | tcg/ppc/tcg-target-constr.h | 37 ++++++++++ | 3 | tcg/ppc/tcg-target.c.inc | 3 +++ |
4 | tcg/ppc/tcg-target.c.inc | 135 +++++++++++++++--------------------- | 4 | 1 file changed, 3 insertions(+) |
5 | 2 files changed, 94 insertions(+), 78 deletions(-) | ||
6 | create mode 100644 tcg/ppc/tcg-target-constr.h | ||
7 | 5 | ||
8 | diff --git a/tcg/ppc/tcg-target-constr.h b/tcg/ppc/tcg-target-constr.h | ||
9 | new file mode 100644 | ||
10 | index XXXXXXX..XXXXXXX | ||
11 | --- /dev/null | ||
12 | +++ b/tcg/ppc/tcg-target-constr.h | ||
13 | @@ -XXX,XX +XXX,XX @@ | ||
14 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
15 | +/* | ||
16 | + * PowerPC target-specific operand constaints. | ||
17 | + * Copyright (c) 2020 Linaro | ||
18 | + */ | ||
19 | + | ||
20 | +C_O0_I1(r) | ||
21 | +C_O0_I2(r, r) | ||
22 | +C_O0_I2(r, ri) | ||
23 | +C_O0_I2(S, S) | ||
24 | +C_O0_I2(v, r) | ||
25 | +C_O0_I3(S, S, S) | ||
26 | +C_O0_I4(r, r, ri, ri) | ||
27 | +C_O0_I4(S, S, S, S) | ||
28 | +C_O1_I1(r, L) | ||
29 | +C_O1_I1(r, r) | ||
30 | +C_O1_I1(v, r) | ||
31 | +C_O1_I1(v, v) | ||
32 | +C_O1_I1(v, vr) | ||
33 | +C_O1_I2(r, 0, rZ) | ||
34 | +C_O1_I2(r, L, L) | ||
35 | +C_O1_I2(r, rI, ri) | ||
36 | +C_O1_I2(r, rI, rT) | ||
37 | +C_O1_I2(r, r, r) | ||
38 | +C_O1_I2(r, r, ri) | ||
39 | +C_O1_I2(r, r, rI) | ||
40 | +C_O1_I2(r, r, rT) | ||
41 | +C_O1_I2(r, r, rU) | ||
42 | +C_O1_I2(r, r, rZW) | ||
43 | +C_O1_I2(v, v, v) | ||
44 | +C_O1_I3(v, v, v, v) | ||
45 | +C_O1_I4(r, r, ri, rZ, rZ) | ||
46 | +C_O1_I4(r, r, r, ri, ri) | ||
47 | +C_O2_I1(L, L, L) | ||
48 | +C_O2_I2(L, L, L, L) | ||
49 | +C_O2_I4(r, r, rI, rZM, r, r) | ||
50 | +C_O2_I4(r, r, r, r, rI, rZM) | ||
51 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc | 6 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
52 | index XXXXXXX..XXXXXXX 100644 | 7 | index XXXXXXX..XXXXXXX 100644 |
53 | --- a/tcg/ppc/tcg-target.c.inc | 8 | --- a/tcg/ppc/tcg-target.c.inc |
54 | +++ b/tcg/ppc/tcg-target.c.inc | 9 | +++ b/tcg/ppc/tcg-target.c.inc |
55 | @@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, | 10 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which) |
56 | va_end(va); | 11 | if (USE_REG_TB) { |
57 | } | 12 | ptrdiff_t offset = ppc_tbrel_diff(s, (void *)ptr); |
58 | 13 | tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset); | |
59 | +/* Define all constraint sets. */ | 14 | + } else if (have_isa_3_10) { |
60 | +#include "../tcg-constr.c.inc" | 15 | + ptrdiff_t offset = tcg_pcrel_diff_for_prefix(s, (void *)ptr); |
61 | + | 16 | + tcg_out_8ls_d(s, PLD, TCG_REG_TMP1, 0, offset, 1); |
62 | static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | 17 | } else if (have_isa_3_00) { |
63 | { | 18 | ptrdiff_t offset = tcg_pcrel_diff(s, (void *)ptr) - 4; |
64 | - static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; | 19 | lo = offset; |
65 | - static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; | ||
66 | - static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; | ||
67 | - static const TCGTargetOpDef S_S = { .args_ct_str = { "S", "S" } }; | ||
68 | - static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } }; | ||
69 | - static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; | ||
70 | - static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } }; | ||
71 | - static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } }; | ||
72 | - static const TCGTargetOpDef S_S_S = { .args_ct_str = { "S", "S", "S" } }; | ||
73 | - static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; | ||
74 | - static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; | ||
75 | - static const TCGTargetOpDef r_r_rT = { .args_ct_str = { "r", "r", "rT" } }; | ||
76 | - static const TCGTargetOpDef r_r_rU = { .args_ct_str = { "r", "r", "rU" } }; | ||
77 | - static const TCGTargetOpDef r_rI_ri | ||
78 | - = { .args_ct_str = { "r", "rI", "ri" } }; | ||
79 | - static const TCGTargetOpDef r_rI_rT | ||
80 | - = { .args_ct_str = { "r", "rI", "rT" } }; | ||
81 | - static const TCGTargetOpDef r_r_rZW | ||
82 | - = { .args_ct_str = { "r", "r", "rZW" } }; | ||
83 | - static const TCGTargetOpDef L_L_L_L | ||
84 | - = { .args_ct_str = { "L", "L", "L", "L" } }; | ||
85 | - static const TCGTargetOpDef S_S_S_S | ||
86 | - = { .args_ct_str = { "S", "S", "S", "S" } }; | ||
87 | - static const TCGTargetOpDef movc | ||
88 | - = { .args_ct_str = { "r", "r", "ri", "rZ", "rZ" } }; | ||
89 | - static const TCGTargetOpDef dep | ||
90 | - = { .args_ct_str = { "r", "0", "rZ" } }; | ||
91 | - static const TCGTargetOpDef br2 | ||
92 | - = { .args_ct_str = { "r", "r", "ri", "ri" } }; | ||
93 | - static const TCGTargetOpDef setc2 | ||
94 | - = { .args_ct_str = { "r", "r", "r", "ri", "ri" } }; | ||
95 | - static const TCGTargetOpDef add2 | ||
96 | - = { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } }; | ||
97 | - static const TCGTargetOpDef sub2 | ||
98 | - = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } }; | ||
99 | - static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } }; | ||
100 | - static const TCGTargetOpDef v_vr = { .args_ct_str = { "v", "vr" } }; | ||
101 | - static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } }; | ||
102 | - static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } }; | ||
103 | - static const TCGTargetOpDef v_v_v_v | ||
104 | - = { .args_ct_str = { "v", "v", "v", "v" } }; | ||
105 | - | ||
106 | switch (op) { | ||
107 | case INDEX_op_goto_ptr: | ||
108 | - return &r; | ||
109 | + return C_O0_I1(r); | ||
110 | |||
111 | case INDEX_op_ld8u_i32: | ||
112 | case INDEX_op_ld8s_i32: | ||
113 | case INDEX_op_ld16u_i32: | ||
114 | case INDEX_op_ld16s_i32: | ||
115 | case INDEX_op_ld_i32: | ||
116 | - case INDEX_op_st8_i32: | ||
117 | - case INDEX_op_st16_i32: | ||
118 | - case INDEX_op_st_i32: | ||
119 | case INDEX_op_ctpop_i32: | ||
120 | case INDEX_op_neg_i32: | ||
121 | case INDEX_op_not_i32: | ||
122 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
123 | case INDEX_op_ld32u_i64: | ||
124 | case INDEX_op_ld32s_i64: | ||
125 | case INDEX_op_ld_i64: | ||
126 | - case INDEX_op_st8_i64: | ||
127 | - case INDEX_op_st16_i64: | ||
128 | - case INDEX_op_st32_i64: | ||
129 | - case INDEX_op_st_i64: | ||
130 | case INDEX_op_ctpop_i64: | ||
131 | case INDEX_op_neg_i64: | ||
132 | case INDEX_op_not_i64: | ||
133 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
134 | case INDEX_op_bswap32_i64: | ||
135 | case INDEX_op_bswap64_i64: | ||
136 | case INDEX_op_extract_i64: | ||
137 | - return &r_r; | ||
138 | + return C_O1_I1(r, r); | ||
139 | + | ||
140 | + case INDEX_op_st8_i32: | ||
141 | + case INDEX_op_st16_i32: | ||
142 | + case INDEX_op_st_i32: | ||
143 | + case INDEX_op_st8_i64: | ||
144 | + case INDEX_op_st16_i64: | ||
145 | + case INDEX_op_st32_i64: | ||
146 | + case INDEX_op_st_i64: | ||
147 | + return C_O0_I2(r, r); | ||
148 | |||
149 | case INDEX_op_add_i32: | ||
150 | case INDEX_op_and_i32: | ||
151 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
152 | case INDEX_op_rotl_i64: | ||
153 | case INDEX_op_rotr_i64: | ||
154 | case INDEX_op_setcond_i64: | ||
155 | - return &r_r_ri; | ||
156 | + return C_O1_I2(r, r, ri); | ||
157 | + | ||
158 | case INDEX_op_mul_i32: | ||
159 | case INDEX_op_mul_i64: | ||
160 | - return &r_r_rI; | ||
161 | + return C_O1_I2(r, r, rI); | ||
162 | + | ||
163 | case INDEX_op_div_i32: | ||
164 | case INDEX_op_divu_i32: | ||
165 | case INDEX_op_nand_i32: | ||
166 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
167 | case INDEX_op_divu_i64: | ||
168 | case INDEX_op_mulsh_i64: | ||
169 | case INDEX_op_muluh_i64: | ||
170 | - return &r_r_r; | ||
171 | + return C_O1_I2(r, r, r); | ||
172 | + | ||
173 | case INDEX_op_sub_i32: | ||
174 | - return &r_rI_ri; | ||
175 | + return C_O1_I2(r, rI, ri); | ||
176 | case INDEX_op_add_i64: | ||
177 | - return &r_r_rT; | ||
178 | + return C_O1_I2(r, r, rT); | ||
179 | case INDEX_op_or_i64: | ||
180 | case INDEX_op_xor_i64: | ||
181 | - return &r_r_rU; | ||
182 | + return C_O1_I2(r, r, rU); | ||
183 | case INDEX_op_sub_i64: | ||
184 | - return &r_rI_rT; | ||
185 | + return C_O1_I2(r, rI, rT); | ||
186 | case INDEX_op_clz_i32: | ||
187 | case INDEX_op_ctz_i32: | ||
188 | case INDEX_op_clz_i64: | ||
189 | case INDEX_op_ctz_i64: | ||
190 | - return &r_r_rZW; | ||
191 | + return C_O1_I2(r, r, rZW); | ||
192 | |||
193 | case INDEX_op_brcond_i32: | ||
194 | case INDEX_op_brcond_i64: | ||
195 | - return &r_ri; | ||
196 | + return C_O0_I2(r, ri); | ||
197 | |||
198 | case INDEX_op_movcond_i32: | ||
199 | case INDEX_op_movcond_i64: | ||
200 | - return &movc; | ||
201 | + return C_O1_I4(r, r, ri, rZ, rZ); | ||
202 | case INDEX_op_deposit_i32: | ||
203 | case INDEX_op_deposit_i64: | ||
204 | - return &dep; | ||
205 | + return C_O1_I2(r, 0, rZ); | ||
206 | case INDEX_op_brcond2_i32: | ||
207 | - return &br2; | ||
208 | + return C_O0_I4(r, r, ri, ri); | ||
209 | case INDEX_op_setcond2_i32: | ||
210 | - return &setc2; | ||
211 | + return C_O1_I4(r, r, r, ri, ri); | ||
212 | case INDEX_op_add2_i64: | ||
213 | case INDEX_op_add2_i32: | ||
214 | - return &add2; | ||
215 | + return C_O2_I4(r, r, r, r, rI, rZM); | ||
216 | case INDEX_op_sub2_i64: | ||
217 | case INDEX_op_sub2_i32: | ||
218 | - return &sub2; | ||
219 | + return C_O2_I4(r, r, rI, rZM, r, r); | ||
220 | |||
221 | case INDEX_op_qemu_ld_i32: | ||
222 | return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 | ||
223 | - ? &r_L : &r_L_L); | ||
224 | + ? C_O1_I1(r, L) | ||
225 | + : C_O1_I2(r, L, L)); | ||
226 | + | ||
227 | case INDEX_op_qemu_st_i32: | ||
228 | return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 | ||
229 | - ? &S_S : &S_S_S); | ||
230 | + ? C_O0_I2(S, S) | ||
231 | + : C_O0_I3(S, S, S)); | ||
232 | + | ||
233 | case INDEX_op_qemu_ld_i64: | ||
234 | - return (TCG_TARGET_REG_BITS == 64 ? &r_L | ||
235 | - : TARGET_LONG_BITS == 32 ? &L_L_L : &L_L_L_L); | ||
236 | + return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) | ||
237 | + : TARGET_LONG_BITS == 32 ? C_O2_I1(L, L, L) | ||
238 | + : C_O2_I2(L, L, L, L)); | ||
239 | + | ||
240 | case INDEX_op_qemu_st_i64: | ||
241 | - return (TCG_TARGET_REG_BITS == 64 ? &S_S | ||
242 | - : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S); | ||
243 | + return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(S, S) | ||
244 | + : TARGET_LONG_BITS == 32 ? C_O0_I3(S, S, S) | ||
245 | + : C_O0_I4(S, S, S, S)); | ||
246 | |||
247 | case INDEX_op_add_vec: | ||
248 | case INDEX_op_sub_vec: | ||
249 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
250 | case INDEX_op_ppc_mulou_vec: | ||
251 | case INDEX_op_ppc_pkum_vec: | ||
252 | case INDEX_op_dup2_vec: | ||
253 | - return &v_v_v; | ||
254 | + return C_O1_I2(v, v, v); | ||
255 | + | ||
256 | case INDEX_op_not_vec: | ||
257 | case INDEX_op_neg_vec: | ||
258 | - return &v_v; | ||
259 | + return C_O1_I1(v, v); | ||
260 | + | ||
261 | case INDEX_op_dup_vec: | ||
262 | - return have_isa_3_00 ? &v_vr : &v_v; | ||
263 | + return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v); | ||
264 | + | ||
265 | case INDEX_op_ld_vec: | ||
266 | - case INDEX_op_st_vec: | ||
267 | case INDEX_op_dupm_vec: | ||
268 | - return &v_r; | ||
269 | + return C_O1_I1(v, r); | ||
270 | + | ||
271 | + case INDEX_op_st_vec: | ||
272 | + return C_O0_I2(v, r); | ||
273 | + | ||
274 | case INDEX_op_bitsel_vec: | ||
275 | case INDEX_op_ppc_msum_vec: | ||
276 | - return &v_v_v_v; | ||
277 | + return C_O1_I3(v, v, v, v); | ||
278 | |||
279 | default: | ||
280 | return NULL; | ||
281 | -- | 20 | -- |
282 | 2.25.1 | 21 | 2.34.1 |
283 | |||
284 | diff view generated by jsdifflib |
1 | The union is unused; let "regs" appear in the main structure | 1 | This appears to slightly improve performance on power9/10. |
---|---|---|---|
2 | without the "u.regs" wrapping. | ||
3 | 2 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | --- | 4 | --- |
6 | include/tcg/tcg.h | 4 +--- | 5 | tcg/ppc/tcg-target.c.inc | 2 +- |
7 | tcg/tcg.c | 22 +++++++++++----------- | 6 | 1 file changed, 1 insertion(+), 1 deletion(-) |
8 | tcg/aarch64/tcg-target.c.inc | 14 +++++++------- | ||
9 | tcg/arm/tcg-target.c.inc | 26 +++++++++++++------------- | ||
10 | tcg/i386/tcg-target.c.inc | 26 +++++++++++++------------- | ||
11 | tcg/mips/tcg-target.c.inc | 18 +++++++++--------- | ||
12 | tcg/ppc/tcg-target.c.inc | 24 ++++++++++++------------ | ||
13 | tcg/riscv/tcg-target.c.inc | 14 +++++++------- | ||
14 | tcg/s390/tcg-target.c.inc | 18 +++++++++--------- | ||
15 | tcg/sparc/tcg-target.c.inc | 16 ++++++++-------- | ||
16 | tcg/tci/tcg-target.c.inc | 2 +- | ||
17 | 11 files changed, 91 insertions(+), 93 deletions(-) | ||
18 | 7 | ||
19 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/tcg/tcg.h | ||
22 | +++ b/include/tcg/tcg.h | ||
23 | @@ -XXX,XX +XXX,XX @@ void tcg_dump_op_count(void); | ||
24 | typedef struct TCGArgConstraint { | ||
25 | uint16_t ct; | ||
26 | uint8_t alias_index; | ||
27 | - union { | ||
28 | - TCGRegSet regs; | ||
29 | - } u; | ||
30 | + TCGRegSet regs; | ||
31 | } TCGArgConstraint; | ||
32 | |||
33 | #define TCG_MAX_OP_ARGS 16 | ||
34 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
35 | index XXXXXXX..XXXXXXX 100644 | ||
36 | --- a/tcg/tcg.c | ||
37 | +++ b/tcg/tcg.c | ||
38 | @@ -XXX,XX +XXX,XX @@ static int get_constraint_priority(const TCGOpDef *def, int k) | ||
39 | return 0; | ||
40 | n = 0; | ||
41 | for(i = 0; i < TCG_TARGET_NB_REGS; i++) { | ||
42 | - if (tcg_regset_test_reg(arg_ct->u.regs, i)) | ||
43 | + if (tcg_regset_test_reg(arg_ct->regs, i)) | ||
44 | n++; | ||
45 | } | ||
46 | } | ||
47 | @@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s) | ||
48 | /* Incomplete TCGTargetOpDef entry. */ | ||
49 | tcg_debug_assert(ct_str != NULL); | ||
50 | |||
51 | - def->args_ct[i].u.regs = 0; | ||
52 | + def->args_ct[i].regs = 0; | ||
53 | def->args_ct[i].ct = 0; | ||
54 | while (*ct_str != '\0') { | ||
55 | switch(*ct_str) { | ||
56 | @@ -XXX,XX +XXX,XX @@ static void liveness_pass_1(TCGContext *s) | ||
57 | pset = la_temp_pref(ts); | ||
58 | set = *pset; | ||
59 | |||
60 | - set &= ct->u.regs; | ||
61 | + set &= ct->regs; | ||
62 | if (ct->ct & TCG_CT_IALIAS) { | ||
63 | set &= op->output_pref[ct->alias_index]; | ||
64 | } | ||
65 | /* If the combination is not possible, restart. */ | ||
66 | if (set == 0) { | ||
67 | - set = ct->u.regs; | ||
68 | + set = ct->regs; | ||
69 | } | ||
70 | *pset = set; | ||
71 | } | ||
72 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op) | ||
73 | return; | ||
74 | } | ||
75 | |||
76 | - dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].u.regs; | ||
77 | - dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].u.regs; | ||
78 | + dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs; | ||
79 | + dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs; | ||
80 | |||
81 | /* Allocate the output register now. */ | ||
82 | if (ots->val_type != TEMP_VAL_REG) { | ||
83 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
84 | } | ||
85 | } | ||
86 | |||
87 | - temp_load(s, ts, arg_ct->u.regs, i_allocated_regs, i_preferred_regs); | ||
88 | + temp_load(s, ts, arg_ct->regs, i_allocated_regs, i_preferred_regs); | ||
89 | reg = ts->reg; | ||
90 | |||
91 | - if (tcg_regset_test_reg(arg_ct->u.regs, reg)) { | ||
92 | + if (tcg_regset_test_reg(arg_ct->regs, reg)) { | ||
93 | /* nothing to do : the constraint is satisfied */ | ||
94 | } else { | ||
95 | allocate_in_reg: | ||
96 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
97 | and move the temporary register into it */ | ||
98 | temp_load(s, ts, tcg_target_available_regs[ts->type], | ||
99 | i_allocated_regs, 0); | ||
100 | - reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs, | ||
101 | + reg = tcg_reg_alloc(s, arg_ct->regs, i_allocated_regs, | ||
102 | o_preferred_regs, ts->indirect_base); | ||
103 | if (!tcg_out_mov(s, ts->type, reg, ts->reg)) { | ||
104 | /* | ||
105 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
106 | && !const_args[arg_ct->alias_index]) { | ||
107 | reg = new_args[arg_ct->alias_index]; | ||
108 | } else if (arg_ct->ct & TCG_CT_NEWREG) { | ||
109 | - reg = tcg_reg_alloc(s, arg_ct->u.regs, | ||
110 | + reg = tcg_reg_alloc(s, arg_ct->regs, | ||
111 | i_allocated_regs | o_allocated_regs, | ||
112 | op->output_pref[k], ts->indirect_base); | ||
113 | } else { | ||
114 | - reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs, | ||
115 | + reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs, | ||
116 | op->output_pref[k], ts->indirect_base); | ||
117 | } | ||
118 | tcg_regset_set_reg(o_allocated_regs, reg); | ||
119 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | ||
120 | index XXXXXXX..XXXXXXX 100644 | ||
121 | --- a/tcg/aarch64/tcg-target.c.inc | ||
122 | +++ b/tcg/aarch64/tcg-target.c.inc | ||
123 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
124 | switch (*ct_str++) { | ||
125 | case 'r': /* general registers */ | ||
126 | ct->ct |= TCG_CT_REG; | ||
127 | - ct->u.regs |= 0xffffffffu; | ||
128 | + ct->regs |= 0xffffffffu; | ||
129 | break; | ||
130 | case 'w': /* advsimd registers */ | ||
131 | ct->ct |= TCG_CT_REG; | ||
132 | - ct->u.regs |= 0xffffffff00000000ull; | ||
133 | + ct->regs |= 0xffffffff00000000ull; | ||
134 | break; | ||
135 | case 'l': /* qemu_ld / qemu_st address, data_reg */ | ||
136 | ct->ct |= TCG_CT_REG; | ||
137 | - ct->u.regs = 0xffffffffu; | ||
138 | + ct->regs = 0xffffffffu; | ||
139 | #ifdef CONFIG_SOFTMMU | ||
140 | /* x0 and x1 will be overwritten when reading the tlb entry, | ||
141 | and x2, and x3 for helper args, better to avoid using them. */ | ||
142 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_X0); | ||
143 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_X1); | ||
144 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_X2); | ||
145 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3); | ||
146 | + tcg_regset_reset_reg(ct->regs, TCG_REG_X0); | ||
147 | + tcg_regset_reset_reg(ct->regs, TCG_REG_X1); | ||
148 | + tcg_regset_reset_reg(ct->regs, TCG_REG_X2); | ||
149 | + tcg_regset_reset_reg(ct->regs, TCG_REG_X3); | ||
150 | #endif | ||
151 | break; | ||
152 | case 'A': /* Valid for arithmetic immediate (positive or negative). */ | ||
153 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
154 | index XXXXXXX..XXXXXXX 100644 | ||
155 | --- a/tcg/arm/tcg-target.c.inc | ||
156 | +++ b/tcg/arm/tcg-target.c.inc | ||
157 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
158 | |||
159 | case 'r': | ||
160 | ct->ct |= TCG_CT_REG; | ||
161 | - ct->u.regs = 0xffff; | ||
162 | + ct->regs = 0xffff; | ||
163 | break; | ||
164 | |||
165 | /* qemu_ld address */ | ||
166 | case 'l': | ||
167 | ct->ct |= TCG_CT_REG; | ||
168 | - ct->u.regs = 0xffff; | ||
169 | + ct->regs = 0xffff; | ||
170 | #ifdef CONFIG_SOFTMMU | ||
171 | /* r0-r2,lr will be overwritten when reading the tlb entry, | ||
172 | so don't use these. */ | ||
173 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | ||
174 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | ||
175 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); | ||
176 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); | ||
177 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14); | ||
178 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R0); | ||
179 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R1); | ||
180 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R2); | ||
181 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R3); | ||
182 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R14); | ||
183 | #endif | ||
184 | break; | ||
185 | |||
186 | /* qemu_st address & data */ | ||
187 | case 's': | ||
188 | ct->ct |= TCG_CT_REG; | ||
189 | - ct->u.regs = 0xffff; | ||
190 | + ct->regs = 0xffff; | ||
191 | /* r0-r2 will be overwritten when reading the tlb entry (softmmu only) | ||
192 | and r0-r1 doing the byte swapping, so don't use these. */ | ||
193 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | ||
194 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | ||
195 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R0); | ||
196 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R1); | ||
197 | #if defined(CONFIG_SOFTMMU) | ||
198 | /* Avoid clashes with registers being used for helper args */ | ||
199 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); | ||
200 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R2); | ||
201 | #if TARGET_LONG_BITS == 64 | ||
202 | /* Avoid clashes with registers being used for helper args */ | ||
203 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); | ||
204 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R3); | ||
205 | #endif | ||
206 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14); | ||
207 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R14); | ||
208 | #endif | ||
209 | break; | ||
210 | |||
211 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | ||
212 | index XXXXXXX..XXXXXXX 100644 | ||
213 | --- a/tcg/i386/tcg-target.c.inc | ||
214 | +++ b/tcg/i386/tcg-target.c.inc | ||
215 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
216 | switch(*ct_str++) { | ||
217 | case 'a': | ||
218 | ct->ct |= TCG_CT_REG; | ||
219 | - tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX); | ||
220 | + tcg_regset_set_reg(ct->regs, TCG_REG_EAX); | ||
221 | break; | ||
222 | case 'b': | ||
223 | ct->ct |= TCG_CT_REG; | ||
224 | - tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX); | ||
225 | + tcg_regset_set_reg(ct->regs, TCG_REG_EBX); | ||
226 | break; | ||
227 | case 'c': | ||
228 | ct->ct |= TCG_CT_REG; | ||
229 | - tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX); | ||
230 | + tcg_regset_set_reg(ct->regs, TCG_REG_ECX); | ||
231 | break; | ||
232 | case 'd': | ||
233 | ct->ct |= TCG_CT_REG; | ||
234 | - tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX); | ||
235 | + tcg_regset_set_reg(ct->regs, TCG_REG_EDX); | ||
236 | break; | ||
237 | case 'S': | ||
238 | ct->ct |= TCG_CT_REG; | ||
239 | - tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI); | ||
240 | + tcg_regset_set_reg(ct->regs, TCG_REG_ESI); | ||
241 | break; | ||
242 | case 'D': | ||
243 | ct->ct |= TCG_CT_REG; | ||
244 | - tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI); | ||
245 | + tcg_regset_set_reg(ct->regs, TCG_REG_EDI); | ||
246 | break; | ||
247 | case 'q': | ||
248 | /* A register that can be used as a byte operand. */ | ||
249 | ct->ct |= TCG_CT_REG; | ||
250 | - ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf; | ||
251 | + ct->regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf; | ||
252 | break; | ||
253 | case 'Q': | ||
254 | /* A register with an addressable second byte (e.g. %ah). */ | ||
255 | ct->ct |= TCG_CT_REG; | ||
256 | - ct->u.regs = 0xf; | ||
257 | + ct->regs = 0xf; | ||
258 | break; | ||
259 | case 'r': | ||
260 | /* A general register. */ | ||
261 | ct->ct |= TCG_CT_REG; | ||
262 | - ct->u.regs |= ALL_GENERAL_REGS; | ||
263 | + ct->regs |= ALL_GENERAL_REGS; | ||
264 | break; | ||
265 | case 'W': | ||
266 | /* With TZCNT/LZCNT, we can have operand-size as an input. */ | ||
267 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
268 | case 'x': | ||
269 | /* A vector register. */ | ||
270 | ct->ct |= TCG_CT_REG; | ||
271 | - ct->u.regs |= ALL_VECTOR_REGS; | ||
272 | + ct->regs |= ALL_VECTOR_REGS; | ||
273 | break; | ||
274 | |||
275 | /* qemu_ld/st address constraint */ | ||
276 | case 'L': | ||
277 | ct->ct |= TCG_CT_REG; | ||
278 | - ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff; | ||
279 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0); | ||
280 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1); | ||
281 | + ct->regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff; | ||
282 | + tcg_regset_reset_reg(ct->regs, TCG_REG_L0); | ||
283 | + tcg_regset_reset_reg(ct->regs, TCG_REG_L1); | ||
284 | break; | ||
285 | |||
286 | case 'e': | ||
287 | diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc | ||
288 | index XXXXXXX..XXXXXXX 100644 | ||
289 | --- a/tcg/mips/tcg-target.c.inc | ||
290 | +++ b/tcg/mips/tcg-target.c.inc | ||
291 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
292 | switch(*ct_str++) { | ||
293 | case 'r': | ||
294 | ct->ct |= TCG_CT_REG; | ||
295 | - ct->u.regs = 0xffffffff; | ||
296 | + ct->regs = 0xffffffff; | ||
297 | break; | ||
298 | case 'L': /* qemu_ld input arg constraint */ | ||
299 | ct->ct |= TCG_CT_REG; | ||
300 | - ct->u.regs = 0xffffffff; | ||
301 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); | ||
302 | + ct->regs = 0xffffffff; | ||
303 | + tcg_regset_reset_reg(ct->regs, TCG_REG_A0); | ||
304 | #if defined(CONFIG_SOFTMMU) | ||
305 | if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { | ||
306 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2); | ||
307 | + tcg_regset_reset_reg(ct->regs, TCG_REG_A2); | ||
308 | } | ||
309 | #endif | ||
310 | break; | ||
311 | case 'S': /* qemu_st constraint */ | ||
312 | ct->ct |= TCG_CT_REG; | ||
313 | - ct->u.regs = 0xffffffff; | ||
314 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); | ||
315 | + ct->regs = 0xffffffff; | ||
316 | + tcg_regset_reset_reg(ct->regs, TCG_REG_A0); | ||
317 | #if defined(CONFIG_SOFTMMU) | ||
318 | if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { | ||
319 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2); | ||
320 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3); | ||
321 | + tcg_regset_reset_reg(ct->regs, TCG_REG_A2); | ||
322 | + tcg_regset_reset_reg(ct->regs, TCG_REG_A3); | ||
323 | } else { | ||
324 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1); | ||
325 | + tcg_regset_reset_reg(ct->regs, TCG_REG_A1); | ||
326 | } | ||
327 | #endif | ||
328 | break; | ||
329 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc | 8 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
330 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
331 | --- a/tcg/ppc/tcg-target.c.inc | 10 | --- a/tcg/ppc/tcg-target.c.inc |
332 | +++ b/tcg/ppc/tcg-target.c.inc | 11 | +++ b/tcg/ppc/tcg-target.c.inc |
333 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | 12 | @@ -XXX,XX +XXX,XX @@ |
334 | switch (*ct_str++) { | 13 | #define TCG_VEC_TMP2 TCG_REG_V1 |
335 | case 'A': case 'B': case 'C': case 'D': | 14 | |
336 | ct->ct |= TCG_CT_REG; | 15 | #define TCG_REG_TB TCG_REG_R31 |
337 | - tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A'); | 16 | -#define USE_REG_TB (TCG_TARGET_REG_BITS == 64) |
338 | + tcg_regset_set_reg(ct->regs, 3 + ct_str[0] - 'A'); | 17 | +#define USE_REG_TB (TCG_TARGET_REG_BITS == 64 && !have_isa_3_00) |
339 | break; | 18 | |
340 | case 'r': | 19 | /* Shorthand for size of a pointer. Avoid promotion to unsigned. */ |
341 | ct->ct |= TCG_CT_REG; | 20 | #define SZP ((int)sizeof(void *)) |
342 | - ct->u.regs = 0xffffffff; | ||
343 | + ct->regs = 0xffffffff; | ||
344 | break; | ||
345 | case 'v': | ||
346 | ct->ct |= TCG_CT_REG; | ||
347 | - ct->u.regs = 0xffffffff00000000ull; | ||
348 | + ct->regs = 0xffffffff00000000ull; | ||
349 | break; | ||
350 | case 'L': /* qemu_ld constraint */ | ||
351 | ct->ct |= TCG_CT_REG; | ||
352 | - ct->u.regs = 0xffffffff; | ||
353 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); | ||
354 | + ct->regs = 0xffffffff; | ||
355 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R3); | ||
356 | #ifdef CONFIG_SOFTMMU | ||
357 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); | ||
358 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5); | ||
359 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R4); | ||
360 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R5); | ||
361 | #endif | ||
362 | break; | ||
363 | case 'S': /* qemu_st constraint */ | ||
364 | ct->ct |= TCG_CT_REG; | ||
365 | - ct->u.regs = 0xffffffff; | ||
366 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); | ||
367 | + ct->regs = 0xffffffff; | ||
368 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R3); | ||
369 | #ifdef CONFIG_SOFTMMU | ||
370 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); | ||
371 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5); | ||
372 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6); | ||
373 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R4); | ||
374 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R5); | ||
375 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R6); | ||
376 | #endif | ||
377 | break; | ||
378 | case 'I': | ||
379 | diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc | ||
380 | index XXXXXXX..XXXXXXX 100644 | ||
381 | --- a/tcg/riscv/tcg-target.c.inc | ||
382 | +++ b/tcg/riscv/tcg-target.c.inc | ||
383 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
384 | switch (*ct_str++) { | ||
385 | case 'r': | ||
386 | ct->ct |= TCG_CT_REG; | ||
387 | - ct->u.regs = 0xffffffff; | ||
388 | + ct->regs = 0xffffffff; | ||
389 | break; | ||
390 | case 'L': | ||
391 | /* qemu_ld/qemu_st constraint */ | ||
392 | ct->ct |= TCG_CT_REG; | ||
393 | - ct->u.regs = 0xffffffff; | ||
394 | + ct->regs = 0xffffffff; | ||
395 | /* qemu_ld/qemu_st uses TCG_REG_TMP0 */ | ||
396 | #if defined(CONFIG_SOFTMMU) | ||
397 | - tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[0]); | ||
398 | - tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[1]); | ||
399 | - tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[2]); | ||
400 | - tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[3]); | ||
401 | - tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[4]); | ||
402 | + tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[0]); | ||
403 | + tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[1]); | ||
404 | + tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[2]); | ||
405 | + tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[3]); | ||
406 | + tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[4]); | ||
407 | #endif | ||
408 | break; | ||
409 | case 'I': | ||
410 | diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc | ||
411 | index XXXXXXX..XXXXXXX 100644 | ||
412 | --- a/tcg/s390/tcg-target.c.inc | ||
413 | +++ b/tcg/s390/tcg-target.c.inc | ||
414 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
415 | switch (*ct_str++) { | ||
416 | case 'r': /* all registers */ | ||
417 | ct->ct |= TCG_CT_REG; | ||
418 | - ct->u.regs = 0xffff; | ||
419 | + ct->regs = 0xffff; | ||
420 | break; | ||
421 | case 'L': /* qemu_ld/st constraint */ | ||
422 | ct->ct |= TCG_CT_REG; | ||
423 | - ct->u.regs = 0xffff; | ||
424 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); | ||
425 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); | ||
426 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); | ||
427 | + ct->regs = 0xffff; | ||
428 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R2); | ||
429 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R3); | ||
430 | + tcg_regset_reset_reg(ct->regs, TCG_REG_R4); | ||
431 | break; | ||
432 | case 'a': /* force R2 for division */ | ||
433 | ct->ct |= TCG_CT_REG; | ||
434 | - ct->u.regs = 0; | ||
435 | - tcg_regset_set_reg(ct->u.regs, TCG_REG_R2); | ||
436 | + ct->regs = 0; | ||
437 | + tcg_regset_set_reg(ct->regs, TCG_REG_R2); | ||
438 | break; | ||
439 | case 'b': /* force R3 for division */ | ||
440 | ct->ct |= TCG_CT_REG; | ||
441 | - ct->u.regs = 0; | ||
442 | - tcg_regset_set_reg(ct->u.regs, TCG_REG_R3); | ||
443 | + ct->regs = 0; | ||
444 | + tcg_regset_set_reg(ct->regs, TCG_REG_R3); | ||
445 | break; | ||
446 | case 'A': | ||
447 | ct->ct |= TCG_CT_CONST_S33; | ||
448 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc | ||
449 | index XXXXXXX..XXXXXXX 100644 | ||
450 | --- a/tcg/sparc/tcg-target.c.inc | ||
451 | +++ b/tcg/sparc/tcg-target.c.inc | ||
452 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
453 | switch (*ct_str++) { | ||
454 | case 'r': | ||
455 | ct->ct |= TCG_CT_REG; | ||
456 | - ct->u.regs = 0xffffffff; | ||
457 | + ct->regs = 0xffffffff; | ||
458 | break; | ||
459 | case 'R': | ||
460 | ct->ct |= TCG_CT_REG; | ||
461 | - ct->u.regs = ALL_64; | ||
462 | + ct->regs = ALL_64; | ||
463 | break; | ||
464 | case 'A': /* qemu_ld/st address constraint */ | ||
465 | ct->ct |= TCG_CT_REG; | ||
466 | - ct->u.regs = TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff; | ||
467 | + ct->regs = TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff; | ||
468 | reserve_helpers: | ||
469 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0); | ||
470 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1); | ||
471 | - tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2); | ||
472 | + tcg_regset_reset_reg(ct->regs, TCG_REG_O0); | ||
473 | + tcg_regset_reset_reg(ct->regs, TCG_REG_O1); | ||
474 | + tcg_regset_reset_reg(ct->regs, TCG_REG_O2); | ||
475 | break; | ||
476 | case 's': /* qemu_st data 32-bit constraint */ | ||
477 | ct->ct |= TCG_CT_REG; | ||
478 | - ct->u.regs = 0xffffffff; | ||
479 | + ct->regs = 0xffffffff; | ||
480 | goto reserve_helpers; | ||
481 | case 'S': /* qemu_st data 64-bit constraint */ | ||
482 | ct->ct |= TCG_CT_REG; | ||
483 | - ct->u.regs = ALL_64; | ||
484 | + ct->regs = ALL_64; | ||
485 | goto reserve_helpers; | ||
486 | case 'I': | ||
487 | ct->ct |= TCG_CT_CONST_S11; | ||
488 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
489 | index XXXXXXX..XXXXXXX 100644 | ||
490 | --- a/tcg/tci/tcg-target.c.inc | ||
491 | +++ b/tcg/tci/tcg-target.c.inc | ||
492 | @@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct, | ||
493 | case 'L': /* qemu_ld constraint */ | ||
494 | case 'S': /* qemu_st constraint */ | ||
495 | ct->ct |= TCG_CT_REG; | ||
496 | - ct->u.regs = BIT(TCG_TARGET_NB_REGS) - 1; | ||
497 | + ct->regs = BIT(TCG_TARGET_NB_REGS) - 1; | ||
498 | break; | ||
499 | default: | ||
500 | return NULL; | ||
501 | -- | 21 | -- |
502 | 2.25.1 | 22 | 2.34.1 |
503 | |||
504 | diff view generated by jsdifflib |
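A rough sketch of the X-macro scheme behind tcg-constr.c.inc (the expansion details are assumptions: QEMU re-includes the per-target header with the C_O*_I* macros redefined, whereas this sketch collapses everything into one list). Each constraint set is written once and expanded twice, into an enum identifier and into the constraint strings the existing parser consumes:

    #include <stdio.h>

    #define CONSTR_SETS(X) \
        X(r, r, r)         \
        X(r, r, ri)        \
        X(r, rI, rT)

    #define AS_ENUM(O, I1, I2) C_##O##_##I1##_##I2,
    enum { CONSTR_SETS(AS_ENUM) NB_CONSTR_SETS };
    #undef AS_ENUM

    #define AS_STR(O, I1, I2) { #O, #I1, #I2 },
    static const char *constr_str[][3] = { CONSTR_SETS(AS_STR) };
    #undef AS_STR

    int main(void)
    {
        /* tcg_target_op_def() would return C_r_r_ri for e.g. add_i32. */
        printf("%d sets; C_r_r_ri -> %s/%s/%s\n", (int)NB_CONSTR_SETS,
               constr_str[C_r_r_ri][0], constr_str[C_r_r_ri][1],
               constr_str[C_r_r_ri][2]);
        return 0;
    }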
1 | Begin disconnecting CONFIG_SOFTMMU from !CONFIG_USER_ONLY. | ||
---|---|---|---|
2 | Introduce a variable which can be set at startup to select | ||
3 | one method or another for user-only. | ||
4 | |||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
2 | --- | 7 | --- |
3 | include/tcg/tcg.h | 1 + | 8 | include/tcg/tcg.h | 8 ++++++-- |
4 | tcg/tcg-op-gvec.c | 125 ++++++++++++++++++---------------------------- | 9 | tcg/tcg-op-ldst.c | 14 +++++++------- |
5 | tcg/tcg.c | 8 +++ | 10 | tcg/tcg.c | 9 ++++++--- |
6 | 3 files changed, 58 insertions(+), 76 deletions(-) | 11 | 3 files changed, 19 insertions(+), 12 deletions(-) |
7 | 12 | ||
8 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | 13 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h |
9 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
10 | --- a/include/tcg/tcg.h | 15 | --- a/include/tcg/tcg.h |
11 | +++ b/include/tcg/tcg.h | 16 | +++ b/include/tcg/tcg.h |
12 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_i64 tcg_constant_i64(int64_t val) | 17 | @@ -XXX,XX +XXX,XX @@ struct TCGContext { |
18 | int nb_ops; | ||
19 | TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */ | ||
20 | |||
21 | -#ifdef CONFIG_SOFTMMU | ||
22 | int page_mask; | ||
23 | uint8_t page_bits; | ||
24 | uint8_t tlb_dyn_max_bits; | ||
25 | -#endif | ||
26 | uint8_t insn_start_words; | ||
27 | TCGBar guest_mo; | ||
28 | |||
29 | @@ -XXX,XX +XXX,XX @@ static inline bool temp_readonly(TCGTemp *ts) | ||
30 | return ts->kind >= TEMP_FIXED; | ||
13 | } | 31 | } |
14 | 32 | ||
15 | TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val); | 33 | +#ifdef CONFIG_USER_ONLY |
16 | +TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val); | 34 | +extern bool tcg_use_softmmu; |
17 | 35 | +#else | |
18 | #if UINTPTR_MAX == UINT32_MAX | 36 | +#define tcg_use_softmmu true |
19 | # define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x))) | 37 | +#endif |
20 | diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c | 38 | + |
39 | extern __thread TCGContext *tcg_ctx; | ||
40 | extern const void *tcg_code_gen_epilogue; | ||
41 | extern uintptr_t tcg_splitwx_diff; | ||
42 | diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c | ||
21 | index XXXXXXX..XXXXXXX 100644 | 43 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/tcg/tcg-op-gvec.c | 44 | --- a/tcg/tcg-op-ldst.c |
23 | +++ b/tcg/tcg-op-gvec.c | 45 | +++ b/tcg/tcg-op-ldst.c |
24 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs, | 46 | @@ -XXX,XX +XXX,XX @@ |
25 | gen_helper_gvec_2 *fn) | 47 | |
48 | static void check_max_alignment(unsigned a_bits) | ||
26 | { | 49 | { |
27 | TCGv_ptr a0, a1; | 50 | -#if defined(CONFIG_SOFTMMU) |
28 | - TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data)); | 51 | /* |
29 | + TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); | 52 | * The requested alignment cannot overlap the TLB flags. |
30 | 53 | * FIXME: Must keep the count up-to-date with "exec/cpu-all.h". | |
31 | a0 = tcg_temp_new_ptr(); | 54 | */ |
32 | a1 = tcg_temp_new_ptr(); | 55 | - tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits); |
33 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs, | 56 | -#endif |
34 | 57 | + if (tcg_use_softmmu) { | |
35 | tcg_temp_free_ptr(a0); | 58 | + tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits); |
36 | tcg_temp_free_ptr(a1); | 59 | + } |
37 | - tcg_temp_free_i32(desc); | ||
38 | } | 60 | } |
39 | 61 | ||
40 | /* Generate a call to a gvec-style helper with two vector operands | 62 | static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) |
41 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c, | 63 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx, |
42 | gen_helper_gvec_2i *fn) | 64 | */ |
65 | static bool use_two_i64_for_i128(MemOp mop) | ||
43 | { | 66 | { |
44 | TCGv_ptr a0, a1; | 67 | -#ifdef CONFIG_SOFTMMU |
45 | - TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data)); | 68 | /* Two softmmu tlb lookups is larger than one function call. */ |
46 | + TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); | 69 | - return false; |
47 | 70 | -#else | |
48 | a0 = tcg_temp_new_ptr(); | 71 | + if (tcg_use_softmmu) { |
49 | a1 = tcg_temp_new_ptr(); | 72 | + return false; |
50 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c, | 73 | + } |
51 | 74 | + | |
52 | tcg_temp_free_ptr(a0); | 75 | /* |
53 | tcg_temp_free_ptr(a1); | 76 | * For user-only, two 64-bit operations may well be smaller than a call. |
54 | - tcg_temp_free_i32(desc); | 77 | * Determine if that would be legal for the requested atomicity. |
78 | @@ -XXX,XX +XXX,XX @@ static bool use_two_i64_for_i128(MemOp mop) | ||
79 | default: | ||
80 | g_assert_not_reached(); | ||
81 | } | ||
82 | -#endif | ||
55 | } | 83 | } |
56 | 84 | ||
57 | /* Generate a call to a gvec-style helper with three vector operands. */ | 85 | static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) |
58 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
59 | gen_helper_gvec_3 *fn) | ||
60 | { | ||
61 | TCGv_ptr a0, a1, a2; | ||
62 | - TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data)); | ||
63 | + TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); | ||
64 | |||
65 | a0 = tcg_temp_new_ptr(); | ||
66 | a1 = tcg_temp_new_ptr(); | ||
67 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
68 | tcg_temp_free_ptr(a0); | ||
69 | tcg_temp_free_ptr(a1); | ||
70 | tcg_temp_free_ptr(a2); | ||
71 | - tcg_temp_free_i32(desc); | ||
72 | } | ||
73 | |||
74 | /* Generate a call to a gvec-style helper with four vector operands. */ | ||
75 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
76 | int32_t data, gen_helper_gvec_4 *fn) | ||
77 | { | ||
78 | TCGv_ptr a0, a1, a2, a3; | ||
79 | - TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data)); | ||
80 | + TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); | ||
81 | |||
82 | a0 = tcg_temp_new_ptr(); | ||
83 | a1 = tcg_temp_new_ptr(); | ||
84 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
85 | tcg_temp_free_ptr(a1); | ||
86 | tcg_temp_free_ptr(a2); | ||
87 | tcg_temp_free_ptr(a3); | ||
88 | - tcg_temp_free_i32(desc); | ||
89 | } | ||
90 | |||
91 | /* Generate a call to a gvec-style helper with five vector operands. */ | ||
92 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
93 | uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn) | ||
94 | { | ||
95 | TCGv_ptr a0, a1, a2, a3, a4; | ||
96 | - TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data)); | ||
97 | + TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); | ||
98 | |||
99 | a0 = tcg_temp_new_ptr(); | ||
100 | a1 = tcg_temp_new_ptr(); | ||
101 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
102 | tcg_temp_free_ptr(a2); | ||
103 | tcg_temp_free_ptr(a3); | ||
104 | tcg_temp_free_ptr(a4); | ||
105 | - tcg_temp_free_i32(desc); | ||
106 | } | ||
107 | |||
108 | /* Generate a call to a gvec-style helper with three vector operands | ||
109 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs, | ||
110 | int32_t data, gen_helper_gvec_2_ptr *fn) | ||
111 | { | ||
112 | TCGv_ptr a0, a1; | ||
113 | - TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data)); | ||
114 | + TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); | ||
115 | |||
116 | a0 = tcg_temp_new_ptr(); | ||
117 | a1 = tcg_temp_new_ptr(); | ||
118 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs, | ||
119 | |||
120 | tcg_temp_free_ptr(a0); | ||
121 | tcg_temp_free_ptr(a1); | ||
122 | - tcg_temp_free_i32(desc); | ||
123 | } | ||
124 | |||
125 | /* Generate a call to a gvec-style helper with three vector operands | ||
126 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
127 | int32_t data, gen_helper_gvec_3_ptr *fn) | ||
128 | { | ||
129 | TCGv_ptr a0, a1, a2; | ||
130 | - TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data)); | ||
131 | + TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); | ||
132 | |||
133 | a0 = tcg_temp_new_ptr(); | ||
134 | a1 = tcg_temp_new_ptr(); | ||
135 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
136 | tcg_temp_free_ptr(a0); | ||
137 | tcg_temp_free_ptr(a1); | ||
138 | tcg_temp_free_ptr(a2); | ||
139 | - tcg_temp_free_i32(desc); | ||
140 | } | ||
141 | |||
142 | /* Generate a call to a gvec-style helper with four vector operands | ||
143 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
144 | gen_helper_gvec_4_ptr *fn) | ||
145 | { | ||
146 | TCGv_ptr a0, a1, a2, a3; | ||
147 | - TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data)); | ||
148 | + TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); | ||
149 | |||
150 | a0 = tcg_temp_new_ptr(); | ||
151 | a1 = tcg_temp_new_ptr(); | ||
152 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
153 | tcg_temp_free_ptr(a1); | ||
154 | tcg_temp_free_ptr(a2); | ||
155 | tcg_temp_free_ptr(a3); | ||
156 | - tcg_temp_free_i32(desc); | ||
157 | } | ||
158 | |||
159 | /* Generate a call to a gvec-style helper with five vector operands | ||
160 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
161 | gen_helper_gvec_5_ptr *fn) | ||
162 | { | ||
163 | TCGv_ptr a0, a1, a2, a3, a4; | ||
164 | - TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data)); | ||
165 | + TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); | ||
166 | |||
167 | a0 = tcg_temp_new_ptr(); | ||
168 | a1 = tcg_temp_new_ptr(); | ||
169 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs, | ||
170 | tcg_temp_free_ptr(a2); | ||
171 | tcg_temp_free_ptr(a3); | ||
172 | tcg_temp_free_ptr(a4); | ||
173 | - tcg_temp_free_i32(desc); | ||
174 | } | ||
175 | |||
176 | /* Return true if we want to implement something of OPRSZ bytes | ||
177 | @@ -XXX,XX +XXX,XX @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, | ||
178 | || (TCG_TARGET_REG_BITS == 64 | ||
179 | && (in_c == 0 || in_c == -1 | ||
180 | || !check_size_impl(oprsz, 4)))) { | ||
181 | - t_64 = tcg_const_i64(in_c); | ||
182 | + t_64 = tcg_constant_i64(in_c); | ||
183 | } else { | ||
184 | - t_32 = tcg_const_i32(in_c); | ||
185 | + t_32 = tcg_constant_i32(in_c); | ||
186 | } | ||
187 | } | ||
188 | |||
189 | @@ -XXX,XX +XXX,XX @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, | ||
190 | /* Otherwise implement out of line. */ | ||
191 | t_ptr = tcg_temp_new_ptr(); | ||
192 | tcg_gen_addi_ptr(t_ptr, cpu_env, dofs); | ||
193 | - t_desc = tcg_const_i32(simd_desc(oprsz, maxsz, 0)); | ||
194 | + t_desc = tcg_constant_i32(simd_desc(oprsz, maxsz, 0)); | ||
195 | |||
196 | if (vece == MO_64) { | ||
197 | if (in_64) { | ||
198 | gen_helper_gvec_dup64(t_ptr, t_desc, in_64); | ||
199 | } else { | ||
200 | - t_64 = tcg_const_i64(in_c); | ||
201 | + t_64 = tcg_constant_i64(in_c); | ||
202 | gen_helper_gvec_dup64(t_ptr, t_desc, t_64); | ||
203 | - tcg_temp_free_i64(t_64); | ||
204 | } | ||
205 | } else { | ||
206 | typedef void dup_fn(TCGv_ptr, TCGv_i32, TCGv_i32); | ||
207 | @@ -XXX,XX +XXX,XX @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, | ||
208 | |||
209 | if (in_32) { | ||
210 | fns[vece](t_ptr, t_desc, in_32); | ||
211 | - } else { | ||
212 | + } else if (in_64) { | ||
213 | t_32 = tcg_temp_new_i32(); | ||
214 | - if (in_64) { | ||
215 | - tcg_gen_extrl_i64_i32(t_32, in_64); | ||
216 | - } else if (vece == MO_8) { | ||
217 | - tcg_gen_movi_i32(t_32, in_c & 0xff); | ||
218 | - } else if (vece == MO_16) { | ||
219 | - tcg_gen_movi_i32(t_32, in_c & 0xffff); | ||
220 | - } else { | ||
221 | - tcg_gen_movi_i32(t_32, in_c); | ||
222 | - } | ||
223 | + tcg_gen_extrl_i64_i32(t_32, in_64); | ||
224 | fns[vece](t_ptr, t_desc, t_32); | ||
225 | tcg_temp_free_i32(t_32); | ||
226 | + } else { | ||
227 | + if (vece == MO_8) { | ||
228 | + in_c &= 0xff; | ||
229 | + } else if (vece == MO_16) { | ||
230 | + in_c &= 0xffff; | ||
231 | + } | ||
232 | + t_32 = tcg_constant_i32(in_c); | ||
233 | + fns[vece](t_ptr, t_desc, t_32); | ||
234 | } | ||
235 | } | ||
236 | |||
237 | tcg_temp_free_ptr(t_ptr); | ||
238 | - tcg_temp_free_i32(t_desc); | ||
239 | return; | ||
240 | |||
241 | done: | ||
242 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz, | ||
243 | if (g->fno) { | ||
244 | tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno); | ||
245 | } else { | ||
246 | - TCGv_i64 tcg_c = tcg_const_i64(c); | ||
247 | + TCGv_i64 tcg_c = tcg_constant_i64(c); | ||
248 | tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz, | ||
249 | maxsz, c, g->fnoi); | ||
250 | - tcg_temp_free_i64(tcg_c); | ||
251 | } | ||
252 | oprsz = maxsz; | ||
253 | } | ||
254 | @@ -XXX,XX +XXX,XX @@ static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m) | ||
255 | |||
256 | void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
257 | { | ||
258 | - TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80)); | ||
259 | + TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80)); | ||
260 | gen_addv_mask(d, a, b, m); | ||
261 | - tcg_temp_free_i64(m); | ||
262 | } | ||
263 | |||
264 | void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
265 | { | ||
266 | - TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000)); | ||
267 | + TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000)); | ||
268 | gen_addv_mask(d, a, b, m); | ||
269 | - tcg_temp_free_i64(m); | ||
270 | } | ||
271 | |||
272 | void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
273 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
274 | void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
275 | int64_t c, uint32_t oprsz, uint32_t maxsz) | ||
276 | { | ||
277 | - TCGv_i64 tmp = tcg_const_i64(c); | ||
278 | + TCGv_i64 tmp = tcg_constant_i64(c); | ||
279 | tcg_gen_gvec_adds(vece, dofs, aofs, tmp, oprsz, maxsz); | ||
280 | - tcg_temp_free_i64(tmp); | ||
281 | } | ||
282 | |||
283 | static const TCGOpcode vecop_list_sub[] = { INDEX_op_sub_vec, 0 }; | ||
284 | @@ -XXX,XX +XXX,XX @@ static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m) | ||
285 | |||
286 | void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
287 | { | ||
288 | - TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80)); | ||
289 | + TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80)); | ||
290 | gen_subv_mask(d, a, b, m); | ||
291 | - tcg_temp_free_i64(m); | ||
292 | } | ||
293 | |||
294 | void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
295 | { | ||
296 | - TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000)); | ||
297 | + TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000)); | ||
298 | gen_subv_mask(d, a, b, m); | ||
299 | - tcg_temp_free_i64(m); | ||
300 | } | ||
301 | |||
302 | void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
303 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
304 | void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
305 | int64_t c, uint32_t oprsz, uint32_t maxsz) | ||
306 | { | ||
307 | - TCGv_i64 tmp = tcg_const_i64(c); | ||
308 | + TCGv_i64 tmp = tcg_constant_i64(c); | ||
309 | tcg_gen_gvec_muls(vece, dofs, aofs, tmp, oprsz, maxsz); | ||
310 | - tcg_temp_free_i64(tmp); | ||
311 | } | ||
312 | |||
313 | void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
314 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
315 | |||
316 | static void tcg_gen_usadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
317 | { | ||
318 | - TCGv_i32 max = tcg_const_i32(-1); | ||
319 | + TCGv_i32 max = tcg_constant_i32(-1); | ||
320 | tcg_gen_add_i32(d, a, b); | ||
321 | tcg_gen_movcond_i32(TCG_COND_LTU, d, d, a, max, d); | ||
322 | - tcg_temp_free_i32(max); | ||
323 | } | ||
324 | |||
325 | static void tcg_gen_usadd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
326 | { | ||
327 | - TCGv_i64 max = tcg_const_i64(-1); | ||
328 | + TCGv_i64 max = tcg_constant_i64(-1); | ||
329 | tcg_gen_add_i64(d, a, b); | ||
330 | tcg_gen_movcond_i64(TCG_COND_LTU, d, d, a, max, d); | ||
331 | - tcg_temp_free_i64(max); | ||
332 | } | ||
333 | |||
334 | void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
335 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
336 | |||
337 | static void tcg_gen_ussub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
338 | { | ||
339 | - TCGv_i32 min = tcg_const_i32(0); | ||
340 | + TCGv_i32 min = tcg_constant_i32(0); | ||
341 | tcg_gen_sub_i32(d, a, b); | ||
342 | tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, min, d); | ||
343 | - tcg_temp_free_i32(min); | ||
344 | } | ||
345 | |||
346 | static void tcg_gen_ussub_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
347 | { | ||
348 | - TCGv_i64 min = tcg_const_i64(0); | ||
349 | + TCGv_i64 min = tcg_constant_i64(0); | ||
350 | tcg_gen_sub_i64(d, a, b); | ||
351 | tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, min, d); | ||
352 | - tcg_temp_free_i64(min); | ||
353 | } | ||
354 | |||
355 | void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
356 | @@ -XXX,XX +XXX,XX @@ static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m) | ||
357 | |||
358 | void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 b) | ||
359 | { | ||
360 | - TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80)); | ||
361 | + TCGv_i64 m = tcg_constant_i64(dup_const(MO_8, 0x80)); | ||
362 | gen_negv_mask(d, b, m); | ||
363 | - tcg_temp_free_i64(m); | ||
364 | } | ||
365 | |||
366 | void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b) | ||
367 | { | ||
368 | - TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000)); | ||
369 | + TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000)); | ||
370 | gen_negv_mask(d, b, m); | ||
371 | - tcg_temp_free_i64(m); | ||
372 | } | ||
373 | |||
374 | void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b) | ||
375 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
376 | void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
377 | int64_t c, uint32_t oprsz, uint32_t maxsz) | ||
378 | { | ||
379 | - TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c)); | ||
380 | + TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c)); | ||
381 | tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands); | ||
382 | - tcg_temp_free_i64(tmp); | ||
383 | } | ||
384 | |||
385 | static const GVecGen2s gop_xors = { | ||
386 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
387 | void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
388 | int64_t c, uint32_t oprsz, uint32_t maxsz) | ||
389 | { | ||
390 | - TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c)); | ||
391 | + TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c)); | ||
392 | tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors); | ||
393 | - tcg_temp_free_i64(tmp); | ||
394 | } | ||
395 | |||
396 | static const GVecGen2s gop_ors = { | ||
397 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
398 | void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
399 | int64_t c, uint32_t oprsz, uint32_t maxsz) | ||
400 | { | ||
401 | - TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c)); | ||
402 | + TCGv_i64 tmp = tcg_constant_i64(dup_const(vece, c)); | ||
403 | tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors); | ||
404 | - tcg_temp_free_i64(tmp); | ||
405 | } | ||
406 | |||
407 | void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c) | ||
408 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d, | ||
409 | TCGv_vec a, TCGv_vec b) | ||
410 | { | ||
411 | TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
412 | + TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1); | ||
413 | |||
414 | - tcg_gen_dupi_vec(vece, t, (8 << vece) - 1); | ||
415 | - tcg_gen_and_vec(vece, t, t, b); | ||
416 | + tcg_gen_and_vec(vece, t, b, m); | ||
417 | tcg_gen_shlv_vec(vece, d, a, t); | ||
418 | tcg_temp_free_vec(t); | ||
419 | } | ||
420 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d, | ||
421 | TCGv_vec a, TCGv_vec b) | ||
422 | { | ||
423 | TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
424 | + TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1); | ||
425 | |||
426 | - tcg_gen_dupi_vec(vece, t, (8 << vece) - 1); | ||
427 | - tcg_gen_and_vec(vece, t, t, b); | ||
428 | + tcg_gen_and_vec(vece, t, b, m); | ||
429 | tcg_gen_shrv_vec(vece, d, a, t); | ||
430 | tcg_temp_free_vec(t); | ||
431 | } | ||
432 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d, | ||
433 | TCGv_vec a, TCGv_vec b) | ||
434 | { | ||
435 | TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
436 | + TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1); | ||
437 | |||
438 | - tcg_gen_dupi_vec(vece, t, (8 << vece) - 1); | ||
439 | - tcg_gen_and_vec(vece, t, t, b); | ||
440 | + tcg_gen_and_vec(vece, t, b, m); | ||
441 | tcg_gen_sarv_vec(vece, d, a, t); | ||
442 | tcg_temp_free_vec(t); | ||
443 | } | ||
444 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_rotlv_mod_vec(unsigned vece, TCGv_vec d, | ||
445 | TCGv_vec a, TCGv_vec b) | ||
446 | { | ||
447 | TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
448 | + TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1); | ||
449 | |||
450 | - tcg_gen_dupi_vec(vece, t, (8 << vece) - 1); | ||
451 | - tcg_gen_and_vec(vece, t, t, b); | ||
452 | + tcg_gen_and_vec(vece, t, b, m); | ||
453 | tcg_gen_rotlv_vec(vece, d, a, t); | ||
454 | tcg_temp_free_vec(t); | ||
455 | } | ||
456 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_rotrv_mod_vec(unsigned vece, TCGv_vec d, | ||
457 | TCGv_vec a, TCGv_vec b) | ||
458 | { | ||
459 | TCGv_vec t = tcg_temp_new_vec_matching(d); | ||
460 | + TCGv_vec m = tcg_constant_vec_matching(d, vece, (8 << vece) - 1); | ||
461 | |||
462 | - tcg_gen_dupi_vec(vece, t, (8 << vece) - 1); | ||
463 | - tcg_gen_and_vec(vece, t, t, b); | ||
464 | + tcg_gen_and_vec(vece, t, b, m); | ||
465 | tcg_gen_rotrv_vec(vece, d, a, t); | ||
466 | tcg_temp_free_vec(t); | ||
467 | } | ||
468 | diff --git a/tcg/tcg.c b/tcg/tcg.c | 86 | diff --git a/tcg/tcg.c b/tcg/tcg.c |
469 | index XXXXXXX..XXXXXXX 100644 | 87 | index XXXXXXX..XXXXXXX 100644 |
470 | --- a/tcg/tcg.c | 88 | --- a/tcg/tcg.c |
471 | +++ b/tcg/tcg.c | 89 | +++ b/tcg/tcg.c |
472 | @@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val) | 90 | @@ -XXX,XX +XXX,XX @@ static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc, |
473 | return temp_tcgv_vec(tcg_constant_internal(type, val)); | 91 | MemOp host_atom, bool allow_two_ops) |
92 | __attribute__((unused)); | ||
93 | |||
94 | +#ifdef CONFIG_USER_ONLY | ||
95 | +bool tcg_use_softmmu; | ||
96 | +#endif | ||
97 | + | ||
98 | TCGContext tcg_init_ctx; | ||
99 | __thread TCGContext *tcg_ctx; | ||
100 | |||
101 | @@ -XXX,XX +XXX,XX @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which) | ||
102 | return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]); | ||
474 | } | 103 | } |
475 | 104 | ||
476 | +TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val) | 105 | -#if defined(CONFIG_SOFTMMU) && !defined(CONFIG_TCG_INTERPRETER) |
477 | +{ | 106 | -static int tlb_mask_table_ofs(TCGContext *s, int which) |
478 | + TCGTemp *t = tcgv_vec_temp(match); | 107 | +static int __attribute__((unused)) |
479 | + | 108 | +tlb_mask_table_ofs(TCGContext *s, int which) |
480 | + tcg_debug_assert(t->temp_allocated != 0); | ||
481 | + return tcg_constant_vec(t->base_type, vece, val); | ||
482 | +} | ||
483 | + | ||
484 | TCGv_i32 tcg_const_i32(int32_t val) | ||
485 | { | 109 | { |
486 | TCGv_i32 t0; | 110 | return (offsetof(CPUNegativeOffsetState, tlb.f[which]) - |
111 | sizeof(CPUNegativeOffsetState)); | ||
112 | } | ||
113 | -#endif | ||
114 | |||
115 | /* Signal overflow, starting over with fewer guest insns. */ | ||
116 | static G_NORETURN | ||
487 | -- | 117 | -- |
488 | 2.25.1 | 118 | 2.34.1 |
489 | 119 | ||
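A note on the left-hand conversion above: tcg_const_i32() and friends allocate a fresh temporary that the caller must free, while tcg_constant_i32() returns an interned, read-only temporary owned by the translation context, which is why every tcg_temp_free_*() of a constant disappears in this diff. A minimal sketch of such value interning in plain C follows; the Temp type and pool are invented for illustration, not TCG's real structures.

    #include <stdint.h>
    #include <stdlib.h>

    /* One shared, read-only node per value; callers never free these. */
    typedef struct Temp {
        int64_t val;
        int is_const;
        struct Temp *next;
    } Temp;

    static Temp *const_pool;   /* a real implementation would hash by value */

    Temp *temp_constant(int64_t val)
    {
        for (Temp *t = const_pool; t; t = t->next) {
            if (t->val == val) {
                return t;                /* reuse the interned node */
            }
        }
        Temp *t = calloc(1, sizeof(Temp));
        t->val = val;
        t->is_const = 1;
        t->next = const_pool;
        const_pool = t;
        return t;                        /* freed only with the whole context */
    }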
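The right-hand patch rests on one small trick: user-only builds get a real variable, while system builds define tcg_use_softmmu to the constant true, so each converted "if (tcg_use_softmmu)" still folds back to the old unconditional code there. A compilable sketch of the pattern, assuming the same CONFIG_USER_ONLY convention; emit_load() and its bodies are invented stand-ins for the code generator.

    #include <stdbool.h>
    #include <stdio.h>

    #ifdef CONFIG_USER_ONLY
    bool tcg_use_softmmu;            /* chosen once at startup */
    #else
    #define tcg_use_softmmu true     /* system mode: compile-time constant */
    #endif

    static void emit_load(void)
    {
        if (tcg_use_softmmu) {
            puts("emit TLB lookup + load");      /* softmmu slow path */
        } else {
            puts("emit guest_base add + load");  /* direct user-only path */
        }
    }

    int main(void)
    {
    #ifdef CONFIG_USER_ONLY
        tcg_use_softmmu = false;     /* or true, to exercise the softmmu path */
    #endif
        emit_load();
        return 0;
    }

In a system-mode build the else branch is statically dead, so the generated object code is unchanged from the #ifdef version it replaces.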
1 | These are easier to set and test when they have their own fields. | 1 | Provide a define to allow !tcg_use_softmmu code paths to |
---|---|---|---|
2 | Reduce the size of alias_index and sort_index to 4 bits, which is | 2 | compile in system mode, but require that they be eliminated at build time. |
3 | sufficient for TCG_MAX_OP_ARGS. This leaves only the bits indicating | ||
4 | constants within the ct field. | ||
5 | 3 | ||
6 | Move all initialization to allocation time, rather than initializing | 6 | --- |
7 | individual fields in process_op_defs. | ||
8 | |||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 6 | --- |
11 | include/tcg/tcg.h | 14 +++++++------- | 7 | tcg/tcg.c | 4 ++++ |
12 | tcg/tcg.c | 28 ++++++++++++---------------- | 8 | 1 file changed, 4 insertions(+) |
13 | 2 files changed, 19 insertions(+), 23 deletions(-) | ||
14 | 9 | ||
15 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/include/tcg/tcg.h | ||
18 | +++ b/include/tcg/tcg.h | ||
19 | @@ -XXX,XX +XXX,XX @@ int64_t tcg_cpu_exec_time(void); | ||
20 | void tcg_dump_info(void); | ||
21 | void tcg_dump_op_count(void); | ||
22 | |||
23 | -#define TCG_CT_ALIAS 0x80 | ||
24 | -#define TCG_CT_IALIAS 0x40 | ||
25 | -#define TCG_CT_NEWREG 0x20 /* output requires a new register */ | ||
26 | -#define TCG_CT_CONST 0x02 /* any constant of register size */ | ||
27 | +#define TCG_CT_CONST 1 /* any constant of register size */ | ||
28 | |||
29 | typedef struct TCGArgConstraint { | ||
30 | - uint16_t ct; | ||
31 | - uint8_t alias_index; | ||
32 | - uint8_t sort_index; | ||
33 | + unsigned ct : 16; | ||
34 | + unsigned alias_index : 4; | ||
35 | + unsigned sort_index : 4; | ||
36 | + bool oalias : 1; | ||
37 | + bool ialias : 1; | ||
38 | + bool newreg : 1; | ||
39 | TCGRegSet regs; | ||
40 | } TCGArgConstraint; | ||
41 | |||
42 | diff --git a/tcg/tcg.c b/tcg/tcg.c | 10 | diff --git a/tcg/tcg.c b/tcg/tcg.c |
43 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
44 | --- a/tcg/tcg.c | 12 | --- a/tcg/tcg.c |
45 | +++ b/tcg/tcg.c | 13 | +++ b/tcg/tcg.c |
46 | @@ -XXX,XX +XXX,XX @@ void tcg_context_init(TCGContext *s) | 14 | @@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece); |
47 | total_args += n; | 15 | static int tcg_out_ldst_finalize(TCGContext *s); |
48 | } | 16 | #endif |
49 | 17 | ||
50 | - args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args); | 18 | +#ifndef CONFIG_USER_ONLY |
51 | + args_ct = g_new0(TCGArgConstraint, total_args); | 19 | +#define guest_base ({ qemu_build_not_reached(); (uintptr_t)0; }) |
52 | 20 | +#endif | |
53 | for(op = 0; op < NB_OPS; op++) { | 21 | + |
54 | def = &tcg_op_defs[op]; | 22 | typedef struct TCGLdstHelperParam { |
55 | @@ -XXX,XX +XXX,XX @@ static int get_constraint_priority(const TCGOpDef *def, int k) | 23 | TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg); |
56 | const TCGArgConstraint *arg_ct = &def->args_ct[k]; | 24 | unsigned ntmp; |
57 | int n; | ||
58 | |||
59 | - if (arg_ct->ct & TCG_CT_ALIAS) { | ||
60 | + if (arg_ct->oalias) { | ||
61 | /* an alias is equivalent to a single register */ | ||
62 | n = 1; | ||
63 | } else { | ||
64 | @@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s) | ||
65 | /* Incomplete TCGTargetOpDef entry. */ | ||
66 | tcg_debug_assert(ct_str != NULL); | ||
67 | |||
68 | - def->args_ct[i].regs = 0; | ||
69 | - def->args_ct[i].ct = 0; | ||
70 | while (*ct_str != '\0') { | ||
71 | switch(*ct_str) { | ||
72 | case '0' ... '9': | ||
73 | @@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s) | ||
74 | tcg_debug_assert(ct_str == tdefs->args_ct_str[i]); | ||
75 | tcg_debug_assert(oarg < def->nb_oargs); | ||
76 | tcg_debug_assert(def->args_ct[oarg].regs != 0); | ||
77 | - /* TCG_CT_ALIAS is for the output arguments. | ||
78 | - The input is tagged with TCG_CT_IALIAS. */ | ||
79 | def->args_ct[i] = def->args_ct[oarg]; | ||
80 | - def->args_ct[oarg].ct |= TCG_CT_ALIAS; | ||
81 | + /* The output sets oalias. */ | ||
82 | + def->args_ct[oarg].oalias = true; | ||
83 | def->args_ct[oarg].alias_index = i; | ||
84 | - def->args_ct[i].ct |= TCG_CT_IALIAS; | ||
85 | + /* The input sets ialias. */ | ||
86 | + def->args_ct[i].ialias = true; | ||
87 | def->args_ct[i].alias_index = oarg; | ||
88 | } | ||
89 | ct_str++; | ||
90 | break; | ||
91 | case '&': | ||
92 | - def->args_ct[i].ct |= TCG_CT_NEWREG; | ||
93 | + def->args_ct[i].newreg = true; | ||
94 | ct_str++; | ||
95 | break; | ||
96 | case 'i': | ||
97 | @@ -XXX,XX +XXX,XX @@ static void liveness_pass_1(TCGContext *s) | ||
98 | set = *pset; | ||
99 | |||
100 | set &= ct->regs; | ||
101 | - if (ct->ct & TCG_CT_IALIAS) { | ||
102 | + if (ct->ialias) { | ||
103 | set &= op->output_pref[ct->alias_index]; | ||
104 | } | ||
105 | /* If the combination is not possible, restart. */ | ||
106 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
107 | } | ||
108 | |||
109 | i_preferred_regs = o_preferred_regs = 0; | ||
110 | - if (arg_ct->ct & TCG_CT_IALIAS) { | ||
111 | + if (arg_ct->ialias) { | ||
112 | o_preferred_regs = op->output_pref[arg_ct->alias_index]; | ||
113 | if (ts->fixed_reg) { | ||
114 | /* if fixed register, we must allocate a new register | ||
115 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
116 | reg = ts->reg; | ||
117 | for (k2 = 0 ; k2 < k ; k2++) { | ||
118 | i2 = def->args_ct[nb_oargs + k2].sort_index; | ||
119 | - if ((def->args_ct[i2].ct & TCG_CT_IALIAS) && | ||
120 | - reg == new_args[i2]) { | ||
121 | + if (def->args_ct[i2].ialias && reg == new_args[i2]) { | ||
122 | goto allocate_in_reg; | ||
123 | } | ||
124 | } | ||
125 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
126 | /* ENV should not be modified. */ | ||
127 | tcg_debug_assert(!ts->fixed_reg); | ||
128 | |||
129 | - if ((arg_ct->ct & TCG_CT_ALIAS) | ||
130 | - && !const_args[arg_ct->alias_index]) { | ||
131 | + if (arg_ct->oalias && !const_args[arg_ct->alias_index]) { | ||
132 | reg = new_args[arg_ct->alias_index]; | ||
133 | - } else if (arg_ct->ct & TCG_CT_NEWREG) { | ||
134 | + } else if (arg_ct->newreg) { | ||
135 | reg = tcg_reg_alloc(s, arg_ct->regs, | ||
136 | i_allocated_regs | o_allocated_regs, | ||
137 | op->output_pref[k], ts->indirect_base); | ||
138 | -- | 25 | -- |
139 | 2.25.1 | 26 | 2.34.1 |
140 | 27 | ||
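The guest_base define in the right-hand patch is what enforces the elimination requirement: a system-mode path that still references guest_base after optimization breaks the build instead of miscompiling. A sketch of the underlying trick, assuming GCC or Clang with optimization enabled; build_bug() is an invented name, though qemu_build_not_reached() works along these lines.

    #include <stdint.h>

    extern void build_bug(void)
        __attribute__((error("guest_base used in a system-mode build")));

    #define guest_base ({ build_bug(); (uintptr_t)0; })
    #define tcg_use_softmmu 1        /* system mode: compile-time constant */

    uintptr_t host_addr(uintptr_t guest_addr)
    {
        if (tcg_use_softmmu) {
            return guest_addr;               /* softmmu path is kept */
        }
        /* Statically dead: the optimizer deletes the build_bug() call, so
           the error diagnostic never fires.  If this branch were reachable,
           compilation would fail with the message above. */
        return guest_addr + guest_base;
    }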
Deleted patch | |||
---|---|---|---|
1 | The last user of this field disappeared in f69d277ece4. | ||
2 | 1 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | --- | ||
5 | include/tcg/tcg.h | 3 --- | ||
6 | 1 file changed, 3 deletions(-) | ||
7 | |||
8 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | ||
9 | index XXXXXXX..XXXXXXX 100644 | ||
10 | --- a/include/tcg/tcg.h | ||
11 | +++ b/include/tcg/tcg.h | ||
12 | @@ -XXX,XX +XXX,XX @@ typedef struct TCGOpDef { | ||
13 | uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args; | ||
14 | uint8_t flags; | ||
15 | TCGArgConstraint *args_ct; | ||
16 | -#if defined(CONFIG_DEBUG_TCG) | ||
17 | - int used; | ||
18 | -#endif | ||
19 | } TCGOpDef; | ||
20 | |||
21 | extern TCGOpDef tcg_op_defs[]; | ||
22 | -- | ||
23 | 2.25.1 | ||
24 | |||
Deleted patch | |||
---|---|---|---|
1 | The previous change wrongly stated that 32-bit avx2 should have | ||
2 | used VPBROADCASTW. But that's a 16-bit broadcast and we want a | ||
3 | 32-bit broadcast. | ||
4 | 1 | ||
5 | Fixes: 7b60ef3264e | ||
6 | Cc: qemu-stable@nongnu.org | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | --- | ||
9 | tcg/i386/tcg-target.c.inc | 2 +- | ||
10 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
11 | |||
12 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/tcg/i386/tcg-target.c.inc | ||
15 | +++ b/tcg/i386/tcg-target.c.inc | ||
16 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, | ||
17 | new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4); | ||
18 | } else { | ||
19 | if (have_avx2) { | ||
20 | - tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTW + vex_l, ret); | ||
21 | + tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret); | ||
22 | } else { | ||
23 | tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret); | ||
24 | } | ||
25 | -- | ||
26 | 2.25.1 | ||
27 | |||
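To see why the opcode choice above matters: VPBROADCASTW replicates a 16-bit lane, so handing it a 32-bit constant silently drops the high half, while VPBROADCASTD replicates the full 32 bits. A small self-contained illustration in the spirit of QEMU's dup_const; the program and helper names are invented.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t dup16(uint16_t x) { return UINT64_C(0x0001000100010001) * x; }
    static uint64_t dup32(uint32_t x) { return ((uint64_t)x << 32) | x; }

    int main(void)
    {
        uint32_t c = 0x12345678;
        /* 16-bit broadcast keeps only the low half of the constant: */
        printf("%016" PRIx64 "\n", dup16((uint16_t)c)); /* 5678567856785678 */
        /* 32-bit broadcast replicates the whole constant, as intended: */
        printf("%016" PRIx64 "\n", dup32(c));           /* 1234567812345678 */
        return 0;
    }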
1 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
---|---|---|---|
1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
2 | --- | 3 | --- |
3 | tcg/arm/tcg-target-constr.h | 30 ++++++++++++ | 4 | tcg/arm/tcg-target.c.inc | 203 +++++++++++++++++++-------------------- |
4 | tcg/arm/tcg-target.c.inc | 93 +++++++++++++------------------------ | 5 | 1 file changed, 97 insertions(+), 106 deletions(-) |
5 | 2 files changed, 63 insertions(+), 60 deletions(-) | ||
6 | create mode 100644 tcg/arm/tcg-target-constr.h | ||
7 | 6 | ||
8 | diff --git a/tcg/arm/tcg-target-constr.h b/tcg/arm/tcg-target-constr.h | ||
9 | new file mode 100644 | ||
10 | index XXXXXXX..XXXXXXX | ||
11 | --- /dev/null | ||
12 | +++ b/tcg/arm/tcg-target-constr.h | ||
13 | @@ -XXX,XX +XXX,XX @@ | ||
14 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
15 | +/* | ||
16 | + * ARM32 target-specific operand constraints. | ||
17 | + * Copyright (c) 2020 Linaro | ||
18 | + */ | ||
19 | + | ||
20 | +C_O0_I1(r) | ||
21 | +C_O0_I2(r, r) | ||
22 | +C_O0_I2(r, rIN) | ||
23 | +C_O0_I2(s, s) | ||
24 | +C_O0_I3(s, s, s) | ||
25 | +C_O0_I4(r, r, rI, rI) | ||
26 | +C_O0_I4(s, s, s, s) | ||
27 | +C_O1_I1(r, l) | ||
28 | +C_O1_I1(r, r) | ||
29 | +C_O1_I2(r, 0, rZ) | ||
30 | +C_O1_I2(r, l, l) | ||
31 | +C_O1_I2(r, r, r) | ||
32 | +C_O1_I2(r, r, rI) | ||
33 | +C_O1_I2(r, r, rIK) | ||
34 | +C_O1_I2(r, r, rIN) | ||
35 | +C_O1_I2(r, r, ri) | ||
36 | +C_O1_I2(r, rZ, rZ) | ||
37 | +C_O1_I4(r, r, r, rI, rI) | ||
38 | +C_O1_I4(r, r, rIN, rIK, 0) | ||
39 | +C_O2_I1(r, r, l) | ||
40 | +C_O2_I2(r, r, l, l) | ||
41 | +C_O2_I2(r, r, r, r) | ||
42 | +C_O2_I4(r, r, r, r, rIN, rIK) | ||
43 | +C_O2_I4(r, r, rI, rI, rIN, rIK) | ||
44 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | 7 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
45 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
46 | --- a/tcg/arm/tcg-target.c.inc | 9 | --- a/tcg/arm/tcg-target.c.inc |
47 | +++ b/tcg/arm/tcg-target.c.inc | 10 | +++ b/tcg/arm/tcg-target.c.inc |
48 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, | 11 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) |
12 | |||
13 | #define TCG_REG_TMP TCG_REG_R12 | ||
14 | #define TCG_VEC_TMP TCG_REG_Q15 | ||
15 | -#ifndef CONFIG_SOFTMMU | ||
16 | #define TCG_REG_GUEST_BASE TCG_REG_R11 | ||
17 | -#endif | ||
18 | |||
19 | typedef enum { | ||
20 | COND_EQ = 0x0, | ||
21 | @@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, | ||
22 | * r0-r3 will be overwritten when reading the tlb entry (system-mode only); | ||
23 | * r14 will be overwritten by the BLNE branching to the slow path. | ||
24 | */ | ||
25 | -#ifdef CONFIG_SOFTMMU | ||
26 | #define ALL_QLDST_REGS \ | ||
27 | - (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \ | ||
28 | - (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \ | ||
29 | - (1 << TCG_REG_R14))) | ||
30 | -#else | ||
31 | -#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R14)) | ||
32 | -#endif | ||
33 | + (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14))) | ||
34 | |||
35 | /* | ||
36 | * ARM immediates for ALU instructions are made of an unsigned 8-bit | ||
37 | @@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, | ||
38 | MemOp opc = get_memop(oi); | ||
39 | unsigned a_mask; | ||
40 | |||
41 | -#ifdef CONFIG_SOFTMMU | ||
42 | - *h = (HostAddress){ | ||
43 | - .cond = COND_AL, | ||
44 | - .base = addrlo, | ||
45 | - .index = TCG_REG_R1, | ||
46 | - .index_scratch = true, | ||
47 | - }; | ||
48 | -#else | ||
49 | - *h = (HostAddress){ | ||
50 | - .cond = COND_AL, | ||
51 | - .base = addrlo, | ||
52 | - .index = guest_base ? TCG_REG_GUEST_BASE : -1, | ||
53 | - .index_scratch = false, | ||
54 | - }; | ||
55 | -#endif | ||
56 | + if (tcg_use_softmmu) { | ||
57 | + *h = (HostAddress){ | ||
58 | + .cond = COND_AL, | ||
59 | + .base = addrlo, | ||
60 | + .index = TCG_REG_R1, | ||
61 | + .index_scratch = true, | ||
62 | + }; | ||
63 | + } else { | ||
64 | + *h = (HostAddress){ | ||
65 | + .cond = COND_AL, | ||
66 | + .base = addrlo, | ||
67 | + .index = guest_base ? TCG_REG_GUEST_BASE : -1, | ||
68 | + .index_scratch = false, | ||
69 | + }; | ||
70 | + } | ||
71 | |||
72 | h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); | ||
73 | a_mask = (1 << h->aa.align) - 1; | ||
74 | |||
75 | -#ifdef CONFIG_SOFTMMU | ||
76 | - int mem_index = get_mmuidx(oi); | ||
77 | - int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
78 | - : offsetof(CPUTLBEntry, addr_write); | ||
79 | - int fast_off = tlb_mask_table_ofs(s, mem_index); | ||
80 | - unsigned s_mask = (1 << (opc & MO_SIZE)) - 1; | ||
81 | - TCGReg t_addr; | ||
82 | + if (tcg_use_softmmu) { | ||
83 | + int mem_index = get_mmuidx(oi); | ||
84 | + int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
85 | + : offsetof(CPUTLBEntry, addr_write); | ||
86 | + int fast_off = tlb_mask_table_ofs(s, mem_index); | ||
87 | + unsigned s_mask = (1 << (opc & MO_SIZE)) - 1; | ||
88 | + TCGReg t_addr; | ||
89 | |||
90 | - ldst = new_ldst_label(s); | ||
91 | - ldst->is_ld = is_ld; | ||
92 | - ldst->oi = oi; | ||
93 | - ldst->addrlo_reg = addrlo; | ||
94 | - ldst->addrhi_reg = addrhi; | ||
95 | + ldst = new_ldst_label(s); | ||
96 | + ldst->is_ld = is_ld; | ||
97 | + ldst->oi = oi; | ||
98 | + ldst->addrlo_reg = addrlo; | ||
99 | + ldst->addrhi_reg = addrhi; | ||
100 | |||
101 | - /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */ | ||
102 | - QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); | ||
103 | - QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); | ||
104 | - tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); | ||
105 | + /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */ | ||
106 | + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); | ||
107 | + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); | ||
108 | + tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); | ||
109 | |||
110 | - /* Extract the tlb index from the address into R0. */ | ||
111 | - tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, | ||
112 | - SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS)); | ||
113 | + /* Extract the tlb index from the address into R0. */ | ||
114 | + tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, | ||
115 | + SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS)); | ||
116 | |||
117 | - /* | ||
118 | - * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. | ||
119 | - * Load the tlb comparator into R2/R3 and the fast path addend into R1. | ||
120 | - */ | ||
121 | - QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); | ||
122 | - if (cmp_off == 0) { | ||
123 | - if (s->addr_type == TCG_TYPE_I32) { | ||
124 | - tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); | ||
125 | + /* | ||
126 | + * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. | ||
127 | + * Load the tlb comparator into R2/R3 and the fast path addend into R1. | ||
128 | + */ | ||
129 | + QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); | ||
130 | + if (cmp_off == 0) { | ||
131 | + if (s->addr_type == TCG_TYPE_I32) { | ||
132 | + tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, | ||
133 | + TCG_REG_R1, TCG_REG_R0); | ||
134 | + } else { | ||
135 | + tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, | ||
136 | + TCG_REG_R1, TCG_REG_R0); | ||
137 | + } | ||
138 | } else { | ||
139 | - tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); | ||
140 | + tcg_out_dat_reg(s, COND_AL, ARITH_ADD, | ||
141 | + TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); | ||
142 | + if (s->addr_type == TCG_TYPE_I32) { | ||
143 | + tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); | ||
144 | + } else { | ||
145 | + tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); | ||
146 | + } | ||
147 | } | ||
148 | - } else { | ||
149 | - tcg_out_dat_reg(s, COND_AL, ARITH_ADD, | ||
150 | - TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); | ||
151 | - if (s->addr_type == TCG_TYPE_I32) { | ||
152 | - tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); | ||
153 | + | ||
154 | + /* Load the tlb addend. */ | ||
155 | + tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, | ||
156 | + offsetof(CPUTLBEntry, addend)); | ||
157 | + | ||
158 | + /* | ||
159 | + * Check alignment, check comparators. | ||
160 | + * Do this in 2-4 insns. Use MOVW for v7, if possible, | ||
161 | + * to reduce the number of sequential conditional instructions. | ||
162 | + * Almost all guests have at least 4k pages, which means that we need | ||
163 | + * to clear at least 9 bits even for an 8-byte memory, which means it | ||
164 | + * isn't worth checking for an immediate operand for BIC. | ||
165 | + * | ||
166 | + * For unaligned accesses, test the page of the last unit of alignment. | ||
167 | + * This leaves the least significant alignment bits unchanged, and of | ||
168 | + * course must be zero. | ||
169 | + */ | ||
170 | + t_addr = addrlo; | ||
171 | + if (a_mask < s_mask) { | ||
172 | + t_addr = TCG_REG_R0; | ||
173 | + tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, | ||
174 | + addrlo, s_mask - a_mask); | ||
175 | + } | ||
176 | + if (use_armv7_instructions && s->page_bits <= 16) { | ||
177 | + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask)); | ||
178 | + tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, | ||
179 | + t_addr, TCG_REG_TMP, 0); | ||
180 | + tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, | ||
181 | + TCG_REG_R2, TCG_REG_TMP, 0); | ||
182 | } else { | ||
183 | - tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); | ||
184 | + if (a_mask) { | ||
185 | + tcg_debug_assert(a_mask <= 0xff); | ||
186 | + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); | ||
187 | + } | ||
188 | + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, | ||
189 | + SHIFT_IMM_LSR(s->page_bits)); | ||
190 | + tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, | ||
191 | + 0, TCG_REG_R2, TCG_REG_TMP, | ||
192 | + SHIFT_IMM_LSL(s->page_bits)); | ||
193 | } | ||
194 | - } | ||
195 | |||
196 | - /* Load the tlb addend. */ | ||
197 | - tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, | ||
198 | - offsetof(CPUTLBEntry, addend)); | ||
199 | - | ||
200 | - /* | ||
201 | - * Check alignment, check comparators. | ||
202 | - * Do this in 2-4 insns. Use MOVW for v7, if possible, | ||
203 | - * to reduce the number of sequential conditional instructions. | ||
204 | - * Almost all guests have at least 4k pages, which means that we need | ||
205 | - * to clear at least 9 bits even for an 8-byte memory, which means it | ||
206 | - * isn't worth checking for an immediate operand for BIC. | ||
207 | - * | ||
208 | - * For unaligned accesses, test the page of the last unit of alignment. | ||
209 | - * This leaves the least significant alignment bits unchanged, and of | ||
210 | - * course must be zero. | ||
211 | - */ | ||
212 | - t_addr = addrlo; | ||
213 | - if (a_mask < s_mask) { | ||
214 | - t_addr = TCG_REG_R0; | ||
215 | - tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, | ||
216 | - addrlo, s_mask - a_mask); | ||
217 | - } | ||
218 | - if (use_armv7_instructions && s->page_bits <= 16) { | ||
219 | - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask)); | ||
220 | - tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, | ||
221 | - t_addr, TCG_REG_TMP, 0); | ||
222 | - tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); | ||
223 | - } else { | ||
224 | - if (a_mask) { | ||
225 | - tcg_debug_assert(a_mask <= 0xff); | ||
226 | - tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); | ||
227 | + if (s->addr_type != TCG_TYPE_I32) { | ||
228 | + tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); | ||
229 | } | ||
230 | - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, | ||
231 | - SHIFT_IMM_LSR(s->page_bits)); | ||
232 | - tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, | ||
233 | - 0, TCG_REG_R2, TCG_REG_TMP, | ||
234 | - SHIFT_IMM_LSL(s->page_bits)); | ||
235 | - } | ||
236 | - | ||
237 | - if (s->addr_type != TCG_TYPE_I32) { | ||
238 | - tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); | ||
239 | - } | ||
240 | -#else | ||
241 | - if (a_mask) { | ||
242 | + } else if (a_mask) { | ||
243 | ldst = new_ldst_label(s); | ||
244 | ldst->is_ld = is_ld; | ||
245 | ldst->oi = oi; | ||
246 | @@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, | ||
247 | /* tst addr, #mask */ | ||
248 | tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); | ||
49 | } | 249 | } |
250 | -#endif | ||
251 | |||
252 | return ldst; | ||
50 | } | 253 | } |
51 | 254 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | |
52 | +/* Define all constraint sets. */ | 255 | |
53 | +#include "../tcg-constr.c.inc" | 256 | tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); |
54 | + | 257 | |
55 | static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | 258 | -#ifndef CONFIG_SOFTMMU |
56 | { | 259 | - if (guest_base) { |
57 | - static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; | 260 | + if (!tcg_use_softmmu && guest_base) { |
58 | - static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; | 261 | tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); |
59 | - static const TCGTargetOpDef s_s = { .args_ct_str = { "s", "s" } }; | 262 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); |
60 | - static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } }; | 263 | } |
61 | - static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; | 264 | -#endif |
62 | - static const TCGTargetOpDef r_r_l = { .args_ct_str = { "r", "r", "l" } }; | 265 | |
63 | - static const TCGTargetOpDef r_l_l = { .args_ct_str = { "r", "l", "l" } }; | 266 | tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]); |
64 | - static const TCGTargetOpDef s_s_s = { .args_ct_str = { "s", "s", "s" } }; | 267 | |
65 | - static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; | ||
66 | - static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; | ||
67 | - static const TCGTargetOpDef r_r_rIN | ||
68 | - = { .args_ct_str = { "r", "r", "rIN" } }; | ||
69 | - static const TCGTargetOpDef r_r_rIK | ||
70 | - = { .args_ct_str = { "r", "r", "rIK" } }; | ||
71 | - static const TCGTargetOpDef r_r_r_r | ||
72 | - = { .args_ct_str = { "r", "r", "r", "r" } }; | ||
73 | - static const TCGTargetOpDef r_r_l_l | ||
74 | - = { .args_ct_str = { "r", "r", "l", "l" } }; | ||
75 | - static const TCGTargetOpDef s_s_s_s | ||
76 | - = { .args_ct_str = { "s", "s", "s", "s" } }; | ||
77 | - static const TCGTargetOpDef br | ||
78 | - = { .args_ct_str = { "r", "rIN" } }; | ||
79 | - static const TCGTargetOpDef ext2 | ||
80 | - = { .args_ct_str = { "r", "rZ", "rZ" } }; | ||
81 | - static const TCGTargetOpDef dep | ||
82 | - = { .args_ct_str = { "r", "0", "rZ" } }; | ||
83 | - static const TCGTargetOpDef movc | ||
84 | - = { .args_ct_str = { "r", "r", "rIN", "rIK", "0" } }; | ||
85 | - static const TCGTargetOpDef add2 | ||
86 | - = { .args_ct_str = { "r", "r", "r", "r", "rIN", "rIK" } }; | ||
87 | - static const TCGTargetOpDef sub2 | ||
88 | - = { .args_ct_str = { "r", "r", "rI", "rI", "rIN", "rIK" } }; | ||
89 | - static const TCGTargetOpDef br2 | ||
90 | - = { .args_ct_str = { "r", "r", "rI", "rI" } }; | ||
91 | - static const TCGTargetOpDef setc2 | ||
92 | - = { .args_ct_str = { "r", "r", "r", "rI", "rI" } }; | ||
93 | - | ||
94 | switch (op) { | ||
95 | case INDEX_op_goto_ptr: | ||
96 | - return &r; | ||
97 | + return C_O0_I1(r); | ||
98 | |||
99 | case INDEX_op_ld8u_i32: | ||
100 | case INDEX_op_ld8s_i32: | ||
101 | case INDEX_op_ld16u_i32: | ||
102 | case INDEX_op_ld16s_i32: | ||
103 | case INDEX_op_ld_i32: | ||
104 | - case INDEX_op_st8_i32: | ||
105 | - case INDEX_op_st16_i32: | ||
106 | - case INDEX_op_st_i32: | ||
107 | case INDEX_op_neg_i32: | ||
108 | case INDEX_op_not_i32: | ||
109 | case INDEX_op_bswap16_i32: | ||
110 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
111 | case INDEX_op_ext16u_i32: | ||
112 | case INDEX_op_extract_i32: | ||
113 | case INDEX_op_sextract_i32: | ||
114 | - return &r_r; | ||
115 | + return C_O1_I1(r, r); | ||
116 | + | ||
117 | + case INDEX_op_st8_i32: | ||
118 | + case INDEX_op_st16_i32: | ||
119 | + case INDEX_op_st_i32: | ||
120 | + return C_O0_I2(r, r); | ||
121 | |||
122 | case INDEX_op_add_i32: | ||
123 | case INDEX_op_sub_i32: | ||
124 | case INDEX_op_setcond_i32: | ||
125 | - return &r_r_rIN; | ||
126 | + return C_O1_I2(r, r, rIN); | ||
127 | + | ||
128 | case INDEX_op_and_i32: | ||
129 | case INDEX_op_andc_i32: | ||
130 | case INDEX_op_clz_i32: | ||
131 | case INDEX_op_ctz_i32: | ||
132 | - return &r_r_rIK; | ||
133 | + return C_O1_I2(r, r, rIK); | ||
134 | + | ||
135 | case INDEX_op_mul_i32: | ||
136 | case INDEX_op_div_i32: | ||
137 | case INDEX_op_divu_i32: | ||
138 | - return &r_r_r; | ||
139 | + return C_O1_I2(r, r, r); | ||
140 | + | ||
141 | case INDEX_op_mulu2_i32: | ||
142 | case INDEX_op_muls2_i32: | ||
143 | - return &r_r_r_r; | ||
144 | + return C_O2_I2(r, r, r, r); | ||
145 | + | ||
146 | case INDEX_op_or_i32: | ||
147 | case INDEX_op_xor_i32: | ||
148 | - return &r_r_rI; | ||
149 | + return C_O1_I2(r, r, rI); | ||
150 | + | ||
151 | case INDEX_op_shl_i32: | ||
152 | case INDEX_op_shr_i32: | ||
153 | case INDEX_op_sar_i32: | ||
154 | case INDEX_op_rotl_i32: | ||
155 | case INDEX_op_rotr_i32: | ||
156 | - return &r_r_ri; | ||
157 | + return C_O1_I2(r, r, ri); | ||
158 | |||
159 | case INDEX_op_brcond_i32: | ||
160 | - return &br; | ||
161 | + return C_O0_I2(r, rIN); | ||
162 | case INDEX_op_deposit_i32: | ||
163 | - return &dep; | ||
164 | + return C_O1_I2(r, 0, rZ); | ||
165 | case INDEX_op_extract2_i32: | ||
166 | - return &ext2; | ||
167 | + return C_O1_I2(r, rZ, rZ); | ||
168 | case INDEX_op_movcond_i32: | ||
169 | - return &movc; | ||
170 | + return C_O1_I4(r, r, rIN, rIK, 0); | ||
171 | case INDEX_op_add2_i32: | ||
172 | - return &add2; | ||
173 | + return C_O2_I4(r, r, r, r, rIN, rIK); | ||
174 | case INDEX_op_sub2_i32: | ||
175 | - return &sub2; | ||
176 | + return C_O2_I4(r, r, rI, rI, rIN, rIK); | ||
177 | case INDEX_op_brcond2_i32: | ||
178 | - return &br2; | ||
179 | + return C_O0_I4(r, r, rI, rI); | ||
180 | case INDEX_op_setcond2_i32: | ||
181 | - return &setc2; | ||
182 | + return C_O1_I4(r, r, r, rI, rI); | ||
183 | |||
184 | case INDEX_op_qemu_ld_i32: | ||
185 | - return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l; | ||
186 | + return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l); | ||
187 | case INDEX_op_qemu_ld_i64: | ||
188 | - return TARGET_LONG_BITS == 32 ? &r_r_l : &r_r_l_l; | ||
189 | + return TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, l) : C_O2_I2(r, r, l, l); | ||
190 | case INDEX_op_qemu_st_i32: | ||
191 | - return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s; | ||
192 | + return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s); | ||
193 | case INDEX_op_qemu_st_i64: | ||
194 | - return TARGET_LONG_BITS == 32 ? &s_s_s : &s_s_s_s; | ||
195 | + return TARGET_LONG_BITS == 32 ? C_O0_I3(s, s, s) : C_O0_I4(s, s, s, s); | ||
196 | |||
197 | default: | ||
198 | return NULL; | ||
199 | -- | 268 | -- |
200 | 2.25.1 | 269 | 2.34.1 |
201 | 270 | ||
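The left-hand conversion above replaces per-opcode static TCGTargetOpDef variables with constraint sets named once in tcg/arm/tcg-target-constr.h. The shared tcg-constr.c.inc is not shown in this posting, but the include-twice technique that such C_On_Im() macros suggest looks roughly like the sketch below; the macro bodies and types are illustrative, not the series' actual ones.

    /* Stand-in for the per-target header, inlined to stay self-contained;
       the real file lists one C_On_Im(...) line per constraint set. */
    #define CONSTR_LIST \
        C_O0_I2(r, r)   \
        C_O1_I2(r, r, rI)

    /* First expansion: one enum identifier per constraint set. */
    #define C_O0_I2(a, b)    C_o0_i2_##a##_##b,
    #define C_O1_I2(a, b, c) C_o1_i2_##a##_##b##_##c,
    enum { CONSTR_LIST NB_CONSTR_SETS };
    #undef C_O0_I2
    #undef C_O1_I2

    /* Second expansion: a table of constraint strings in the same order,
       so each enum value indexes its own definition. */
    typedef struct { const char *args_ct_str[5]; } OpDefSketch;

    #define C_O0_I2(a, b)    { .args_ct_str = { #a, #b } },
    #define C_O1_I2(a, b, c) { .args_ct_str = { #a, #b, #c } },
    static const OpDefSketch constr_sets[] = { CONSTR_LIST };
    #undef C_O0_I2
    #undef C_O1_I2

Returning a small value from tcg_target_op_def() rather than a bare pointer also gives common code a chance to range-check what each backend hands back.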
1 | Improve rotrv_vec to reduce "t1 = -v2, t2 = t1 + c" to | 1 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
---|---|---|---|
2 | "t1 = -v, t2 = c - v2". This avoids a serial dependency | ||
3 | between t1 and t2. | ||
4 | |||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 3 | --- |
7 | tcg/aarch64/tcg-target.c.inc | 10 +++++----- | 4 | tcg/aarch64/tcg-target.c.inc | 177 +++++++++++++++++------------------ |
8 | 1 file changed, 5 insertions(+), 5 deletions(-) | 5 | 1 file changed, 88 insertions(+), 89 deletions(-) |
9 | 6 | ||
10 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | 7 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc |
11 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/tcg/aarch64/tcg-target.c.inc | 9 | --- a/tcg/aarch64/tcg-target.c.inc |
13 | +++ b/tcg/aarch64/tcg-target.c.inc | 10 | +++ b/tcg/aarch64/tcg-target.c.inc |
14 | @@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, | 11 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) |
15 | TCGArg a0, ...) | 12 | #define TCG_REG_TMP2 TCG_REG_X30 |
13 | #define TCG_VEC_TMP0 TCG_REG_V31 | ||
14 | |||
15 | -#ifndef CONFIG_SOFTMMU | ||
16 | #define TCG_REG_GUEST_BASE TCG_REG_X28 | ||
17 | -#endif | ||
18 | |||
19 | static bool reloc_pc26(tcg_insn_unit *src_rw, const tcg_insn_unit *target) | ||
16 | { | 20 | { |
17 | va_list va; | 21 | @@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, |
18 | - TCGv_vec v0, v1, v2, t1, t2; | 22 | s_bits == MO_128); |
19 | + TCGv_vec v0, v1, v2, t1, t2, c1; | 23 | a_mask = (1 << h->aa.align) - 1; |
20 | TCGArg a2; | 24 | |
21 | 25 | -#ifdef CONFIG_SOFTMMU | |
22 | va_start(va, a0); | 26 | - unsigned s_mask = (1u << s_bits) - 1; |
23 | @@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, | 27 | - unsigned mem_index = get_mmuidx(oi); |
24 | 28 | - TCGReg addr_adj; | |
25 | case INDEX_op_rotlv_vec: | 29 | - TCGType mask_type; |
26 | t1 = tcg_temp_new_vec(type); | 30 | - uint64_t compare_mask; |
27 | - tcg_gen_dupi_vec(vece, t1, 8 << vece); | 31 | + if (tcg_use_softmmu) { |
28 | - tcg_gen_sub_vec(vece, t1, v2, t1); | 32 | + unsigned s_mask = (1u << s_bits) - 1; |
29 | + c1 = tcg_constant_vec(type, vece, 8 << vece); | 33 | + unsigned mem_index = get_mmuidx(oi); |
30 | + tcg_gen_sub_vec(vece, t1, v2, c1); | 34 | + TCGReg addr_adj; |
31 | /* Right shifts are negative left shifts for AArch64. */ | 35 | + TCGType mask_type; |
32 | vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1), | 36 | + uint64_t compare_mask; |
33 | tcgv_vec_arg(v1), tcgv_vec_arg(t1)); | 37 | |
34 | @@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, | 38 | - ldst = new_ldst_label(s); |
35 | case INDEX_op_rotrv_vec: | 39 | - ldst->is_ld = is_ld; |
36 | t1 = tcg_temp_new_vec(type); | 40 | - ldst->oi = oi; |
37 | t2 = tcg_temp_new_vec(type); | 41 | - ldst->addrlo_reg = addr_reg; |
38 | + c1 = tcg_constant_vec(type, vece, 8 << vece); | 42 | - |
39 | tcg_gen_neg_vec(vece, t1, v2); | 43 | - mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32 |
40 | - tcg_gen_dupi_vec(vece, t2, 8 << vece); | 44 | - ? TCG_TYPE_I64 : TCG_TYPE_I32); |
41 | - tcg_gen_add_vec(vece, t2, t1, t2); | 45 | - |
42 | + tcg_gen_sub_vec(vece, t2, c1, v2); | 46 | - /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */ |
43 | /* Right shifts are negative left shifts for AArch64. */ | 47 | - QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); |
44 | vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1), | 48 | - QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); |
45 | tcgv_vec_arg(v1), tcgv_vec_arg(t1)); | 49 | - tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0, |
50 | - tlb_mask_table_ofs(s, mem_index), 1, 0); | ||
51 | - | ||
52 | - /* Extract the TLB index from the address into X0. */ | ||
53 | - tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64, | ||
54 | - TCG_REG_TMP0, TCG_REG_TMP0, addr_reg, | ||
55 | - s->page_bits - CPU_TLB_ENTRY_BITS); | ||
56 | - | ||
57 | - /* Add the tlb_table pointer, forming the CPUTLBEntry address in TMP1. */ | ||
58 | - tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0); | ||
59 | - | ||
60 | - /* Load the tlb comparator into TMP0, and the fast path addend into TMP1. */ | ||
61 | - QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); | ||
62 | - tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP1, | ||
63 | - is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
64 | - : offsetof(CPUTLBEntry, addr_write)); | ||
65 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, | ||
66 | - offsetof(CPUTLBEntry, addend)); | ||
67 | - | ||
68 | - /* | ||
69 | - * For aligned accesses, we check the first byte and include the alignment | ||
70 | - * bits within the address. For unaligned access, we check that we don't | ||
71 | - * cross pages using the address of the last byte of the access. | ||
72 | - */ | ||
73 | - if (a_mask >= s_mask) { | ||
74 | - addr_adj = addr_reg; | ||
75 | - } else { | ||
76 | - addr_adj = TCG_REG_TMP2; | ||
77 | - tcg_out_insn(s, 3401, ADDI, addr_type, | ||
78 | - addr_adj, addr_reg, s_mask - a_mask); | ||
79 | - } | ||
80 | - compare_mask = (uint64_t)s->page_mask | a_mask; | ||
81 | - | ||
82 | - /* Store the page mask part of the address into TMP2. */ | ||
83 | - tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2, | ||
84 | - addr_adj, compare_mask); | ||
85 | - | ||
86 | - /* Perform the address comparison. */ | ||
87 | - tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0); | ||
88 | - | ||
89 | - /* If not equal, we jump to the slow path. */ | ||
90 | - ldst->label_ptr[0] = s->code_ptr; | ||
91 | - tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); | ||
92 | - | ||
93 | - h->base = TCG_REG_TMP1; | ||
94 | - h->index = addr_reg; | ||
95 | - h->index_ext = addr_type; | ||
96 | -#else | ||
97 | - if (a_mask) { | ||
98 | ldst = new_ldst_label(s); | ||
99 | - | ||
100 | ldst->is_ld = is_ld; | ||
101 | ldst->oi = oi; | ||
102 | ldst->addrlo_reg = addr_reg; | ||
103 | |||
104 | - /* tst addr, #mask */ | ||
105 | - tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask); | ||
106 | + mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32 | ||
107 | + ? TCG_TYPE_I64 : TCG_TYPE_I32); | ||
108 | |||
109 | - /* b.ne slow_path */ | ||
110 | + /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */ | ||
111 | + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); | ||
112 | + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); | ||
113 | + tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0, | ||
114 | + tlb_mask_table_ofs(s, mem_index), 1, 0); | ||
115 | + | ||
116 | + /* Extract the TLB index from the address into X0. */ | ||
117 | + tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64, | ||
118 | + TCG_REG_TMP0, TCG_REG_TMP0, addr_reg, | ||
119 | + s->page_bits - CPU_TLB_ENTRY_BITS); | ||
120 | + | ||
121 | + /* Add the tlb_table pointer, forming the CPUTLBEntry address. */ | ||
122 | + tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0); | ||
123 | + | ||
124 | + /* Load the tlb comparator into TMP0, and the fast path addend. */ | ||
125 | + QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); | ||
126 | + tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP1, | ||
127 | + is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
128 | + : offsetof(CPUTLBEntry, addr_write)); | ||
129 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, | ||
130 | + offsetof(CPUTLBEntry, addend)); | ||
131 | + | ||
132 | + /* | ||
133 | + * For aligned accesses, we check the first byte and include | ||
134 | + * the alignment bits within the address. For unaligned access, | ||
135 | + * we check that we don't cross pages using the address of the | ||
136 | + * last byte of the access. | ||
137 | + */ | ||
138 | + if (a_mask >= s_mask) { | ||
139 | + addr_adj = addr_reg; | ||
140 | + } else { | ||
141 | + addr_adj = TCG_REG_TMP2; | ||
142 | + tcg_out_insn(s, 3401, ADDI, addr_type, | ||
143 | + addr_adj, addr_reg, s_mask - a_mask); | ||
144 | + } | ||
145 | + compare_mask = (uint64_t)s->page_mask | a_mask; | ||
146 | + | ||
147 | + /* Store the page mask part of the address into TMP2. */ | ||
148 | + tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2, | ||
149 | + addr_adj, compare_mask); | ||
150 | + | ||
151 | + /* Perform the address comparison. */ | ||
152 | + tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0); | ||
153 | + | ||
154 | + /* If not equal, we jump to the slow path. */ | ||
155 | ldst->label_ptr[0] = s->code_ptr; | ||
156 | tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); | ||
157 | - } | ||
158 | |||
159 | - if (guest_base || addr_type == TCG_TYPE_I32) { | ||
160 | - h->base = TCG_REG_GUEST_BASE; | ||
161 | + h->base = TCG_REG_TMP1; | ||
162 | h->index = addr_reg; | ||
163 | h->index_ext = addr_type; | ||
164 | } else { | ||
165 | - h->base = addr_reg; | ||
166 | - h->index = TCG_REG_XZR; | ||
167 | - h->index_ext = TCG_TYPE_I64; | ||
168 | + if (a_mask) { | ||
169 | + ldst = new_ldst_label(s); | ||
170 | + | ||
171 | + ldst->is_ld = is_ld; | ||
172 | + ldst->oi = oi; | ||
173 | + ldst->addrlo_reg = addr_reg; | ||
174 | + | ||
175 | + /* tst addr, #mask */ | ||
176 | + tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask); | ||
177 | + | ||
178 | + /* b.ne slow_path */ | ||
179 | + ldst->label_ptr[0] = s->code_ptr; | ||
180 | + tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); | ||
181 | + } | ||
182 | + | ||
183 | + if (guest_base || addr_type == TCG_TYPE_I32) { | ||
184 | + h->base = TCG_REG_GUEST_BASE; | ||
185 | + h->index = addr_reg; | ||
186 | + h->index_ext = addr_type; | ||
187 | + } else { | ||
188 | + h->base = addr_reg; | ||
189 | + h->index = TCG_REG_XZR; | ||
190 | + h->index_ext = TCG_TYPE_I64; | ||
191 | + } | ||
192 | } | ||
193 | -#endif | ||
194 | |||
195 | return ldst; | ||
196 | } | ||
197 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
198 | tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, | ||
199 | CPU_TEMP_BUF_NLONGS * sizeof(long)); | ||
200 | |||
201 | -#if !defined(CONFIG_SOFTMMU) | ||
202 | - /* | ||
203 | - * Note that XZR cannot be encoded in the address base register slot, | ||
204 | - * as that actually encodes SP. Depending on the guest, we may need | ||
205 | - * to zero-extend the guest address via the address index register slot, | ||
206 | - * therefore we need to load even a zero guest base into a register. | ||
207 | - */ | ||
208 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); | ||
209 | - tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); | ||
210 | -#endif | ||
211 | + if (!tcg_use_softmmu) { | ||
212 | + /* | ||
213 | + * Note that XZR cannot be encoded in the address base register slot, | ||
214 | + * as that actually encodes SP. Depending on the guest, we may need | ||
215 | + * to zero-extend the guest address via the address index register slot, | ||
216 | + * therefore we need to load even a zero guest base into a register. | ||
217 | + */ | ||
218 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); | ||
219 | + tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); | ||
220 | + } | ||
221 | |||
222 | tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); | ||
223 | tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]); | ||
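The shape of this conversion (repeated in the other backends below), reduced to a sketch; setup_guest_base() is a hypothetical stand-in for the backend-specific code, not a real function:

    /* Before: the choice is made at build time. */
    #ifndef CONFIG_SOFTMMU
        setup_guest_base();
    #endif

    /* After: the choice is made at run time, and both arms are
     * always compile-tested. */
    if (!tcg_use_softmmu) {
        setup_guest_base();
    }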
46 | -- | 224 | -- |
47 | 2.25.1 | 225 | 2.34.1 |
48 | 226 | ||
1 | While we don't store more than tcg_target_long in TCGTemp, | 1 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
---|---|---|---|
2 | we shouldn't be limited to that for code generation. We will | ||
3 | be able to use this for INDEX_op_dup2_vec with 2 constants. | ||
4 | |||
5 | Also pass along the minimal vece that may be said to apply | ||
6 | to the constant. This allows some simplification in the | ||
7 | various backends. | ||
8 | |||
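For illustration, the generic search this adds to temp_load() can be exercised standalone; dup_const_sketch() below is a local stand-in for tcg's dup_const(), which replicates its argument across 64 bits:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for dup_const(): replicate the low element across 64 bits. */
    static uint64_t dup_const_sketch(unsigned ebits, uint64_t val)
    {
        switch (ebits) {
        case 8:  return (uint8_t)val  * 0x0101010101010101ull;
        case 16: return (uint16_t)val * 0x0001000100010001ull;
        case 32: return (uint32_t)val * 0x0000000100000001ull;
        default: return val;
        }
    }

    int main(void)
    {
        uint64_t val = 0x1212121212121212ull;
        unsigned vece_bits = 64;

        /* Test the narrowest element first; stop at the first match. */
        if (val == dup_const_sketch(8, val)) {
            vece_bits = 8;
        } else if (val == dup_const_sketch(16, val)) {
            vece_bits = 16;
        } else if (val == dup_const_sketch(32, val)) {
            vece_bits = 32;
        }
        printf("minimal element: %u bits\n", vece_bits);  /* prints 8 */
        return 0;
    }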
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 3 | --- |
11 | tcg/tcg.c | 31 +++++++++++++++++++++++++----- | 4 | tcg/i386/tcg-target.c.inc | 198 +++++++++++++++++++------------------- |
12 | tcg/aarch64/tcg-target.c.inc | 12 ++++++------ | 5 | 1 file changed, 98 insertions(+), 100 deletions(-) |
13 | tcg/i386/tcg-target.c.inc | 22 ++++++++++++--------- | ||
14 | tcg/ppc/tcg-target.c.inc | 37 +++++++++++++++++++++++------------- | ||
15 | 4 files changed, 69 insertions(+), 33 deletions(-) | ||
16 | 6 | ||
17 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/tcg/tcg.c | ||
20 | +++ b/tcg/tcg.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, | ||
22 | TCGReg dst, TCGReg src); | ||
23 | static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, | ||
24 | TCGReg dst, TCGReg base, intptr_t offset); | ||
25 | -static void tcg_out_dupi_vec(TCGContext *s, TCGType type, | ||
26 | - TCGReg dst, tcg_target_long arg); | ||
27 | +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, | ||
28 | + TCGReg dst, int64_t arg); | ||
29 | static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, | ||
30 | unsigned vece, const TCGArg *args, | ||
31 | const int *const_args); | ||
32 | @@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, | ||
33 | { | ||
34 | g_assert_not_reached(); | ||
35 | } | ||
36 | -static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, | ||
37 | - TCGReg dst, tcg_target_long arg) | ||
38 | +static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, | ||
39 | + TCGReg dst, int64_t arg) | ||
40 | { | ||
41 | g_assert_not_reached(); | ||
42 | } | ||
43 | @@ -XXX,XX +XXX,XX @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs, | ||
44 | if (ts->type <= TCG_TYPE_I64) { | ||
45 | tcg_out_movi(s, ts->type, reg, ts->val); | ||
46 | } else { | ||
47 | - tcg_out_dupi_vec(s, ts->type, reg, ts->val); | ||
48 | + uint64_t val = ts->val; | ||
49 | + MemOp vece = MO_64; | ||
50 | + | ||
51 | + /* | ||
52 | + * Find the minimal vector element that matches the constant. | ||
53 | + * The targets will, in general, have to do this search anyway, | ||
54 | + * so do it generically here. | ||
55 | + */ | ||
56 | + if (TCG_TARGET_REG_BITS == 32) { | ||
57 | + val = dup_const(MO_32, val); | ||
58 | + vece = MO_32; | ||
59 | + } | ||
60 | + if (val == dup_const(MO_8, val)) { | ||
61 | + vece = MO_8; | ||
62 | + } else if (val == dup_const(MO_16, val)) { | ||
63 | + vece = MO_16; | ||
64 | + } else if (TCG_TARGET_REG_BITS == 64 && | ||
65 | + val == dup_const(MO_32, val)) { | ||
66 | + vece = MO_32; | ||
67 | + } | ||
68 | + | ||
69 | + tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val); | ||
70 | } | ||
71 | ts->mem_coherent = 0; | ||
72 | break; | ||
73 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | ||
74 | index XXXXXXX..XXXXXXX 100644 | ||
75 | --- a/tcg/aarch64/tcg-target.c.inc | ||
76 | +++ b/tcg/aarch64/tcg-target.c.inc | ||
77 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext, | ||
78 | tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c); | ||
79 | } | ||
80 | |||
81 | -static void tcg_out_dupi_vec(TCGContext *s, TCGType type, | ||
82 | - TCGReg rd, tcg_target_long v64) | ||
83 | +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, | ||
84 | + TCGReg rd, int64_t v64) | ||
85 | { | ||
86 | bool q = type == TCG_TYPE_V128; | ||
87 | int cmode, imm8, i; | ||
88 | |||
89 | /* Test all bytes equal first. */ | ||
90 | - if (v64 == dup_const(MO_8, v64)) { | ||
91 | + if (vece == MO_8) { | ||
92 | imm8 = (uint8_t)v64; | ||
93 | tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0xe, imm8); | ||
94 | return; | ||
95 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, | ||
96 | * cannot find an expansion there's no point checking a larger | ||
97 | * width because we already know by replication it cannot match. | ||
98 | */ | ||
99 | - if (v64 == dup_const(MO_16, v64)) { | ||
100 | + if (vece == MO_16) { | ||
101 | uint16_t v16 = v64; | ||
102 | |||
103 | if (is_shimm16(v16, &cmode, &imm8)) { | ||
104 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, | ||
105 | tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0x8, v16 & 0xff); | ||
106 | tcg_out_insn(s, 3606, ORR, q, rd, 0, 0xa, v16 >> 8); | ||
107 | return; | ||
108 | - } else if (v64 == dup_const(MO_32, v64)) { | ||
109 | + } else if (vece == MO_32) { | ||
110 | uint32_t v32 = v64; | ||
111 | uint32_t n32 = ~v32; | ||
112 | |||
113 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
114 | tcg_out_insn_3617(s, insn, is_q, vece, a0, a1); | ||
115 | break; | ||
116 | } | ||
117 | - tcg_out_dupi_vec(s, type, TCG_VEC_TMP, 0); | ||
118 | + tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0); | ||
119 | a2 = TCG_VEC_TMP; | ||
120 | } | ||
121 | insn = cmp_insn[cond]; | ||
122 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | 7 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc |
123 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
124 | --- a/tcg/i386/tcg-target.c.inc | 9 | --- a/tcg/i386/tcg-target.c.inc |
125 | +++ b/tcg/i386/tcg-target.c.inc | 10 | +++ b/tcg/i386/tcg-target.c.inc |
126 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, | 11 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) |
12 | # define ALL_VECTOR_REGS 0x00ff0000u | ||
13 | # define ALL_BYTEL_REGS 0x0000000fu | ||
14 | #endif | ||
15 | -#ifdef CONFIG_SOFTMMU | ||
16 | -# define SOFTMMU_RESERVE_REGS ((1 << TCG_REG_L0) | (1 << TCG_REG_L1)) | ||
17 | -#else | ||
18 | -# define SOFTMMU_RESERVE_REGS 0 | ||
19 | -#endif | ||
20 | +#define SOFTMMU_RESERVE_REGS \ | ||
21 | + (tcg_use_softmmu ? (1 << TCG_REG_L0) | (1 << TCG_REG_L1) : 0) | ||
22 | |||
23 | /* For 64-bit, we always know that CMOV is available. */ | ||
24 | #if TCG_TARGET_REG_BITS == 64 | ||
25 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
127 | return true; | 26 | return true; |
128 | } | 27 | } |
129 | 28 | ||
130 | -static void tcg_out_dupi_vec(TCGContext *s, TCGType type, | 29 | -#ifndef CONFIG_SOFTMMU |
131 | - TCGReg ret, tcg_target_long arg) | 30 | +#ifdef CONFIG_USER_ONLY |
132 | +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, | 31 | static HostAddress x86_guest_base = { |
133 | + TCGReg ret, int64_t arg) | 32 | .index = -1 |
134 | { | 33 | }; |
135 | int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0); | 34 | @@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void) |
136 | 35 | } | |
137 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, | 36 | return 0; |
138 | return; | 37 | } |
139 | } | 38 | +#define setup_guest_base_seg setup_guest_base_seg |
39 | #elif defined(__x86_64__) && \ | ||
40 | (defined (__FreeBSD__) || defined (__FreeBSD_kernel__)) | ||
41 | # include <machine/sysarch.h> | ||
42 | @@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void) | ||
43 | } | ||
44 | return 0; | ||
45 | } | ||
46 | +#define setup_guest_base_seg setup_guest_base_seg | ||
47 | +#endif | ||
48 | #else | ||
49 | -static inline int setup_guest_base_seg(void) | ||
50 | -{ | ||
51 | - return 0; | ||
52 | -} | ||
53 | -#endif /* setup_guest_base_seg */ | ||
54 | -#endif /* !SOFTMMU */ | ||
55 | +# define x86_guest_base (*(HostAddress *)({ qemu_build_not_reached(); NULL; })) | ||
56 | +#endif /* CONFIG_USER_ONLY */ | ||
57 | +#ifndef setup_guest_base_seg | ||
58 | +# define setup_guest_base_seg() 0 | ||
59 | +#endif | ||
60 | |||
61 | #define MIN_TLB_MASK_TABLE_OFS INT_MIN | ||
62 | |||
63 | @@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, | ||
64 | MemOp s_bits = opc & MO_SIZE; | ||
65 | unsigned a_mask; | ||
66 | |||
67 | -#ifdef CONFIG_SOFTMMU | ||
68 | - h->index = TCG_REG_L0; | ||
69 | - h->ofs = 0; | ||
70 | - h->seg = 0; | ||
71 | -#else | ||
72 | - *h = x86_guest_base; | ||
73 | -#endif | ||
74 | + if (tcg_use_softmmu) { | ||
75 | + h->index = TCG_REG_L0; | ||
76 | + h->ofs = 0; | ||
77 | + h->seg = 0; | ||
78 | + } else { | ||
79 | + *h = x86_guest_base; | ||
80 | + } | ||
81 | h->base = addrlo; | ||
82 | h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128); | ||
83 | a_mask = (1 << h->aa.align) - 1; | ||
84 | |||
85 | -#ifdef CONFIG_SOFTMMU | ||
86 | - int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
87 | - : offsetof(CPUTLBEntry, addr_write); | ||
88 | - TCGType ttype = TCG_TYPE_I32; | ||
89 | - TCGType tlbtype = TCG_TYPE_I32; | ||
90 | - int trexw = 0, hrexw = 0, tlbrexw = 0; | ||
91 | - unsigned mem_index = get_mmuidx(oi); | ||
92 | - unsigned s_mask = (1 << s_bits) - 1; | ||
93 | - int fast_ofs = tlb_mask_table_ofs(s, mem_index); | ||
94 | - int tlb_mask; | ||
95 | + if (tcg_use_softmmu) { | ||
96 | + int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
97 | + : offsetof(CPUTLBEntry, addr_write); | ||
98 | + TCGType ttype = TCG_TYPE_I32; | ||
99 | + TCGType tlbtype = TCG_TYPE_I32; | ||
100 | + int trexw = 0, hrexw = 0, tlbrexw = 0; | ||
101 | + unsigned mem_index = get_mmuidx(oi); | ||
102 | + unsigned s_mask = (1 << s_bits) - 1; | ||
103 | + int fast_ofs = tlb_mask_table_ofs(s, mem_index); | ||
104 | + int tlb_mask; | ||
105 | |||
106 | - ldst = new_ldst_label(s); | ||
107 | - ldst->is_ld = is_ld; | ||
108 | - ldst->oi = oi; | ||
109 | - ldst->addrlo_reg = addrlo; | ||
110 | - ldst->addrhi_reg = addrhi; | ||
111 | + ldst = new_ldst_label(s); | ||
112 | + ldst->is_ld = is_ld; | ||
113 | + ldst->oi = oi; | ||
114 | + ldst->addrlo_reg = addrlo; | ||
115 | + ldst->addrhi_reg = addrhi; | ||
140 | 116 | ||
141 | - if (TCG_TARGET_REG_BITS == 64) { | 117 | - if (TCG_TARGET_REG_BITS == 64) { |
142 | + if (TCG_TARGET_REG_BITS == 32 && vece < MO_64) { | 118 | - ttype = s->addr_type; |
143 | + if (have_avx2) { | 119 | - trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW); |
144 | + tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret); | 120 | - if (TCG_TYPE_PTR == TCG_TYPE_I64) { |
145 | + } else { | 121 | - hrexw = P_REXW; |
146 | + tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret); | 122 | - if (s->page_bits + s->tlb_dyn_max_bits > 32) { |
147 | + } | 123 | - tlbtype = TCG_TYPE_I64; |
148 | + new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0); | 124 | - tlbrexw = P_REXW; |
149 | + } else { | ||
150 | if (type == TCG_TYPE_V64) { | ||
151 | tcg_out_vex_modrm_pool(s, OPC_MOVQ_VqWq, ret); | ||
152 | } else if (have_avx2) { | ||
153 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, | ||
154 | } else { | ||
155 | tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret); | ||
156 | } | ||
157 | - new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4); | ||
158 | - } else { | ||
159 | - if (have_avx2) { | ||
160 | - tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret); | ||
161 | + if (TCG_TARGET_REG_BITS == 64) { | 125 | + if (TCG_TARGET_REG_BITS == 64) { |
162 | + new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4); | 126 | + ttype = s->addr_type; |
163 | } else { | 127 | + trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW); |
164 | - tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret); | 128 | + if (TCG_TYPE_PTR == TCG_TYPE_I64) { |
165 | + new_pool_l2(s, R_386_32, s->code_ptr - 4, 0, arg, arg >> 32); | 129 | + hrexw = P_REXW; |
166 | } | 130 | + if (s->page_bits + s->tlb_dyn_max_bits > 32) { |
167 | - new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0); | 131 | + tlbtype = TCG_TYPE_I64; |
168 | } | 132 | + tlbrexw = P_REXW; |
169 | } | 133 | + } |
170 | 134 | } | |
171 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc | ||
172 | index XXXXXXX..XXXXXXX 100644 | ||
173 | --- a/tcg/ppc/tcg-target.c.inc | ||
174 | +++ b/tcg/ppc/tcg-target.c.inc | ||
175 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, | ||
176 | } | ||
177 | } | ||
178 | |||
179 | -static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret, | ||
180 | - tcg_target_long val) | ||
181 | +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, | ||
182 | + TCGReg ret, int64_t val) | ||
183 | { | ||
184 | uint32_t load_insn; | ||
185 | int rel, low; | ||
186 | intptr_t add; | ||
187 | |||
188 | - low = (int8_t)val; | ||
189 | - if (low >= -16 && low < 16) { | ||
190 | - if (val == (tcg_target_long)dup_const(MO_8, low)) { | ||
191 | + switch (vece) { | ||
192 | + case MO_8: | ||
193 | + low = (int8_t)val; | ||
194 | + if (low >= -16 && low < 16) { | ||
195 | tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16)); | ||
196 | return; | ||
197 | } | ||
198 | - if (val == (tcg_target_long)dup_const(MO_16, low)) { | ||
199 | + if (have_isa_3_00) { | ||
200 | + tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11)); | ||
201 | + return; | ||
202 | + } | ||
203 | + break; | ||
204 | + | ||
205 | + case MO_16: | ||
206 | + low = (int16_t)val; | ||
207 | + if (low >= -16 && low < 16) { | ||
208 | tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16)); | ||
209 | return; | ||
210 | } | ||
211 | - if (val == (tcg_target_long)dup_const(MO_32, low)) { | ||
212 | + break; | ||
213 | + | ||
214 | + case MO_32: | ||
215 | + low = (int32_t)val; | ||
216 | + if (low >= -16 && low < 16) { | ||
217 | tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16)); | ||
218 | return; | ||
219 | } | 135 | } |
220 | - } | 136 | - } |
221 | - if (have_isa_3_00 && val == (tcg_target_long)dup_const(MO_8, val)) { | 137 | |
222 | - tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11)); | 138 | - tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo); |
223 | - return; | 139 | - tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0, |
224 | + break; | 140 | - s->page_bits - CPU_TLB_ENTRY_BITS); |
225 | } | 141 | + tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo); |
142 | + tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0, | ||
143 | + s->page_bits - CPU_TLB_ENTRY_BITS); | ||
144 | |||
145 | - tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0, | ||
146 | - fast_ofs + offsetof(CPUTLBDescFast, mask)); | ||
147 | + tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0, | ||
148 | + fast_ofs + offsetof(CPUTLBDescFast, mask)); | ||
149 | |||
150 | - tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0, | ||
151 | - fast_ofs + offsetof(CPUTLBDescFast, table)); | ||
152 | + tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0, | ||
153 | + fast_ofs + offsetof(CPUTLBDescFast, table)); | ||
154 | |||
155 | - /* | ||
156 | - * If the required alignment is at least as large as the access, simply | ||
157 | - * copy the address and mask. For lesser alignments, check that we don't | ||
158 | - * cross pages for the complete access. | ||
159 | - */ | ||
160 | - if (a_mask >= s_mask) { | ||
161 | - tcg_out_mov(s, ttype, TCG_REG_L1, addrlo); | ||
162 | - } else { | ||
163 | - tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1, | ||
164 | - addrlo, s_mask - a_mask); | ||
165 | - } | ||
166 | - tlb_mask = s->page_mask | a_mask; | ||
167 | - tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0); | ||
168 | + /* | ||
169 | + * If the required alignment is at least as large as the access, | ||
170 | + * simply copy the address and mask. For lesser alignments, | ||
171 | + * check that we don't cross pages for the complete access. | ||
172 | + */ | ||
173 | + if (a_mask >= s_mask) { | ||
174 | + tcg_out_mov(s, ttype, TCG_REG_L1, addrlo); | ||
175 | + } else { | ||
176 | + tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1, | ||
177 | + addrlo, s_mask - a_mask); | ||
178 | + } | ||
179 | + tlb_mask = s->page_mask | a_mask; | ||
180 | + tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0); | ||
181 | |||
182 | - /* cmp 0(TCG_REG_L0), TCG_REG_L1 */ | ||
183 | - tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, | ||
184 | - TCG_REG_L1, TCG_REG_L0, cmp_ofs); | ||
185 | - | ||
186 | - /* jne slow_path */ | ||
187 | - tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); | ||
188 | - ldst->label_ptr[0] = s->code_ptr; | ||
189 | - s->code_ptr += 4; | ||
190 | - | ||
191 | - if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) { | ||
192 | - /* cmp 4(TCG_REG_L0), addrhi */ | ||
193 | - tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, cmp_ofs + 4); | ||
194 | + /* cmp 0(TCG_REG_L0), TCG_REG_L1 */ | ||
195 | + tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, | ||
196 | + TCG_REG_L1, TCG_REG_L0, cmp_ofs); | ||
197 | |||
198 | /* jne slow_path */ | ||
199 | tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); | ||
200 | - ldst->label_ptr[1] = s->code_ptr; | ||
201 | + ldst->label_ptr[0] = s->code_ptr; | ||
202 | s->code_ptr += 4; | ||
203 | - } | ||
204 | |||
205 | - /* TLB Hit. */ | ||
206 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0, | ||
207 | - offsetof(CPUTLBEntry, addend)); | ||
208 | -#else | ||
209 | - if (a_mask) { | ||
210 | + if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) { | ||
211 | + /* cmp 4(TCG_REG_L0), addrhi */ | ||
212 | + tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, | ||
213 | + TCG_REG_L0, cmp_ofs + 4); | ||
214 | + | ||
215 | + /* jne slow_path */ | ||
216 | + tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); | ||
217 | + ldst->label_ptr[1] = s->code_ptr; | ||
218 | + s->code_ptr += 4; | ||
219 | + } | ||
220 | + | ||
221 | + /* TLB Hit. */ | ||
222 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0, | ||
223 | + offsetof(CPUTLBEntry, addend)); | ||
224 | + } else if (a_mask) { | ||
225 | ldst = new_ldst_label(s); | ||
226 | |||
227 | ldst->is_ld = is_ld; | ||
228 | @@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, | ||
229 | ldst->label_ptr[0] = s->code_ptr; | ||
230 | s->code_ptr += 4; | ||
231 | } | ||
232 | -#endif | ||
233 | |||
234 | return ldst; | ||
235 | } | ||
236 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
237 | tcg_out_push(s, tcg_target_callee_save_regs[i]); | ||
238 | } | ||
239 | |||
240 | -#if TCG_TARGET_REG_BITS == 32 | ||
241 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, | ||
242 | - (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4); | ||
243 | - tcg_out_addi(s, TCG_REG_ESP, -stack_addend); | ||
244 | - /* jmp *tb. */ | ||
245 | - tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP, | ||
246 | - (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4 | ||
247 | - + stack_addend); | ||
248 | -#else | ||
249 | -# if !defined(CONFIG_SOFTMMU) | ||
250 | - if (guest_base) { | ||
251 | + if (!tcg_use_softmmu && guest_base) { | ||
252 | int seg = setup_guest_base_seg(); | ||
253 | if (seg != 0) { | ||
254 | x86_guest_base.seg = seg; | ||
255 | } else if (guest_base == (int32_t)guest_base) { | ||
256 | x86_guest_base.ofs = guest_base; | ||
257 | } else { | ||
258 | + assert(TCG_TARGET_REG_BITS == 64); | ||
259 | /* Choose R12 because, as a base, it requires a SIB byte. */ | ||
260 | x86_guest_base.index = TCG_REG_R12; | ||
261 | tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base.index, guest_base); | ||
262 | tcg_regset_set_reg(s->reserved_regs, x86_guest_base.index); | ||
263 | } | ||
264 | } | ||
265 | -# endif | ||
266 | - tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); | ||
267 | - tcg_out_addi(s, TCG_REG_ESP, -stack_addend); | ||
268 | - /* jmp *tb. */ | ||
269 | - tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]); | ||
270 | -#endif | ||
271 | + | ||
272 | + if (TCG_TARGET_REG_BITS == 32) { | ||
273 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, | ||
274 | + (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4); | ||
275 | + tcg_out_addi(s, TCG_REG_ESP, -stack_addend); | ||
276 | + /* jmp *tb. */ | ||
277 | + tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP, | ||
278 | + (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4 | ||
279 | + + stack_addend); | ||
280 | + } else { | ||
281 | + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); | ||
282 | + tcg_out_addi(s, TCG_REG_ESP, -stack_addend); | ||
283 | + /* jmp *tb. */ | ||
284 | + tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]); | ||
285 | + } | ||
226 | 286 | ||
227 | /* | 287 | /* |
228 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret, | 288 | * Return path for goto_ptr. Set return value to 0, a-la exit_tb, |
229 | if (TCG_TARGET_REG_BITS == 64) { | ||
230 | new_pool_label(s, val, rel, s->code_ptr, add); | ||
231 | } else { | ||
232 | - new_pool_l2(s, rel, s->code_ptr, add, val, val); | ||
233 | + new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val); | ||
234 | } | ||
235 | } else { | ||
236 | load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1); | ||
237 | if (TCG_TARGET_REG_BITS == 64) { | ||
238 | new_pool_l2(s, rel, s->code_ptr, add, val, val); | ||
239 | } else { | ||
240 | - new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val); | ||
241 | + new_pool_l4(s, rel, s->code_ptr, add, | ||
242 | + val >> 32, val, val >> 32, val); | ||
243 | } | ||
244 | } | ||
245 | |||
246 | -- | 289 | -- |
247 | 2.25.1 | 290 | 2.34.1 |
248 | 291 | ||
1 | We must do this before we adjust tcg_out_movi_i32, lest the | 1 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
---|---|---|---|
2 | under-the-hood poking that we do for icount be broken. | ||
3 | |||
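The pattern, sketched with the same calls the patch uses (a sketch, not a drop-in): emit the op with a dummy constant operand, remember the op, and rewrite that operand once the real instruction count is known.

    TCGOp *icount_start_insn;

    /* Emit "count -= 0"; the 0 is a placeholder constant. */
    tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
    icount_start_insn = tcg_last_op();

    /* ... translate the block, counting guest instructions ... */

    /* Rewrite operand 2 of the remembered sub with the real count. */
    tcg_set_insn_param(icount_start_insn, 2,
                       tcgv_i32_arg(tcg_constant_i32(num_insns)));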
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 3 | --- |
7 | include/exec/gen-icount.h | 25 +++++++++++++------------ | 4 | tcg/loongarch64/tcg-target.c.inc | 126 +++++++++++++++---------------- |
8 | 1 file changed, 13 insertions(+), 12 deletions(-) | 5 | 1 file changed, 61 insertions(+), 65 deletions(-) |
9 | 6 | ||
10 | diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h | 7 | diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc |
11 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/include/exec/gen-icount.h | 9 | --- a/tcg/loongarch64/tcg-target.c.inc |
13 | +++ b/include/exec/gen-icount.h | 10 | +++ b/tcg/loongarch64/tcg-target.c.inc |
14 | @@ -XXX,XX +XXX,XX @@ static inline void gen_io_end(void) | 11 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) |
15 | 12 | return TCG_REG_A0 + slot; | |
16 | static inline void gen_tb_start(TranslationBlock *tb) | 13 | } |
17 | { | 14 | |
18 | - TCGv_i32 count, imm; | 15 | -#ifndef CONFIG_SOFTMMU |
19 | + TCGv_i32 count; | 16 | -#define USE_GUEST_BASE (guest_base != 0) |
20 | 17 | #define TCG_GUEST_BASE_REG TCG_REG_S1 | |
21 | tcg_ctx->exitreq_label = gen_new_label(); | 18 | -#endif |
22 | if (tb_cflags(tb) & CF_USE_ICOUNT) { | 19 | |
23 | @@ -XXX,XX +XXX,XX @@ static inline void gen_tb_start(TranslationBlock *tb) | 20 | #define TCG_CT_CONST_ZERO 0x100 |
24 | offsetof(ArchCPU, env)); | 21 | #define TCG_CT_CONST_S12 0x200 |
25 | 22 | @@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, | |
26 | if (tb_cflags(tb) & CF_USE_ICOUNT) { | 23 | h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); |
27 | - imm = tcg_temp_new_i32(); | 24 | a_bits = h->aa.align; |
28 | - /* We emit a movi with a dummy immediate argument. Keep the insn index | 25 | |
29 | - * of the movi so that we later (when we know the actual insn count) | 26 | -#ifdef CONFIG_SOFTMMU |
30 | - * can update the immediate argument with the actual insn count. */ | 27 | - unsigned s_bits = opc & MO_SIZE; |
31 | - tcg_gen_movi_i32(imm, 0xdeadbeef); | 28 | - int mem_index = get_mmuidx(oi); |
32 | + /* | 29 | - int fast_ofs = tlb_mask_table_ofs(s, mem_index); |
33 | + * We emit a sub with a dummy immediate argument. Keep the insn index | 30 | - int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); |
34 | + * of the sub so that we later (when we know the actual insn count) | 31 | - int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); |
35 | + * can update the argument with the actual insn count. | 32 | + if (tcg_use_softmmu) { |
36 | + */ | 33 | + unsigned s_bits = opc & MO_SIZE; |
37 | + tcg_gen_sub_i32(count, count, tcg_constant_i32(0)); | 34 | + int mem_index = get_mmuidx(oi); |
38 | icount_start_insn = tcg_last_op(); | 35 | + int fast_ofs = tlb_mask_table_ofs(s, mem_index); |
36 | + int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); | ||
37 | + int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); | ||
38 | |||
39 | - ldst = new_ldst_label(s); | ||
40 | - ldst->is_ld = is_ld; | ||
41 | - ldst->oi = oi; | ||
42 | - ldst->addrlo_reg = addr_reg; | ||
39 | - | 43 | - |
40 | - tcg_gen_sub_i32(count, count, imm); | 44 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); |
41 | - tcg_temp_free_i32(imm); | 45 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); |
46 | - | ||
47 | - tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, | ||
48 | - s->page_bits - CPU_TLB_ENTRY_BITS); | ||
49 | - tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); | ||
50 | - tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); | ||
51 | - | ||
52 | - /* Load the tlb comparator and the addend. */ | ||
53 | - QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); | ||
54 | - tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, | ||
55 | - is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
56 | - : offsetof(CPUTLBEntry, addr_write)); | ||
57 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, | ||
58 | - offsetof(CPUTLBEntry, addend)); | ||
59 | - | ||
60 | - /* | ||
61 | - * For aligned accesses, we check the first byte and include the alignment | ||
62 | - * bits within the address. For unaligned access, we check that we don't | ||
63 | - * cross pages using the address of the last byte of the access. | ||
64 | - */ | ||
65 | - if (a_bits < s_bits) { | ||
66 | - unsigned a_mask = (1u << a_bits) - 1; | ||
67 | - unsigned s_mask = (1u << s_bits) - 1; | ||
68 | - tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask); | ||
69 | - } else { | ||
70 | - tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); | ||
71 | - } | ||
72 | - tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, | ||
73 | - a_bits, s->page_bits - 1); | ||
74 | - | ||
75 | - /* Compare masked address with the TLB entry. */ | ||
76 | - ldst->label_ptr[0] = s->code_ptr; | ||
77 | - tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); | ||
78 | - | ||
79 | - h->index = TCG_REG_TMP2; | ||
80 | -#else | ||
81 | - if (a_bits) { | ||
82 | ldst = new_ldst_label(s); | ||
83 | - | ||
84 | ldst->is_ld = is_ld; | ||
85 | ldst->oi = oi; | ||
86 | ldst->addrlo_reg = addr_reg; | ||
87 | |||
88 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); | ||
89 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); | ||
90 | + | ||
91 | + tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, | ||
92 | + s->page_bits - CPU_TLB_ENTRY_BITS); | ||
93 | + tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); | ||
94 | + tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); | ||
95 | + | ||
96 | + /* Load the tlb comparator and the addend. */ | ||
97 | + QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); | ||
98 | + tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, | ||
99 | + is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
100 | + : offsetof(CPUTLBEntry, addr_write)); | ||
101 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, | ||
102 | + offsetof(CPUTLBEntry, addend)); | ||
103 | + | ||
104 | /* | ||
105 | - * Without micro-architecture details, we don't know which of | ||
106 | - * bstrpick or andi is faster, so use bstrpick as it's not | ||
107 | - * constrained by imm field width. Not to say alignments >= 2^12 | ||
108 | - * are going to happen any time soon. | ||
109 | + * For aligned accesses, we check the first byte and include the | ||
110 | + * alignment bits within the address. For unaligned access, we | ||
111 | + * check that we don't cross pages using the address of the last | ||
112 | + * byte of the access. | ||
113 | */ | ||
114 | - tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); | ||
115 | + if (a_bits < s_bits) { | ||
116 | + unsigned a_mask = (1u << a_bits) - 1; | ||
117 | + unsigned s_mask = (1u << s_bits) - 1; | ||
118 | + tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask); | ||
119 | + } else { | ||
120 | + tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); | ||
121 | + } | ||
122 | + tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, | ||
123 | + a_bits, s->page_bits - 1); | ||
124 | |||
125 | + /* Compare masked address with the TLB entry. */ | ||
126 | ldst->label_ptr[0] = s->code_ptr; | ||
127 | - tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); | ||
128 | - } | ||
129 | + tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); | ||
130 | |||
131 | - h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; | ||
132 | -#endif | ||
133 | + h->index = TCG_REG_TMP2; | ||
134 | + } else { | ||
135 | + if (a_bits) { | ||
136 | + ldst = new_ldst_label(s); | ||
137 | + | ||
138 | + ldst->is_ld = is_ld; | ||
139 | + ldst->oi = oi; | ||
140 | + ldst->addrlo_reg = addr_reg; | ||
141 | + | ||
142 | + /* | ||
143 | + * Without micro-architecture details, we don't know which of | ||
144 | + * bstrpick or andi is faster, so use bstrpick as it's not | ||
145 | + * constrained by imm field width. Not to say alignments >= 2^12 | ||
146 | + * are going to happen any time soon. | ||
147 | + */ | ||
148 | + tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); | ||
149 | + | ||
150 | + ldst->label_ptr[0] = s->code_ptr; | ||
151 | + tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); | ||
152 | + } | ||
153 | + | ||
154 | + h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; | ||
155 | + } | ||
156 | |||
157 | if (addr_type == TCG_TYPE_I32) { | ||
158 | h->base = TCG_REG_TMP0; | ||
159 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
160 | TCG_REG_SP, SAVE_OFS + i * REG_SIZE); | ||
42 | } | 161 | } |
43 | 162 | ||
44 | tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label); | 163 | -#if !defined(CONFIG_SOFTMMU) |
45 | @@ -XXX,XX +XXX,XX @@ static inline void gen_tb_start(TranslationBlock *tb) | 164 | - if (USE_GUEST_BASE) { |
46 | static inline void gen_tb_end(TranslationBlock *tb, int num_insns) | 165 | + if (!tcg_use_softmmu && guest_base) { |
47 | { | 166 | tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); |
48 | if (tb_cflags(tb) & CF_USE_ICOUNT) { | 167 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); |
49 | - /* Update the num_insn immediate parameter now that we know | ||
50 | - * the actual insn count. */ | ||
51 | - tcg_set_insn_param(icount_start_insn, 1, num_insns); | ||
52 | + /* | ||
53 | + * Update the num_insn immediate parameter now that we know | ||
54 | + * the actual insn count. | ||
55 | + */ | ||
56 | + tcg_set_insn_param(icount_start_insn, 2, | ||
57 | + tcgv_i32_arg(tcg_constant_i32(num_insns))); | ||
58 | } | 168 | } |
59 | 169 | -#endif | |
60 | gen_set_label(tcg_ctx->exitreq_label); | 170 | |
171 | /* Call generated code */ | ||
172 | tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); | ||
61 | -- | 173 | -- |
62 | 2.25.1 | 174 | 2.34.1 |
63 | 175 | ||
1 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
---|---|---|---|
1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
2 | --- | 3 | --- |
3 | tcg/mips/tcg-target-constr.h | 31 ++++++++++++ | 4 | tcg/mips/tcg-target.c.inc | 231 +++++++++++++++++++------------------- |
4 | tcg/mips/tcg-target.c.inc | 95 ++++++++++++------------------------ | 5 | 1 file changed, 113 insertions(+), 118 deletions(-) |
5 | 2 files changed, 61 insertions(+), 65 deletions(-) | ||
6 | create mode 100644 tcg/mips/tcg-target-constr.h | ||
7 | 6 | ||
8 | diff --git a/tcg/mips/tcg-target-constr.h b/tcg/mips/tcg-target-constr.h | ||
9 | new file mode 100644 | ||
10 | index XXXXXXX..XXXXXXX | ||
11 | --- /dev/null | ||
12 | +++ b/tcg/mips/tcg-target-constr.h | ||
13 | @@ -XXX,XX +XXX,XX @@ | ||
14 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
15 | +/* | ||
16 | + * MIPS target-specific operand constraints. | 15 | -#ifndef CONFIG_SOFTMMU |
17 | + * Copyright (c) 2020 Linaro | ||
18 | + */ | ||
19 | + | ||
20 | +C_O0_I1(r) | ||
21 | +C_O0_I2(rZ, r) | ||
22 | +C_O0_I2(rZ, rZ) | ||
23 | +C_O0_I2(SZ, S) | ||
24 | +C_O0_I3(SZ, S, S) | ||
25 | +C_O0_I3(SZ, SZ, S) | ||
26 | +C_O0_I4(rZ, rZ, rZ, rZ) | ||
27 | +C_O0_I4(SZ, SZ, S, S) | ||
28 | +C_O1_I1(r, L) | ||
29 | +C_O1_I1(r, r) | ||
30 | +C_O1_I2(r, 0, rZ) | ||
31 | +C_O1_I2(r, L, L) | ||
32 | +C_O1_I2(r, r, ri) | ||
33 | +C_O1_I2(r, r, rI) | ||
34 | +C_O1_I2(r, r, rIK) | ||
35 | +C_O1_I2(r, r, rJ) | ||
36 | +C_O1_I2(r, r, rWZ) | ||
37 | +C_O1_I2(r, rZ, rN) | ||
38 | +C_O1_I2(r, rZ, rZ) | ||
39 | +C_O1_I4(r, rZ, rZ, rZ, 0) | ||
40 | +C_O1_I4(r, rZ, rZ, rZ, rZ) | ||
41 | +C_O2_I1(r, r, L) | ||
42 | +C_O2_I2(r, r, L, L) | ||
43 | +C_O2_I2(r, r, r, r) | ||
44 | +C_O2_I4(r, r, rZ, rZ, rN, rN) | ||
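A note on the naming, since the new file is easiest to read with it in mind: each C_O<o>_I<i>(...) entry declares a constraint set with <o> outputs and <i> inputs, spelled with the backend's existing constraint letters. For example, matching the conversion below:

    case INDEX_op_or_i32:
        /* One register output; two inputs, the second of which
         * may be a register or an immediate. */
        return C_O1_I2(r, r, rI);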
45 | diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc | 7 | diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc |
46 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
47 | --- a/tcg/mips/tcg-target.c.inc | 9 | --- a/tcg/mips/tcg-target.c.inc |
48 | +++ b/tcg/mips/tcg-target.c.inc | 10 | +++ b/tcg/mips/tcg-target.c.inc |
49 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, | 11 | @@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { |
12 | #define TCG_TMP2 TCG_REG_T8 | ||
13 | #define TCG_TMP3 TCG_REG_T7 | ||
14 | |||
15 | -#ifndef CONFIG_SOFTMMU | ||
16 | #define TCG_GUEST_BASE_REG TCG_REG_S7 | ||
17 | -#endif | ||
18 | #if TCG_TARGET_REG_BITS == 64 | ||
19 | #define TCG_REG_TB TCG_REG_S6 | ||
20 | #else | ||
21 | -#define TCG_REG_TB (qemu_build_not_reached(), TCG_REG_ZERO) | ||
22 | +#define TCG_REG_TB ({ qemu_build_not_reached(); TCG_REG_ZERO; }) | ||
23 | #endif | ||
24 | |||
25 | /* check if we really need so many registers :P */ | ||
26 | @@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, | ||
27 | a_bits = h->aa.align; | ||
28 | a_mask = (1 << a_bits) - 1; | ||
29 | |||
30 | -#ifdef CONFIG_SOFTMMU | ||
31 | - unsigned s_mask = (1 << s_bits) - 1; | ||
32 | - int mem_index = get_mmuidx(oi); | ||
33 | - int fast_off = tlb_mask_table_ofs(s, mem_index); | ||
34 | - int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); | ||
35 | - int table_off = fast_off + offsetof(CPUTLBDescFast, table); | ||
36 | - int add_off = offsetof(CPUTLBEntry, addend); | ||
37 | - int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
38 | - : offsetof(CPUTLBEntry, addr_write); | ||
39 | + if (tcg_use_softmmu) { | ||
40 | + unsigned s_mask = (1 << s_bits) - 1; | ||
41 | + int mem_index = get_mmuidx(oi); | ||
42 | + int fast_off = tlb_mask_table_ofs(s, mem_index); | ||
43 | + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); | ||
44 | + int table_off = fast_off + offsetof(CPUTLBDescFast, table); | ||
45 | + int add_off = offsetof(CPUTLBEntry, addend); | ||
46 | + int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
47 | + : offsetof(CPUTLBEntry, addr_write); | ||
48 | |||
49 | - ldst = new_ldst_label(s); | ||
50 | - ldst->is_ld = is_ld; | ||
51 | - ldst->oi = oi; | ||
52 | - ldst->addrlo_reg = addrlo; | ||
53 | - ldst->addrhi_reg = addrhi; | ||
54 | - | ||
55 | - /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ | ||
56 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off); | ||
57 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off); | ||
58 | - | ||
59 | - /* Extract the TLB index from the address into TMP3. */ | ||
60 | - if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { | ||
61 | - tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo, | ||
62 | - s->page_bits - CPU_TLB_ENTRY_BITS); | ||
63 | - } else { | ||
64 | - tcg_out_dsrl(s, TCG_TMP3, addrlo, | ||
65 | - s->page_bits - CPU_TLB_ENTRY_BITS); | ||
66 | - } | ||
67 | - tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0); | ||
68 | - | ||
69 | - /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */ | ||
70 | - tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); | ||
71 | - | ||
72 | - if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { | ||
73 | - /* Load the (low half) tlb comparator. */ | ||
74 | - tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, | ||
75 | - cmp_off + HOST_BIG_ENDIAN * 4); | ||
76 | - } else { | ||
77 | - tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off); | ||
78 | - } | ||
79 | - | ||
80 | - if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) { | ||
81 | - /* Load the tlb addend for the fast path. */ | ||
82 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off); | ||
83 | - } | ||
84 | - | ||
85 | - /* | ||
86 | - * Mask the page bits, keeping the alignment bits to compare against. | ||
87 | - * For unaligned accesses, compare against the end of the access to | ||
88 | - * verify that it does not cross a page boundary. | ||
89 | - */ | ||
90 | - tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask); | ||
91 | - if (a_mask < s_mask) { | ||
92 | - if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { | ||
93 | - tcg_out_opc_imm(s, OPC_ADDIU, TCG_TMP2, addrlo, s_mask - a_mask); | ||
94 | - } else { | ||
95 | - tcg_out_opc_imm(s, OPC_DADDIU, TCG_TMP2, addrlo, s_mask - a_mask); | ||
96 | - } | ||
97 | - tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2); | ||
98 | - } else { | ||
99 | - tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo); | ||
100 | - } | ||
101 | - | ||
102 | - /* Zero extend a 32-bit guest address for a 64-bit host. */ | ||
103 | - if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { | ||
104 | - tcg_out_ext32u(s, TCG_TMP2, addrlo); | ||
105 | - addrlo = TCG_TMP2; | ||
106 | - } | ||
107 | - | ||
108 | - ldst->label_ptr[0] = s->code_ptr; | ||
109 | - tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); | ||
110 | - | ||
111 | - /* Load and test the high half tlb comparator. */ | ||
112 | - if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) { | ||
113 | - /* delay slot */ | ||
114 | - tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); | ||
115 | - | ||
116 | - /* Load the tlb addend for the fast path. */ | ||
117 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off); | ||
118 | - | ||
119 | - ldst->label_ptr[1] = s->code_ptr; | ||
120 | - tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0); | ||
121 | - } | ||
122 | - | ||
123 | - /* delay slot */ | ||
124 | - base = TCG_TMP3; | ||
125 | - tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo); | ||
126 | -#else | ||
127 | - if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) { | ||
128 | ldst = new_ldst_label(s); | ||
129 | - | ||
130 | ldst->is_ld = is_ld; | ||
131 | ldst->oi = oi; | ||
132 | ldst->addrlo_reg = addrlo; | ||
133 | ldst->addrhi_reg = addrhi; | ||
134 | |||
135 | - /* We are expecting a_bits to max out at 7, much lower than ANDI. */ | ||
136 | - tcg_debug_assert(a_bits < 16); | ||
137 | - tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask); | ||
138 | + /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ | ||
139 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off); | ||
140 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off); | ||
141 | + | ||
142 | + /* Extract the TLB index from the address into TMP3. */ | ||
143 | + if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { | ||
144 | + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo, | ||
145 | + s->page_bits - CPU_TLB_ENTRY_BITS); | ||
146 | + } else { | ||
147 | + tcg_out_dsrl(s, TCG_TMP3, addrlo, | ||
148 | + s->page_bits - CPU_TLB_ENTRY_BITS); | ||
149 | + } | ||
150 | + tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0); | ||
151 | + | ||
152 | + /* Add the tlb_table pointer, creating the CPUTLBEntry address. */ | ||
153 | + tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); | ||
154 | + | ||
155 | + if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { | ||
156 | + /* Load the (low half) tlb comparator. */ | ||
157 | + tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, | ||
158 | + cmp_off + HOST_BIG_ENDIAN * 4); | ||
159 | + } else { | ||
160 | + tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off); | ||
161 | + } | ||
162 | + | ||
163 | + if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) { | ||
164 | + /* Load the tlb addend for the fast path. */ | ||
165 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off); | ||
166 | + } | ||
167 | + | ||
168 | + /* | ||
169 | + * Mask the page bits, keeping the alignment bits to compare against. | ||
170 | + * For unaligned accesses, compare against the end of the access to | ||
171 | + * verify that it does not cross a page boundary. | ||
172 | + */ | ||
173 | + tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask); | ||
174 | + if (a_mask < s_mask) { | ||
175 | + tcg_out_opc_imm(s, (TCG_TARGET_REG_BITS == 32 | ||
176 | + || addr_type == TCG_TYPE_I32 | ||
177 | + ? OPC_ADDIU : OPC_DADDIU), | ||
178 | + TCG_TMP2, addrlo, s_mask - a_mask); | ||
179 | + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2); | ||
180 | + } else { | ||
181 | + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo); | ||
182 | + } | ||
183 | + | ||
184 | + /* Zero extend a 32-bit guest address for a 64-bit host. */ | ||
185 | + if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { | ||
186 | + tcg_out_ext32u(s, TCG_TMP2, addrlo); | ||
187 | + addrlo = TCG_TMP2; | ||
188 | + } | ||
189 | |||
190 | ldst->label_ptr[0] = s->code_ptr; | ||
191 | - if (use_mips32r6_instructions) { | ||
192 | - tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0); | ||
193 | - } else { | ||
194 | - tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO); | ||
195 | - tcg_out_nop(s); | ||
196 | - } | ||
197 | - } | ||
198 | + tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); | ||
199 | |||
200 | - base = addrlo; | ||
201 | - if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { | ||
202 | - tcg_out_ext32u(s, TCG_REG_A0, base); | ||
203 | - base = TCG_REG_A0; | ||
204 | - } | ||
205 | - if (guest_base) { | ||
206 | - if (guest_base == (int16_t)guest_base) { | ||
207 | - tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base); | ||
208 | - } else { | ||
209 | - tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base, | ||
210 | - TCG_GUEST_BASE_REG); | ||
211 | + /* Load and test the high half tlb comparator. */ | ||
212 | + if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) { | ||
213 | + /* delay slot */ | ||
214 | + tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); | ||
215 | + | ||
216 | + /* Load the tlb addend for the fast path. */ | ||
217 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off); | ||
218 | + | ||
219 | + ldst->label_ptr[1] = s->code_ptr; | ||
220 | + tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0); | ||
221 | + } | ||
222 | + | ||
223 | + /* delay slot */ | ||
224 | + base = TCG_TMP3; | ||
225 | + tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo); | ||
226 | + } else { | ||
227 | + if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) { | ||
228 | + ldst = new_ldst_label(s); | ||
229 | + | ||
230 | + ldst->is_ld = is_ld; | ||
231 | + ldst->oi = oi; | ||
232 | + ldst->addrlo_reg = addrlo; | ||
233 | + ldst->addrhi_reg = addrhi; | ||
234 | + | ||
235 | + /* We are expecting a_bits to max out at 7, much lower than ANDI. */ | ||
236 | + tcg_debug_assert(a_bits < 16); | ||
237 | + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask); | ||
238 | + | ||
239 | + ldst->label_ptr[0] = s->code_ptr; | ||
240 | + if (use_mips32r6_instructions) { | ||
241 | + tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0); | ||
242 | + } else { | ||
243 | + tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO); | ||
244 | + tcg_out_nop(s); | ||
245 | + } | ||
246 | + } | ||
247 | + | ||
248 | + base = addrlo; | ||
249 | + if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { | ||
250 | + tcg_out_ext32u(s, TCG_REG_A0, base); | ||
251 | + base = TCG_REG_A0; | ||
252 | + } | ||
253 | + if (guest_base) { | ||
254 | + if (guest_base == (int16_t)guest_base) { | ||
255 | + tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base); | ||
256 | + } else { | ||
257 | + tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base, | ||
258 | + TCG_GUEST_BASE_REG); | ||
259 | + } | ||
260 | + base = TCG_REG_A0; | ||
261 | } | ||
262 | - base = TCG_REG_A0; | ||
50 | } | 263 | } |
51 | } | 264 | -#endif |
52 | 265 | ||
53 | +/* Define all constraint sets. */ | 266 | h->base = base; |
54 | +#include "../tcg-constr.c.inc" | 267 | return ldst; |
55 | + | 268 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) |
56 | static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | 269 | TCG_REG_SP, SAVE_OFS + i * REG_SIZE); |
57 | { | 270 | } |
58 | - static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; | 271 | |
59 | - static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; | 272 | -#ifndef CONFIG_SOFTMMU |
60 | - static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; | 273 | - if (guest_base != (int16_t)guest_base) { |
61 | - static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; | 274 | + if (!tcg_use_softmmu && guest_base != (int16_t)guest_base) { |
62 | - static const TCGTargetOpDef SZ_S = { .args_ct_str = { "SZ", "S" } }; | 275 | /* |
63 | - static const TCGTargetOpDef rZ_rZ = { .args_ct_str = { "rZ", "rZ" } }; | 276 | * The function call abi for n32 and n64 will have loaded $25 (t9) |
64 | - static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } }; | 277 | * with the address of the prologue, so we can use that instead |
65 | - static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } }; | 278 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) |
66 | - static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; | 279 | TCG_TARGET_REG_BITS == 64 ? TCG_REG_T9 : 0); |
67 | - static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; | 280 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); |
68 | - static const TCGTargetOpDef r_r_rJ = { .args_ct_str = { "r", "r", "rJ" } }; | 281 | } |
69 | - static const TCGTargetOpDef SZ_S_S = { .args_ct_str = { "SZ", "S", "S" } }; | 282 | -#endif |
70 | - static const TCGTargetOpDef SZ_SZ_S | 283 | |
71 | - = { .args_ct_str = { "SZ", "SZ", "S" } }; | 284 | if (TCG_TARGET_REG_BITS == 64) { |
72 | - static const TCGTargetOpDef SZ_SZ_S_S | 285 | tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]); |
73 | - = { .args_ct_str = { "SZ", "SZ", "S", "S" } }; | ||
74 | - static const TCGTargetOpDef r_rZ_rN | ||
75 | - = { .args_ct_str = { "r", "rZ", "rN" } }; | ||
76 | - static const TCGTargetOpDef r_rZ_rZ | ||
77 | - = { .args_ct_str = { "r", "rZ", "rZ" } }; | ||
78 | - static const TCGTargetOpDef r_r_rIK | ||
79 | - = { .args_ct_str = { "r", "r", "rIK" } }; | ||
80 | - static const TCGTargetOpDef r_r_rWZ | ||
81 | - = { .args_ct_str = { "r", "r", "rWZ" } }; | ||
82 | - static const TCGTargetOpDef r_r_r_r | ||
83 | - = { .args_ct_str = { "r", "r", "r", "r" } }; | ||
84 | - static const TCGTargetOpDef r_r_L_L | ||
85 | - = { .args_ct_str = { "r", "r", "L", "L" } }; | ||
86 | - static const TCGTargetOpDef dep | ||
87 | - = { .args_ct_str = { "r", "0", "rZ" } }; | ||
88 | - static const TCGTargetOpDef movc | ||
89 | - = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "0" } }; | ||
90 | - static const TCGTargetOpDef movc_r6 | ||
91 | - = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } }; | ||
92 | - static const TCGTargetOpDef add2 | ||
93 | - = { .args_ct_str = { "r", "r", "rZ", "rZ", "rN", "rN" } }; | ||
94 | - static const TCGTargetOpDef br2 | ||
95 | - = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } }; | ||
96 | - static const TCGTargetOpDef setc2 | ||
97 | - = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } }; | ||
98 | - | ||
99 | switch (op) { | ||
100 | case INDEX_op_goto_ptr: | ||
101 | - return &r; | ||
102 | + return C_O0_I1(r); | ||
103 | |||
104 | case INDEX_op_ld8u_i32: | ||
105 | case INDEX_op_ld8s_i32: | ||
106 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
107 | case INDEX_op_extrl_i64_i32: | ||
108 | case INDEX_op_extrh_i64_i32: | ||
109 | case INDEX_op_extract_i64: | ||
110 | - return &r_r; | ||
111 | + return C_O1_I1(r, r); | ||
112 | |||
113 | case INDEX_op_st8_i32: | ||
114 | case INDEX_op_st16_i32: | ||
115 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
116 | case INDEX_op_st16_i64: | ||
117 | case INDEX_op_st32_i64: | ||
118 | case INDEX_op_st_i64: | ||
119 | - return &rZ_r; | ||
120 | + return C_O0_I2(rZ, r); | ||
121 | |||
122 | case INDEX_op_add_i32: | ||
123 | case INDEX_op_add_i64: | ||
124 | - return &r_r_rJ; | ||
125 | + return C_O1_I2(r, r, rJ); | ||
126 | case INDEX_op_sub_i32: | ||
127 | case INDEX_op_sub_i64: | ||
128 | - return &r_rZ_rN; | ||
129 | + return C_O1_I2(r, rZ, rN); | ||
130 | case INDEX_op_mul_i32: | ||
131 | case INDEX_op_mulsh_i32: | ||
132 | case INDEX_op_muluh_i32: | ||
133 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
134 | case INDEX_op_remu_i64: | ||
135 | case INDEX_op_nor_i64: | ||
136 | case INDEX_op_setcond_i64: | ||
137 | - return &r_rZ_rZ; | ||
138 | + return C_O1_I2(r, rZ, rZ); | ||
139 | case INDEX_op_muls2_i32: | ||
140 | case INDEX_op_mulu2_i32: | ||
141 | case INDEX_op_muls2_i64: | ||
142 | case INDEX_op_mulu2_i64: | ||
143 | - return &r_r_r_r; | ||
144 | + return C_O2_I2(r, r, r, r); | ||
145 | case INDEX_op_and_i32: | ||
146 | case INDEX_op_and_i64: | ||
147 | - return &r_r_rIK; | ||
148 | + return C_O1_I2(r, r, rIK); | ||
149 | case INDEX_op_or_i32: | ||
150 | case INDEX_op_xor_i32: | ||
151 | case INDEX_op_or_i64: | ||
152 | case INDEX_op_xor_i64: | ||
153 | - return &r_r_rI; | ||
154 | + return C_O1_I2(r, r, rI); | ||
155 | case INDEX_op_shl_i32: | ||
156 | case INDEX_op_shr_i32: | ||
157 | case INDEX_op_sar_i32: | ||
158 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
159 | case INDEX_op_sar_i64: | ||
160 | case INDEX_op_rotr_i64: | ||
161 | case INDEX_op_rotl_i64: | ||
162 | - return &r_r_ri; | ||
163 | + return C_O1_I2(r, r, ri); | ||
164 | case INDEX_op_clz_i32: | ||
165 | case INDEX_op_clz_i64: | ||
166 | - return &r_r_rWZ; | ||
167 | + return C_O1_I2(r, r, rWZ); | ||
168 | |||
169 | case INDEX_op_deposit_i32: | ||
170 | case INDEX_op_deposit_i64: | ||
171 | - return &dep; | ||
172 | + return C_O1_I2(r, 0, rZ); | ||
173 | case INDEX_op_brcond_i32: | ||
174 | case INDEX_op_brcond_i64: | ||
175 | - return &rZ_rZ; | ||
176 | + return C_O0_I2(rZ, rZ); | ||
177 | case INDEX_op_movcond_i32: | ||
178 | case INDEX_op_movcond_i64: | ||
179 | - return use_mips32r6_instructions ? &movc_r6 : &movc; | ||
180 | - | ||
181 | + return (use_mips32r6_instructions | ||
182 | + ? C_O1_I4(r, rZ, rZ, rZ, rZ) | ||
183 | + : C_O1_I4(r, rZ, rZ, rZ, 0)); | ||
184 | case INDEX_op_add2_i32: | ||
185 | case INDEX_op_sub2_i32: | ||
186 | - return &add2; | ||
187 | + return C_O2_I4(r, r, rZ, rZ, rN, rN); | ||
188 | case INDEX_op_setcond2_i32: | ||
189 | - return &setc2; | ||
190 | + return C_O1_I4(r, rZ, rZ, rZ, rZ); | ||
191 | case INDEX_op_brcond2_i32: | ||
192 | - return &br2; | ||
193 | + return C_O0_I4(rZ, rZ, rZ, rZ); | ||
194 | |||
195 | case INDEX_op_qemu_ld_i32: | ||
196 | return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 | ||
197 | - ? &r_L : &r_L_L); | ||
198 | + ? C_O1_I1(r, L) : C_O1_I2(r, L, L)); | ||
199 | case INDEX_op_qemu_st_i32: | ||
200 | return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 | ||
201 | - ? &SZ_S : &SZ_S_S); | ||
202 | + ? C_O0_I2(SZ, S) : C_O0_I3(SZ, S, S)); | ||
203 | case INDEX_op_qemu_ld_i64: | ||
204 | - return (TCG_TARGET_REG_BITS == 64 ? &r_L | ||
205 | - : TARGET_LONG_BITS == 32 ? &r_r_L : &r_r_L_L); | ||
206 | + return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) | ||
207 | + : TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, L) | ||
208 | + : C_O2_I2(r, r, L, L)); | ||
209 | case INDEX_op_qemu_st_i64: | ||
210 | - return (TCG_TARGET_REG_BITS == 64 ? &SZ_S | ||
211 | - : TARGET_LONG_BITS == 32 ? &SZ_SZ_S : &SZ_SZ_S_S); | ||
212 | + return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(SZ, S) | ||
213 | + : TARGET_LONG_BITS == 32 ? C_O0_I3(SZ, SZ, S) | ||
214 | + : C_O0_I4(SZ, SZ, S, S)); | ||
215 | |||
216 | default: | ||
217 | return NULL; | ||
218 | -- | 286 | -- |
219 | 2.25.1 | 287 | 2.34.1 |
220 | 288 | ||
1 | The movi and dupi opcodes are now completely covered | 1 | Fix TCG_GUEST_BASE_REG to use 'TCG_REG_R30' instead of '30'.
---|---|---|---|
2 | by a mov from a TYPE_CONST temporary.
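
For reference, a minimal sketch of what replaces them, using the
tcg_constant_i32() helper introduced earlier in this series; dst and
src below are placeholder temporaries, not taken from a real call site:

    /* The constant is interned as a TYPE_CONST temporary, so the
     * backend only ever sees a mov/use of that temporary; no
     * movi_i32 opcode is emitted.  Constant temporaries must not
     * be freed.
     */
    TCGv_i32 four = tcg_constant_i32(4);
    tcg_gen_add_i32(dst, src, four);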
3 | 2 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
5 | Reviewed-by: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 5 | --- |
8 | include/tcg/tcg-opc.h | 3 --- | 6 | tcg/ppc/tcg-target.c.inc | 284 ++++++++++++++++++++------------------- |
9 | tcg/optimize.c | 4 ---- | 7 | 1 file changed, 143 insertions(+), 141 deletions(-) |
10 | tcg/tcg-op-vec.c | 1 - | ||
11 | tcg/tcg.c | 18 +----------------- | ||
12 | tcg/aarch64/tcg-target.c.inc | 3 --- | ||
13 | tcg/arm/tcg-target.c.inc | 1 - | ||
14 | tcg/i386/tcg-target.c.inc | 3 --- | ||
15 | tcg/mips/tcg-target.c.inc | 2 -- | ||
16 | tcg/ppc/tcg-target.c.inc | 3 --- | ||
17 | tcg/riscv/tcg-target.c.inc | 2 -- | ||
18 | tcg/s390/tcg-target.c.inc | 2 -- | ||
19 | tcg/sparc/tcg-target.c.inc | 2 -- | ||
20 | tcg/tci/tcg-target.c.inc | 2 -- | ||
21 | 13 files changed, 1 insertion(+), 45 deletions(-) | ||
22 | 8 | ||
23 | diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/include/tcg/tcg-opc.h | ||
26 | +++ b/include/tcg/tcg-opc.h | ||
27 | @@ -XXX,XX +XXX,XX @@ DEF(br, 0, 0, 1, TCG_OPF_BB_END) | ||
28 | DEF(mb, 0, 0, 1, 0) | ||
29 | |||
30 | DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT) | ||
31 | -DEF(movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT) | ||
32 | DEF(setcond_i32, 1, 2, 1, 0) | ||
33 | DEF(movcond_i32, 1, 4, 1, IMPL(TCG_TARGET_HAS_movcond_i32)) | ||
34 | /* load/store */ | ||
35 | @@ -XXX,XX +XXX,XX @@ DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32)) | ||
36 | DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32)) | ||
37 | |||
38 | DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) | ||
39 | -DEF(movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) | ||
40 | DEF(setcond_i64, 1, 2, 1, IMPL64) | ||
41 | DEF(movcond_i64, 1, 4, 1, IMPL64 | IMPL(TCG_TARGET_HAS_movcond_i64)) | ||
42 | /* load/store */ | ||
43 | @@ -XXX,XX +XXX,XX @@ DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1, | ||
44 | #define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec) | ||
45 | |||
46 | DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT) | ||
47 | -DEF(dupi_vec, 1, 0, 1, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT) | ||
48 | |||
49 | DEF(dup_vec, 1, 1, 0, IMPLVEC) | ||
50 | DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32)) | ||
51 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
52 | index XXXXXXX..XXXXXXX 100644 | ||
53 | --- a/tcg/optimize.c | ||
54 | +++ b/tcg/optimize.c | ||
55 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
56 | CASE_OP_32_64_VEC(mov): | ||
57 | tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); | ||
58 | break; | ||
59 | - CASE_OP_32_64(movi): | ||
60 | - case INDEX_op_dupi_vec: | ||
61 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], op->args[1]); | ||
62 | - break; | ||
63 | |||
64 | case INDEX_op_dup_vec: | ||
65 | if (arg_is_const(op->args[1])) { | ||
66 | diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c | ||
67 | index XXXXXXX..XXXXXXX 100644 | ||
68 | --- a/tcg/tcg-op-vec.c | ||
69 | +++ b/tcg/tcg-op-vec.c | ||
70 | @@ -XXX,XX +XXX,XX @@ bool tcg_can_emit_vecop_list(const TCGOpcode *list, | ||
71 | case INDEX_op_xor_vec: | ||
72 | case INDEX_op_mov_vec: | ||
73 | case INDEX_op_dup_vec: | ||
74 | - case INDEX_op_dupi_vec: | ||
75 | case INDEX_op_dup2_vec: | ||
76 | case INDEX_op_ld_vec: | ||
77 | case INDEX_op_st_vec: | ||
78 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
79 | index XXXXXXX..XXXXXXX 100644 | ||
80 | --- a/tcg/tcg.c | ||
81 | +++ b/tcg/tcg.c | ||
82 | @@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op) | ||
83 | return TCG_TARGET_HAS_goto_ptr; | ||
84 | |||
85 | case INDEX_op_mov_i32: | ||
86 | - case INDEX_op_movi_i32: | ||
87 | case INDEX_op_setcond_i32: | ||
88 | case INDEX_op_brcond_i32: | ||
89 | case INDEX_op_ld8u_i32: | ||
90 | @@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op) | ||
91 | return TCG_TARGET_REG_BITS == 32; | ||
92 | |||
93 | case INDEX_op_mov_i64: | ||
94 | - case INDEX_op_movi_i64: | ||
95 | case INDEX_op_setcond_i64: | ||
96 | case INDEX_op_brcond_i64: | ||
97 | case INDEX_op_ld8u_i64: | ||
98 | @@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op) | ||
99 | |||
100 | case INDEX_op_mov_vec: | ||
101 | case INDEX_op_dup_vec: | ||
102 | - case INDEX_op_dupi_vec: | ||
103 | case INDEX_op_dupm_vec: | ||
104 | case INDEX_op_ld_vec: | ||
105 | case INDEX_op_st_vec: | ||
106 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | - * Specialized code generation for INDEX_op_movi_*. | ||
111 | + * Specialized code generation for INDEX_op_mov_* with a constant. | ||
112 | */ | ||
113 | static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots, | ||
114 | tcg_target_ulong val, TCGLifeData arg_life, | ||
115 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots, | ||
116 | } | ||
117 | } | ||
118 | |||
119 | -static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op) | ||
120 | -{ | ||
121 | - TCGTemp *ots = arg_temp(op->args[0]); | ||
122 | - tcg_target_ulong val = op->args[1]; | ||
123 | - | ||
124 | - tcg_reg_alloc_do_movi(s, ots, val, op->life, op->output_pref[0]); | ||
125 | -} | ||
126 | - | ||
127 | /* | ||
128 | * Specialized code generation for INDEX_op_mov_*. | ||
129 | */ | ||
130 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | ||
131 | case INDEX_op_mov_vec: | ||
132 | tcg_reg_alloc_mov(s, op); | ||
133 | break; | ||
134 | - case INDEX_op_movi_i32: | ||
135 | - case INDEX_op_movi_i64: | ||
136 | - case INDEX_op_dupi_vec: | ||
137 | - tcg_reg_alloc_movi(s, op); | ||
138 | - break; | ||
139 | case INDEX_op_dup_vec: | ||
140 | tcg_reg_alloc_dup(s, op); | ||
141 | break; | ||
142 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | ||
143 | index XXXXXXX..XXXXXXX 100644 | ||
144 | --- a/tcg/aarch64/tcg-target.c.inc | ||
145 | +++ b/tcg/aarch64/tcg-target.c.inc | ||
146 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, | ||
147 | |||
148 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | ||
149 | case INDEX_op_mov_i64: | ||
150 | - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ | ||
151 | - case INDEX_op_movi_i64: | ||
152 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | ||
153 | default: | ||
154 | g_assert_not_reached(); | ||
155 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
156 | break; | ||
157 | |||
158 | case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ | ||
159 | - case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ | ||
160 | case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ | ||
161 | default: | ||
162 | g_assert_not_reached(); | ||
163 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
164 | index XXXXXXX..XXXXXXX 100644 | ||
165 | --- a/tcg/arm/tcg-target.c.inc | ||
166 | +++ b/tcg/arm/tcg-target.c.inc | ||
167 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, | ||
168 | break; | ||
169 | |||
170 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | ||
171 | - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ | ||
172 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | ||
173 | default: | ||
174 | tcg_abort(); | ||
175 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | ||
176 | index XXXXXXX..XXXXXXX 100644 | ||
177 | --- a/tcg/i386/tcg-target.c.inc | ||
178 | +++ b/tcg/i386/tcg-target.c.inc | ||
179 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, | ||
180 | break; | ||
181 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | ||
182 | case INDEX_op_mov_i64: | ||
183 | - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ | ||
184 | - case INDEX_op_movi_i64: | ||
185 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | ||
186 | default: | ||
187 | tcg_abort(); | ||
188 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
189 | break; | ||
190 | |||
191 | case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ | ||
192 | - case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ | ||
193 | case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ | ||
194 | default: | ||
195 | g_assert_not_reached(); | ||
196 | diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc | ||
197 | index XXXXXXX..XXXXXXX 100644 | ||
198 | --- a/tcg/mips/tcg-target.c.inc | ||
199 | +++ b/tcg/mips/tcg-target.c.inc | ||
200 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, | ||
201 | break; | ||
202 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | ||
203 | case INDEX_op_mov_i64: | ||
204 | - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ | ||
205 | - case INDEX_op_movi_i64: | ||
206 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | ||
207 | default: | ||
208 | tcg_abort(); | ||
209 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc | 9 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
210 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
211 | --- a/tcg/ppc/tcg-target.c.inc | 11 | --- a/tcg/ppc/tcg-target.c.inc |
212 | +++ b/tcg/ppc/tcg-target.c.inc | 12 | +++ b/tcg/ppc/tcg-target.c.inc |
213 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | 13 | @@ -XXX,XX +XXX,XX @@ |
214 | 14 | ||
215 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | 15 | #define have_isel (cpuinfo & CPUINFO_ISEL) |
216 | case INDEX_op_mov_i64: | 16 | |
217 | - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ | 17 | -#ifndef CONFIG_SOFTMMU |
218 | - case INDEX_op_movi_i64: | 18 | -#define TCG_GUEST_BASE_REG 30 |
219 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | 19 | -#endif |
220 | default: | 20 | +#define TCG_GUEST_BASE_REG TCG_REG_R30 |
221 | tcg_abort(); | 21 | |
222 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | 22 | #ifdef CONFIG_DEBUG_TCG |
223 | return; | 23 | static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = { |
224 | 24 | @@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, | |
225 | case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ | 25 | s_bits == MO_128); |
226 | - case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ | 26 | a_bits = h->aa.align; |
227 | case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ | 27 | |
228 | default: | 28 | -#ifdef CONFIG_SOFTMMU |
229 | g_assert_not_reached(); | 29 | - int mem_index = get_mmuidx(oi); |
230 | diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc | 30 | - int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) |
231 | index XXXXXXX..XXXXXXX 100644 | 31 | - : offsetof(CPUTLBEntry, addr_write); |
232 | --- a/tcg/riscv/tcg-target.c.inc | 32 | - int fast_off = tlb_mask_table_ofs(s, mem_index); |
233 | +++ b/tcg/riscv/tcg-target.c.inc | 33 | - int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); |
234 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, | 34 | - int table_off = fast_off + offsetof(CPUTLBDescFast, table); |
235 | 35 | + if (tcg_use_softmmu) { | |
236 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | 36 | + int mem_index = get_mmuidx(oi); |
237 | case INDEX_op_mov_i64: | 37 | + int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read) |
238 | - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ | 38 | + : offsetof(CPUTLBEntry, addr_write); |
239 | - case INDEX_op_movi_i64: | 39 | + int fast_off = tlb_mask_table_ofs(s, mem_index); |
240 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | 40 | + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); |
241 | default: | 41 | + int table_off = fast_off + offsetof(CPUTLBDescFast, table); |
242 | g_assert_not_reached(); | 42 | |
243 | diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc | 43 | - ldst = new_ldst_label(s); |
244 | index XXXXXXX..XXXXXXX 100644 | 44 | - ldst->is_ld = is_ld; |
245 | --- a/tcg/s390/tcg-target.c.inc | 45 | - ldst->oi = oi; |
246 | +++ b/tcg/s390/tcg-target.c.inc | 46 | - ldst->addrlo_reg = addrlo; |
247 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, | 47 | - ldst->addrhi_reg = addrhi; |
248 | 48 | - | |
249 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | 49 | - /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ |
250 | case INDEX_op_mov_i64: | 50 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off); |
251 | - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ | 51 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off); |
252 | - case INDEX_op_movi_i64: | 52 | - |
253 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | 53 | - /* Extract the page index, shifted into place for tlb index. */ |
254 | default: | 54 | - if (TCG_TARGET_REG_BITS == 32) { |
255 | tcg_abort(); | 55 | - tcg_out_shri32(s, TCG_REG_R0, addrlo, |
256 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc | 56 | - s->page_bits - CPU_TLB_ENTRY_BITS); |
257 | index XXXXXXX..XXXXXXX 100644 | 57 | - } else { |
258 | --- a/tcg/sparc/tcg-target.c.inc | 58 | - tcg_out_shri64(s, TCG_REG_R0, addrlo, |
259 | +++ b/tcg/sparc/tcg-target.c.inc | 59 | - s->page_bits - CPU_TLB_ENTRY_BITS); |
260 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, | 60 | - } |
261 | 61 | - tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0)); | |
262 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | 62 | - |
263 | case INDEX_op_mov_i64: | 63 | - /* |
264 | - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ | 64 | - * Load the (low part) TLB comparator into TMP2. |
265 | - case INDEX_op_movi_i64: | 65 | - * For 64-bit host, always load the entire 64-bit slot for simplicity. |
266 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | 66 | - * We will ignore the high bits with tcg_out_cmp(..., addr_type). |
267 | default: | 67 | - */ |
268 | tcg_abort(); | 68 | - if (TCG_TARGET_REG_BITS == 64) { |
269 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | 69 | - if (cmp_off == 0) { |
270 | index XXXXXXX..XXXXXXX 100644 | 70 | - tcg_out32(s, LDUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2)); |
271 | --- a/tcg/tci/tcg-target.c.inc | 71 | - } else { |
272 | +++ b/tcg/tci/tcg-target.c.inc | 72 | - tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2)); |
273 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | 73 | - tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off); |
274 | break; | 74 | - } |
275 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | 75 | - } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) { |
276 | case INDEX_op_mov_i64: | 76 | - tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2)); |
277 | - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ | 77 | - } else { |
278 | - case INDEX_op_movi_i64: | 78 | - tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2)); |
279 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | 79 | - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1, |
280 | default: | 80 | - cmp_off + 4 * HOST_BIG_ENDIAN); |
281 | tcg_abort(); | 81 | - } |
82 | - | ||
83 | - /* | ||
84 | - * Load the TLB addend for use on the fast path. | ||
85 | - * Do this asap to minimize any load use delay. | ||
86 | - */ | ||
87 | - if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) { | ||
88 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, | ||
89 | - offsetof(CPUTLBEntry, addend)); | ||
90 | - } | ||
91 | - | ||
92 | - /* Clear the non-page, non-alignment bits from the address in R0. */ | ||
93 | - if (TCG_TARGET_REG_BITS == 32) { | ||
94 | - /* | ||
95 | - * We don't support unaligned accesses on 32-bits. | ||
96 | - * Preserve the bottom bits and thus trigger a comparison | ||
97 | - * failure on unaligned accesses. | ||
98 | - */ | ||
99 | - if (a_bits < s_bits) { | ||
100 | - a_bits = s_bits; | ||
101 | - } | ||
102 | - tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0, | ||
103 | - (32 - a_bits) & 31, 31 - s->page_bits); | ||
104 | - } else { | ||
105 | - TCGReg t = addrlo; | ||
106 | - | ||
107 | - /* | ||
108 | - * If the access is unaligned, we need to make sure we fail if we | ||
109 | - * cross a page boundary. The trick is to add the access size-1 | ||
110 | - * to the address before masking the low bits. That will make the | ||
111 | - * address overflow to the next page if we cross a page boundary, | ||
112 | - * which will then force a mismatch of the TLB compare. | ||
113 | - */ | ||
114 | - if (a_bits < s_bits) { | ||
115 | - unsigned a_mask = (1 << a_bits) - 1; | ||
116 | - unsigned s_mask = (1 << s_bits) - 1; | ||
117 | - tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask)); | ||
118 | - t = TCG_REG_R0; | ||
119 | - } | ||
120 | - | ||
121 | - /* Mask the address for the requested alignment. */ | ||
122 | - if (addr_type == TCG_TYPE_I32) { | ||
123 | - tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0, | ||
124 | - (32 - a_bits) & 31, 31 - s->page_bits); | ||
125 | - } else if (a_bits == 0) { | ||
126 | - tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits); | ||
127 | - } else { | ||
128 | - tcg_out_rld(s, RLDICL, TCG_REG_R0, t, | ||
129 | - 64 - s->page_bits, s->page_bits - a_bits); | ||
130 | - tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0); | ||
131 | - } | ||
132 | - } | ||
133 | - | ||
134 | - if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) { | ||
135 | - /* Low part comparison into cr7. */ | ||
136 | - tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, | ||
137 | - 0, 7, TCG_TYPE_I32); | ||
138 | - | ||
139 | - /* Load the high part TLB comparator into TMP2. */ | ||
140 | - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1, | ||
141 | - cmp_off + 4 * !HOST_BIG_ENDIAN); | ||
142 | - | ||
143 | - /* Load addend, deferred for this case. */ | ||
144 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, | ||
145 | - offsetof(CPUTLBEntry, addend)); | ||
146 | - | ||
147 | - /* High part comparison into cr6. */ | ||
148 | - tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, 0, 6, TCG_TYPE_I32); | ||
149 | - | ||
150 | - /* Combine comparisons into cr7. */ | ||
151 | - tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); | ||
152 | - } else { | ||
153 | - /* Full comparison into cr7. */ | ||
154 | - tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, 0, 7, addr_type); | ||
155 | - } | ||
156 | - | ||
157 | - /* Load a pointer into the current opcode w/conditional branch-link. */ | ||
158 | - ldst->label_ptr[0] = s->code_ptr; | ||
159 | - tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); | ||
160 | - | ||
161 | - h->base = TCG_REG_TMP1; | ||
162 | -#else | ||
163 | - if (a_bits) { | ||
164 | ldst = new_ldst_label(s); | ||
165 | ldst->is_ld = is_ld; | ||
166 | ldst->oi = oi; | ||
167 | ldst->addrlo_reg = addrlo; | ||
168 | ldst->addrhi_reg = addrhi; | ||
169 | |||
170 | - /* We are expecting a_bits to max out at 7, much lower than ANDI. */ | ||
171 | - tcg_debug_assert(a_bits < 16); | ||
172 | - tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1)); | ||
173 | + /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ | ||
174 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off); | ||
175 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off); | ||
176 | |||
177 | + /* Extract the page index, shifted into place for tlb index. */ | ||
178 | + if (TCG_TARGET_REG_BITS == 32) { | ||
179 | + tcg_out_shri32(s, TCG_REG_R0, addrlo, | ||
180 | + s->page_bits - CPU_TLB_ENTRY_BITS); | ||
181 | + } else { | ||
182 | + tcg_out_shri64(s, TCG_REG_R0, addrlo, | ||
183 | + s->page_bits - CPU_TLB_ENTRY_BITS); | ||
184 | + } | ||
185 | + tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0)); | ||
186 | + | ||
187 | + /* | ||
188 | + * Load the (low part) TLB comparator into TMP2. | ||
189 | + * For 64-bit host, always load the entire 64-bit slot for simplicity. | ||
190 | + * We will ignore the high bits with tcg_out_cmp(..., addr_type). | ||
191 | + */ | ||
192 | + if (TCG_TARGET_REG_BITS == 64) { | ||
193 | + if (cmp_off == 0) { | ||
194 | + tcg_out32(s, LDUX | TAB(TCG_REG_TMP2, | ||
195 | + TCG_REG_TMP1, TCG_REG_TMP2)); | ||
196 | + } else { | ||
197 | + tcg_out32(s, ADD | TAB(TCG_REG_TMP1, | ||
198 | + TCG_REG_TMP1, TCG_REG_TMP2)); | ||
199 | + tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2, | ||
200 | + TCG_REG_TMP1, cmp_off); | ||
201 | + } | ||
202 | + } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) { | ||
203 | + tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2, | ||
204 | + TCG_REG_TMP1, TCG_REG_TMP2)); | ||
205 | + } else { | ||
206 | + tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2)); | ||
207 | + tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1, | ||
208 | + cmp_off + 4 * HOST_BIG_ENDIAN); | ||
209 | + } | ||
210 | + | ||
211 | + /* | ||
212 | + * Load the TLB addend for use on the fast path. | ||
213 | + * Do this asap to minimize any load use delay. | ||
214 | + */ | ||
215 | + if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) { | ||
216 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, | ||
217 | + offsetof(CPUTLBEntry, addend)); | ||
218 | + } | ||
219 | + | ||
220 | + /* Clear the non-page, non-alignment bits from the address in R0. */ | ||
221 | + if (TCG_TARGET_REG_BITS == 32) { | ||
222 | + /* | ||
223 | + * We don't support unaligned accesses on 32-bits. | ||
224 | + * Preserve the bottom bits and thus trigger a comparison | ||
225 | + * failure on unaligned accesses. | ||
226 | + */ | ||
227 | + if (a_bits < s_bits) { | ||
228 | + a_bits = s_bits; | ||
229 | + } | ||
230 | + tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0, | ||
231 | + (32 - a_bits) & 31, 31 - s->page_bits); | ||
232 | + } else { | ||
233 | + TCGReg t = addrlo; | ||
234 | + | ||
235 | + /* | ||
236 | + * If the access is unaligned, we need to make sure we fail if we | ||
237 | + * cross a page boundary. The trick is to add the access size-1 | ||
238 | + * to the address before masking the low bits. That will make the | ||
239 | + * address overflow to the next page if we cross a page boundary, | ||
240 | + * which will then force a mismatch of the TLB compare. | ||
241 | + */ | ||
242 | + if (a_bits < s_bits) { | ||
243 | + unsigned a_mask = (1 << a_bits) - 1; | ||
244 | + unsigned s_mask = (1 << s_bits) - 1; | ||
245 | + tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask)); | ||
246 | + t = TCG_REG_R0; | ||
247 | + } | ||
248 | + | ||
249 | + /* Mask the address for the requested alignment. */ | ||
250 | + if (addr_type == TCG_TYPE_I32) { | ||
251 | + tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0, | ||
252 | + (32 - a_bits) & 31, 31 - s->page_bits); | ||
253 | + } else if (a_bits == 0) { | ||
254 | + tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits); | ||
255 | + } else { | ||
256 | + tcg_out_rld(s, RLDICL, TCG_REG_R0, t, | ||
257 | + 64 - s->page_bits, s->page_bits - a_bits); | ||
258 | + tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0); | ||
259 | + } | ||
260 | + } | ||
261 | + | ||
262 | + if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) { | ||
263 | + /* Low part comparison into cr7. */ | ||
264 | + tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, | ||
265 | + 0, 7, TCG_TYPE_I32); | ||
266 | + | ||
267 | + /* Load the high part TLB comparator into TMP2. */ | ||
268 | + tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1, | ||
269 | + cmp_off + 4 * !HOST_BIG_ENDIAN); | ||
270 | + | ||
271 | + /* Load addend, deferred for this case. */ | ||
272 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, | ||
273 | + offsetof(CPUTLBEntry, addend)); | ||
274 | + | ||
275 | + /* High part comparison into cr6. */ | ||
276 | + tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, | ||
277 | + 0, 6, TCG_TYPE_I32); | ||
278 | + | ||
279 | + /* Combine comparisons into cr7. */ | ||
280 | + tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); | ||
281 | + } else { | ||
282 | + /* Full comparison into cr7. */ | ||
283 | + tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, | ||
284 | + 0, 7, addr_type); | ||
285 | + } | ||
286 | + | ||
287 | + /* Load a pointer into the current opcode w/conditional branch-link. */ | ||
288 | ldst->label_ptr[0] = s->code_ptr; | ||
289 | - tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK); | ||
290 | - } | ||
291 | + tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); | ||
292 | |||
293 | - h->base = guest_base ? TCG_GUEST_BASE_REG : 0; | ||
294 | -#endif | ||
295 | + h->base = TCG_REG_TMP1; | ||
296 | + } else { | ||
297 | + if (a_bits) { | ||
298 | + ldst = new_ldst_label(s); | ||
299 | + ldst->is_ld = is_ld; | ||
300 | + ldst->oi = oi; | ||
301 | + ldst->addrlo_reg = addrlo; | ||
302 | + ldst->addrhi_reg = addrhi; | ||
303 | + | ||
304 | + /* We are expecting a_bits to max out at 7, much lower than ANDI. */ | ||
305 | + tcg_debug_assert(a_bits < 16); | ||
306 | + tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1)); | ||
307 | + | ||
308 | + ldst->label_ptr[0] = s->code_ptr; | ||
309 | + tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK); | ||
310 | + } | ||
311 | + | ||
312 | + h->base = guest_base ? TCG_GUEST_BASE_REG : 0; | ||
313 | + } | ||
314 | |||
315 | if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { | ||
316 | /* Zero-extend the guest address for use in the host address. */ | ||
317 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
318 | } | ||
319 | tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET); | ||
320 | |||
321 | -#ifndef CONFIG_SOFTMMU | ||
322 | - if (guest_base) { | ||
323 | + if (!tcg_use_softmmu && guest_base) { | ||
324 | tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true); | ||
325 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); | ||
326 | } | ||
327 | -#endif | ||
328 | |||
329 | tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); | ||
330 | tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR); | ||
282 | -- | 331 | -- |
283 | 2.25.1 | 332 | 2.34.1 |
284 | 333 | ||
1 | The temp_fixed, temp_global, temp_local bits are all related. | 1 | Fixes: 92c041c59b ("tcg/riscv: Add the prologue generation and register the JIT") |
---|---|---|---|
2 | Combine them into a single enumeration. | ||
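
As a cross-check for review, the old flag bits map onto the new
enumeration as sketched below; the helper itself is hypothetical,
written only to make the mapping explicit.  The enum is ordered so
that comparisons such as "kind >= TEMP_GLOBAL" can stand in for the
old temp_global tests.

    /* Hypothetical helper, illustration only: old bits -> new kind. */
    static inline TCGTempKind old_bits_to_kind(bool fixed_reg,
                                               bool temp_global,
                                               bool temp_local)
    {
        if (fixed_reg) {
            return TEMP_FIXED;      /* fixed regs were also globals */
        }
        if (temp_global) {
            return TEMP_GLOBAL;
        }
        return temp_local ? TEMP_LOCAL : TEMP_NORMAL;
    }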
3 | |||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 3 | --- |
7 | include/tcg/tcg.h | 20 +++++--- | 4 | tcg/riscv/tcg-target.c.inc | 6 ++++-- |
8 | tcg/optimize.c | 8 +-- | 5 | 1 file changed, 4 insertions(+), 2 deletions(-) |
9 | tcg/tcg.c | 122 ++++++++++++++++++++++++++++------------------ | ||
10 | 3 files changed, 90 insertions(+), 60 deletions(-) | ||
11 | 6 | ||
12 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | 7 | diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc |
13 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/include/tcg/tcg.h | 9 | --- a/tcg/riscv/tcg-target.c.inc |
15 | +++ b/include/tcg/tcg.h | 10 | +++ b/tcg/riscv/tcg-target.c.inc |
16 | @@ -XXX,XX +XXX,XX @@ typedef enum TCGTempVal { | 11 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) |
17 | TEMP_VAL_CONST, | ||
18 | } TCGTempVal; | ||
19 | |||
20 | +typedef enum TCGTempKind { | ||
21 | + /* Temp is dead at the end of all basic blocks. */ | ||
22 | + TEMP_NORMAL, | ||
23 | + /* Temp is saved across basic blocks but dead at the end of TBs. */ | ||
24 | + TEMP_LOCAL, | ||
25 | + /* Temp is saved across both basic blocks and translation blocks. */ | ||
26 | + TEMP_GLOBAL, | ||
27 | + /* Temp is in a fixed register. */ | ||
28 | + TEMP_FIXED, | ||
29 | +} TCGTempKind; | ||
30 | + | ||
31 | typedef struct TCGTemp { | ||
32 | TCGReg reg:8; | ||
33 | TCGTempVal val_type:8; | ||
34 | TCGType base_type:8; | ||
35 | TCGType type:8; | ||
36 | - unsigned int fixed_reg:1; | ||
37 | + TCGTempKind kind:3; | ||
38 | unsigned int indirect_reg:1; | ||
39 | unsigned int indirect_base:1; | ||
40 | unsigned int mem_coherent:1; | ||
41 | unsigned int mem_allocated:1; | ||
42 | - /* If true, the temp is saved across both basic blocks and | ||
43 | - translation blocks. */ | ||
44 | - unsigned int temp_global:1; | ||
45 | - /* If true, the temp is saved across basic blocks but dead | ||
46 | - at the end of translation blocks. If false, the temp is | ||
47 | - dead at the end of basic blocks. */ | ||
48 | - unsigned int temp_local:1; | ||
49 | unsigned int temp_allocated:1; | ||
50 | |||
51 | tcg_target_long val; | ||
52 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/tcg/optimize.c | ||
55 | +++ b/tcg/optimize.c | ||
56 | @@ -XXX,XX +XXX,XX @@ static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts) | ||
57 | TCGTemp *i; | ||
58 | |||
59 | /* If this is already a global, we can't do better. */ | ||
60 | - if (ts->temp_global) { | ||
61 | + if (ts->kind >= TEMP_GLOBAL) { | ||
62 | return ts; | ||
63 | } | 12 | } |
64 | 13 | ||
65 | /* Search for a global first. */ | 14 | #if !defined(CONFIG_SOFTMMU) |
66 | for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) { | 15 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); |
67 | - if (i->temp_global) { | 16 | - tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); |
68 | + if (i->kind >= TEMP_GLOBAL) { | 17 | + if (guest_base) { |
69 | return i; | 18 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); |
70 | } | 19 | + tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); |
71 | } | 20 | + } |
72 | |||
73 | /* If it is a temp, search for a temp local. */ | ||
74 | - if (!ts->temp_local) { | ||
75 | + if (ts->kind == TEMP_NORMAL) { | ||
76 | for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) { | ||
77 | - if (ts->temp_local) { | ||
78 | + if (i->kind >= TEMP_LOCAL) { | ||
79 | return i; | ||
80 | } | ||
81 | } | ||
82 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
83 | index XXXXXXX..XXXXXXX 100644 | ||
84 | --- a/tcg/tcg.c | ||
85 | +++ b/tcg/tcg.c | ||
86 | @@ -XXX,XX +XXX,XX @@ static inline TCGTemp *tcg_global_alloc(TCGContext *s) | ||
87 | tcg_debug_assert(s->nb_globals == s->nb_temps); | ||
88 | s->nb_globals++; | ||
89 | ts = tcg_temp_alloc(s); | ||
90 | - ts->temp_global = 1; | ||
91 | + ts->kind = TEMP_GLOBAL; | ||
92 | |||
93 | return ts; | ||
94 | } | ||
95 | @@ -XXX,XX +XXX,XX @@ static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type, | ||
96 | ts = tcg_global_alloc(s); | ||
97 | ts->base_type = type; | ||
98 | ts->type = type; | ||
99 | - ts->fixed_reg = 1; | ||
100 | + ts->kind = TEMP_FIXED; | ||
101 | ts->reg = reg; | ||
102 | ts->name = name; | ||
103 | tcg_regset_set_reg(s->reserved_regs, reg); | ||
104 | @@ -XXX,XX +XXX,XX @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base, | ||
105 | bigendian = 1; | ||
106 | #endif | 21 | #endif |
107 | 22 | ||
108 | - if (!base_ts->fixed_reg) { | 23 | /* Call generated code */ |
109 | + if (base_ts->kind != TEMP_FIXED) { | ||
110 | /* We do not support double-indirect registers. */ | ||
111 | tcg_debug_assert(!base_ts->indirect_reg); | ||
112 | base_ts->indirect_base = 1; | ||
113 | @@ -XXX,XX +XXX,XX @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base, | ||
114 | TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local) | ||
115 | { | ||
116 | TCGContext *s = tcg_ctx; | ||
117 | + TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL; | ||
118 | TCGTemp *ts; | ||
119 | int idx, k; | ||
120 | |||
121 | @@ -XXX,XX +XXX,XX @@ TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local) | ||
122 | ts = &s->temps[idx]; | ||
123 | ts->temp_allocated = 1; | ||
124 | tcg_debug_assert(ts->base_type == type); | ||
125 | - tcg_debug_assert(ts->temp_local == temp_local); | ||
126 | + tcg_debug_assert(ts->kind == kind); | ||
127 | } else { | ||
128 | ts = tcg_temp_alloc(s); | ||
129 | if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) { | ||
130 | @@ -XXX,XX +XXX,XX @@ TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local) | ||
131 | ts->base_type = type; | ||
132 | ts->type = TCG_TYPE_I32; | ||
133 | ts->temp_allocated = 1; | ||
134 | - ts->temp_local = temp_local; | ||
135 | + ts->kind = kind; | ||
136 | |||
137 | tcg_debug_assert(ts2 == ts + 1); | ||
138 | ts2->base_type = TCG_TYPE_I64; | ||
139 | ts2->type = TCG_TYPE_I32; | ||
140 | ts2->temp_allocated = 1; | ||
141 | - ts2->temp_local = temp_local; | ||
142 | + ts2->kind = kind; | ||
143 | } else { | ||
144 | ts->base_type = type; | ||
145 | ts->type = type; | ||
146 | ts->temp_allocated = 1; | ||
147 | - ts->temp_local = temp_local; | ||
148 | + ts->kind = kind; | ||
149 | } | ||
150 | } | ||
151 | |||
152 | @@ -XXX,XX +XXX,XX @@ void tcg_temp_free_internal(TCGTemp *ts) | ||
153 | } | ||
154 | #endif | ||
155 | |||
156 | - tcg_debug_assert(ts->temp_global == 0); | ||
157 | + tcg_debug_assert(ts->kind < TEMP_GLOBAL); | ||
158 | tcg_debug_assert(ts->temp_allocated != 0); | ||
159 | ts->temp_allocated = 0; | ||
160 | |||
161 | idx = temp_idx(ts); | ||
162 | - k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0); | ||
163 | + k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT); | ||
164 | set_bit(idx, s->free_temps[k].l); | ||
165 | } | ||
166 | |||
167 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) | ||
168 | static void tcg_reg_alloc_start(TCGContext *s) | ||
169 | { | ||
170 | int i, n; | ||
171 | - TCGTemp *ts; | ||
172 | |||
173 | - for (i = 0, n = s->nb_globals; i < n; i++) { | ||
174 | - ts = &s->temps[i]; | ||
175 | - ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM); | ||
176 | - } | ||
177 | - for (n = s->nb_temps; i < n; i++) { | ||
178 | - ts = &s->temps[i]; | ||
179 | - ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD); | ||
180 | - ts->mem_allocated = 0; | ||
181 | - ts->fixed_reg = 0; | ||
182 | + for (i = 0, n = s->nb_temps; i < n; i++) { | ||
183 | + TCGTemp *ts = &s->temps[i]; | ||
184 | + TCGTempVal val = TEMP_VAL_MEM; | ||
185 | + | ||
186 | + switch (ts->kind) { | ||
187 | + case TEMP_FIXED: | ||
188 | + val = TEMP_VAL_REG; | ||
189 | + break; | ||
190 | + case TEMP_GLOBAL: | ||
191 | + break; | ||
192 | + case TEMP_NORMAL: | ||
193 | + val = TEMP_VAL_DEAD; | ||
194 | + /* fall through */ | ||
195 | + case TEMP_LOCAL: | ||
196 | + ts->mem_allocated = 0; | ||
197 | + break; | ||
198 | + default: | ||
199 | + g_assert_not_reached(); | ||
200 | + } | ||
201 | + ts->val_type = val; | ||
202 | } | ||
203 | |||
204 | memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp)); | ||
205 | @@ -XXX,XX +XXX,XX @@ static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size, | ||
206 | { | ||
207 | int idx = temp_idx(ts); | ||
208 | |||
209 | - if (ts->temp_global) { | ||
210 | + switch (ts->kind) { | ||
211 | + case TEMP_FIXED: | ||
212 | + case TEMP_GLOBAL: | ||
213 | pstrcpy(buf, buf_size, ts->name); | ||
214 | - } else if (ts->temp_local) { | ||
215 | + break; | ||
216 | + case TEMP_LOCAL: | ||
217 | snprintf(buf, buf_size, "loc%d", idx - s->nb_globals); | ||
218 | - } else { | ||
219 | + break; | ||
220 | + case TEMP_NORMAL: | ||
221 | snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals); | ||
222 | + break; | ||
223 | } | ||
224 | return buf; | ||
225 | } | ||
226 | @@ -XXX,XX +XXX,XX @@ static void la_bb_end(TCGContext *s, int ng, int nt) | ||
227 | { | ||
228 | int i; | ||
229 | |||
230 | - for (i = 0; i < ng; ++i) { | ||
231 | - s->temps[i].state = TS_DEAD | TS_MEM; | ||
232 | - la_reset_pref(&s->temps[i]); | ||
233 | - } | ||
234 | - for (i = ng; i < nt; ++i) { | ||
235 | - s->temps[i].state = (s->temps[i].temp_local | ||
236 | - ? TS_DEAD | TS_MEM | ||
237 | - : TS_DEAD); | ||
238 | - la_reset_pref(&s->temps[i]); | ||
239 | + for (i = 0; i < nt; ++i) { | ||
240 | + TCGTemp *ts = &s->temps[i]; | ||
241 | + int state; | ||
242 | + | ||
243 | + switch (ts->kind) { | ||
244 | + case TEMP_FIXED: | ||
245 | + case TEMP_GLOBAL: | ||
246 | + case TEMP_LOCAL: | ||
247 | + state = TS_DEAD | TS_MEM; | ||
248 | + break; | ||
249 | + case TEMP_NORMAL: | ||
250 | + state = TS_DEAD; | ||
251 | + break; | ||
252 | + default: | ||
253 | + g_assert_not_reached(); | ||
254 | + } | ||
255 | + ts->state = state; | ||
256 | + la_reset_pref(ts); | ||
257 | } | ||
258 | } | ||
259 | |||
260 | @@ -XXX,XX +XXX,XX @@ static void check_regs(TCGContext *s) | ||
261 | } | ||
262 | for (k = 0; k < s->nb_temps; k++) { | ||
263 | ts = &s->temps[k]; | ||
264 | - if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg | ||
265 | + if (ts->val_type == TEMP_VAL_REG | ||
266 | + && ts->kind != TEMP_FIXED | ||
267 | && s->reg_to_temp[ts->reg] != ts) { | ||
268 | printf("Inconsistency for temp %s:\n", | ||
269 | tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts)); | ||
270 | @@ -XXX,XX +XXX,XX @@ static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet); | ||
271 | mark it free; otherwise mark it dead. */ | ||
272 | static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead) | ||
273 | { | ||
274 | - if (ts->fixed_reg) { | ||
275 | + if (ts->kind == TEMP_FIXED) { | ||
276 | return; | ||
277 | } | ||
278 | if (ts->val_type == TEMP_VAL_REG) { | ||
279 | s->reg_to_temp[ts->reg] = NULL; | ||
280 | } | ||
281 | ts->val_type = (free_or_dead < 0 | ||
282 | - || ts->temp_local | ||
283 | - || ts->temp_global | ||
284 | + || ts->kind != TEMP_NORMAL | ||
285 | ? TEMP_VAL_MEM : TEMP_VAL_DEAD); | ||
286 | } | ||
287 | |||
288 | @@ -XXX,XX +XXX,XX @@ static inline void temp_dead(TCGContext *s, TCGTemp *ts) | ||
289 | static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs, | ||
290 | TCGRegSet preferred_regs, int free_or_dead) | ||
291 | { | ||
292 | - if (ts->fixed_reg) { | ||
293 | + if (ts->kind == TEMP_FIXED) { | ||
294 | return; | ||
295 | } | ||
296 | if (!ts->mem_coherent) { | ||
297 | @@ -XXX,XX +XXX,XX @@ static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs) | ||
298 | { | ||
299 | /* The liveness analysis already ensures that globals are back | ||
300 | in memory. Keep a tcg_debug_assert for safety. */
301 | - tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg); | ||
302 | + tcg_debug_assert(ts->val_type == TEMP_VAL_MEM | ||
303 | + || ts->kind == TEMP_FIXED); | ||
304 | } | ||
305 | |||
306 | /* save globals to their canonical location and assume they can be | ||
307 | @@ -XXX,XX +XXX,XX @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs) | ||
308 | for (i = 0, n = s->nb_globals; i < n; i++) { | ||
309 | TCGTemp *ts = &s->temps[i]; | ||
310 | tcg_debug_assert(ts->val_type != TEMP_VAL_REG | ||
311 | - || ts->fixed_reg | ||
312 | + || ts->kind == TEMP_FIXED | ||
313 | || ts->mem_coherent); | ||
314 | } | ||
315 | } | ||
316 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) | ||
317 | |||
318 | for (i = s->nb_globals; i < s->nb_temps; i++) { | ||
319 | TCGTemp *ts = &s->temps[i]; | ||
320 | - if (ts->temp_local) { | ||
321 | + if (ts->kind == TEMP_LOCAL) { | ||
322 | temp_save(s, ts, allocated_regs); | ||
323 | } else { | ||
324 | /* The liveness analysis already ensures that temps are dead. | ||
325 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots, | ||
326 | TCGRegSet preferred_regs) | ||
327 | { | ||
328 | /* ENV should not be modified. */ | ||
329 | - tcg_debug_assert(!ots->fixed_reg); | ||
330 | + tcg_debug_assert(ots->kind != TEMP_FIXED); | ||
331 | |||
332 | /* The movi is not explicitly generated here. */ | ||
333 | if (ots->val_type == TEMP_VAL_REG) { | ||
334 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op) | ||
335 | ts = arg_temp(op->args[1]); | ||
336 | |||
337 | /* ENV should not be modified. */ | ||
338 | - tcg_debug_assert(!ots->fixed_reg); | ||
339 | + tcg_debug_assert(ots->kind != TEMP_FIXED); | ||
340 | |||
341 | /* Note that otype != itype for no-op truncation. */ | ||
342 | otype = ots->type; | ||
343 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op) | ||
344 | } | ||
345 | temp_dead(s, ots); | ||
346 | } else { | ||
347 | - if (IS_DEAD_ARG(1) && !ts->fixed_reg) { | ||
348 | + if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) { | ||
349 | /* the mov can be suppressed */ | ||
350 | if (ots->val_type == TEMP_VAL_REG) { | ||
351 | s->reg_to_temp[ots->reg] = NULL; | ||
352 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op) | ||
353 | * Store the source register into the destination slot | ||
354 | * and leave the destination temp as TEMP_VAL_MEM. | ||
355 | */ | ||
356 | - assert(!ots->fixed_reg); | ||
357 | + assert(ots->kind != TEMP_FIXED); | ||
358 | if (!ts->mem_allocated) { | ||
359 | temp_allocate_frame(s, ots); | ||
360 | } | ||
361 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op) | ||
362 | its = arg_temp(op->args[1]); | ||
363 | |||
364 | /* ENV should not be modified. */ | ||
365 | - tcg_debug_assert(!ots->fixed_reg); | ||
366 | + tcg_debug_assert(ots->kind != TEMP_FIXED); | ||
367 | |||
368 | itype = its->type; | ||
369 | vece = TCGOP_VECE(op); | ||
370 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
371 | i_preferred_regs = o_preferred_regs = 0; | ||
372 | if (arg_ct->ialias) { | ||
373 | o_preferred_regs = op->output_pref[arg_ct->alias_index]; | ||
374 | - if (ts->fixed_reg) { | ||
375 | + if (ts->kind == TEMP_FIXED) { | ||
376 | /* if fixed register, we must allocate a new register | ||
377 | if the alias is not the same register */ | ||
378 | if (arg != op->args[arg_ct->alias_index]) { | ||
379 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
380 | ts = arg_temp(arg); | ||
381 | |||
382 | /* ENV should not be modified. */ | ||
383 | - tcg_debug_assert(!ts->fixed_reg); | ||
384 | + tcg_debug_assert(ts->kind != TEMP_FIXED); | ||
385 | |||
386 | if (arg_ct->oalias && !const_args[arg_ct->alias_index]) { | ||
387 | reg = new_args[arg_ct->alias_index]; | ||
388 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) | ||
389 | ts = arg_temp(op->args[i]); | ||
390 | |||
391 | /* ENV should not be modified. */ | ||
392 | - tcg_debug_assert(!ts->fixed_reg); | ||
393 | + tcg_debug_assert(ts->kind != TEMP_FIXED); | ||
394 | |||
395 | if (NEED_SYNC_ARG(i)) { | ||
396 | temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i)); | ||
397 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) | ||
398 | ts = arg_temp(arg); | ||
399 | |||
400 | /* ENV should not be modified. */ | ||
401 | - tcg_debug_assert(!ts->fixed_reg); | ||
402 | + tcg_debug_assert(ts->kind != TEMP_FIXED); | ||
403 | |||
404 | reg = tcg_target_call_oarg_regs[i]; | ||
405 | tcg_debug_assert(s->reg_to_temp[reg] == NULL); | ||
406 | -- | 24 | -- |
407 | 2.25.1 | 25 | 2.34.1 |
408 | |||
1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | --- | 2 | --- |
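
Note for reviewers, not part of the commit message: the C_O<n>_I<m>()
sets below follow the convention of the earlier tcg-constr.c.inc
patches in this series, listing <n> output constraints before <m>
input constraints.  A sketch of the equivalence for one set from this
conversion:

    /* C_O1_I2(r, rZ, rN) declares one output ("r") and two inputs
     * ("rZ", "rN"), i.e. the same constraints as the old
     *     static const TCGTargetOpDef r_rZ_rN
     *         = { .args_ct_str = { "r", "rZ", "rN" } };
     */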
3 | tcg/riscv/tcg-target-constr.h | 25 +++++++++++ | 3 | tcg/riscv/tcg-target.c.inc | 185 +++++++++++++++++++------------------ |
4 | tcg/riscv/tcg-target.c.inc | 82 ++++++++++------------------------- | 4 | 1 file changed, 94 insertions(+), 91 deletions(-) |
5 | 2 files changed, 49 insertions(+), 58 deletions(-) | ||
6 | create mode 100644 tcg/riscv/tcg-target-constr.h | ||
7 | 5 | ||
8 | diff --git a/tcg/riscv/tcg-target-constr.h b/tcg/riscv/tcg-target-constr.h | ||
9 | new file mode 100644 | ||
10 | index XXXXXXX..XXXXXXX | ||
11 | --- /dev/null | ||
12 | +++ b/tcg/riscv/tcg-target-constr.h | ||
13 | @@ -XXX,XX +XXX,XX @@ | ||
14 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
15 | +/* | ||
16 | + * RISC-V target-specific operand constraints.
17 | + * Copyright (c) 2020 Linaro | ||
18 | + */ | ||
19 | + | ||
20 | +C_O0_I1(r) | ||
21 | +C_O0_I2(LZ, L) | ||
22 | +C_O0_I2(rZ, r) | ||
23 | +C_O0_I2(rZ, rZ) | ||
24 | +C_O0_I3(LZ, L, L) | ||
25 | +C_O0_I3(LZ, LZ, L) | ||
26 | +C_O0_I4(LZ, LZ, L, L) | ||
27 | +C_O0_I4(rZ, rZ, rZ, rZ) | ||
28 | +C_O1_I1(r, L) | ||
29 | +C_O1_I1(r, r) | ||
30 | +C_O1_I2(r, L, L) | ||
31 | +C_O1_I2(r, r, ri) | ||
32 | +C_O1_I2(r, r, rI) | ||
33 | +C_O1_I2(r, rZ, rN) | ||
34 | +C_O1_I2(r, rZ, rZ) | ||
35 | +C_O1_I4(r, rZ, rZ, rZ, rZ) | ||
36 | +C_O2_I1(r, r, L) | ||
37 | +C_O2_I2(r, r, L, L) | ||
38 | +C_O2_I4(r, r, rZ, rZ, rM, rM) | ||
39 | diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc | 6 | diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc |
40 | index XXXXXXX..XXXXXXX 100644 | 7 | index XXXXXXX..XXXXXXX 100644 |
41 | --- a/tcg/riscv/tcg-target.c.inc | 8 | --- a/tcg/riscv/tcg-target.c.inc |
42 | +++ b/tcg/riscv/tcg-target.c.inc | 9 | +++ b/tcg/riscv/tcg-target.c.inc |
43 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, | 10 | @@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, |
11 | aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); | ||
12 | a_mask = (1u << aa.align) - 1; | ||
13 | |||
14 | -#ifdef CONFIG_SOFTMMU | ||
15 | - unsigned s_bits = opc & MO_SIZE; | ||
16 | - unsigned s_mask = (1u << s_bits) - 1; | ||
17 | - int mem_index = get_mmuidx(oi); | ||
18 | - int fast_ofs = tlb_mask_table_ofs(s, mem_index); | ||
19 | - int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); | ||
20 | - int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); | ||
21 | - int compare_mask; | ||
22 | - TCGReg addr_adj; | ||
23 | + if (tcg_use_softmmu) { | ||
24 | + unsigned s_bits = opc & MO_SIZE; | ||
25 | + unsigned s_mask = (1u << s_bits) - 1; | ||
26 | + int mem_index = get_mmuidx(oi); | ||
27 | + int fast_ofs = tlb_mask_table_ofs(s, mem_index); | ||
28 | + int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); | ||
29 | + int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); | ||
30 | + int compare_mask; | ||
31 | + TCGReg addr_adj; | ||
32 | |||
33 | - ldst = new_ldst_label(s); | ||
34 | - ldst->is_ld = is_ld; | ||
35 | - ldst->oi = oi; | ||
36 | - ldst->addrlo_reg = addr_reg; | ||
37 | - | ||
38 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); | ||
39 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); | ||
40 | - | ||
41 | - tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg, | ||
42 | - s->page_bits - CPU_TLB_ENTRY_BITS); | ||
43 | - tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); | ||
44 | - tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); | ||
45 | - | ||
46 | - /* | ||
47 | - * For aligned accesses, we check the first byte and include the alignment | ||
48 | - * bits within the address. For unaligned access, we check that we don't | ||
49 | - * cross pages using the address of the last byte of the access. | ||
50 | - */ | ||
51 | - addr_adj = addr_reg; | ||
52 | - if (a_mask < s_mask) { | ||
53 | - addr_adj = TCG_REG_TMP0; | ||
54 | - tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI, | ||
55 | - addr_adj, addr_reg, s_mask - a_mask); | ||
56 | - } | ||
57 | - compare_mask = s->page_mask | a_mask; | ||
58 | - if (compare_mask == sextreg(compare_mask, 0, 12)) { | ||
59 | - tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask); | ||
60 | - } else { | ||
61 | - tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask); | ||
62 | - tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj); | ||
63 | - } | ||
64 | - | ||
65 | - /* Load the tlb comparator and the addend. */ | ||
66 | - QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); | ||
67 | - tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, | ||
68 | - is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
69 | - : offsetof(CPUTLBEntry, addr_write)); | ||
70 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, | ||
71 | - offsetof(CPUTLBEntry, addend)); | ||
72 | - | ||
73 | - /* Compare masked address with the TLB entry. */ | ||
74 | - ldst->label_ptr[0] = s->code_ptr; | ||
75 | - tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0); | ||
76 | - | ||
77 | - /* TLB Hit - translate address using addend. */ | ||
78 | - if (addr_type != TCG_TYPE_I32) { | ||
79 | - tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2); | ||
80 | - } else if (have_zba) { | ||
81 | - tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2); | ||
82 | - } else { | ||
83 | - tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg); | ||
84 | - tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, TCG_REG_TMP2); | ||
85 | - } | ||
86 | - *pbase = TCG_REG_TMP0; | ||
87 | -#else | ||
88 | - TCGReg base; | ||
89 | - | ||
90 | - if (a_mask) { | ||
91 | ldst = new_ldst_label(s); | ||
92 | ldst->is_ld = is_ld; | ||
93 | ldst->oi = oi; | ||
94 | ldst->addrlo_reg = addr_reg; | ||
95 | |||
96 | - /* We are expecting alignment max 7, so we can always use andi. */ | ||
97 | - tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12)); | ||
98 | - tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask); | ||
99 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); | ||
100 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); | ||
101 | |||
102 | - ldst->label_ptr[0] = s->code_ptr; | ||
103 | - tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0); | ||
104 | - } | ||
105 | + tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg, | ||
106 | + s->page_bits - CPU_TLB_ENTRY_BITS); | ||
107 | + tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); | ||
108 | + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); | ||
109 | |||
110 | - if (guest_base != 0) { | ||
111 | - base = TCG_REG_TMP0; | ||
112 | - if (addr_type != TCG_TYPE_I32) { | ||
113 | - tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, TCG_GUEST_BASE_REG); | ||
114 | - } else if (have_zba) { | ||
115 | - tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, TCG_GUEST_BASE_REG); | ||
116 | - } else { | ||
117 | - tcg_out_ext32u(s, base, addr_reg); | ||
118 | - tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG); | ||
119 | + /* | ||
120 | + * For aligned accesses, we check the first byte and include the | ||
121 | + * alignment bits within the address. For unaligned access, we | ||
122 | + * check that we don't cross pages using the address of the last | ||
123 | + * byte of the access. | ||
124 | + */ | ||
125 | + addr_adj = addr_reg; | ||
126 | + if (a_mask < s_mask) { | ||
127 | + addr_adj = TCG_REG_TMP0; | ||
128 | + tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI, | ||
129 | + addr_adj, addr_reg, s_mask - a_mask); | ||
130 | } | ||
131 | - } else if (addr_type != TCG_TYPE_I32) { | ||
132 | - base = addr_reg; | ||
133 | + compare_mask = s->page_mask | a_mask; | ||
134 | + if (compare_mask == sextreg(compare_mask, 0, 12)) { | ||
135 | + tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask); | ||
136 | + } else { | ||
137 | + tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask); | ||
138 | + tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj); | ||
139 | + } | ||
140 | + | ||
141 | + /* Load the tlb comparator and the addend. */ | ||
142 | + QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); | ||
143 | + tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, | ||
144 | + is_ld ? offsetof(CPUTLBEntry, addr_read) | ||
145 | + : offsetof(CPUTLBEntry, addr_write)); | ||
146 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, | ||
147 | + offsetof(CPUTLBEntry, addend)); | ||
148 | + | ||
149 | + /* Compare masked address with the TLB entry. */ | ||
150 | + ldst->label_ptr[0] = s->code_ptr; | ||
151 | + tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0); | ||
152 | + | ||
153 | + /* TLB Hit - translate address using addend. */ | ||
154 | + if (addr_type != TCG_TYPE_I32) { | ||
155 | + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2); | ||
156 | + } else if (have_zba) { | ||
157 | + tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, | ||
158 | + addr_reg, TCG_REG_TMP2); | ||
159 | + } else { | ||
160 | + tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg); | ||
161 | + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, | ||
162 | + TCG_REG_TMP0, TCG_REG_TMP2); | ||
163 | + } | ||
164 | + *pbase = TCG_REG_TMP0; | ||
165 | } else { | ||
166 | - base = TCG_REG_TMP0; | ||
167 | - tcg_out_ext32u(s, base, addr_reg); | ||
168 | + TCGReg base; | ||
169 | + | ||
170 | + if (a_mask) { | ||
171 | + ldst = new_ldst_label(s); | ||
172 | + ldst->is_ld = is_ld; | ||
173 | + ldst->oi = oi; | ||
174 | + ldst->addrlo_reg = addr_reg; | ||
175 | + | ||
176 | + /* We are expecting alignment max 7, so we can always use andi. */ | ||
177 | + tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12)); | ||
178 | + tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask); | ||
179 | + | ||
180 | + ldst->label_ptr[0] = s->code_ptr; | ||
181 | + tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0); | ||
182 | + } | ||
183 | + | ||
184 | + if (guest_base != 0) { | ||
185 | + base = TCG_REG_TMP0; | ||
186 | + if (addr_type != TCG_TYPE_I32) { | ||
187 | + tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, | ||
188 | + TCG_GUEST_BASE_REG); | ||
189 | + } else if (have_zba) { | ||
190 | + tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, | ||
191 | + TCG_GUEST_BASE_REG); | ||
192 | + } else { | ||
193 | + tcg_out_ext32u(s, base, addr_reg); | ||
194 | + tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG); | ||
195 | + } | ||
196 | + } else if (addr_type != TCG_TYPE_I32) { | ||
197 | + base = addr_reg; | ||
198 | + } else { | ||
199 | + base = TCG_REG_TMP0; | ||
200 | + tcg_out_ext32u(s, base, addr_reg); | ||
201 | + } | ||
202 | + *pbase = base; | ||
44 | } | 203 | } |
204 | - *pbase = base; | ||
205 | -#endif | ||
206 | |||
207 | return ldst; | ||
45 | } | 208 | } |
46 | 209 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | |
47 | +/* Define all constraint sets. */ | 210 | TCG_REG_SP, SAVE_OFS + i * REG_SIZE); |
48 | +#include "../tcg-constr.c.inc" | 211 | } |
49 | + | 212 | |
50 | static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | 213 | -#if !defined(CONFIG_SOFTMMU) |
51 | { | 214 | - if (guest_base) { |
52 | - static const TCGTargetOpDef r | 215 | + if (!tcg_use_softmmu && guest_base) { |
53 | - = { .args_ct_str = { "r" } }; | 216 | tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); |
54 | - static const TCGTargetOpDef r_r | 217 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); |
55 | - = { .args_ct_str = { "r", "r" } }; | 218 | } |
56 | - static const TCGTargetOpDef rZ_r | 219 | -#endif |
57 | - = { .args_ct_str = { "rZ", "r" } }; | 220 | |
58 | - static const TCGTargetOpDef rZ_rZ | 221 | /* Call generated code */ |
59 | - = { .args_ct_str = { "rZ", "rZ" } }; | 222 | tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); |
60 | - static const TCGTargetOpDef rZ_rZ_rZ_rZ | ||
61 | - = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } }; | ||
62 | - static const TCGTargetOpDef r_r_ri | ||
63 | - = { .args_ct_str = { "r", "r", "ri" } }; | ||
64 | - static const TCGTargetOpDef r_r_rI | ||
65 | - = { .args_ct_str = { "r", "r", "rI" } }; | ||
66 | - static const TCGTargetOpDef r_rZ_rN | ||
67 | - = { .args_ct_str = { "r", "rZ", "rN" } }; | ||
68 | - static const TCGTargetOpDef r_rZ_rZ | ||
69 | - = { .args_ct_str = { "r", "rZ", "rZ" } }; | ||
70 | - static const TCGTargetOpDef r_rZ_rZ_rZ_rZ | ||
71 | - = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } }; | ||
72 | - static const TCGTargetOpDef r_L | ||
73 | - = { .args_ct_str = { "r", "L" } }; | ||
74 | - static const TCGTargetOpDef r_r_L | ||
75 | - = { .args_ct_str = { "r", "r", "L" } }; | ||
76 | - static const TCGTargetOpDef r_L_L | ||
77 | - = { .args_ct_str = { "r", "L", "L" } }; | ||
78 | - static const TCGTargetOpDef r_r_L_L | ||
79 | - = { .args_ct_str = { "r", "r", "L", "L" } }; | ||
80 | - static const TCGTargetOpDef LZ_L | ||
81 | - = { .args_ct_str = { "LZ", "L" } }; | ||
82 | - static const TCGTargetOpDef LZ_L_L | ||
83 | - = { .args_ct_str = { "LZ", "L", "L" } }; | ||
84 | - static const TCGTargetOpDef LZ_LZ_L | ||
85 | - = { .args_ct_str = { "LZ", "LZ", "L" } }; | ||
86 | - static const TCGTargetOpDef LZ_LZ_L_L | ||
87 | - = { .args_ct_str = { "LZ", "LZ", "L", "L" } }; | ||
88 | - static const TCGTargetOpDef r_r_rZ_rZ_rM_rM | ||
89 | - = { .args_ct_str = { "r", "r", "rZ", "rZ", "rM", "rM" } }; | ||
90 | - | ||
91 | switch (op) { | ||
92 | case INDEX_op_goto_ptr: | ||
93 | - return &r; | ||
94 | + return C_O0_I1(r); | ||
95 | |||
96 | case INDEX_op_ld8u_i32: | ||
97 | case INDEX_op_ld8s_i32: | ||
98 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
99 | case INDEX_op_extrl_i64_i32: | ||
100 | case INDEX_op_extrh_i64_i32: | ||
101 | case INDEX_op_ext_i32_i64: | ||
102 | - return &r_r; | ||
103 | + return C_O1_I1(r, r); | ||
104 | |||
105 | case INDEX_op_st8_i32: | ||
106 | case INDEX_op_st16_i32: | ||
107 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
108 | case INDEX_op_st16_i64: | ||
109 | case INDEX_op_st32_i64: | ||
110 | case INDEX_op_st_i64: | ||
111 | - return &rZ_r; | ||
112 | + return C_O0_I2(rZ, r); | ||
113 | |||
114 | case INDEX_op_add_i32: | ||
115 | case INDEX_op_and_i32: | ||
116 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
117 | case INDEX_op_and_i64: | ||
118 | case INDEX_op_or_i64: | ||
119 | case INDEX_op_xor_i64: | ||
120 | - return &r_r_rI; | ||
121 | + return C_O1_I2(r, r, rI); | ||
122 | |||
123 | case INDEX_op_sub_i32: | ||
124 | case INDEX_op_sub_i64: | ||
125 | - return &r_rZ_rN; | ||
126 | + return C_O1_I2(r, rZ, rN); | ||
127 | |||
128 | case INDEX_op_mul_i32: | ||
129 | case INDEX_op_mulsh_i32: | ||
130 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
131 | case INDEX_op_rem_i64: | ||
132 | case INDEX_op_remu_i64: | ||
133 | case INDEX_op_setcond_i64: | ||
134 | - return &r_rZ_rZ; | ||
135 | + return C_O1_I2(r, rZ, rZ); | ||
136 | |||
137 | case INDEX_op_shl_i32: | ||
138 | case INDEX_op_shr_i32: | ||
139 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
140 | case INDEX_op_shl_i64: | ||
141 | case INDEX_op_shr_i64: | ||
142 | case INDEX_op_sar_i64: | ||
143 | - return &r_r_ri; | ||
144 | + return C_O1_I2(r, r, ri); | ||
145 | |||
146 | case INDEX_op_brcond_i32: | ||
147 | case INDEX_op_brcond_i64: | ||
148 | - return &rZ_rZ; | ||
149 | + return C_O0_I2(rZ, rZ); | ||
150 | |||
151 | case INDEX_op_add2_i32: | ||
152 | case INDEX_op_add2_i64: | ||
153 | case INDEX_op_sub2_i32: | ||
154 | case INDEX_op_sub2_i64: | ||
155 | - return &r_r_rZ_rZ_rM_rM; | ||
156 | + return C_O2_I4(r, r, rZ, rZ, rM, rM); | ||
157 | |||
158 | case INDEX_op_brcond2_i32: | ||
159 | - return &rZ_rZ_rZ_rZ; | ||
160 | + return C_O0_I4(rZ, rZ, rZ, rZ); | ||
161 | |||
162 | case INDEX_op_setcond2_i32: | ||
163 | - return &r_rZ_rZ_rZ_rZ; | ||
164 | + return C_O1_I4(r, rZ, rZ, rZ, rZ); | ||
165 | |||
166 | case INDEX_op_qemu_ld_i32: | ||
167 | - return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L; | ||
168 | + return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS | ||
169 | + ? C_O1_I1(r, L) : C_O1_I2(r, L, L)); | ||
170 | case INDEX_op_qemu_st_i32: | ||
171 | - return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_L : &LZ_L_L; | ||
172 | + return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS | ||
173 | + ? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L)); | ||
174 | case INDEX_op_qemu_ld_i64: | ||
175 | - return TCG_TARGET_REG_BITS == 64 ? &r_L | ||
176 | - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L | ||
177 | - : &r_r_L_L; | ||
178 | + return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) | ||
179 | + : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L) | ||
180 | + : C_O2_I2(r, r, L, L)); | ||
181 | case INDEX_op_qemu_st_i64: | ||
182 | - return TCG_TARGET_REG_BITS == 64 ? &LZ_L | ||
183 | - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_LZ_L | ||
184 | - : &LZ_LZ_L_L; | ||
185 | + return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L) | ||
186 | + : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L) | ||
187 | + : C_O0_I4(LZ, LZ, L, L)); | ||
188 | |||
189 | default: | ||
190 | return NULL; | ||
191 | -- | 223 | -- |
192 | 2.25.1 | 224 | 2.34.1 |
193 | |||
194 | diff view generated by jsdifflib |
1 | Do not allocate a large block for indexing. Instead, allocate | 1 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
---|---|---|---|
2 | for each temporary as it is first seen. | ||
3 | |||
4 | In general, this uses less memory, since most | ||
5 | TBs do not touch every target register. This also allows us to | ||
6 | allocate TempOptInfo for new temps created during optimization. | ||
7 | |||
8 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 3 | --- |
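[Note, not part of the patch: a minimal C sketch of the lazy-allocation pattern the optimize.c change adopts. Info, Temp and get_info() are hypothetical stand-ins for TempOptInfo, TCGTemp and init_ts_info(), and calloc() stands in for QEMU's tcg_malloc(); the real code also re-initializes the cached object on each pass.]

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Info { bool is_const; long val; } Info;  /* cf. TempOptInfo */
    typedef struct Temp { Info *state_ptr; } Temp;          /* cf. TCGTemp */

    /* Allocate per-temp metadata only when the temp is first seen;
     * a later lookup finds the pointer cached and reuses it. */
    static Info *get_info(Temp *t)
    {
        if (t->state_ptr == NULL) {
            t->state_ptr = calloc(1, sizeof(Info));
        }
        return t->state_ptr;
    }

    int main(void)
    {
        Temp temps[8] = {0};
        get_info(&temps[2])->val = 42;  /* only temps[2] pays for an allocation */
        printf("%ld\n", get_info(&temps[2])->val);
        return 0;
    }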
11 | tcg/optimize.c | 60 ++++++++++++++++++++++++++++---------------------- | 4 | tcg/s390x/tcg-target.c.inc | 161 ++++++++++++++++++------------------- |
12 | 1 file changed, 34 insertions(+), 26 deletions(-) | 5 | 1 file changed, 79 insertions(+), 82 deletions(-) |
13 | 6 | ||
14 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 7 | diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc |
15 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/tcg/optimize.c | 9 | --- a/tcg/s390x/tcg-target.c.inc |
17 | +++ b/tcg/optimize.c | 10 | +++ b/tcg/s390x/tcg-target.c.inc |
18 | @@ -XXX,XX +XXX,XX @@ static void reset_temp(TCGArg arg) | 11 | @@ -XXX,XX +XXX,XX @@ |
12 | /* A scratch register that may be be used throughout the backend. */ | ||
13 | #define TCG_TMP0 TCG_REG_R1 | ||
14 | |||
15 | -#ifndef CONFIG_SOFTMMU | ||
16 | #define TCG_GUEST_BASE_REG TCG_REG_R13 | ||
17 | -#endif | ||
18 | |||
19 | /* All of the following instructions are prefixed with their instruction | ||
20 | format, and are defined as 8- or 16-bit quantities, even when the two | ||
21 | @@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, | ||
22 | h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128); | ||
23 | a_mask = (1 << h->aa.align) - 1; | ||
24 | |||
25 | -#ifdef CONFIG_SOFTMMU | ||
26 | - unsigned s_mask = (1 << s_bits) - 1; | ||
27 | - int mem_index = get_mmuidx(oi); | ||
28 | - int fast_off = tlb_mask_table_ofs(s, mem_index); | ||
29 | - int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); | ||
30 | - int table_off = fast_off + offsetof(CPUTLBDescFast, table); | ||
31 | - int ofs, a_off; | ||
32 | - uint64_t tlb_mask; | ||
33 | + if (tcg_use_softmmu) { | ||
34 | + unsigned s_mask = (1 << s_bits) - 1; | ||
35 | + int mem_index = get_mmuidx(oi); | ||
36 | + int fast_off = tlb_mask_table_ofs(s, mem_index); | ||
37 | + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); | ||
38 | + int table_off = fast_off + offsetof(CPUTLBDescFast, table); | ||
39 | + int ofs, a_off; | ||
40 | + uint64_t tlb_mask; | ||
41 | |||
42 | - ldst = new_ldst_label(s); | ||
43 | - ldst->is_ld = is_ld; | ||
44 | - ldst->oi = oi; | ||
45 | - ldst->addrlo_reg = addr_reg; | ||
46 | - | ||
47 | - tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE, | ||
48 | - s->page_bits - CPU_TLB_ENTRY_BITS); | ||
49 | - | ||
50 | - tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off); | ||
51 | - tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off); | ||
52 | - | ||
53 | - /* | ||
54 | - * For aligned accesses, we check the first byte and include the alignment | ||
55 | - * bits within the address. For unaligned access, we check that we don't | ||
56 | - * cross pages using the address of the last byte of the access. | ||
57 | - */ | ||
58 | - a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask); | ||
59 | - tlb_mask = (uint64_t)s->page_mask | a_mask; | ||
60 | - if (a_off == 0) { | ||
61 | - tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask); | ||
62 | - } else { | ||
63 | - tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off); | ||
64 | - tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask); | ||
65 | - } | ||
66 | - | ||
67 | - if (is_ld) { | ||
68 | - ofs = offsetof(CPUTLBEntry, addr_read); | ||
69 | - } else { | ||
70 | - ofs = offsetof(CPUTLBEntry, addr_write); | ||
71 | - } | ||
72 | - if (addr_type == TCG_TYPE_I32) { | ||
73 | - ofs += HOST_BIG_ENDIAN * 4; | ||
74 | - tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs); | ||
75 | - } else { | ||
76 | - tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs); | ||
77 | - } | ||
78 | - | ||
79 | - tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); | ||
80 | - ldst->label_ptr[0] = s->code_ptr++; | ||
81 | - | ||
82 | - h->index = TCG_TMP0; | ||
83 | - tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE, | ||
84 | - offsetof(CPUTLBEntry, addend)); | ||
85 | - | ||
86 | - if (addr_type == TCG_TYPE_I32) { | ||
87 | - tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg); | ||
88 | - h->base = TCG_REG_NONE; | ||
89 | - } else { | ||
90 | - h->base = addr_reg; | ||
91 | - } | ||
92 | - h->disp = 0; | ||
93 | -#else | ||
94 | - if (a_mask) { | ||
95 | ldst = new_ldst_label(s); | ||
96 | ldst->is_ld = is_ld; | ||
97 | ldst->oi = oi; | ||
98 | ldst->addrlo_reg = addr_reg; | ||
99 | |||
100 | - /* We are expecting a_bits to max out at 7, much lower than TMLL. */ | ||
101 | - tcg_debug_assert(a_mask <= 0xffff); | ||
102 | - tcg_out_insn(s, RI, TMLL, addr_reg, a_mask); | ||
103 | + tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE, | ||
104 | + s->page_bits - CPU_TLB_ENTRY_BITS); | ||
105 | |||
106 | - tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */ | ||
107 | + tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off); | ||
108 | + tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off); | ||
109 | + | ||
110 | + /* | ||
111 | + * For aligned accesses, we check the first byte and include the | ||
112 | + * alignment bits within the address. For unaligned access, we | ||
113 | + * check that we don't cross pages using the address of the last | ||
114 | + * byte of the access. | ||
115 | + */ | ||
116 | + a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask); | ||
117 | + tlb_mask = (uint64_t)s->page_mask | a_mask; | ||
118 | + if (a_off == 0) { | ||
119 | + tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask); | ||
120 | + } else { | ||
121 | + tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off); | ||
122 | + tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask); | ||
123 | + } | ||
124 | + | ||
125 | + if (is_ld) { | ||
126 | + ofs = offsetof(CPUTLBEntry, addr_read); | ||
127 | + } else { | ||
128 | + ofs = offsetof(CPUTLBEntry, addr_write); | ||
129 | + } | ||
130 | + if (addr_type == TCG_TYPE_I32) { | ||
131 | + ofs += HOST_BIG_ENDIAN * 4; | ||
132 | + tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs); | ||
133 | + } else { | ||
134 | + tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs); | ||
135 | + } | ||
136 | + | ||
137 | + tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); | ||
138 | ldst->label_ptr[0] = s->code_ptr++; | ||
139 | - } | ||
140 | |||
141 | - h->base = addr_reg; | ||
142 | - if (addr_type == TCG_TYPE_I32) { | ||
143 | - tcg_out_ext32u(s, TCG_TMP0, addr_reg); | ||
144 | - h->base = TCG_TMP0; | ||
145 | - } | ||
146 | - if (guest_base < 0x80000) { | ||
147 | - h->index = TCG_REG_NONE; | ||
148 | - h->disp = guest_base; | ||
149 | - } else { | ||
150 | - h->index = TCG_GUEST_BASE_REG; | ||
151 | + h->index = TCG_TMP0; | ||
152 | + tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE, | ||
153 | + offsetof(CPUTLBEntry, addend)); | ||
154 | + | ||
155 | + if (addr_type == TCG_TYPE_I32) { | ||
156 | + tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg); | ||
157 | + h->base = TCG_REG_NONE; | ||
158 | + } else { | ||
159 | + h->base = addr_reg; | ||
160 | + } | ||
161 | h->disp = 0; | ||
162 | + } else { | ||
163 | + if (a_mask) { | ||
164 | + ldst = new_ldst_label(s); | ||
165 | + ldst->is_ld = is_ld; | ||
166 | + ldst->oi = oi; | ||
167 | + ldst->addrlo_reg = addr_reg; | ||
168 | + | ||
169 | + /* We are expecting a_bits to max out at 7, much lower than TMLL. */ | ||
170 | + tcg_debug_assert(a_mask <= 0xffff); | ||
171 | + tcg_out_insn(s, RI, TMLL, addr_reg, a_mask); | ||
172 | + | ||
173 | + tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */ | ||
174 | + ldst->label_ptr[0] = s->code_ptr++; | ||
175 | + } | ||
176 | + | ||
177 | + h->base = addr_reg; | ||
178 | + if (addr_type == TCG_TYPE_I32) { | ||
179 | + tcg_out_ext32u(s, TCG_TMP0, addr_reg); | ||
180 | + h->base = TCG_TMP0; | ||
181 | + } | ||
182 | + if (guest_base < 0x80000) { | ||
183 | + h->index = TCG_REG_NONE; | ||
184 | + h->disp = guest_base; | ||
185 | + } else { | ||
186 | + h->index = TCG_GUEST_BASE_REG; | ||
187 | + h->disp = 0; | ||
188 | + } | ||
189 | } | ||
190 | -#endif | ||
191 | |||
192 | return ldst; | ||
19 | } | 193 | } |
20 | 194 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | |
21 | /* Initialize and activate a temporary. */ | 195 | TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET, |
22 | -static void init_ts_info(TempOptInfo *infos, | 196 | CPU_TEMP_BUF_NLONGS * sizeof(long)); |
23 | - TCGTempSet *temps_used, TCGTemp *ts) | 197 | |
24 | +static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts) | 198 | -#ifndef CONFIG_SOFTMMU |
25 | { | 199 | - if (guest_base >= 0x80000) { |
26 | size_t idx = temp_idx(ts); | 200 | + if (!tcg_use_softmmu && guest_base >= 0x80000) { |
27 | - if (!test_bit(idx, temps_used->l)) { | 201 | tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); |
28 | - TempOptInfo *ti = &infos[idx]; | 202 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); |
29 | + TempOptInfo *ti; | ||
30 | |||
31 | + if (test_bit(idx, temps_used->l)) { | ||
32 | + return; | ||
33 | + } | ||
34 | + set_bit(idx, temps_used->l); | ||
35 | + | ||
36 | + ti = ts->state_ptr; | ||
37 | + if (ti == NULL) { | ||
38 | + ti = tcg_malloc(sizeof(TempOptInfo)); | ||
39 | ts->state_ptr = ti; | ||
40 | - ti->next_copy = ts; | ||
41 | - ti->prev_copy = ts; | ||
42 | - if (ts->kind == TEMP_CONST) { | ||
43 | - ti->is_const = true; | ||
44 | - ti->val = ti->mask = ts->val; | ||
45 | - if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) { | ||
46 | - /* High bits of a 32-bit quantity are garbage. */ | ||
47 | - ti->mask |= ~0xffffffffull; | ||
48 | - } | ||
49 | - } else { | ||
50 | - ti->is_const = false; | ||
51 | - ti->mask = -1; | ||
52 | + } | ||
53 | + | ||
54 | + ti->next_copy = ts; | ||
55 | + ti->prev_copy = ts; | ||
56 | + if (ts->kind == TEMP_CONST) { | ||
57 | + ti->is_const = true; | ||
58 | + ti->val = ts->val; | ||
59 | + ti->mask = ts->val; | ||
60 | + if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) { | ||
61 | + /* High bits of a 32-bit quantity are garbage. */ | ||
62 | + ti->mask |= ~0xffffffffull; | ||
63 | } | ||
64 | - set_bit(idx, temps_used->l); | ||
65 | + } else { | ||
66 | + ti->is_const = false; | ||
67 | + ti->mask = -1; | ||
68 | } | 203 | } |
69 | } | 204 | -#endif |
70 | 205 | ||
71 | -static void init_arg_info(TempOptInfo *infos, | 206 | tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); |
72 | - TCGTempSet *temps_used, TCGArg arg) | ||
73 | +static void init_arg_info(TCGTempSet *temps_used, TCGArg arg) | ||
74 | { | ||
75 | - init_ts_info(infos, temps_used, arg_temp(arg)); | ||
76 | + init_ts_info(temps_used, arg_temp(arg)); | ||
77 | } | ||
78 | |||
79 | static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts) | ||
80 | @@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2) | ||
81 | /* Propagate constants and copies, fold constant expressions. */ | ||
82 | void tcg_optimize(TCGContext *s) | ||
83 | { | ||
84 | - int nb_temps, nb_globals; | ||
85 | + int nb_temps, nb_globals, i; | ||
86 | TCGOp *op, *op_next, *prev_mb = NULL; | ||
87 | - TempOptInfo *infos; | ||
88 | TCGTempSet temps_used; | ||
89 | |||
90 | /* Array VALS has an element for each temp. | ||
91 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
92 | |||
93 | nb_temps = s->nb_temps; | ||
94 | nb_globals = s->nb_globals; | ||
95 | + | ||
96 | bitmap_zero(temps_used.l, nb_temps); | ||
97 | - infos = tcg_malloc(sizeof(TempOptInfo) * nb_temps); | ||
98 | + for (i = 0; i < nb_temps; ++i) { | ||
99 | + s->temps[i].state_ptr = NULL; | ||
100 | + } | ||
101 | |||
102 | QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { | ||
103 | uint64_t mask, partmask, affected, tmp; | ||
104 | - int nb_oargs, nb_iargs, i; | ||
105 | + int nb_oargs, nb_iargs; | ||
106 | TCGOpcode opc = op->opc; | ||
107 | const TCGOpDef *def = &tcg_op_defs[opc]; | ||
108 | |||
109 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
110 | for (i = 0; i < nb_oargs + nb_iargs; i++) { | ||
111 | TCGTemp *ts = arg_temp(op->args[i]); | ||
112 | if (ts) { | ||
113 | - init_ts_info(infos, &temps_used, ts); | ||
114 | + init_ts_info(&temps_used, ts); | ||
115 | } | ||
116 | } | ||
117 | } else { | ||
118 | nb_oargs = def->nb_oargs; | ||
119 | nb_iargs = def->nb_iargs; | ||
120 | for (i = 0; i < nb_oargs + nb_iargs; i++) { | ||
121 | - init_arg_info(infos, &temps_used, op->args[i]); | ||
122 | + init_arg_info(&temps_used, op->args[i]); | ||
123 | } | ||
124 | } | ||
125 | 207 | ||
126 | -- | 208 | -- |
127 | 2.25.1 | 209 | 2.34.1 |
128 | 210 | ||
129 | 211 | diff view generated by jsdifflib |
1 | These interfaces have been replaced by tcg_gen_dupi_vec | 1 | From: Mike Frysinger <vapier@gentoo.org> |
---|---|---|---|
2 | and tcg_constant_vec. | ||
3 | 2 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Use of the API was removed a while back, but the define wasn't. |
4 | |||
5 | Signed-off-by: Mike Frysinger <vapier@gentoo.org> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
7 | Message-Id: <20231015010046.16020-1-vapier@gentoo.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 9 | --- |
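[Note, illustration only: each wrapper removed on the left was a fixed-size alias for tcg_gen_dupi_vec, so callers convert mechanically, e.g. tcg_gen_dup8i_vec(r, a) becomes tcg_gen_dupi_vec(MO_8, r, a). The sketch below models the replicated constant such a dup produces; dup_const_sketch is a made-up name, though it is intended to match the behavior of QEMU's dup_const.]

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* vece is log2 of the element size in bytes, as in MO_8/16/32/64. */
    static uint64_t dup_const_sketch(unsigned vece, uint64_t a)
    {
        switch (vece) {
        case 0: return 0x0101010101010101ull * (uint8_t)a;
        case 1: return 0x0001000100010001ull * (uint16_t)a;
        case 2: return 0x0000000100000001ull * (uint32_t)a;
        default: return a;
        }
    }

    int main(void)
    {
        /* tcg_gen_dup8i_vec(r, 0x5a) -> tcg_gen_dupi_vec(MO_8, r, 0x5a):
         * either spelling fills the vector with this replicated value. */
        printf("%016" PRIx64 "\n", dup_const_sketch(0, 0x5a));
        return 0;
    }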
7 | include/tcg/tcg-op.h | 4 ---- | 10 | include/tcg/tcg-op.h | 2 -- |
8 | tcg/tcg-op-vec.c | 20 -------------------- | 11 | 1 file changed, 2 deletions(-) |
9 | 2 files changed, 24 deletions(-) | ||
10 | 12 | ||
11 | diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h | 13 | diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h |
12 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/include/tcg/tcg-op.h | 15 | --- a/include/tcg/tcg-op.h |
14 | +++ b/include/tcg/tcg-op.h | 16 | +++ b/include/tcg/tcg-op.h |
15 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_mov_vec(TCGv_vec, TCGv_vec); | 17 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1, |
16 | void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32); | 18 | typedef TCGv_i32 TCGv; |
17 | void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec, TCGv_i64); | 19 | #define tcg_temp_new() tcg_temp_new_i32() |
18 | void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec, TCGv_ptr, tcg_target_long); | 20 | #define tcg_global_mem_new tcg_global_mem_new_i32 |
19 | -void tcg_gen_dup8i_vec(TCGv_vec, uint32_t); | 21 | -#define tcg_temp_free tcg_temp_free_i32 |
20 | -void tcg_gen_dup16i_vec(TCGv_vec, uint32_t); | 22 | #define tcgv_tl_temp tcgv_i32_temp |
21 | -void tcg_gen_dup32i_vec(TCGv_vec, uint32_t); | 23 | #define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32 |
22 | -void tcg_gen_dup64i_vec(TCGv_vec, uint64_t); | 24 | #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32 |
23 | void tcg_gen_dupi_vec(unsigned vece, TCGv_vec, uint64_t); | 25 | @@ -XXX,XX +XXX,XX @@ typedef TCGv_i32 TCGv; |
24 | void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); | 26 | typedef TCGv_i64 TCGv; |
25 | void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); | 27 | #define tcg_temp_new() tcg_temp_new_i64() |
26 | diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c | 28 | #define tcg_global_mem_new tcg_global_mem_new_i64 |
27 | index XXXXXXX..XXXXXXX 100644 | 29 | -#define tcg_temp_free tcg_temp_free_i64 |
28 | --- a/tcg/tcg-op-vec.c | 30 | #define tcgv_tl_temp tcgv_i64_temp |
29 | +++ b/tcg/tcg-op-vec.c | 31 | #define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64 |
30 | @@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m) | 32 | #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 |
31 | return tcg_const_ones_vec(t->base_type); | ||
32 | } | ||
33 | |||
34 | -void tcg_gen_dup64i_vec(TCGv_vec r, uint64_t a) | ||
35 | -{ | ||
36 | - tcg_gen_dupi_vec(MO_64, r, a); | ||
37 | -} | ||
38 | - | ||
39 | -void tcg_gen_dup32i_vec(TCGv_vec r, uint32_t a) | ||
40 | -{ | ||
41 | - tcg_gen_dupi_vec(MO_32, r, a); | ||
42 | -} | ||
43 | - | ||
44 | -void tcg_gen_dup16i_vec(TCGv_vec r, uint32_t a) | ||
45 | -{ | ||
46 | - tcg_gen_dupi_vec(MO_16, r, a); | ||
47 | -} | ||
48 | - | ||
49 | -void tcg_gen_dup8i_vec(TCGv_vec r, uint32_t a) | ||
50 | -{ | ||
51 | - tcg_gen_dupi_vec(MO_8, r, a); | ||
52 | -} | ||
53 | - | ||
54 | void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a) | ||
55 | { | ||
56 | TCGTemp *rt = tcgv_vec_temp(r); | ||
57 | -- | 33 | -- |
58 | 2.25.1 | 34 | 2.34.1 |
59 | 35 | ||
60 | 36 | diff view generated by jsdifflib |
1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
---|---|---|---|
2 | --- | 2 | --- |
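[Note, not the actual QEMU macros (those live in tcg-constr.c.inc, added earlier in this series): a toy showing the X-macro idea behind names like C_O1_I2(r, r, ri), where O counts outputs and I counts inputs. OpDef and the constr_ prefix are invented for the example.]

    #include <stdio.h>

    typedef struct { const char *args[4]; } OpDef;

    /* One expansion of the header defines a named constraint set;
     * a second expansion (not shown) can map opcodes to these names. */
    #define C_O1_I2(o1, i1, i2) \
        static const OpDef constr_##o1##_##i1##_##i2 = { { #o1, #i1, #i2 } };

    C_O1_I2(r, r, ri)   /* expands to: static const OpDef constr_r_r_ri = ... */

    int main(void)
    {
        printf("out: %s  in: %s, %s\n", constr_r_r_ri.args[0],
               constr_r_r_ri.args[1], constr_r_r_ri.args[2]);
        return 0;
    }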
3 | tcg/aarch64/tcg-target-constr.h | 31 ++++++++++++ | 3 | tcg/tcg-op.c | 16 ++++++++-------- |
4 | tcg/aarch64/tcg-target.c.inc | 85 +++++++++++---------------------- | 4 | 1 file changed, 8 insertions(+), 8 deletions(-) |
5 | 2 files changed, 60 insertions(+), 56 deletions(-) | ||
6 | create mode 100644 tcg/aarch64/tcg-target-constr.h | ||
7 | 5 | ||

8 | diff --git a/tcg/aarch64/tcg-target-constr.h b/tcg/aarch64/tcg-target-constr.h | 6 | diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c |
9 | new file mode 100644 | ||
10 | index XXXXXXX..XXXXXXX | ||
11 | --- /dev/null | ||
12 | +++ b/tcg/aarch64/tcg-target-constr.h | ||
13 | @@ -XXX,XX +XXX,XX @@ | ||
14 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
15 | +/* | ||
16 | + * AArch64 target-specific operand constraints. | ||
17 | + * Copyright (c) 2020 Linaro | ||
18 | + */ | ||
19 | + | ||
20 | +C_O0_I1(r) | ||
21 | +C_O0_I2(lZ, l) | ||
22 | +C_O0_I2(r, rA) | ||
23 | +C_O0_I2(rZ, r) | ||
24 | +C_O0_I2(w, r) | ||
25 | +C_O1_I1(r, l) | ||
26 | +C_O1_I1(r, r) | ||
27 | +C_O1_I1(w, r) | ||
28 | +C_O1_I1(w, w) | ||
29 | +C_O1_I1(w, wr) | ||
30 | +C_O1_I2(r, 0, rZ) | ||
31 | +C_O1_I2(r, r, r) | ||
32 | +C_O1_I2(r, r, rA) | ||
33 | +C_O1_I2(r, r, rAL) | ||
34 | +C_O1_I2(r, r, ri) | ||
35 | +C_O1_I2(r, r, rL) | ||
36 | +C_O1_I2(r, rZ, rZ) | ||
37 | +C_O1_I2(w, 0, w) | ||
38 | +C_O1_I2(w, w, w) | ||
39 | +C_O1_I2(w, w, wN) | ||
40 | +C_O1_I2(w, w, wO) | ||
41 | +C_O1_I2(w, w, wZ) | ||
42 | +C_O1_I3(w, w, w, w) | ||
43 | +C_O1_I4(r, r, rA, rZ, rZ) | ||
44 | +C_O2_I4(r, r, rZ, rZ, rA, rMZ) | ||
45 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | ||
46 | index XXXXXXX..XXXXXXX 100644 | 7 | index XXXXXXX..XXXXXXX 100644 |
47 | --- a/tcg/aarch64/tcg-target.c.inc | 8 | --- a/tcg/tcg-op.c |
48 | +++ b/tcg/aarch64/tcg-target.c.inc | 9 | +++ b/tcg/tcg-op.c |
49 | @@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, | 10 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) |
50 | va_end(va); | 11 | tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2); |
51 | } | 12 | } else if (TCG_TARGET_HAS_div2_i32) { |
52 | 13 | TCGv_i32 t0 = tcg_temp_ebb_new_i32(); | |
53 | +/* Define all constraint sets. */ | 14 | - tcg_gen_movi_i32(t0, 0); |
54 | +#include "../tcg-constr.c.inc" | 15 | - tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2); |
55 | + | 16 | + TCGv_i32 zero = tcg_constant_i32(0); |
56 | static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | 17 | + tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, zero, arg2); |
57 | { | 18 | tcg_temp_free_i32(t0); |
58 | - static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; | 19 | } else { |
59 | - static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; | 20 | gen_helper_divu_i32(ret, arg1, arg2); |
60 | - static const TCGTargetOpDef w_w = { .args_ct_str = { "w", "w" } }; | 21 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) |
61 | - static const TCGTargetOpDef w_r = { .args_ct_str = { "w", "r" } }; | 22 | tcg_temp_free_i32(t0); |
62 | - static const TCGTargetOpDef w_wr = { .args_ct_str = { "w", "wr" } }; | 23 | } else if (TCG_TARGET_HAS_div2_i32) { |
63 | - static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } }; | 24 | TCGv_i32 t0 = tcg_temp_ebb_new_i32(); |
64 | - static const TCGTargetOpDef r_rA = { .args_ct_str = { "r", "rA" } }; | 25 | - tcg_gen_movi_i32(t0, 0); |
65 | - static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; | 26 | - tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2); |
66 | - static const TCGTargetOpDef lZ_l = { .args_ct_str = { "lZ", "l" } }; | 27 | + TCGv_i32 zero = tcg_constant_i32(0); |
67 | - static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; | 28 | + tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, zero, arg2); |
68 | - static const TCGTargetOpDef w_w_w = { .args_ct_str = { "w", "w", "w" } }; | 29 | tcg_temp_free_i32(t0); |
69 | - static const TCGTargetOpDef w_0_w = { .args_ct_str = { "w", "0", "w" } }; | 30 | } else { |
70 | - static const TCGTargetOpDef w_w_wO = { .args_ct_str = { "w", "w", "wO" } }; | 31 | gen_helper_remu_i32(ret, arg1, arg2); |
71 | - static const TCGTargetOpDef w_w_wN = { .args_ct_str = { "w", "w", "wN" } }; | 32 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) |
72 | - static const TCGTargetOpDef w_w_wZ = { .args_ct_str = { "w", "w", "wZ" } }; | 33 | tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2); |
73 | - static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; | 34 | } else if (TCG_TARGET_HAS_div2_i64) { |
74 | - static const TCGTargetOpDef r_r_rA = { .args_ct_str = { "r", "r", "rA" } }; | 35 | TCGv_i64 t0 = tcg_temp_ebb_new_i64(); |
75 | - static const TCGTargetOpDef r_r_rL = { .args_ct_str = { "r", "r", "rL" } }; | 36 | - tcg_gen_movi_i64(t0, 0); |
76 | - static const TCGTargetOpDef r_r_rAL | 37 | - tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2); |
77 | - = { .args_ct_str = { "r", "r", "rAL" } }; | 38 | + TCGv_i64 zero = tcg_constant_i64(0); |
78 | - static const TCGTargetOpDef dep | 39 | + tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, zero, arg2); |
79 | - = { .args_ct_str = { "r", "0", "rZ" } }; | 40 | tcg_temp_free_i64(t0); |
80 | - static const TCGTargetOpDef ext2 | 41 | } else { |
81 | - = { .args_ct_str = { "r", "rZ", "rZ" } }; | 42 | gen_helper_divu_i64(ret, arg1, arg2); |
82 | - static const TCGTargetOpDef movc | 43 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) |
83 | - = { .args_ct_str = { "r", "r", "rA", "rZ", "rZ" } }; | 44 | tcg_temp_free_i64(t0); |
84 | - static const TCGTargetOpDef add2 | 45 | } else if (TCG_TARGET_HAS_div2_i64) { |
85 | - = { .args_ct_str = { "r", "r", "rZ", "rZ", "rA", "rMZ" } }; | 46 | TCGv_i64 t0 = tcg_temp_ebb_new_i64(); |
86 | - static const TCGTargetOpDef w_w_w_w | 47 | - tcg_gen_movi_i64(t0, 0); |
87 | - = { .args_ct_str = { "w", "w", "w", "w" } }; | 48 | - tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2); |
88 | - | 49 | + TCGv_i64 zero = tcg_constant_i64(0); |
89 | switch (op) { | 50 | + tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, zero, arg2); |
90 | case INDEX_op_goto_ptr: | 51 | tcg_temp_free_i64(t0); |
91 | - return &r; | 52 | } else { |
92 | + return C_O0_I1(r); | 53 | gen_helper_remu_i64(ret, arg1, arg2); |
93 | |||
94 | case INDEX_op_ld8u_i32: | ||
95 | case INDEX_op_ld8s_i32: | ||
96 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
97 | case INDEX_op_extract_i64: | ||
98 | case INDEX_op_sextract_i32: | ||
99 | case INDEX_op_sextract_i64: | ||
100 | - return &r_r; | ||
101 | + return C_O1_I1(r, r); | ||
102 | |||
103 | case INDEX_op_st8_i32: | ||
104 | case INDEX_op_st16_i32: | ||
105 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
106 | case INDEX_op_st16_i64: | ||
107 | case INDEX_op_st32_i64: | ||
108 | case INDEX_op_st_i64: | ||
109 | - return &rZ_r; | ||
110 | + return C_O0_I2(rZ, r); | ||
111 | |||
112 | case INDEX_op_add_i32: | ||
113 | case INDEX_op_add_i64: | ||
114 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
115 | case INDEX_op_sub_i64: | ||
116 | case INDEX_op_setcond_i32: | ||
117 | case INDEX_op_setcond_i64: | ||
118 | - return &r_r_rA; | ||
119 | + return C_O1_I2(r, r, rA); | ||
120 | |||
121 | case INDEX_op_mul_i32: | ||
122 | case INDEX_op_mul_i64: | ||
123 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
124 | case INDEX_op_remu_i64: | ||
125 | case INDEX_op_muluh_i64: | ||
126 | case INDEX_op_mulsh_i64: | ||
127 | - return &r_r_r; | ||
128 | + return C_O1_I2(r, r, r); | ||
129 | |||
130 | case INDEX_op_and_i32: | ||
131 | case INDEX_op_and_i64: | ||
132 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
133 | case INDEX_op_orc_i64: | ||
134 | case INDEX_op_eqv_i32: | ||
135 | case INDEX_op_eqv_i64: | ||
136 | - return &r_r_rL; | ||
137 | + return C_O1_I2(r, r, rL); | ||
138 | |||
139 | case INDEX_op_shl_i32: | ||
140 | case INDEX_op_shr_i32: | ||
141 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
142 | case INDEX_op_sar_i64: | ||
143 | case INDEX_op_rotl_i64: | ||
144 | case INDEX_op_rotr_i64: | ||
145 | - return &r_r_ri; | ||
146 | + return C_O1_I2(r, r, ri); | ||
147 | |||
148 | case INDEX_op_clz_i32: | ||
149 | case INDEX_op_ctz_i32: | ||
150 | case INDEX_op_clz_i64: | ||
151 | case INDEX_op_ctz_i64: | ||
152 | - return &r_r_rAL; | ||
153 | + return C_O1_I2(r, r, rAL); | ||
154 | |||
155 | case INDEX_op_brcond_i32: | ||
156 | case INDEX_op_brcond_i64: | ||
157 | - return &r_rA; | ||
158 | + return C_O0_I2(r, rA); | ||
159 | |||
160 | case INDEX_op_movcond_i32: | ||
161 | case INDEX_op_movcond_i64: | ||
162 | - return &movc; | ||
163 | + return C_O1_I4(r, r, rA, rZ, rZ); | ||
164 | |||
165 | case INDEX_op_qemu_ld_i32: | ||
166 | case INDEX_op_qemu_ld_i64: | ||
167 | - return &r_l; | ||
168 | + return C_O1_I1(r, l); | ||
169 | case INDEX_op_qemu_st_i32: | ||
170 | case INDEX_op_qemu_st_i64: | ||
171 | - return &lZ_l; | ||
172 | + return C_O0_I2(lZ, l); | ||
173 | |||
174 | case INDEX_op_deposit_i32: | ||
175 | case INDEX_op_deposit_i64: | ||
176 | - return &dep; | ||
177 | + return C_O1_I2(r, 0, rZ); | ||
178 | |||
179 | case INDEX_op_extract2_i32: | ||
180 | case INDEX_op_extract2_i64: | ||
181 | - return &ext2; | ||
182 | + return C_O1_I2(r, rZ, rZ); | ||
183 | |||
184 | case INDEX_op_add2_i32: | ||
185 | case INDEX_op_add2_i64: | ||
186 | case INDEX_op_sub2_i32: | ||
187 | case INDEX_op_sub2_i64: | ||
188 | - return &add2; | ||
189 | + return C_O2_I4(r, r, rZ, rZ, rA, rMZ); | ||
190 | |||
191 | case INDEX_op_add_vec: | ||
192 | case INDEX_op_sub_vec: | ||
193 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
194 | case INDEX_op_shrv_vec: | ||
195 | case INDEX_op_sarv_vec: | ||
196 | case INDEX_op_aa64_sshl_vec: | ||
197 | - return &w_w_w; | ||
198 | + return C_O1_I2(w, w, w); | ||
199 | case INDEX_op_not_vec: | ||
200 | case INDEX_op_neg_vec: | ||
201 | case INDEX_op_abs_vec: | ||
202 | case INDEX_op_shli_vec: | ||
203 | case INDEX_op_shri_vec: | ||
204 | case INDEX_op_sari_vec: | ||
205 | - return &w_w; | ||
206 | + return C_O1_I1(w, w); | ||
207 | case INDEX_op_ld_vec: | ||
208 | - case INDEX_op_st_vec: | ||
209 | case INDEX_op_dupm_vec: | ||
210 | - return &w_r; | ||
211 | + return C_O1_I1(w, r); | ||
212 | + case INDEX_op_st_vec: | ||
213 | + return C_O0_I2(w, r); | ||
214 | case INDEX_op_dup_vec: | ||
215 | - return &w_wr; | ||
216 | + return C_O1_I1(w, wr); | ||
217 | case INDEX_op_or_vec: | ||
218 | case INDEX_op_andc_vec: | ||
219 | - return &w_w_wO; | ||
220 | + return C_O1_I2(w, w, wO); | ||
221 | case INDEX_op_and_vec: | ||
222 | case INDEX_op_orc_vec: | ||
223 | - return &w_w_wN; | ||
224 | + return C_O1_I2(w, w, wN); | ||
225 | case INDEX_op_cmp_vec: | ||
226 | - return &w_w_wZ; | ||
227 | + return C_O1_I2(w, w, wZ); | ||
228 | case INDEX_op_bitsel_vec: | ||
229 | - return &w_w_w_w; | ||
230 | + return C_O1_I3(w, w, w, w); | ||
231 | case INDEX_op_aa64_sli_vec: | ||
232 | - return &w_0_w; | ||
233 | + return C_O1_I2(w, 0, w); | ||
234 | |||
235 | default: | ||
236 | return NULL; | ||
237 | -- | 54 | -- |
238 | 2.25.1 | 55 | 2.34.1 |
239 | |||
240 | diff view generated by jsdifflib |
1 | When the two arguments are identical, this can be reduced to | 1 | We already register allocate through extended basic blocks, |
---|---|---|---|
2 | dup_vec or to mov_vec from a tcg_constant_vec. | 2 | so optimize through extended basic blocks as well.
3 | 3 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | --- | 5 | --- |
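[Note, illustration only: on a 32-bit host, dup2_vec builds a 64-bit vector element from two 32-bit halves, so two known-constant inputs fold to a plain constant, and equal inputs degenerate to a 32-bit dup. fold_dup2 below is a hypothetical plain-C model of that arithmetic, not TCG code.]

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* dup2_vec with two known 32-bit halves is just a 64-bit constant. */
    static uint64_t fold_dup2(uint32_t lo, uint32_t hi)
    {
        return (uint64_t)hi << 32 | lo;
    }

    int main(void)
    {
        uint32_t x = 0xdeadbeef;
        /* Equal halves: the result is x replicated, i.e. a MO_32 dup. */
        printf("%016" PRIx64 "\n", fold_dup2(x, x)); /* deadbeefdeadbeef */
        return 0;
    }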
6 | tcg/optimize.c | 15 +++++++++++++++ | 6 | tcg/optimize.c | 8 +++++--- |
7 | 1 file changed, 15 insertions(+) | 7 | 1 file changed, 5 insertions(+), 3 deletions(-) |
8 | 8 | ||
9 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 9 | diff --git a/tcg/optimize.c b/tcg/optimize.c |
10 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
11 | --- a/tcg/optimize.c | 11 | --- a/tcg/optimize.c |
12 | +++ b/tcg/optimize.c | 12 | +++ b/tcg/optimize.c |
13 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 13 | @@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op) |
14 | } | 14 | int i, nb_oargs; |
15 | goto do_default; | 15 | |
16 | 16 | /* | |
17 | + case INDEX_op_dup2_vec: | 17 | - * For an opcode that ends a BB, reset all temp data. |
18 | + assert(TCG_TARGET_REG_BITS == 32); | 18 | - * We do no cross-BB optimization. |
19 | + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | 19 | + * We only optimize extended basic blocks. If the opcode ends a BB |
20 | + tmp = arg_info(op->args[1])->val; | 20 | + * and is not a conditional branch, reset all temp data. |
21 | + if (tmp == arg_info(op->args[2])->val) { | 21 | */ |
22 | + tcg_opt_gen_movi(s, op, op->args[0], tmp); | 22 | if (def->flags & TCG_OPF_BB_END) { |
23 | + break; | 23 | - memset(&ctx->temps_used, 0, sizeof(ctx->temps_used)); |
24 | + } | 24 | ctx->prev_mb = NULL; |
25 | + } else if (args_are_copies(op->args[1], op->args[2])) { | 25 | + if (!(def->flags & TCG_OPF_COND_BRANCH)) { |
26 | + op->opc = INDEX_op_dup_vec; | 26 | + memset(&ctx->temps_used, 0, sizeof(ctx->temps_used)); |
27 | + TCGOP_VECE(op) = MO_32; | 27 | + } |
28 | + nb_iargs = 1; | 28 | return; |
29 | + } | 29 | } |
30 | + goto do_default; | 30 | |
31 | + | ||
32 | CASE_OP_32_64(not): | ||
33 | CASE_OP_32_64(neg): | ||
34 | CASE_OP_32_64(ext8s): | ||
35 | -- | 31 | -- |
36 | 2.25.1 | 32 | 2.34.1 |
37 | |||
38 | diff view generated by jsdifflib |
1 | Having dupi pass through movi is confusing and arguably wrong. | 1 | Do not require the translators to jump through concat and
---|---|---|---|
2 | extract of i64 in order to move values to and from env. | ||
2 | 3 | ||
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 4 | Tested-by: Song Gao <gaosong@loongson.cn> |
5 | Reviewed-by: Song Gao <gaosong@loongson.cn> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | --- | 8 | --- |
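[Note on the i128 load/store helpers added on the right, illustration only: ld_i128 below is a hypothetical plain-C model of the offset selection, not the TCG opcode. It shows the one decision the helpers encapsulate: the value in env is two host-order 8-byte halves, and host endianness picks which half sits at the lower offset.]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct { uint64_t lo, hi; } I128;

    /* Which half lives at env+offset follows HOST_BIG_ENDIAN, exactly
     * as in the patch; no concat/extract of i64 is needed by callers. */
    static I128 ld_i128(const uint8_t *env, size_t offset, int host_big_endian)
    {
        I128 r;
        memcpy(host_big_endian ? &r.hi : &r.lo, env + offset, 8);
        memcpy(host_big_endian ? &r.lo : &r.hi, env + offset + 8, 8);
        return r;
    }

    int main(void)
    {
        uint8_t env[16] = {0};
        env[0] = 1;                   /* first byte of the low-offset half */
        I128 v = ld_i128(env, 0, 0);  /* little-endian host case */
        printf("lo=%llu hi=%llu\n", (unsigned long long)v.lo,
               (unsigned long long)v.hi);
        return 0;
    }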
6 | tcg/tcg.c | 6 +++- | 9 | include/tcg/tcg-op-common.h | 3 +++ |
7 | tcg/aarch64/tcg-target.c.inc | 7 ---- | 10 | tcg/tcg-op.c | 22 ++++++++++++++++++++++ |
8 | tcg/i386/tcg-target.c.inc | 63 ++++++++++++++++++++++++------------ | 11 | 2 files changed, 25 insertions(+) |
9 | tcg/ppc/tcg-target.c.inc | 6 ---- | ||
10 | 4 files changed, 47 insertions(+), 35 deletions(-) | ||
11 | 12 | ||
12 | diff --git a/tcg/tcg.c b/tcg/tcg.c | 13 | diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h |
13 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/tcg.c | 15 | --- a/include/tcg/tcg-op-common.h |
15 | +++ b/tcg/tcg.c | 16 | +++ b/include/tcg/tcg-op-common.h |
16 | @@ -XXX,XX +XXX,XX @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs, | 17 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_mov_i128(TCGv_i128 dst, TCGv_i128 src); |
17 | case TEMP_VAL_CONST: | 18 | void tcg_gen_extr_i128_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i128 arg); |
18 | reg = tcg_reg_alloc(s, desired_regs, allocated_regs, | 19 | void tcg_gen_concat_i64_i128(TCGv_i128 ret, TCGv_i64 lo, TCGv_i64 hi); |
19 | preferred_regs, ts->indirect_base); | 20 | |
20 | - tcg_out_movi(s, ts->type, reg, ts->val); | 21 | +void tcg_gen_ld_i128(TCGv_i128 ret, TCGv_ptr base, tcg_target_long offset); |
21 | + if (ts->type <= TCG_TYPE_I64) { | 22 | +void tcg_gen_st_i128(TCGv_i128 val, TCGv_ptr base, tcg_target_long offset); |
22 | + tcg_out_movi(s, ts->type, reg, ts->val); | 23 | + |
23 | + } else { | 24 | static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi) |
24 | + tcg_out_dupi_vec(s, ts->type, reg, ts->val); | 25 | { |
25 | + } | 26 | tcg_gen_deposit_i64(ret, lo, hi, 32, 32); |
26 | ts->mem_coherent = 0; | 27 | diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c |
27 | break; | ||
28 | case TEMP_VAL_MEM: | ||
29 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | ||
30 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
31 | --- a/tcg/aarch64/tcg-target.c.inc | 29 | --- a/tcg/tcg-op.c |
32 | +++ b/tcg/aarch64/tcg-target.c.inc | 30 | +++ b/tcg/tcg-op.c |
33 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, | 31 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_mov_i128(TCGv_i128 dst, TCGv_i128 src) |
34 | case TCG_TYPE_I64: | ||
35 | tcg_debug_assert(rd < 32); | ||
36 | break; | ||
37 | - | ||
38 | - case TCG_TYPE_V64: | ||
39 | - case TCG_TYPE_V128: | ||
40 | - tcg_debug_assert(rd >= 32); | ||
41 | - tcg_out_dupi_vec(s, type, rd, value); | ||
42 | - return; | ||
43 | - | ||
44 | default: | ||
45 | g_assert_not_reached(); | ||
46 | } | ||
47 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | ||
48 | index XXXXXXX..XXXXXXX 100644 | ||
49 | --- a/tcg/i386/tcg-target.c.inc | ||
50 | +++ b/tcg/i386/tcg-target.c.inc | ||
51 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, | ||
52 | } | 32 | } |
53 | } | 33 | } |
54 | 34 | ||
55 | -static void tcg_out_movi(TCGContext *s, TCGType type, | 35 | +void tcg_gen_ld_i128(TCGv_i128 ret, TCGv_ptr base, tcg_target_long offset) |
56 | - TCGReg ret, tcg_target_long arg) | ||
57 | +static void tcg_out_movi_vec(TCGContext *s, TCGType type, | ||
58 | + TCGReg ret, tcg_target_long arg) | ||
59 | +{ | 36 | +{ |
60 | + if (arg == 0) { | 37 | + if (HOST_BIG_ENDIAN) { |
61 | + tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret); | 38 | + tcg_gen_ld_i64(TCGV128_HIGH(ret), base, offset); |
62 | + return; | 39 | + tcg_gen_ld_i64(TCGV128_LOW(ret), base, offset + 8); |
63 | + } | ||
64 | + if (arg == -1) { | ||
65 | + tcg_out_vex_modrm(s, OPC_PCMPEQB, ret, ret, ret); | ||
66 | + return; | ||
67 | + } | ||
68 | + | ||
69 | + int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW); | ||
70 | + tcg_out_vex_modrm_pool(s, OPC_MOVD_VyEy + rexw, ret); | ||
71 | + if (TCG_TARGET_REG_BITS == 64) { | ||
72 | + new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4); | ||
73 | + } else { | 40 | + } else { |
74 | + new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0); | 41 | + tcg_gen_ld_i64(TCGV128_LOW(ret), base, offset); |
42 | + tcg_gen_ld_i64(TCGV128_HIGH(ret), base, offset + 8); | ||
75 | + } | 43 | + } |
76 | +} | 44 | +} |
77 | + | 45 | + |
78 | +static void tcg_out_movi_int(TCGContext *s, TCGType type, | 46 | +void tcg_gen_st_i128(TCGv_i128 val, TCGv_ptr base, tcg_target_long offset) |
79 | + TCGReg ret, tcg_target_long arg) | ||
80 | { | ||
81 | tcg_target_long diff; | ||
82 | |||
83 | - switch (type) { | ||
84 | - case TCG_TYPE_I32: | ||
85 | -#if TCG_TARGET_REG_BITS == 64 | ||
86 | - case TCG_TYPE_I64: | ||
87 | -#endif | ||
88 | - if (ret < 16) { | ||
89 | - break; | ||
90 | - } | ||
91 | - /* fallthru */ | ||
92 | - case TCG_TYPE_V64: | ||
93 | - case TCG_TYPE_V128: | ||
94 | - case TCG_TYPE_V256: | ||
95 | - tcg_debug_assert(ret >= 16); | ||
96 | - tcg_out_dupi_vec(s, type, ret, arg); | ||
97 | - return; | ||
98 | - default: | ||
99 | - g_assert_not_reached(); | ||
100 | - } | ||
101 | - | ||
102 | if (arg == 0) { | ||
103 | tgen_arithr(s, ARITH_XOR, ret, ret); | ||
104 | return; | ||
105 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, | ||
106 | tcg_out64(s, arg); | ||
107 | } | ||
108 | |||
109 | +static void tcg_out_movi(TCGContext *s, TCGType type, | ||
110 | + TCGReg ret, tcg_target_long arg) | ||
111 | +{ | 47 | +{ |
112 | + switch (type) { | 48 | + if (HOST_BIG_ENDIAN) { |
113 | + case TCG_TYPE_I32: | 49 | + tcg_gen_st_i64(TCGV128_HIGH(val), base, offset); |
114 | +#if TCG_TARGET_REG_BITS == 64 | 50 | + tcg_gen_st_i64(TCGV128_LOW(val), base, offset + 8); |
115 | + case TCG_TYPE_I64: | 51 | + } else { |
116 | +#endif | 52 | + tcg_gen_st_i64(TCGV128_LOW(val), base, offset); |
117 | + if (ret < 16) { | 53 | + tcg_gen_st_i64(TCGV128_HIGH(val), base, offset + 8); |
118 | + tcg_out_movi_int(s, type, ret, arg); | ||
119 | + } else { | ||
120 | + tcg_out_movi_vec(s, type, ret, arg); | ||
121 | + } | ||
122 | + break; | ||
123 | + default: | ||
124 | + g_assert_not_reached(); | ||
125 | + } | 54 | + } |
126 | +} | 55 | +} |
127 | + | 56 | + |
128 | static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val) | 57 | /* QEMU specific operations. */ |
129 | { | 58 | |
130 | if (val == (int8_t)val) { | 59 | void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx) |
131 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc | ||
132 | index XXXXXXX..XXXXXXX 100644 | ||
133 | --- a/tcg/ppc/tcg-target.c.inc | ||
134 | +++ b/tcg/ppc/tcg-target.c.inc | ||
135 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, | ||
136 | tcg_out_movi_int(s, type, ret, arg, false); | ||
137 | break; | ||
138 | |||
139 | - case TCG_TYPE_V64: | ||
140 | - case TCG_TYPE_V128: | ||
141 | - tcg_debug_assert(ret >= TCG_REG_V0); | ||
142 | - tcg_out_dupi_vec(s, type, ret, arg); | ||
143 | - break; | ||
144 | - | ||
145 | default: | ||
146 | g_assert_not_reached(); | ||
147 | } | ||
148 | -- | 60 | -- |
149 | 2.25.1 | 61 | 2.34.1 |
150 | 62 | ||
151 | 63 | diff view generated by jsdifflib |
1 | Because we now store uint64_t in TCGTemp, we can always | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---|---|---|---|
2 | store the full 64-bit duplicate immediate. So remove the | ||
3 | difference between 32- and 64-bit hosts. | ||
4 | |||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 2 | --- |
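[Note, not part of the patch: a standalone C sketch of the canonicalization the tcg.c hunk keeps after dropping the 32-bit host special case. dup_const_sketch and canonical_vece are made-up names; the former is intended to match QEMU's dup_const, the latter models the vece search in temp_load.]

    #include <stdint.h>
    #include <stdio.h>

    /* Replicate a small element across 64 bits (models dup_const). */
    static uint64_t dup_const_sketch(unsigned vece, uint64_t a)
    {
        switch (vece) {
        case 0: return 0x0101010101010101ull * (uint8_t)a;   /* MO_8 */
        case 1: return 0x0001000100010001ull * (uint16_t)a;  /* MO_16 */
        case 2: return 0x0000000100000001ull * (uint32_t)a;  /* MO_32 */
        default: return a;                                   /* MO_64 */
        }
    }

    /* Smallest element size whose duplication reproduces val; with the
     * 32-bit host special case gone, the same search runs everywhere. */
    static unsigned canonical_vece(uint64_t val)
    {
        unsigned vece;
        for (vece = 0; vece < 3; vece++) {
            if (val == dup_const_sketch(vece, val)) {
                break;
            }
        }
        return vece;
    }

    int main(void)
    {
        printf("%u\n", canonical_vece(0xababababababababull)); /* 0: MO_8 */
        printf("%u\n", canonical_vece(0x12345678deadbeefull)); /* 3: MO_64 */
        return 0;
    }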
7 | tcg/optimize.c | 9 ++++----- | 3 | target/i386/tcg/translate.c | 63 +++++++++++++++++-------------------- |
8 | tcg/tcg-op-vec.c | 39 ++++++++++----------------------------- | 4 | 1 file changed, 29 insertions(+), 34 deletions(-) |
9 | tcg/tcg.c | 7 +------ | ||
10 | 3 files changed, 15 insertions(+), 40 deletions(-) | ||
11 | 5 | ||
12 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 6 | diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c |
13 | index XXXXXXX..XXXXXXX 100644 | 7 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/optimize.c | 8 | --- a/target/i386/tcg/translate.c |
15 | +++ b/tcg/optimize.c | 9 | +++ b/target/i386/tcg/translate.c |
16 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | 10 | @@ -XXX,XX +XXX,XX @@ static inline void gen_stq_env_A0(DisasContext *s, int offset) |
17 | case INDEX_op_dup2_vec: | 11 | |
18 | assert(TCG_TARGET_REG_BITS == 32); | 12 | static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align) |
19 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | 13 | { |
20 | - tmp = arg_info(op->args[1])->val; | 14 | + MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX |
21 | - if (tmp == arg_info(op->args[2])->val) { | 15 | + ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR); |
22 | - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); | 16 | + MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0); |
23 | - break; | 17 | int mem_index = s->mem_index; |
24 | - } | 18 | - tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, |
25 | + tcg_opt_gen_movi(s, &temps_used, op, op->args[0], | 19 | - MO_LEUQ | (align ? MO_ALIGN_16 : 0)); |
26 | + deposit64(arg_info(op->args[1])->val, 32, 32, | 20 | - tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(0))); |
27 | + arg_info(op->args[2])->val)); | 21 | - tcg_gen_addi_tl(s->tmp0, s->A0, 8); |
28 | + break; | 22 | - tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); |
29 | } else if (args_are_copies(op->args[1], op->args[2])) { | 23 | - tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(1))); |
30 | op->opc = INDEX_op_dup_vec; | 24 | + TCGv_i128 t = tcg_temp_new_i128(); |
31 | TCGOP_VECE(op) = MO_32; | 25 | + |
32 | diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c | 26 | + tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop); |
33 | index XXXXXXX..XXXXXXX 100644 | 27 | + tcg_gen_st_i128(t, tcg_env, offset); |
34 | --- a/tcg/tcg-op-vec.c | ||
35 | +++ b/tcg/tcg-op-vec.c | ||
36 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_mov_vec(TCGv_vec r, TCGv_vec a) | ||
37 | } | ||
38 | } | 28 | } |
39 | 29 | ||
40 | -#define MO_REG (TCG_TARGET_REG_BITS == 64 ? MO_64 : MO_32) | 30 | static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align) |
41 | - | ||
42 | -static void do_dupi_vec(TCGv_vec r, unsigned vece, TCGArg a) | ||
43 | -{ | ||
44 | - TCGTemp *rt = tcgv_vec_temp(r); | ||
45 | - vec_gen_2(INDEX_op_dupi_vec, rt->base_type, vece, temp_arg(rt), a); | ||
46 | -} | ||
47 | - | ||
48 | TCGv_vec tcg_const_zeros_vec(TCGType type) | ||
49 | { | 31 | { |
50 | TCGv_vec ret = tcg_temp_new_vec(type); | 32 | + MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX |
51 | - do_dupi_vec(ret, MO_REG, 0); | 33 | + ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR); |
52 | + tcg_gen_dupi_vec(MO_64, ret, 0); | 34 | + MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0); |
53 | return ret; | 35 | int mem_index = s->mem_index; |
36 | - tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(0))); | ||
37 | - tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, | ||
38 | - MO_LEUQ | (align ? MO_ALIGN_16 : 0)); | ||
39 | - tcg_gen_addi_tl(s->tmp0, s->A0, 8); | ||
40 | - tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(1))); | ||
41 | - tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); | ||
42 | + TCGv_i128 t = tcg_temp_new_i128(); | ||
43 | + | ||
44 | + tcg_gen_ld_i128(t, tcg_env, offset); | ||
45 | + tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop); | ||
54 | } | 46 | } |
55 | 47 | ||
56 | TCGv_vec tcg_const_ones_vec(TCGType type) | 48 | static void gen_ldy_env_A0(DisasContext *s, int offset, bool align) |
57 | { | 49 | { |
58 | TCGv_vec ret = tcg_temp_new_vec(type); | 50 | + MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR; |
59 | - do_dupi_vec(ret, MO_REG, -1); | 51 | int mem_index = s->mem_index; |
60 | + tcg_gen_dupi_vec(MO_64, ret, -1); | 52 | - tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, |
61 | return ret; | 53 | - MO_LEUQ | (align ? MO_ALIGN_32 : 0)); |
54 | - tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(0))); | ||
55 | - tcg_gen_addi_tl(s->tmp0, s->A0, 8); | ||
56 | - tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); | ||
57 | - tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(1))); | ||
58 | + TCGv_i128 t0 = tcg_temp_new_i128(); | ||
59 | + TCGv_i128 t1 = tcg_temp_new_i128(); | ||
60 | |||
61 | + tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0)); | ||
62 | tcg_gen_addi_tl(s->tmp0, s->A0, 16); | ||
63 | - tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); | ||
64 | - tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(2))); | ||
65 | - tcg_gen_addi_tl(s->tmp0, s->A0, 24); | ||
66 | - tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); | ||
67 | - tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(3))); | ||
68 | + tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop); | ||
69 | + | ||
70 | + tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0))); | ||
71 | + tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1))); | ||
62 | } | 72 | } |
63 | 73 | ||
64 | @@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m) | 74 | static void gen_sty_env_A0(DisasContext *s, int offset, bool align) |
65 | |||
66 | void tcg_gen_dup64i_vec(TCGv_vec r, uint64_t a) | ||
67 | { | 75 | { |
68 | - if (TCG_TARGET_REG_BITS == 64) { | 76 | + MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR; |
69 | - do_dupi_vec(r, MO_64, a); | 77 | int mem_index = s->mem_index; |
70 | - } else if (a == dup_const(MO_32, a)) { | 78 | - tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(0))); |
71 | - do_dupi_vec(r, MO_32, a); | 79 | - tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, |
72 | - } else { | 80 | - MO_LEUQ | (align ? MO_ALIGN_32 : 0)); |
73 | - TCGv_i64 c = tcg_const_i64(a); | 81 | - tcg_gen_addi_tl(s->tmp0, s->A0, 8); |
74 | - tcg_gen_dup_i64_vec(MO_64, r, c); | 82 | - tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(1))); |
75 | - tcg_temp_free_i64(c); | 83 | - tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); |
76 | - } | 84 | + TCGv_i128 t = tcg_temp_new_i128(); |
77 | + tcg_gen_dupi_vec(MO_64, r, a); | 85 | + |
86 | + tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0))); | ||
87 | + tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0)); | ||
88 | tcg_gen_addi_tl(s->tmp0, s->A0, 16); | ||
89 | - tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(2))); | ||
90 | - tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); | ||
91 | - tcg_gen_addi_tl(s->tmp0, s->A0, 24); | ||
92 | - tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(3))); | ||
93 | - tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); | ||
94 | + tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1))); | ||
95 | + tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop); | ||
78 | } | 96 | } |
79 | 97 | ||
80 | void tcg_gen_dup32i_vec(TCGv_vec r, uint32_t a) | 98 | #include "decode-new.h" |
81 | { | ||
82 | - do_dupi_vec(r, MO_REG, dup_const(MO_32, a)); | ||
83 | + tcg_gen_dupi_vec(MO_32, r, a); | ||
84 | } | ||
85 | |||
86 | void tcg_gen_dup16i_vec(TCGv_vec r, uint32_t a) | ||
87 | { | ||
88 | - do_dupi_vec(r, MO_REG, dup_const(MO_16, a)); | ||
89 | + tcg_gen_dupi_vec(MO_16, r, a); | ||
90 | } | ||
91 | |||
92 | void tcg_gen_dup8i_vec(TCGv_vec r, uint32_t a) | ||
93 | { | ||
94 | - do_dupi_vec(r, MO_REG, dup_const(MO_8, a)); | ||
95 | + tcg_gen_dupi_vec(MO_8, r, a); | ||
96 | } | ||
97 | |||
98 | void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a) | ||
99 | { | ||
100 | - if (vece == MO_64) { | ||
101 | - tcg_gen_dup64i_vec(r, a); | ||
102 | - } else { | ||
103 | - do_dupi_vec(r, MO_REG, dup_const(vece, a)); | ||
104 | - } | ||
105 | + TCGTemp *rt = tcgv_vec_temp(r); | ||
106 | + tcg_gen_mov_vec(r, tcg_constant_vec(rt->base_type, vece, a)); | ||
107 | } | ||
108 | |||
109 | void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec r, TCGv_i64 a) | ||
110 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a) | ||
111 | if (tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0) { | ||
112 | tcg_gen_sari_vec(vece, t, a, (8 << vece) - 1); | ||
113 | } else { | ||
114 | - do_dupi_vec(t, MO_REG, 0); | ||
115 | - tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a, t); | ||
116 | + tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a, | ||
117 | + tcg_constant_vec(type, vece, 0)); | ||
118 | } | ||
119 | tcg_gen_xor_vec(vece, r, a, t); | ||
120 | tcg_gen_sub_vec(vece, r, r, t); | ||
121 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
122 | index XXXXXXX..XXXXXXX 100644 | ||
123 | --- a/tcg/tcg.c | ||
124 | +++ b/tcg/tcg.c | ||
125 | @@ -XXX,XX +XXX,XX @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs, | ||
126 | * The targets will, in general, have to do this search anyway, | ||
127 | * do this generically. | ||
128 | */ | ||
129 | - if (TCG_TARGET_REG_BITS == 32) { | ||
130 | - val = dup_const(MO_32, val); | ||
131 | - vece = MO_32; | ||
132 | - } | ||
133 | if (val == dup_const(MO_8, val)) { | ||
134 | vece = MO_8; | ||
135 | } else if (val == dup_const(MO_16, val)) { | ||
136 | vece = MO_16; | ||
137 | - } else if (TCG_TARGET_REG_BITS == 64 && | ||
138 | - val == dup_const(MO_32, val)) { | ||
139 | + } else if (val == dup_const(MO_32, val)) { | ||
140 | vece = MO_32; | ||
141 | } | ||
142 | |||
143 | -- | 99 | -- |
144 | 2.25.1 | 100 | 2.34.1 |
145 | |||
146 | diff view generated by jsdifflib |
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
---|---|---|---|
2 | |||
3 | This can be useful to write a shift bit extraction that does not | ||
4 | depend on TARGET_LONG_BITS. | ||
5 | |||
6 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
7 | Message-Id: <20231019104648.389942-15-pbonzini@redhat.com> | ||
1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
2 | --- | 9 | --- |
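[Note on the negsetcondi helpers added on the right, illustration only: negsetcond_ne below is a hypothetical plain-C model, assuming negsetcond behaves like setcond with a true result of -1 (all bits set) rather than 1, which is what makes it usable as a mask without knowing the register width.]

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model: the "true" result is -1, so the comparison doubles as a
     * mask regardless of TARGET_LONG_BITS. */
    static int64_t negsetcond_ne(int64_t a, int64_t b)
    {
        return -(int64_t)(a != b);
    }

    int main(void)
    {
        int64_t count = 0x23, flag = 1;
        /* Keep the value only when the flag is set, with no width test. */
        printf("%" PRIx64 "\n", count & negsetcond_ne(flag, 0)); /* 23 */
        return 0;
    }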
3 | include/tcg/tcg-op.h | 13 +-- | 10 | include/tcg/tcg-op-common.h | 4 ++++ |
4 | tcg/tcg-op.c | 227 ++++++++++++++++++++----------------------- | 11 | include/tcg/tcg-op.h | 2 ++ |
5 | 2 files changed, 109 insertions(+), 131 deletions(-) | 12 | tcg/tcg-op.c | 12 ++++++++++++ |
13 | 3 files changed, 18 insertions(+) | ||
6 | 14 | ||
15 | diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/include/tcg/tcg-op-common.h | ||
18 | +++ b/include/tcg/tcg-op-common.h | ||
19 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret, | ||
20 | TCGv_i32 arg1, int32_t arg2); | ||
21 | void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret, | ||
22 | TCGv_i32 arg1, TCGv_i32 arg2); | ||
23 | +void tcg_gen_negsetcondi_i32(TCGCond cond, TCGv_i32 ret, | ||
24 | + TCGv_i32 arg1, int32_t arg2); | ||
25 | void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1, | ||
26 | TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2); | ||
27 | void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, | ||
28 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret, | ||
29 | TCGv_i64 arg1, int64_t arg2); | ||
30 | void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret, | ||
31 | TCGv_i64 arg1, TCGv_i64 arg2); | ||
32 | +void tcg_gen_negsetcondi_i64(TCGCond cond, TCGv_i64 ret, | ||
33 | + TCGv_i64 arg1, int64_t arg2); | ||
34 | void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1, | ||
35 | TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2); | ||
36 | void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, | ||
7 | diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h | 37 | diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h |
8 | index XXXXXXX..XXXXXXX 100644 | 38 | index XXXXXXX..XXXXXXX 100644 |
9 | --- a/include/tcg/tcg-op.h | 39 | --- a/include/tcg/tcg-op.h |
10 | +++ b/include/tcg/tcg-op.h | 40 | +++ b/include/tcg/tcg-op.h |
11 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_mb(TCGBar); | 41 | @@ -XXX,XX +XXX,XX @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64) |
12 | 42 | #define tcg_gen_setcond_tl tcg_gen_setcond_i64 | |
13 | /* 32 bit ops */ | 43 | #define tcg_gen_setcondi_tl tcg_gen_setcondi_i64 |
14 | 44 | #define tcg_gen_negsetcond_tl tcg_gen_negsetcond_i64 | |
15 | +void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg); | 45 | +#define tcg_gen_negsetcondi_tl tcg_gen_negsetcondi_i64 |
16 | void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); | 46 | #define tcg_gen_mul_tl tcg_gen_mul_i64 |
17 | void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2); | 47 | #define tcg_gen_muli_tl tcg_gen_muli_i64 |
18 | void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); | 48 | #define tcg_gen_div_tl tcg_gen_div_i64 |
19 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg) | 49 | @@ -XXX,XX +XXX,XX @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64) |
20 | } | 50 | #define tcg_gen_setcond_tl tcg_gen_setcond_i32 |
21 | } | 51 | #define tcg_gen_setcondi_tl tcg_gen_setcondi_i32 |
22 | 52 | #define tcg_gen_negsetcond_tl tcg_gen_negsetcond_i32 | |
23 | -static inline void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg) | 53 | +#define tcg_gen_negsetcondi_tl tcg_gen_negsetcondi_i32 |
24 | -{ | 54 | #define tcg_gen_mul_tl tcg_gen_mul_i32 |
25 | - tcg_gen_op2i_i32(INDEX_op_movi_i32, ret, arg); | 55 | #define tcg_gen_muli_tl tcg_gen_muli_i32 |
26 | -} | 56 | #define tcg_gen_div_tl tcg_gen_div_i32 |
27 | - | ||
28 | static inline void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2, | ||
29 | tcg_target_long offset) | ||
30 | { | ||
31 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg) | ||
32 | |||
33 | /* 64 bit ops */ | ||
34 | |||
35 | +void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg); | ||
36 | void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); | ||
37 | void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2); | ||
38 | void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); | ||
39 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg) | ||
40 | } | ||
41 | } | ||
42 | |||
43 | -static inline void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg) | ||
44 | -{ | ||
45 | - tcg_gen_op2i_i64(INDEX_op_movi_i64, ret, arg); | ||
46 | -} | ||
47 | - | ||
48 | static inline void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, | ||
49 | tcg_target_long offset) | ||
50 | { | ||
51 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | ||
52 | |||
53 | void tcg_gen_discard_i64(TCGv_i64 arg); | ||
54 | void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg); | ||
55 | -void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg); | ||
56 | void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); | ||
57 | void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); | ||
58 | void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); | ||
59 | diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c | 57 | diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c |
60 | index XXXXXXX..XXXXXXX 100644 | 58 | index XXXXXXX..XXXXXXX 100644 |
61 | --- a/tcg/tcg-op.c | 59 | --- a/tcg/tcg-op.c |
62 | +++ b/tcg/tcg-op.c | 60 | +++ b/tcg/tcg-op.c |
63 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_mb(TCGBar mb_type) | 61 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret, |
64 | 62 | } | |
65 | /* 32 bit ops */ | 63 | } |
66 | 64 | ||
67 | +void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg) | 65 | +void tcg_gen_negsetcondi_i32(TCGCond cond, TCGv_i32 ret, |
66 | + TCGv_i32 arg1, int32_t arg2) | ||
68 | +{ | 67 | +{ |
69 | + tcg_gen_mov_i32(ret, tcg_constant_i32(arg)); | 68 | + tcg_gen_negsetcond_i32(cond, ret, arg1, tcg_constant_i32(arg2)); |
70 | +} | 69 | +} |
71 | + | 70 | + |
72 | void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | 71 | void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) |
73 | { | 72 | { |
74 | /* some cases can be optimized here */ | ||
75 | if (arg2 == 0) { | 73 | if (arg2 == 0) { |
76 | tcg_gen_mov_i32(ret, arg1); | 74 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret, |
77 | } else { | ||
78 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
79 | - tcg_gen_add_i32(ret, arg1, t0); | ||
80 | - tcg_temp_free_i32(t0); | ||
81 | + tcg_gen_add_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
82 | } | 75 | } |
83 | } | 76 | } |
84 | 77 | ||
85 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2) | 78 | +void tcg_gen_negsetcondi_i64(TCGCond cond, TCGv_i64 ret, |
86 | /* Don't recurse with tcg_gen_neg_i32. */ | 79 | + TCGv_i64 arg1, int64_t arg2) |
87 | tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg2); | ||
88 | } else { | ||
89 | - TCGv_i32 t0 = tcg_const_i32(arg1); | ||
90 | - tcg_gen_sub_i32(ret, t0, arg2); | ||
91 | - tcg_temp_free_i32(t0); | ||
92 | + tcg_gen_sub_i32(ret, tcg_constant_i32(arg1), arg2); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
97 | if (arg2 == 0) { | ||
98 | tcg_gen_mov_i32(ret, arg1); | ||
99 | } else { | ||
100 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
101 | - tcg_gen_sub_i32(ret, arg1, t0); | ||
102 | - tcg_temp_free_i32(t0); | ||
103 | + tcg_gen_sub_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
104 | } | ||
105 | } | ||
106 | |||
107 | void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
108 | { | ||
109 | - TCGv_i32 t0; | ||
110 | /* Some cases can be optimized here. */ | ||
111 | switch (arg2) { | ||
112 | case 0: | ||
113 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
114 | } | ||
115 | break; | ||
116 | } | ||
117 | - t0 = tcg_const_i32(arg2); | ||
118 | - tcg_gen_and_i32(ret, arg1, t0); | ||
119 | - tcg_temp_free_i32(t0); | ||
120 | + | ||
121 | + tcg_gen_and_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
122 | } | ||
123 | |||
124 | void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
125 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
126 | } else if (arg2 == 0) { | ||
127 | tcg_gen_mov_i32(ret, arg1); | ||
128 | } else { | ||
129 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
130 | - tcg_gen_or_i32(ret, arg1, t0); | ||
131 | - tcg_temp_free_i32(t0); | ||
132 | + tcg_gen_or_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
133 | } | ||
134 | } | ||
135 | |||
136 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
137 | /* Don't recurse with tcg_gen_not_i32. */ | ||
138 | tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1); | ||
139 | } else { | ||
140 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
141 | - tcg_gen_xor_i32(ret, arg1, t0); | ||
142 | - tcg_temp_free_i32(t0); | ||
143 | + tcg_gen_xor_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
144 | } | ||
145 | } | ||
146 | |||
147 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
148 | if (arg2 == 0) { | ||
149 | tcg_gen_mov_i32(ret, arg1); | ||
150 | } else { | ||
151 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
152 | - tcg_gen_shl_i32(ret, arg1, t0); | ||
153 | - tcg_temp_free_i32(t0); | ||
154 | + tcg_gen_shl_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
155 | } | ||
156 | } | ||
157 | |||
158 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
159 | if (arg2 == 0) { | ||
160 | tcg_gen_mov_i32(ret, arg1); | ||
161 | } else { | ||
162 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
163 | - tcg_gen_shr_i32(ret, arg1, t0); | ||
164 | - tcg_temp_free_i32(t0); | ||
165 | + tcg_gen_shr_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
166 | } | ||
167 | } | ||
168 | |||
169 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
170 | if (arg2 == 0) { | ||
171 | tcg_gen_mov_i32(ret, arg1); | ||
172 | } else { | ||
173 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
174 | - tcg_gen_sar_i32(ret, arg1, t0); | ||
175 | - tcg_temp_free_i32(t0); | ||
176 | + tcg_gen_sar_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
177 | } | ||
178 | } | ||
179 | |||
180 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l) | ||
181 | if (cond == TCG_COND_ALWAYS) { | ||
182 | tcg_gen_br(l); | ||
183 | } else if (cond != TCG_COND_NEVER) { | ||
184 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
185 | - tcg_gen_brcond_i32(cond, arg1, t0, l); | ||
186 | - tcg_temp_free_i32(t0); | ||
187 | + tcg_gen_brcond_i32(cond, arg1, tcg_constant_i32(arg2), l); | ||
188 | } | ||
189 | } | ||
190 | |||
191 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret, | ||
192 | void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret, | ||
193 | TCGv_i32 arg1, int32_t arg2) | ||
194 | { | ||
195 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
196 | - tcg_gen_setcond_i32(cond, ret, arg1, t0); | ||
197 | - tcg_temp_free_i32(t0); | ||
198 | + tcg_gen_setcond_i32(cond, ret, arg1, tcg_constant_i32(arg2)); | ||
199 | } | ||
200 | |||
201 | void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
202 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
203 | } else if (is_power_of_2(arg2)) { | ||
204 | tcg_gen_shli_i32(ret, arg1, ctz32(arg2)); | ||
205 | } else { | ||
206 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
207 | - tcg_gen_mul_i32(ret, arg1, t0); | ||
208 | - tcg_temp_free_i32(t0); | ||
209 | + tcg_gen_mul_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
210 | } | ||
211 | } | ||
212 | |||
213 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) | ||
214 | |||
215 | void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) | ||
216 | { | ||
217 | - TCGv_i32 t = tcg_const_i32(arg2); | ||
218 | - tcg_gen_clz_i32(ret, arg1, t); | ||
219 | - tcg_temp_free_i32(t); | ||
220 | + tcg_gen_clz_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
221 | } | ||
222 | |||
223 | void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) | ||
224 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) | ||
225 | tcg_gen_clzi_i32(t, t, 32); | ||
226 | tcg_gen_xori_i32(t, t, 31); | ||
227 | } | ||
228 | - z = tcg_const_i32(0); | ||
229 | + z = tcg_constant_i32(0); | ||
230 | tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t); | ||
231 | tcg_temp_free_i32(t); | ||
232 | - tcg_temp_free_i32(z); | ||
233 | } else { | ||
234 | gen_helper_ctz_i32(ret, arg1, arg2); | ||
235 | } | ||
236 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) | ||
237 | tcg_gen_ctpop_i32(ret, t); | ||
238 | tcg_temp_free_i32(t); | ||
239 | } else { | ||
240 | - TCGv_i32 t = tcg_const_i32(arg2); | ||
241 | - tcg_gen_ctz_i32(ret, arg1, t); | ||
242 | - tcg_temp_free_i32(t); | ||
243 | + tcg_gen_ctz_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
244 | } | ||
245 | } | ||
246 | |||
247 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) | ||
248 | if (arg2 == 0) { | ||
249 | tcg_gen_mov_i32(ret, arg1); | ||
250 | } else if (TCG_TARGET_HAS_rot_i32) { | ||
251 | - TCGv_i32 t0 = tcg_const_i32(arg2); | ||
252 | - tcg_gen_rotl_i32(ret, arg1, t0); | ||
253 | - tcg_temp_free_i32(t0); | ||
254 | + tcg_gen_rotl_i32(ret, arg1, tcg_constant_i32(arg2)); | ||
255 | } else { | ||
256 | TCGv_i32 t0, t1; | ||
257 | t0 = tcg_temp_new_i32(); | ||
258 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg, | ||
259 | tcg_gen_andi_i32(ret, arg, (1u << len) - 1); | ||
260 | } else if (TCG_TARGET_HAS_deposit_i32 | ||
261 | && TCG_TARGET_deposit_i32_valid(ofs, len)) { | ||
262 | - TCGv_i32 zero = tcg_const_i32(0); | ||
263 | + TCGv_i32 zero = tcg_constant_i32(0); | ||
264 | tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len); | ||
265 | - tcg_temp_free_i32(zero); | ||
266 | } else { | ||
267 | /* To help two-operand hosts we prefer to zero-extend first, | ||
268 | which allows ARG to stay live. */ | ||
269 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg) | ||
270 | } else { | ||
271 | TCGv_i32 t0 = tcg_temp_new_i32(); | ||
272 | TCGv_i32 t1 = tcg_temp_new_i32(); | ||
273 | - TCGv_i32 t2 = tcg_const_i32(0x00ff00ff); | ||
274 | + TCGv_i32 t2 = tcg_constant_i32(0x00ff00ff); | ||
275 | |||
276 | /* arg = abcd */ | ||
277 | tcg_gen_shri_i32(t0, arg, 8); /* t0 = .abc */ | ||
278 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg) | ||
279 | |||
280 | tcg_temp_free_i32(t0); | ||
281 | tcg_temp_free_i32(t1); | ||
282 | - tcg_temp_free_i32(t2); | ||
283 | } | ||
284 | } | ||
285 | |||
286 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_discard_i64(TCGv_i64 arg) | ||
287 | |||
288 | void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg) | ||
289 | { | ||
290 | - tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | ||
291 | - tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg)); | ||
292 | + TCGTemp *ts = tcgv_i64_temp(arg); | ||
293 | + | ||
294 | + /* Canonicalize TCGv_i64 TEMP_CONST into TCGv_i32 TEMP_CONST. */ | ||
295 | + if (ts->kind == TEMP_CONST) { | ||
296 | + tcg_gen_movi_i64(ret, ts->val); | ||
297 | + } else { | ||
298 | + tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); | ||
299 | + tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg)); | ||
300 | + } | ||
301 | } | ||
302 | |||
303 | void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg) | ||
304 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | ||
305 | tcg_temp_free_i64(t0); | ||
306 | tcg_temp_free_i32(t1); | ||
307 | } | ||
308 | + | ||
309 | +#else | ||
310 | + | ||
311 | +void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg) | ||
312 | +{ | 80 | +{ |
313 | + tcg_gen_mov_i64(ret, tcg_constant_i64(arg)); | 81 | + tcg_gen_negsetcond_i64(cond, ret, arg1, tcg_constant_i64(arg2)); |
314 | +} | 82 | +} |
315 | + | 83 | + |
316 | #endif /* TCG_TARGET_REG_SIZE == 32 */ | 84 | void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret, |
317 | 85 | TCGv_i64 arg1, TCGv_i64 arg2) | |
318 | void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
319 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
320 | /* some cases can be optimized here */ | ||
321 | if (arg2 == 0) { | ||
322 | tcg_gen_mov_i64(ret, arg1); | ||
323 | + } else if (TCG_TARGET_REG_BITS == 64) { | ||
324 | + tcg_gen_add_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
325 | } else { | ||
326 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
327 | - tcg_gen_add_i64(ret, arg1, t0); | ||
328 | - tcg_temp_free_i64(t0); | ||
329 | + tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), | ||
330 | + TCGV_LOW(arg1), TCGV_HIGH(arg1), | ||
331 | + tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32)); | ||
332 | } | ||
333 | } | ||
334 | |||
335 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2) | ||
336 | if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) { | ||
337 | /* Don't recurse with tcg_gen_neg_i64. */ | ||
338 | tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg2); | ||
339 | + } else if (TCG_TARGET_REG_BITS == 64) { | ||
340 | + tcg_gen_sub_i64(ret, tcg_constant_i64(arg1), arg2); | ||
341 | } else { | ||
342 | - TCGv_i64 t0 = tcg_const_i64(arg1); | ||
343 | - tcg_gen_sub_i64(ret, t0, arg2); | ||
344 | - tcg_temp_free_i64(t0); | ||
345 | + tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), | ||
346 | + tcg_constant_i32(arg1), tcg_constant_i32(arg1 >> 32), | ||
347 | + TCGV_LOW(arg2), TCGV_HIGH(arg2)); | ||
348 | } | ||
349 | } | ||
350 | |||
351 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
352 | /* some cases can be optimized here */ | ||
353 | if (arg2 == 0) { | ||
354 | tcg_gen_mov_i64(ret, arg1); | ||
355 | + } else if (TCG_TARGET_REG_BITS == 64) { | ||
356 | + tcg_gen_sub_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
357 | } else { | ||
358 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
359 | - tcg_gen_sub_i64(ret, arg1, t0); | ||
360 | - tcg_temp_free_i64(t0); | ||
361 | + tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), | ||
362 | + TCGV_LOW(arg1), TCGV_HIGH(arg1), | ||
363 | + tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32)); | ||
364 | } | ||
365 | } | ||
366 | |||
367 | void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
368 | { | 86 | { |
369 | - TCGv_i64 t0; | ||
370 | - | ||
371 | if (TCG_TARGET_REG_BITS == 32) { | ||
372 | tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2); | ||
373 | tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); | ||
374 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
375 | } | ||
376 | break; | ||
377 | } | ||
378 | - t0 = tcg_const_i64(arg2); | ||
379 | - tcg_gen_and_i64(ret, arg1, t0); | ||
380 | - tcg_temp_free_i64(t0); | ||
381 | + | ||
382 | + tcg_gen_and_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
383 | } | ||
384 | |||
385 | void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
386 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
387 | } else if (arg2 == 0) { | ||
388 | tcg_gen_mov_i64(ret, arg1); | ||
389 | } else { | ||
390 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
391 | - tcg_gen_or_i64(ret, arg1, t0); | ||
392 | - tcg_temp_free_i64(t0); | ||
393 | + tcg_gen_or_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
394 | } | ||
395 | } | ||
396 | |||
397 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
398 | /* Don't recurse with tcg_gen_not_i64. */ | ||
399 | tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1); | ||
400 | } else { | ||
401 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
402 | - tcg_gen_xor_i64(ret, arg1, t0); | ||
403 | - tcg_temp_free_i64(t0); | ||
404 | + tcg_gen_xor_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
405 | } | ||
406 | } | ||
407 | |||
408 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
409 | } else if (arg2 == 0) { | ||
410 | tcg_gen_mov_i64(ret, arg1); | ||
411 | } else { | ||
412 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
413 | - tcg_gen_shl_i64(ret, arg1, t0); | ||
414 | - tcg_temp_free_i64(t0); | ||
415 | + tcg_gen_shl_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
416 | } | ||
417 | } | ||
418 | |||
419 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
420 | } else if (arg2 == 0) { | ||
421 | tcg_gen_mov_i64(ret, arg1); | ||
422 | } else { | ||
423 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
424 | - tcg_gen_shr_i64(ret, arg1, t0); | ||
425 | - tcg_temp_free_i64(t0); | ||
426 | + tcg_gen_shr_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
427 | } | ||
428 | } | ||
429 | |||
430 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
431 | } else if (arg2 == 0) { | ||
432 | tcg_gen_mov_i64(ret, arg1); | ||
433 | } else { | ||
434 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
435 | - tcg_gen_sar_i64(ret, arg1, t0); | ||
436 | - tcg_temp_free_i64(t0); | ||
437 | + tcg_gen_sar_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
438 | } | ||
439 | } | ||
440 | |||
441 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l) | ||
442 | |||
443 | void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l) | ||
444 | { | ||
445 | - if (cond == TCG_COND_ALWAYS) { | ||
446 | + if (TCG_TARGET_REG_BITS == 64) { | ||
447 | + tcg_gen_brcond_i64(cond, arg1, tcg_constant_i64(arg2), l); | ||
448 | + } else if (cond == TCG_COND_ALWAYS) { | ||
449 | tcg_gen_br(l); | ||
450 | } else if (cond != TCG_COND_NEVER) { | ||
451 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
452 | - tcg_gen_brcond_i64(cond, arg1, t0, l); | ||
453 | - tcg_temp_free_i64(t0); | ||
454 | + l->refs++; | ||
455 | + tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, | ||
456 | + TCGV_LOW(arg1), TCGV_HIGH(arg1), | ||
457 | + tcg_constant_i32(arg2), | ||
458 | + tcg_constant_i32(arg2 >> 32), | ||
459 | + cond, label_arg(l)); | ||
460 | } | ||
461 | } | ||
462 | |||
463 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret, | ||
464 | void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret, | ||
465 | TCGv_i64 arg1, int64_t arg2) | ||
466 | { | ||
467 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
468 | - tcg_gen_setcond_i64(cond, ret, arg1, t0); | ||
469 | - tcg_temp_free_i64(t0); | ||
470 | + if (TCG_TARGET_REG_BITS == 64) { | ||
471 | + tcg_gen_setcond_i64(cond, ret, arg1, tcg_constant_i64(arg2)); | ||
472 | + } else if (cond == TCG_COND_ALWAYS) { | ||
473 | + tcg_gen_movi_i64(ret, 1); | ||
474 | + } else if (cond == TCG_COND_NEVER) { | ||
475 | + tcg_gen_movi_i64(ret, 0); | ||
476 | + } else { | ||
477 | + tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret), | ||
478 | + TCGV_LOW(arg1), TCGV_HIGH(arg1), | ||
479 | + tcg_constant_i32(arg2), | ||
480 | + tcg_constant_i32(arg2 >> 32), cond); | ||
481 | + tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | ||
482 | + } | ||
483 | } | ||
484 | |||
485 | void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
486 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg) | ||
487 | } else { | ||
488 | TCGv_i64 t0 = tcg_temp_new_i64(); | ||
489 | TCGv_i64 t1 = tcg_temp_new_i64(); | ||
490 | - TCGv_i64 t2 = tcg_const_i64(0x00ff00ff); | ||
491 | + TCGv_i64 t2 = tcg_constant_i64(0x00ff00ff); | ||
492 | |||
493 | /* arg = ....abcd */ | ||
494 | tcg_gen_shri_i64(t0, arg, 8); /* t0 = .....abc */ | ||
495 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg) | ||
496 | |||
497 | tcg_temp_free_i64(t0); | ||
498 | tcg_temp_free_i64(t1); | ||
499 | - tcg_temp_free_i64(t2); | ||
500 | } | ||
501 | } | ||
502 | |||
503 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) | ||
504 | if (TCG_TARGET_REG_BITS == 32 | ||
505 | && TCG_TARGET_HAS_clz_i32 | ||
506 | && arg2 <= 0xffffffffu) { | ||
507 | - TCGv_i32 t = tcg_const_i32((uint32_t)arg2 - 32); | ||
508 | - tcg_gen_clz_i32(t, TCGV_LOW(arg1), t); | ||
509 | + TCGv_i32 t = tcg_temp_new_i32(); | ||
510 | + tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32); | ||
511 | tcg_gen_addi_i32(t, t, 32); | ||
512 | tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t); | ||
513 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | ||
514 | tcg_temp_free_i32(t); | ||
515 | } else { | ||
516 | - TCGv_i64 t = tcg_const_i64(arg2); | ||
517 | - tcg_gen_clz_i64(ret, arg1, t); | ||
518 | - tcg_temp_free_i64(t); | ||
519 | + TCGv_i64 t0 = tcg_const_i64(arg2); | ||
520 | + tcg_gen_clz_i64(ret, arg1, t0); | ||
521 | + tcg_temp_free_i64(t0); | ||
522 | } | ||
523 | } | ||
524 | |||
525 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) | ||
526 | tcg_gen_clzi_i64(t, t, 64); | ||
527 | tcg_gen_xori_i64(t, t, 63); | ||
528 | } | ||
529 | - z = tcg_const_i64(0); | ||
530 | + z = tcg_constant_i64(0); | ||
531 | tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t); | ||
532 | tcg_temp_free_i64(t); | ||
533 | tcg_temp_free_i64(z); | ||
534 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) | ||
535 | if (TCG_TARGET_REG_BITS == 32 | ||
536 | && TCG_TARGET_HAS_ctz_i32 | ||
537 | && arg2 <= 0xffffffffu) { | ||
538 | - TCGv_i32 t32 = tcg_const_i32((uint32_t)arg2 - 32); | ||
539 | - tcg_gen_ctz_i32(t32, TCGV_HIGH(arg1), t32); | ||
540 | + TCGv_i32 t32 = tcg_temp_new_i32(); | ||
541 | + tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32); | ||
542 | tcg_gen_addi_i32(t32, t32, 32); | ||
543 | tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32); | ||
544 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); | ||
545 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) | ||
546 | tcg_gen_ctpop_i64(ret, t); | ||
547 | tcg_temp_free_i64(t); | ||
548 | } else { | ||
549 | - TCGv_i64 t64 = tcg_const_i64(arg2); | ||
550 | - tcg_gen_ctz_i64(ret, arg1, t64); | ||
551 | - tcg_temp_free_i64(t64); | ||
552 | + TCGv_i64 t0 = tcg_const_i64(arg2); | ||
553 | + tcg_gen_ctz_i64(ret, arg1, t0); | ||
554 | + tcg_temp_free_i64(t0); | ||
555 | } | ||
556 | } | ||
557 | |||
558 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) | ||
559 | if (arg2 == 0) { | ||
560 | tcg_gen_mov_i64(ret, arg1); | ||
561 | } else if (TCG_TARGET_HAS_rot_i64) { | ||
562 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
563 | - tcg_gen_rotl_i64(ret, arg1, t0); | ||
564 | - tcg_temp_free_i64(t0); | ||
565 | + tcg_gen_rotl_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
566 | } else { | ||
567 | TCGv_i64 t0, t1; | ||
568 | t0 = tcg_temp_new_i64(); | ||
569 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg, | ||
570 | tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); | ||
571 | } else if (TCG_TARGET_HAS_deposit_i64 | ||
572 | && TCG_TARGET_deposit_i64_valid(ofs, len)) { | ||
573 | - TCGv_i64 zero = tcg_const_i64(0); | ||
574 | + TCGv_i64 zero = tcg_constant_i64(0); | ||
575 | tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len); | ||
576 | - tcg_temp_free_i64(zero); | ||
577 | } else { | ||
578 | if (TCG_TARGET_REG_BITS == 32) { | ||
579 | if (ofs >= 32) { | ||
580 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, | ||
581 | |||
582 | #ifdef CONFIG_SOFTMMU | ||
583 | { | ||
584 | - TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx)); | ||
585 | - gen(retv, cpu_env, addr, cmpv, newv, oi); | ||
586 | - tcg_temp_free_i32(oi); | ||
587 | + TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx); | ||
588 | + gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); | ||
589 | } | ||
590 | #else | ||
591 | gen(retv, cpu_env, addr, cmpv, newv); | ||
592 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, | ||
593 | |||
594 | #ifdef CONFIG_SOFTMMU | ||
595 | { | ||
596 | - TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx)); | ||
597 | - gen(retv, cpu_env, addr, cmpv, newv, oi); | ||
598 | - tcg_temp_free_i32(oi); | ||
599 | + TCGMemOpIdx oi = make_memop_idx(memop, idx); | ||
600 | + gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); | ||
601 | } | ||
602 | #else | ||
603 | gen(retv, cpu_env, addr, cmpv, newv); | ||
604 | @@ -XXX,XX +XXX,XX @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, | ||
605 | |||
606 | #ifdef CONFIG_SOFTMMU | ||
607 | { | ||
608 | - TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx)); | ||
609 | - gen(ret, cpu_env, addr, val, oi); | ||
610 | - tcg_temp_free_i32(oi); | ||
611 | + TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx); | ||
612 | + gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); | ||
613 | } | ||
614 | #else | ||
615 | gen(ret, cpu_env, addr, val); | ||
616 | @@ -XXX,XX +XXX,XX @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, | ||
617 | |||
618 | #ifdef CONFIG_SOFTMMU | ||
619 | { | ||
620 | - TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx)); | ||
621 | - gen(ret, cpu_env, addr, val, oi); | ||
622 | - tcg_temp_free_i32(oi); | ||
623 | + TCGMemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx); | ||
624 | + gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); | ||
625 | } | ||
626 | #else | ||
627 | gen(ret, cpu_env, addr, val); | ||
628 | -- | 87 | -- |
629 | 2.25.1 | 88 | 2.34.1 |
630 | |||
1 | This propagates the earlier widening of TCGTemp.val to 64 bits. | 1 | The two concrete type functions already existed, merely needing
---|---|---|---|
2 | In addition, it will be required for vector constants. | 2 | a bit of hardening against invalid inputs.
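A hedged sketch of the now-public interface (editor's note; dst and src stand for hypothetical TCGv_i64 temporaries). One MemOp-driven call subsumes the fixed-width helpers, and after the hardening an invalid MemOp hits g_assert_not_reached():

    tcg_gen_ext_i64(dst, src, MO_SW);   /* same as tcg_gen_ext16s_i64(dst, src) */
    tcg_gen_ext_i64(dst, src, MO_UL);   /* same as tcg_gen_ext32u_i64(dst, src) */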
3 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | --- | 6 | --- |
6 | tcg/optimize.c | 40 +++++++++++++++++++++------------------- | 7 | include/tcg/tcg-op-common.h | 2 ++ |
7 | 1 file changed, 21 insertions(+), 19 deletions(-) | 8 | include/tcg/tcg-op.h | 2 ++ |
9 | tcg/tcg-op-ldst.c | 14 ++++++++++---- | ||
10 | 3 files changed, 14 insertions(+), 4 deletions(-) | ||
8 | 11 | ||
9 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 12 | diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h |
10 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
11 | --- a/tcg/optimize.c | 14 | --- a/include/tcg/tcg-op-common.h |
12 | +++ b/tcg/optimize.c | 15 | +++ b/include/tcg/tcg-op-common.h |
13 | @@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo { | 16 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg); |
14 | bool is_const; | 17 | void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg); |
15 | TCGTemp *prev_copy; | 18 | void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg); |
16 | TCGTemp *next_copy; | 19 | void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg); |
17 | - tcg_target_ulong val; | 20 | +void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc); |
18 | - tcg_target_ulong mask; | 21 | void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags); |
19 | + uint64_t val; | 22 | void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg); |
20 | + uint64_t mask; | 23 | void tcg_gen_hswap_i32(TCGv_i32 ret, TCGv_i32 arg); |
21 | } TempOptInfo; | 24 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg); |
22 | 25 | void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg); | |
23 | static inline TempOptInfo *ts_info(TCGTemp *ts) | 26 | void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg); |
24 | @@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2) | 27 | void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg); |
25 | return ts_are_copies(arg_temp(arg1), arg_temp(arg2)); | 28 | +void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc); |
29 | void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags); | ||
30 | void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags); | ||
31 | void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg); | ||
32 | diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h | ||
33 | index XXXXXXX..XXXXXXX 100644 | ||
34 | --- a/include/tcg/tcg-op.h | ||
35 | +++ b/include/tcg/tcg-op.h | ||
36 | @@ -XXX,XX +XXX,XX @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64) | ||
37 | #define tcg_gen_ext16s_tl tcg_gen_ext16s_i64 | ||
38 | #define tcg_gen_ext32u_tl tcg_gen_ext32u_i64 | ||
39 | #define tcg_gen_ext32s_tl tcg_gen_ext32s_i64 | ||
40 | +#define tcg_gen_ext_tl tcg_gen_ext_i64 | ||
41 | #define tcg_gen_bswap16_tl tcg_gen_bswap16_i64 | ||
42 | #define tcg_gen_bswap32_tl tcg_gen_bswap32_i64 | ||
43 | #define tcg_gen_bswap64_tl tcg_gen_bswap64_i64 | ||
44 | @@ -XXX,XX +XXX,XX @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64) | ||
45 | #define tcg_gen_ext16s_tl tcg_gen_ext16s_i32 | ||
46 | #define tcg_gen_ext32u_tl tcg_gen_mov_i32 | ||
47 | #define tcg_gen_ext32s_tl tcg_gen_mov_i32 | ||
48 | +#define tcg_gen_ext_tl tcg_gen_ext_i32 | ||
49 | #define tcg_gen_bswap16_tl tcg_gen_bswap16_i32 | ||
50 | #define tcg_gen_bswap32_tl(D, S, F) tcg_gen_bswap32_i32(D, S) | ||
51 | #define tcg_gen_bswap_tl tcg_gen_bswap32_i32 | ||
52 | diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/tcg/tcg-op-ldst.c | ||
55 | +++ b/tcg/tcg-op-ldst.c | ||
56 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx, | ||
57 | tcg_gen_qemu_st_i128_int(val, addr, idx, memop); | ||
26 | } | 58 | } |
27 | 59 | ||
28 | -static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val) | 60 | -static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) |
29 | +static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, uint64_t val) | 61 | +void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) |
30 | { | 62 | { |
31 | const TCGOpDef *def; | 63 | switch (opc & MO_SSIZE) { |
32 | TCGOpcode new_op; | 64 | case MO_SB: |
33 | - tcg_target_ulong mask; | 65 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) |
34 | + uint64_t mask; | 66 | case MO_UW: |
35 | TempOptInfo *di = arg_info(dst); | 67 | tcg_gen_ext16u_i32(ret, val); |
36 | 68 | break; | |
37 | def = &tcg_op_defs[op->opc]; | 69 | - default: |
38 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | 70 | + case MO_UL: |
39 | const TCGOpDef *def; | 71 | + case MO_SL: |
40 | TempOptInfo *di; | 72 | tcg_gen_mov_i32(ret, val); |
41 | TempOptInfo *si; | 73 | break; |
42 | - tcg_target_ulong mask; | 74 | + default: |
43 | + uint64_t mask; | 75 | + g_assert_not_reached(); |
44 | TCGOpcode new_op; | ||
45 | |||
46 | if (ts_are_copies(dst_ts, src_ts)) { | ||
47 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | ||
48 | } | 76 | } |
49 | } | 77 | } |
50 | 78 | ||
51 | -static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) | 79 | -static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) |
52 | +static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) | 80 | +void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) |
53 | { | 81 | { |
54 | uint64_t l64, h64; | 82 | switch (opc & MO_SSIZE) { |
55 | 83 | case MO_SB: | |
56 | @@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) | 84 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) |
85 | case MO_UL: | ||
86 | tcg_gen_ext32u_i64(ret, val); | ||
87 | break; | ||
88 | - default: | ||
89 | + case MO_UQ: | ||
90 | + case MO_SQ: | ||
91 | tcg_gen_mov_i64(ret, val); | ||
92 | break; | ||
93 | + default: | ||
94 | + g_assert_not_reached(); | ||
57 | } | 95 | } |
58 | } | 96 | } |
59 | 97 | ||
60 | -static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y) | ||
61 | +static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y) | ||
62 | { | ||
63 | const TCGOpDef *def = &tcg_op_defs[op]; | ||
64 | - TCGArg res = do_constant_folding_2(op, x, y); | ||
65 | + uint64_t res = do_constant_folding_2(op, x, y); | ||
66 | if (!(def->flags & TCG_OPF_64BIT)) { | ||
67 | res = (int32_t)res; | ||
68 | } | ||
69 | @@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c) | ||
70 | static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x, | ||
71 | TCGArg y, TCGCond c) | ||
72 | { | ||
73 | - tcg_target_ulong xv = arg_info(x)->val; | ||
74 | - tcg_target_ulong yv = arg_info(y)->val; | ||
75 | + uint64_t xv = arg_info(x)->val; | ||
76 | + uint64_t yv = arg_info(y)->val; | ||
77 | + | ||
78 | if (arg_is_const(x) && arg_is_const(y)) { | ||
79 | const TCGOpDef *def = &tcg_op_defs[op]; | ||
80 | tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR)); | ||
81 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
82 | infos = tcg_malloc(sizeof(TempOptInfo) * nb_temps); | ||
83 | |||
84 | QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { | ||
85 | - tcg_target_ulong mask, partmask, affected; | ||
86 | + uint64_t mask, partmask, affected, tmp; | ||
87 | int nb_oargs, nb_iargs, i; | ||
88 | - TCGArg tmp; | ||
89 | TCGOpcode opc = op->opc; | ||
90 | const TCGOpDef *def = &tcg_op_defs[opc]; | ||
91 | |||
92 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
93 | |||
94 | CASE_OP_32_64(extract2): | ||
95 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | ||
96 | - TCGArg v1 = arg_info(op->args[1])->val; | ||
97 | - TCGArg v2 = arg_info(op->args[2])->val; | ||
98 | + uint64_t v1 = arg_info(op->args[1])->val; | ||
99 | + uint64_t v2 = arg_info(op->args[2])->val; | ||
100 | + int shr = op->args[3]; | ||
101 | |||
102 | if (opc == INDEX_op_extract2_i64) { | ||
103 | - tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3])); | ||
104 | + tmp = (v1 >> shr) | (v2 << (64 - shr)); | ||
105 | } else { | ||
106 | - tmp = (int32_t)(((uint32_t)v1 >> op->args[3]) | | ||
107 | - ((uint32_t)v2 << (32 - op->args[3]))); | ||
108 | + tmp = (int32_t)(((uint32_t)v1 >> shr) | | ||
109 | + ((uint32_t)v2 << (32 - shr))); | ||
110 | } | ||
111 | tcg_opt_gen_movi(s, op, op->args[0], tmp); | ||
112 | break; | ||
113 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
114 | break; | ||
115 | } | ||
116 | if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { | ||
117 | - tcg_target_ulong tv = arg_info(op->args[3])->val; | ||
118 | - tcg_target_ulong fv = arg_info(op->args[4])->val; | ||
119 | + uint64_t tv = arg_info(op->args[3])->val; | ||
120 | + uint64_t fv = arg_info(op->args[4])->val; | ||
121 | TCGCond cond = op->args[5]; | ||
122 | + | ||
123 | if (fv == 1 && tv == 0) { | ||
124 | cond = tcg_invert_cond(cond); | ||
125 | } else if (!(tv == 1 && fv == 0)) { | ||
126 | -- | 98 | -- |
127 | 2.25.1 | 99 | 2.34.1 |
128 | 100 | ||
1 | Begin conversion of constraints to pre-validated, read-only entities. | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | To begin, create a simple method by which sets of TCGTargetOpDef | ||
3 | structures may be declared and used. This simplifies each host's | ||
4 | tcg_target_op_def function and ensures that we have a collected | ||
5 | set of constraints. | ||
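An editor's sketch of the intended use (the backend directory and opcode below are illustrative only). Each constraint set is written once in the target's tcg-target-constr.h; the first expansion of the macro defines the structure, and the re-definition in tcg-constr.c.inc makes the same spelling return a pointer to it:

    /* tcg/foo/tcg-target-constr.h (hypothetical backend) */
    C_O1_I2(r, r, ri)   /* defines static const TCGTargetOpDef c_o1_i2_r_r_ri */

    /* tcg/foo/tcg-target.c.inc */
    #include "../tcg-constr.c.inc"

    static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
    {
        switch (op) {
        case INDEX_op_add_i32:
            return C_O1_I2(r, r, ri);   /* now expands to &c_o1_i2_r_r_ri */
        default:
            return NULL;
        }
    }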
6 | 2 | ||
3 | This will also come in handy later for "less than" comparisons. | ||
4 | |||
5 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
6 | Message-Id: <03ba02fd-fade-4409-be16-2f81a5690b4c@redhat.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 9 | --- |
9 | tcg/tcg-constr.c.inc | 108 +++++++++++++++++++++++++++++++++++++++++++ | 10 | include/exec/target_long.h | 2 ++ |
10 | 1 file changed, 108 insertions(+) | 11 | 1 file changed, 2 insertions(+) |
11 | create mode 100644 tcg/tcg-constr.c.inc | ||
12 | 12 | ||
13 | diff --git a/tcg/tcg-constr.c.inc b/tcg/tcg-constr.c.inc | 13 | diff --git a/include/exec/target_long.h b/include/exec/target_long.h |
14 | new file mode 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
15 | index XXXXXXX..XXXXXXX | 15 | --- a/include/exec/target_long.h |
16 | --- /dev/null | 16 | +++ b/include/exec/target_long.h |
17 | +++ b/tcg/tcg-constr.c.inc | 17 | @@ -XXX,XX +XXX,XX @@ typedef uint32_t target_ulong; |
18 | @@ -XXX,XX +XXX,XX @@ | 18 | #define TARGET_FMT_lx "%08x" |
19 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | 19 | #define TARGET_FMT_ld "%d" |
20 | +/* | 20 | #define TARGET_FMT_lu "%u" |
21 | + * TCG backend data: operand constaints. | 21 | +#define MO_TL MO_32 |
22 | + * Copyright (c) 2020 Linaro | 22 | #elif TARGET_LONG_SIZE == 8 |
23 | + */ | 23 | typedef int64_t target_long; |
24 | + | 24 | typedef uint64_t target_ulong; |
25 | +/* | 25 | #define TARGET_FMT_lx "%016" PRIx64 |
26 | + * Define structures for each set of constraints. | 26 | #define TARGET_FMT_ld "%" PRId64 |
27 | + */ | 27 | #define TARGET_FMT_lu "%" PRIu64 |
28 | + | 28 | +#define MO_TL MO_64 |
29 | +#define C_PFX1(P, A) P##A | 29 | #else |
30 | +#define C_PFX2(P, A, B) P##A##_##B | 30 | #error TARGET_LONG_SIZE undefined |
31 | +#define C_PFX3(P, A, B, C) P##A##_##B##_##C | 31 | #endif |
32 | +#define C_PFX4(P, A, B, C, D) P##A##_##B##_##C##_##D | ||
33 | +#define C_PFX5(P, A, B, C, D, E) P##A##_##B##_##C##_##D##_##E | ||
34 | +#define C_PFX6(P, A, B, C, D, E, F) P##A##_##B##_##C##_##D##_##E##_##F | ||
35 | + | ||
36 | +#define C_O0_I1(I1) \ | ||
37 | + static const TCGTargetOpDef C_PFX1(c_o0_i1_, I1) \ | ||
38 | + = { .args_ct_str = { #I1 } }; | ||
39 | + | ||
40 | +#define C_O0_I2(I1, I2) \ | ||
41 | + static const TCGTargetOpDef C_PFX2(c_o0_i2_, I1, I2) \ | ||
42 | + = { .args_ct_str = { #I1, #I2 } }; | ||
43 | + | ||
44 | +#define C_O0_I3(I1, I2, I3) \ | ||
45 | + static const TCGTargetOpDef C_PFX3(c_o0_i3_, I1, I2, I3) \ | ||
46 | + = { .args_ct_str = { #I1, #I2, #I3 } }; | ||
47 | + | ||
48 | +#define C_O0_I4(I1, I2, I3, I4) \ | ||
49 | + static const TCGTargetOpDef C_PFX4(c_o0_i4_, I1, I2, I3, I4) \ | ||
50 | + = { .args_ct_str = { #I1, #I2, #I3, #I4 } }; | ||
51 | + | ||
52 | +#define C_O1_I1(O1, I1) \ | ||
53 | + static const TCGTargetOpDef C_PFX2(c_o1_i1_, O1, I1) \ | ||
54 | + = { .args_ct_str = { #O1, #I1 } }; | ||
55 | + | ||
56 | +#define C_O1_I2(O1, I1, I2) \ | ||
57 | + static const TCGTargetOpDef C_PFX3(c_o1_i2_, O1, I1, I2) \ | ||
58 | + = { .args_ct_str = { #O1, #I1, #I2 } }; | ||
59 | + | ||
60 | +#define C_O1_I3(O1, I1, I2, I3) \ | ||
61 | + static const TCGTargetOpDef C_PFX4(c_o1_i3_, O1, I1, I2, I3) \ | ||
62 | + = { .args_ct_str = { #O1, #I1, #I2, #I3 } }; | ||
63 | + | ||
64 | +#define C_O1_I4(O1, I1, I2, I3, I4) \ | ||
65 | + static const TCGTargetOpDef C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4) \ | ||
66 | + = { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } }; | ||
67 | + | ||
68 | +#define C_N1_I2(O1, I1, I2) \ | ||
69 | + static const TCGTargetOpDef C_PFX3(c_n1_i2_, O1, I1, I2) \ | ||
70 | + = { .args_ct_str = { "&" #O1, #I1, #I2 } }; | ||
71 | + | ||
72 | +#define C_O2_I1(O1, O2, I1) \ | ||
73 | + static const TCGTargetOpDef C_PFX3(c_o2_i1_, O1, O2, I1) \ | ||
74 | + = { .args_ct_str = { #O1, #O2, #I1 } }; | ||
75 | + | ||
76 | +#define C_O2_I2(O1, O2, I1, I2) \ | ||
77 | + static const TCGTargetOpDef C_PFX4(c_o2_i2_, O1, O2, I1, I2) \ | ||
78 | + = { .args_ct_str = { #O1, #O2, #I1, #I2 } }; | ||
79 | + | ||
80 | +#define C_O2_I3(O1, O2, I1, I2, I3) \ | ||
81 | + static const TCGTargetOpDef C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3) \ | ||
82 | + = { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } }; | ||
83 | + | ||
84 | +#define C_O2_I4(O1, O2, I1, I2, I3, I4) \ | ||
85 | + static const TCGTargetOpDef C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4) \ | ||
86 | + = { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } }; | ||
87 | + | ||
88 | +#include "tcg-target-constr.h" | ||
89 | + | ||
90 | + | ||
91 | +/* | ||
92 | + * Redefine the macros so that they now reference those structures. | ||
93 | + * These values should be returned from tcg_target_op_def(). | ||
94 | + */ | ||
95 | + | ||
96 | +#undef C_O0_I1 | ||
97 | +#undef C_O0_I2 | ||
98 | +#undef C_O0_I3 | ||
99 | +#undef C_O0_I4 | ||
100 | +#undef C_O1_I1 | ||
101 | +#undef C_O1_I2 | ||
102 | +#undef C_O1_I3 | ||
103 | +#undef C_O1_I4 | ||
104 | +#undef C_N1_I2 | ||
105 | +#undef C_O2_I1 | ||
106 | +#undef C_O2_I2 | ||
107 | +#undef C_O2_I3 | ||
108 | +#undef C_O2_I4 | ||
109 | + | ||
110 | +#define C_O0_I1(I1) &C_PFX1(c_o0_i1_, I1) | ||
111 | +#define C_O0_I2(I1, I2) &C_PFX2(c_o0_i2_, I1, I2) | ||
112 | +#define C_O0_I3(I1, I2, I3) &C_PFX3(c_o0_i3_, I1, I2, I3) | ||
113 | +#define C_O0_I4(I1, I2, I3, I4) &C_PFX4(c_o0_i4_, I1, I2, I3, I4) | ||
114 | + | ||
115 | +#define C_O1_I1(O1, I1) &C_PFX2(c_o1_i1_, O1, I1) | ||
116 | +#define C_O1_I2(O1, I1, I2) &C_PFX3(c_o1_i2_, O1, I1, I2) | ||
117 | +#define C_O1_I3(O1, I1, I2, I3) &C_PFX4(c_o1_i3_, O1, I1, I2, I3) | ||
118 | +#define C_O1_I4(O1, I1, I2, I3, I4) &C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4) | ||
119 | + | ||
120 | +#define C_N1_I2(O1, I1, I2) &C_PFX3(c_n1_i2_, O1, I1, I2) | ||
121 | + | ||
122 | +#define C_O2_I1(O1, O2, I1) &C_PFX3(c_o2_i1_, O1, O2, I1) | ||
123 | +#define C_O2_I2(O1, O2, I1, I2) &C_PFX4(c_o2_i2_, O1, O2, I1, I2) | ||
124 | +#define C_O2_I3(O1, O2, I1, I2, I3) &C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3) | ||
125 | +#define C_O2_I4(O1, O2, I1, I2, I3, I4) \ | ||
126 | + &C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4) | ||
127 | -- | 32 | -- |
128 | 2.25.1 | 33 | 2.34.1 |
129 | |||
1 | Prefer TEMP_CONST over anything else. | 1 | The ext_and_shift_reg helper does this plus a shift. |
---|---|---|---|
2 | The non-zero check for the shift count is redundant with
3 | the one done within tcg_gen_shli_i64. | ||
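(Editor's note: dropping the check is safe because tcg_gen_shli_i64 already turns a zero count into a plain move; its expander, quoted from this same series, reads:)

    /* inside tcg_gen_shli_i64() */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        tcg_gen_shl_i64(ret, arg1, tcg_constant_i64(arg2));
    }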
2 | 4 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
4 | --- | 6 | --- |
5 | tcg/optimize.c | 27 ++++++++++++--------------- | 7 | target/arm/tcg/translate-a64.c | 37 ++-------------------------------- |
6 | 1 file changed, 12 insertions(+), 15 deletions(-) | 8 | 1 file changed, 2 insertions(+), 35 deletions(-) |
7 | 9 | ||
8 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 10 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
9 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
10 | --- a/tcg/optimize.c | 12 | --- a/target/arm/tcg/translate-a64.c |
11 | +++ b/tcg/optimize.c | 13 | +++ b/target/arm/tcg/translate-a64.c |
12 | @@ -XXX,XX +XXX,XX @@ static void init_arg_info(TempOptInfo *infos, | 14 | @@ -XXX,XX +XXX,XX @@ static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in, |
13 | 15 | int extsize = extract32(option, 0, 2); | |
14 | static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts) | 16 | bool is_signed = extract32(option, 2, 1); |
15 | { | 17 | |
16 | - TCGTemp *i; | 18 | - if (is_signed) { |
17 | + TCGTemp *i, *g, *l; | 19 | - switch (extsize) { |
18 | 20 | - case 0: | |
19 | - /* If this is already a global, we can't do better. */ | 21 | - tcg_gen_ext8s_i64(tcg_out, tcg_in); |
20 | - if (ts->kind >= TEMP_GLOBAL) { | 22 | - break; |
21 | + /* If this is already readonly, we can't do better. */ | 23 | - case 1: |
22 | + if (temp_readonly(ts)) { | 24 | - tcg_gen_ext16s_i64(tcg_out, tcg_in); |
23 | return ts; | 25 | - break; |
24 | } | 26 | - case 2: |
25 | 27 | - tcg_gen_ext32s_i64(tcg_out, tcg_in); | |
26 | - /* Search for a global first. */ | 28 | - break; |
27 | + g = l = NULL; | 29 | - case 3: |
28 | for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) { | 30 | - tcg_gen_mov_i64(tcg_out, tcg_in); |
29 | - if (i->kind >= TEMP_GLOBAL) { | 31 | - break; |
30 | + if (temp_readonly(i)) { | 32 | - } |
31 | return i; | 33 | - } else { |
34 | - switch (extsize) { | ||
35 | - case 0: | ||
36 | - tcg_gen_ext8u_i64(tcg_out, tcg_in); | ||
37 | - break; | ||
38 | - case 1: | ||
39 | - tcg_gen_ext16u_i64(tcg_out, tcg_in); | ||
40 | - break; | ||
41 | - case 2: | ||
42 | - tcg_gen_ext32u_i64(tcg_out, tcg_in); | ||
43 | - break; | ||
44 | - case 3: | ||
45 | - tcg_gen_mov_i64(tcg_out, tcg_in); | ||
46 | - break; | ||
32 | - } | 47 | - } |
33 | - } | 48 | - } |
34 | - | 49 | - |
35 | - /* If it is a temp, search for a temp local. */ | 50 | - if (shift) { |
36 | - if (ts->kind == TEMP_NORMAL) { | 51 | - tcg_gen_shli_i64(tcg_out, tcg_out, shift); |
37 | - for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) { | 52 | - } |
38 | - if (i->kind >= TEMP_LOCAL) { | 53 | + tcg_gen_ext_i64(tcg_out, tcg_in, extsize | (is_signed ? MO_SIGN : 0)); |
39 | - return i; | 54 | + tcg_gen_shli_i64(tcg_out, tcg_out, shift); |
40 | + } else if (i->kind > ts->kind) { | ||
41 | + if (i->kind == TEMP_GLOBAL) { | ||
42 | + g = i; | ||
43 | + } else if (i->kind == TEMP_LOCAL) { | ||
44 | + l = i; | ||
45 | } | ||
46 | } | ||
47 | } | ||
48 | |||
49 | - /* Failure to find a better representation, return the same temp. */ | ||
50 | - return ts; | ||
51 | + /* If we didn't find a better representation, return the same temp. */ | ||
52 | + return g ? g : l ? l : ts; | ||
53 | } | 55 | } |
54 | 56 | ||
55 | static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2) | 57 | static inline void gen_check_sp_alignment(DisasContext *s) |
56 | -- | 58 | -- |
57 | 2.25.1 | 59 | 2.34.1 |
58 | |||
1 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 1 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
---|---|---|---|
2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
3 | --- | 3 | --- |
4 | accel/tcg/plugin-gen.c | 49 +++++++++++++++++++----------------------- | 4 | target/i386/tcg/translate.c | 28 +++------------------------- |
5 | 1 file changed, 22 insertions(+), 27 deletions(-) | 5 | 1 file changed, 3 insertions(+), 25 deletions(-) |
6 | 6 | ||
7 | diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c | 7 | diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c |
8 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
9 | --- a/accel/tcg/plugin-gen.c | 9 | --- a/target/i386/tcg/translate.c |
10 | +++ b/accel/tcg/plugin-gen.c | 10 | +++ b/target/i386/tcg/translate.c |
11 | @@ -XXX,XX +XXX,XX @@ static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op) | 11 | @@ -XXX,XX +XXX,XX @@ static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot) |
12 | if (TCG_TARGET_REG_BITS == 32) { | 12 | |
13 | /* mov_i32 */ | 13 | static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) |
14 | op = copy_op(begin_op, op, INDEX_op_mov_i32); | 14 | { |
15 | - /* movi_i32 */ | 15 | - switch (size) { |
16 | - op = copy_op(begin_op, op, INDEX_op_movi_i32); | 16 | - case MO_8: |
17 | + /* mov_i32 w/ $0 */ | 17 | - if (sign) { |
18 | + op = copy_op(begin_op, op, INDEX_op_mov_i32); | 18 | - tcg_gen_ext8s_tl(dst, src); |
19 | } else { | 19 | - } else { |
20 | /* extu_i32_i64 */ | 20 | - tcg_gen_ext8u_tl(dst, src); |
21 | op = copy_op(begin_op, op, INDEX_op_extu_i32_i64); | 21 | - } |
22 | @@ -XXX,XX +XXX,XX @@ static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op) | 22 | - return dst; |
23 | return op; | 23 | - case MO_16: |
24 | - if (sign) { | ||
25 | - tcg_gen_ext16s_tl(dst, src); | ||
26 | - } else { | ||
27 | - tcg_gen_ext16u_tl(dst, src); | ||
28 | - } | ||
29 | - return dst; | ||
30 | -#ifdef TARGET_X86_64 | ||
31 | - case MO_32: | ||
32 | - if (sign) { | ||
33 | - tcg_gen_ext32s_tl(dst, src); | ||
34 | - } else { | ||
35 | - tcg_gen_ext32u_tl(dst, src); | ||
36 | - } | ||
37 | - return dst; | ||
38 | -#endif | ||
39 | - default: | ||
40 | + if (size == MO_TL) { | ||
41 | return src; | ||
42 | } | ||
43 | + tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0)); | ||
44 | + return dst; | ||
24 | } | 45 | } |
25 | 46 | ||
26 | -static TCGOp *copy_movi_i64(TCGOp **begin_op, TCGOp *op, uint64_t v) | 47 | static void gen_extu(MemOp ot, TCGv reg) |
27 | -{ | ||
28 | - if (TCG_TARGET_REG_BITS == 32) { | ||
29 | - /* 2x movi_i32 */ | ||
30 | - op = copy_op(begin_op, op, INDEX_op_movi_i32); | ||
31 | - op->args[1] = v; | ||
32 | - | ||
33 | - op = copy_op(begin_op, op, INDEX_op_movi_i32); | ||
34 | - op->args[1] = v >> 32; | ||
35 | - } else { | ||
36 | - /* movi_i64 */ | ||
37 | - op = copy_op(begin_op, op, INDEX_op_movi_i64); | ||
38 | - op->args[1] = v; | ||
39 | - } | ||
40 | - return op; | ||
41 | -} | ||
42 | - | ||
43 | static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr) | ||
44 | { | ||
45 | if (UINTPTR_MAX == UINT32_MAX) { | ||
46 | - /* movi_i32 */ | ||
47 | - op = copy_op(begin_op, op, INDEX_op_movi_i32); | ||
48 | - op->args[1] = (uintptr_t)ptr; | ||
49 | + /* mov_i32 */ | ||
50 | + op = copy_op(begin_op, op, INDEX_op_mov_i32); | ||
51 | + op->args[1] = tcgv_i32_arg(tcg_constant_i32((uintptr_t)ptr)); | ||
52 | } else { | ||
53 | - /* movi_i64 */ | ||
54 | - op = copy_movi_i64(begin_op, op, (uint64_t)(uintptr_t)ptr); | ||
55 | + /* mov_i64 */ | ||
56 | + op = copy_op(begin_op, op, INDEX_op_mov_i64); | ||
57 | + op->args[1] = tcgv_i64_arg(tcg_constant_i64((uintptr_t)ptr)); | ||
58 | } | ||
59 | return op; | ||
60 | } | ||
61 | |||
62 | static TCGOp *copy_const_i64(TCGOp **begin_op, TCGOp *op, uint64_t v) | ||
63 | { | ||
64 | - return copy_movi_i64(begin_op, op, v); | ||
65 | + if (TCG_TARGET_REG_BITS == 32) { | ||
66 | + /* 2x mov_i32 */ | ||
67 | + op = copy_op(begin_op, op, INDEX_op_mov_i32); | ||
68 | + op->args[1] = tcgv_i32_arg(tcg_constant_i32(v)); | ||
69 | + op = copy_op(begin_op, op, INDEX_op_mov_i32); | ||
70 | + op->args[1] = tcgv_i32_arg(tcg_constant_i32(v >> 32)); | ||
71 | + } else { | ||
72 | + /* mov_i64 */ | ||
73 | + op = copy_op(begin_op, op, INDEX_op_mov_i64); | ||
74 | + op->args[1] = tcgv_i64_arg(tcg_constant_i64(v)); | ||
75 | + } | ||
76 | + return op; | ||
77 | } | ||
78 | |||
79 | static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op) | ||
80 | @@ -XXX,XX +XXX,XX @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb, | ||
81 | |||
82 | tcg_debug_assert(type == PLUGIN_GEN_CB_MEM); | ||
83 | |||
84 | - /* const_i32 == movi_i32 ("info", so it remains as is) */ | ||
85 | - op = copy_op(&begin_op, op, INDEX_op_movi_i32); | ||
86 | + /* const_i32 == mov_i32 ("info", so it remains as is) */ | ||
87 | + op = copy_op(&begin_op, op, INDEX_op_mov_i32); | ||
88 | |||
89 | /* const_ptr */ | ||
90 | op = copy_const_ptr(&begin_op, op, cb->userp); | ||
91 | -- | 48 | -- |
92 | 2.25.1 | 49 | 2.34.1 |
93 | 50 | ||
1 | We still need to check OS_{BYTE,WORD,LONG}, | ||
---|---|---|---|
2 | because m68k includes floating point in OS_*. | ||
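An editor's sketch of the resulting shape (the integer OS_* values line up with MO_8/MO_16/MO_32, while the floating-point sizes must keep trapping):

    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        tcg_gen_ext_i32(res, val, opsize | (sign ? MO_SIGN : 0));
        break;
    default:
        g_assert_not_reached();
    }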
3 | |||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
2 | --- | 6 | --- |
3 | tcg/i386/tcg-target-constr.h | 55 +++++++++++ | 7 | target/m68k/translate.c | 23 +++-------------------- |
4 | tcg/i386/tcg-target.c.inc | 187 +++++++++++++---------------------- | 8 | 1 file changed, 3 insertions(+), 20 deletions(-) |
5 | 2 files changed, 121 insertions(+), 121 deletions(-) | ||
6 | create mode 100644 tcg/i386/tcg-target-constr.h | ||
7 | 9 | ||
8 | diff --git a/tcg/i386/tcg-target-constr.h b/tcg/i386/tcg-target-constr.h | 10 | diff --git a/target/m68k/translate.c b/target/m68k/translate.c |
9 | new file mode 100644 | ||
10 | index XXXXXXX..XXXXXXX | ||
11 | --- /dev/null | ||
12 | +++ b/tcg/i386/tcg-target-constr.h | ||
13 | @@ -XXX,XX +XXX,XX @@ | ||
14 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
15 | +/* | ||
16 | + * i386 target-specific operand constraints. | 20 | - } else {
17 | + * Copyright (c) 2020 Linaro | ||
18 | + */ | ||
19 | + | ||
20 | +C_O0_I1(r) | ||
21 | + | ||
22 | +C_O0_I2(qi, r) | ||
23 | +C_O0_I2(ri, r) | ||
24 | +C_O0_I2(re, r) | ||
25 | +C_O0_I2(r, re) | ||
26 | +C_O0_I2(L, L) | ||
27 | +C_O0_I2(x, r) | ||
28 | + | ||
29 | +C_O0_I3(L, L, L) | ||
30 | + | ||
31 | +C_O0_I4(L, L, L, L) | ||
32 | +C_O0_I4(r, r, ri, ri) | ||
33 | + | ||
34 | +C_O1_I1(r, 0) | ||
35 | +C_O1_I1(r, q) | ||
36 | +C_O1_I1(r, r) | ||
37 | +C_O1_I1(r, L) | ||
38 | +C_O1_I1(x, r) | ||
39 | +C_O1_I1(x, x) | ||
40 | + | ||
41 | +C_O1_I2(r, r, re) | ||
42 | +C_O1_I2(r, 0, r) | ||
43 | +C_O1_I2(r, 0, re) | ||
44 | +C_O1_I2(r, 0, reZ) | ||
45 | +C_O1_I2(r, 0, rI) | ||
46 | +C_O1_I2(r, 0, ri) | ||
47 | +C_O1_I2(r, 0, ci) | ||
48 | +C_O1_I2(r, r, ri) | ||
49 | +C_O1_I2(Q, 0, Q) | ||
50 | +C_O1_I2(q, r, re) | ||
51 | +C_O1_I2(r, L, L) | ||
52 | +C_O1_I2(x, x, x) | ||
53 | +C_N1_I2(r, r, r) | ||
54 | +C_N1_I2(r, r, rW) | ||
55 | + | ||
56 | +C_O1_I3(x, x, x, x) | ||
57 | + | ||
58 | +C_O1_I4(r, r, re, r, 0) | ||
59 | +C_O1_I4(r, r, r, ri, ri) | ||
60 | + | ||
61 | +C_O2_I1(r, r, L) | ||
62 | + | ||
63 | +C_O2_I2(r, r, L, L) | ||
64 | +C_O2_I2(a, d, a, r) | ||
65 | + | ||
66 | +C_O2_I3(a, d, 0, 1, r) | ||
67 | + | ||
68 | +C_O2_I4(r, r, 0, 1, re, re) | ||
69 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | ||
70 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
71 | --- a/tcg/i386/tcg-target.c.inc | 12 | --- a/target/m68k/translate.c |
72 | +++ b/tcg/i386/tcg-target.c.inc | 13 | +++ b/target/m68k/translate.c |
73 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | 14 | @@ -XXX,XX +XXX,XX @@ static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign) |
74 | } | ||
75 | } | ||
76 | |||
77 | +/* Define all constraint sets. */ | ||
78 | +#include "../tcg-constr.c.inc" | ||
79 | + | ||
80 | static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
81 | { | 15 | { |
82 | - static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; | 16 | switch (opsize) { |
83 | - static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } }; | 17 | case OS_BYTE: |
84 | - static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } }; | 18 | - if (sign) { |
85 | - static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } }; | 19 | - tcg_gen_ext8s_i32(res, val); |
86 | - static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; | 20 | - } else { |
87 | - static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } }; | 21 | - tcg_gen_ext8u_i32(res, val); |
88 | - static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } }; | ||
89 | - static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } }; | ||
90 | - static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; | ||
91 | - static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } }; | ||
92 | - static const TCGTargetOpDef r_0_r = { .args_ct_str = { "r", "0", "r" } }; | ||
93 | - static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } }; | ||
94 | - static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } }; | ||
95 | - static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; | ||
96 | - static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } }; | ||
97 | - static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } }; | ||
98 | - static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } }; | ||
99 | - static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } }; | ||
100 | - static const TCGTargetOpDef r_r_L_L | ||
101 | - = { .args_ct_str = { "r", "r", "L", "L" } }; | ||
102 | - static const TCGTargetOpDef L_L_L_L | ||
103 | - = { .args_ct_str = { "L", "L", "L", "L" } }; | ||
104 | - static const TCGTargetOpDef x_x = { .args_ct_str = { "x", "x" } }; | ||
105 | - static const TCGTargetOpDef x_x_x = { .args_ct_str = { "x", "x", "x" } }; | ||
106 | - static const TCGTargetOpDef x_x_x_x | ||
107 | - = { .args_ct_str = { "x", "x", "x", "x" } }; | ||
108 | - static const TCGTargetOpDef x_r = { .args_ct_str = { "x", "r" } }; | ||
109 | - | ||
110 | switch (op) { | ||
111 | case INDEX_op_goto_ptr: | ||
112 | - return &r; | ||
113 | + return C_O0_I1(r); | ||
114 | |||
115 | case INDEX_op_ld8u_i32: | ||
116 | case INDEX_op_ld8u_i64: | ||
117 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
118 | case INDEX_op_ld32u_i64: | ||
119 | case INDEX_op_ld32s_i64: | ||
120 | case INDEX_op_ld_i64: | ||
121 | - return &r_r; | ||
122 | + return C_O1_I1(r, r); | ||
123 | |||
124 | case INDEX_op_st8_i32: | ||
125 | case INDEX_op_st8_i64: | ||
126 | - return &qi_r; | ||
127 | + return C_O0_I2(qi, r); | ||
128 | + | ||
129 | case INDEX_op_st16_i32: | ||
130 | case INDEX_op_st16_i64: | ||
131 | case INDEX_op_st_i32: | ||
132 | case INDEX_op_st32_i64: | ||
133 | - return &ri_r; | ||
134 | + return C_O0_I2(ri, r); | ||
135 | + | ||
136 | case INDEX_op_st_i64: | ||
137 | - return &re_r; | ||
138 | + return C_O0_I2(re, r); | ||
139 | |||
140 | case INDEX_op_add_i32: | ||
141 | case INDEX_op_add_i64: | ||
142 | - return &r_r_re; | ||
143 | + return C_O1_I2(r, r, re); | ||
144 | + | ||
145 | case INDEX_op_sub_i32: | ||
146 | case INDEX_op_sub_i64: | ||
147 | case INDEX_op_mul_i32: | ||
148 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
149 | case INDEX_op_or_i64: | ||
150 | case INDEX_op_xor_i32: | ||
151 | case INDEX_op_xor_i64: | ||
152 | - return &r_0_re; | ||
153 | + return C_O1_I2(r, 0, re); | ||
154 | |||
155 | case INDEX_op_and_i32: | ||
156 | case INDEX_op_and_i64: | ||
157 | - { | ||
158 | - static const TCGTargetOpDef and | ||
159 | - = { .args_ct_str = { "r", "0", "reZ" } }; | ||
160 | - return &and; | ||
161 | - } | 22 | - } |
162 | - break; | 23 | - break; |
163 | + return C_O1_I2(r, 0, reZ); | 24 | case OS_WORD: |
164 | + | 25 | - if (sign) { |
165 | case INDEX_op_andc_i32: | 26 | - tcg_gen_ext16s_i32(res, val); |
166 | case INDEX_op_andc_i64: | 27 | - } else { |
167 | - { | 28 | - tcg_gen_ext16u_i32(res, val); |
168 | - static const TCGTargetOpDef andc | ||
169 | - = { .args_ct_str = { "r", "r", "rI" } }; | ||
170 | - return &andc; | ||
171 | - } | 29 | - } |
172 | - break; | 30 | - break; |
173 | + return C_O1_I2(r, 0, rI); | 31 | case OS_LONG: |
174 | 32 | - tcg_gen_mov_i32(res, val); | |
175 | case INDEX_op_shl_i32: | 33 | + tcg_gen_ext_i32(res, val, opsize | (sign ? MO_SIGN : 0)); |
176 | case INDEX_op_shl_i64: | 34 | break; |
177 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
178 | case INDEX_op_shr_i64: | ||
179 | case INDEX_op_sar_i32: | ||
180 | case INDEX_op_sar_i64: | ||
181 | - return have_bmi2 ? &r_r_ri : &r_0_ci; | ||
182 | + return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci); | ||
183 | + | ||
184 | case INDEX_op_rotl_i32: | ||
185 | case INDEX_op_rotl_i64: | ||
186 | case INDEX_op_rotr_i32: | ||
187 | case INDEX_op_rotr_i64: | ||
188 | - return &r_0_ci; | ||
189 | + return C_O1_I2(r, 0, ci); | ||
190 | |||
191 | case INDEX_op_brcond_i32: | ||
192 | case INDEX_op_brcond_i64: | ||
193 | - return &r_re; | ||
194 | + return C_O0_I2(r, re); | ||
195 | |||
196 | case INDEX_op_bswap16_i32: | ||
197 | case INDEX_op_bswap16_i64: | ||
198 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
199 | case INDEX_op_not_i32: | ||
200 | case INDEX_op_not_i64: | ||
201 | case INDEX_op_extrh_i64_i32: | ||
202 | - return &r_0; | ||
203 | + return C_O1_I1(r, 0); | ||
204 | |||
205 | case INDEX_op_ext8s_i32: | ||
206 | case INDEX_op_ext8s_i64: | ||
207 | case INDEX_op_ext8u_i32: | ||
208 | case INDEX_op_ext8u_i64: | ||
209 | - return &r_q; | ||
210 | + return C_O1_I1(r, q); | ||
211 | + | ||
212 | case INDEX_op_ext16s_i32: | ||
213 | case INDEX_op_ext16s_i64: | ||
214 | case INDEX_op_ext16u_i32: | ||
215 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
216 | case INDEX_op_sextract_i32: | ||
217 | case INDEX_op_ctpop_i32: | ||
218 | case INDEX_op_ctpop_i64: | ||
219 | - return &r_r; | ||
220 | + return C_O1_I1(r, r); | ||
221 | + | ||
222 | case INDEX_op_extract2_i32: | ||
223 | case INDEX_op_extract2_i64: | ||
224 | - return &r_0_r; | ||
225 | + return C_O1_I2(r, 0, r); | ||
226 | |||
227 | case INDEX_op_deposit_i32: | ||
228 | case INDEX_op_deposit_i64: | ||
229 | - { | ||
230 | - static const TCGTargetOpDef dep | ||
231 | - = { .args_ct_str = { "Q", "0", "Q" } }; | ||
232 | - return &dep; | ||
233 | - } | ||
234 | + return C_O1_I2(Q, 0, Q); | ||
235 | + | ||
236 | case INDEX_op_setcond_i32: | ||
237 | case INDEX_op_setcond_i64: | ||
238 | - { | ||
239 | - static const TCGTargetOpDef setc | ||
240 | - = { .args_ct_str = { "q", "r", "re" } }; | ||
241 | - return &setc; | ||
242 | - } | ||
243 | + return C_O1_I2(q, r, re); | ||
244 | + | ||
245 | case INDEX_op_movcond_i32: | ||
246 | case INDEX_op_movcond_i64: | ||
247 | - { | ||
248 | - static const TCGTargetOpDef movc | ||
249 | - = { .args_ct_str = { "r", "r", "re", "r", "0" } }; | ||
250 | - return &movc; | ||
251 | - } | ||
252 | + return C_O1_I4(r, r, re, r, 0); | ||
253 | + | ||
254 | case INDEX_op_div2_i32: | ||
255 | case INDEX_op_div2_i64: | ||
256 | case INDEX_op_divu2_i32: | ||
257 | case INDEX_op_divu2_i64: | ||
258 | - { | ||
259 | - static const TCGTargetOpDef div2 | ||
260 | - = { .args_ct_str = { "a", "d", "0", "1", "r" } }; | ||
261 | - return &div2; | ||
262 | - } | ||
263 | + return C_O2_I3(a, d, 0, 1, r); | ||
264 | + | ||
265 | case INDEX_op_mulu2_i32: | ||
266 | case INDEX_op_mulu2_i64: | ||
267 | case INDEX_op_muls2_i32: | ||
268 | case INDEX_op_muls2_i64: | ||
269 | - { | ||
270 | - static const TCGTargetOpDef mul2 | ||
271 | - = { .args_ct_str = { "a", "d", "a", "r" } }; | ||
272 | - return &mul2; | ||
273 | - } | ||
274 | + return C_O2_I2(a, d, a, r); | ||
275 | + | ||
276 | case INDEX_op_add2_i32: | ||
277 | case INDEX_op_add2_i64: | ||
278 | case INDEX_op_sub2_i32: | ||
279 | case INDEX_op_sub2_i64: | ||
280 | - { | ||
281 | - static const TCGTargetOpDef arith2 | ||
282 | - = { .args_ct_str = { "r", "r", "0", "1", "re", "re" } }; | ||
283 | - return &arith2; | ||
284 | - } | ||
285 | + return C_O2_I4(r, r, 0, 1, re, re); | ||
286 | + | ||
287 | case INDEX_op_ctz_i32: | ||
288 | case INDEX_op_ctz_i64: | ||
289 | - { | ||
290 | - static const TCGTargetOpDef ctz[2] = { | ||
291 | - { .args_ct_str = { "&r", "r", "r" } }, | ||
292 | - { .args_ct_str = { "&r", "r", "rW" } }, | ||
293 | - }; | ||
294 | - return &ctz[have_bmi1]; | ||
295 | - } | ||
296 | + return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r); | ||
297 | + | ||
298 | case INDEX_op_clz_i32: | ||
299 | case INDEX_op_clz_i64: | ||
300 | - { | ||
301 | - static const TCGTargetOpDef clz[2] = { | ||
302 | - { .args_ct_str = { "&r", "r", "r" } }, | ||
303 | - { .args_ct_str = { "&r", "r", "rW" } }, | ||
304 | - }; | ||
305 | - return &clz[have_lzcnt]; | ||
306 | - } | ||
307 | + return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r); | ||
308 | |||
309 | case INDEX_op_qemu_ld_i32: | ||
310 | - return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L; | ||
311 | + return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS | ||
312 | + ? C_O1_I1(r, L) : C_O1_I2(r, L, L)); | ||
313 | + | ||
314 | case INDEX_op_qemu_st_i32: | ||
315 | - return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L; | ||
316 | + return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS | ||
317 | + ? C_O0_I2(L, L) : C_O0_I3(L, L, L)); | ||
318 | + | ||
319 | case INDEX_op_qemu_ld_i64: | ||
320 | - return (TCG_TARGET_REG_BITS == 64 ? &r_L | ||
321 | - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L | ||
322 | - : &r_r_L_L); | ||
323 | + return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) | ||
324 | + : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L) | ||
325 | + : C_O2_I2(r, r, L, L)); | ||
326 | + | ||
327 | case INDEX_op_qemu_st_i64: | ||
328 | - return (TCG_TARGET_REG_BITS == 64 ? &L_L | ||
329 | - : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L_L | ||
330 | - : &L_L_L_L); | ||
331 | + return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) | ||
332 | + : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(L, L, L) | ||
333 | + : C_O0_I4(L, L, L, L)); | ||
334 | |||
335 | case INDEX_op_brcond2_i32: | ||
336 | - { | ||
337 | - static const TCGTargetOpDef b2 | ||
338 | - = { .args_ct_str = { "r", "r", "ri", "ri" } }; | ||
339 | - return &b2; | ||
340 | - } | ||
341 | + return C_O0_I4(r, r, ri, ri); | ||
342 | + | ||
343 | case INDEX_op_setcond2_i32: | ||
344 | - { | ||
345 | - static const TCGTargetOpDef s2 | ||
346 | - = { .args_ct_str = { "r", "r", "r", "ri", "ri" } }; | ||
347 | - return &s2; | ||
348 | - } | ||
349 | + return C_O1_I4(r, r, r, ri, ri); | ||
350 | |||
351 | case INDEX_op_ld_vec: | ||
352 | - case INDEX_op_st_vec: | ||
353 | case INDEX_op_dupm_vec: | ||
354 | - return &x_r; | ||
355 | + return C_O1_I1(x, r); | ||
356 | + | ||
357 | + case INDEX_op_st_vec: | ||
358 | + return C_O0_I2(x, r); | ||
359 | |||
360 | case INDEX_op_add_vec: | ||
361 | case INDEX_op_sub_vec: | ||
362 | @@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) | ||
363 | #if TCG_TARGET_REG_BITS == 32 | ||
364 | case INDEX_op_dup2_vec: | ||
365 | #endif | ||
366 | - return &x_x_x; | ||
367 | + return C_O1_I2(x, x, x); | ||
368 | + | ||
369 | case INDEX_op_abs_vec: | ||
370 | case INDEX_op_dup_vec: | ||
371 | case INDEX_op_shli_vec: | ||
372 | case INDEX_op_shri_vec: | ||
373 | case INDEX_op_sari_vec: | ||
374 | case INDEX_op_x86_psrldq_vec: | ||
375 | - return &x_x; | ||
376 | + return C_O1_I1(x, x); | ||
377 | + | ||
378 | case INDEX_op_x86_vpblendvb_vec: | ||
379 | - return &x_x_x_x; | ||
380 | + return C_O1_I3(x, x, x, x); | ||
381 | |||
382 | default: | 35 | default: |
383 | break; | 36 | g_assert_not_reached(); |
37 | @@ -XXX,XX +XXX,XX @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode, | ||
38 | tmp = tcg_temp_new(); | ||
39 | switch (opsize) { | ||
40 | case OS_BYTE: | ||
41 | - tcg_gen_ext8s_i32(tmp, reg); | ||
42 | - gen_helper_exts32(tcg_env, fp, tmp); | ||
43 | - break; | ||
44 | case OS_WORD: | ||
45 | - tcg_gen_ext16s_i32(tmp, reg); | ||
46 | - gen_helper_exts32(tcg_env, fp, tmp); | ||
47 | - break; | ||
48 | case OS_LONG: | ||
49 | - gen_helper_exts32(tcg_env, fp, reg); | ||
50 | + tcg_gen_ext_i32(tmp, reg, opsize | MO_SIGN); | ||
51 | + gen_helper_exts32(tcg_env, fp, tmp); | ||
52 | break; | ||
53 | case OS_SINGLE: | ||
54 | gen_helper_extf32(tcg_env, fp, reg); | ||
384 | -- | 55 | -- |
385 | 2.25.1 | 56 | 2.34.1 |
386 | 57 | ||
387 | 58 | diff view generated by jsdifflib |
1 | The definition of INDEX_op_dupi_vec is that it operates on | 1 | Reviewed-by: Yoshinori Sato <ysato@users.sourceforge.jp> |
---|---|---|---|
2 | units of tcg_target_ulong -- on a 32-bit host, 32 bits. It does | ||
3 | not work to use this for a uint64_t value that happens to be | ||
4 | small enough to fit in tcg_target_ulong. | ||
5 | |||
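As a worked example of the new test (values invented for illustration):
dup_const(MO_32, a) replicates the low 32 bits of a across all 64 bits,
so it equals a exactly when both halves already match:

    uint64_t a = 0xdeadbeefdeadbeefull;
    uint64_t b = 0x00000001deadbeefull;
    assert(a == dup_const(MO_32, a)); /* halves match: MO_32 dupi is ok */
    assert(b != dup_const(MO_32, b)); /* halves differ: use the i64 path */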
6 | Fixes: d2fd745fe8b | ||
7 | Fixes: db432672dc5 | ||
8 | Cc: qemu-stable@nongnu.org | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 4 | --- |
11 | tcg/tcg-op-vec.c | 12 ++++++++---- | 5 | target/rx/translate.c | 11 +++-------- |
12 | 1 file changed, 8 insertions(+), 4 deletions(-) | 6 | 1 file changed, 3 insertions(+), 8 deletions(-) |
13 | 7 | ||
14 | diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c | 8 | diff --git a/target/rx/translate.c b/target/rx/translate.c |
15 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/tcg/tcg-op-vec.c | 10 | --- a/target/rx/translate.c |
17 | +++ b/tcg/tcg-op-vec.c | 11 | +++ b/target/rx/translate.c |
18 | @@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m) | 12 | @@ -XXX,XX +XXX,XX @@ static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a) |
19 | 13 | /* mov.<bwl> rs,rd */ | |
20 | void tcg_gen_dup64i_vec(TCGv_vec r, uint64_t a) | 14 | static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a) |
21 | { | 15 | { |
22 | - if (TCG_TARGET_REG_BITS == 32 && a == deposit64(a, 32, 32, a)) { | 16 | - static void (* const mov[])(TCGv ret, TCGv arg) = { |
23 | - do_dupi_vec(r, MO_32, a); | 17 | - tcg_gen_ext8s_i32, tcg_gen_ext16s_i32, tcg_gen_mov_i32, |
24 | - } else if (TCG_TARGET_REG_BITS == 64 || a == (uint64_t)(int32_t)a) { | 18 | - }; |
25 | + if (TCG_TARGET_REG_BITS == 64) { | 19 | TCGv tmp, mem, addr; |
26 | do_dupi_vec(r, MO_64, a); | 20 | + |
27 | + } else if (a == dup_const(MO_32, a)) { | 21 | if (a->lds == 3 && a->ldd == 3) { |
28 | + do_dupi_vec(r, MO_32, a); | 22 | /* mov.<bwl> rs,rd */ |
29 | } else { | 23 | - mov[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]); |
30 | TCGv_i64 c = tcg_const_i64(a); | 24 | + tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz | MO_SIGN); |
31 | tcg_gen_dup_i64_vec(MO_64, r, c); | 25 | return true; |
32 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_dup8i_vec(TCGv_vec r, uint32_t a) | 26 | } |
33 | 27 | ||
34 | void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a) | 28 | @@ -XXX,XX +XXX,XX @@ static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a) |
29 | /* movu.<bw> rs,rd */ | ||
30 | static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a) | ||
35 | { | 31 | { |
36 | - do_dupi_vec(r, MO_REG, dup_const(vece, a)); | 32 | - static void (* const ext[])(TCGv ret, TCGv arg) = { |
37 | + if (vece == MO_64) { | 33 | - tcg_gen_ext8u_i32, tcg_gen_ext16u_i32, |
38 | + tcg_gen_dup64i_vec(r, a); | 34 | - }; |
39 | + } else { | 35 | - ext[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]); |
40 | + do_dupi_vec(r, MO_REG, dup_const(vece, a)); | 36 | + tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz); |
41 | + } | 37 | return true; |
42 | } | 38 | } |
43 | 39 | ||
44 | void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec r, TCGv_i64 a) | ||
45 | -- | 40 | -- |
46 | 2.25.1 | 41 | 2.34.1 |
47 | 42 | ||
Deleted patch | |||
---|---|---|---|
1 | The cmp_vec opcode is mandatory; this symbol is unused. | ||
2 | 1 | ||
3 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
4 | --- | ||
5 | tcg/aarch64/tcg-target.h | 1 - | ||
6 | tcg/i386/tcg-target.h | 1 - | ||
7 | tcg/ppc/tcg-target.h | 1 - | ||
8 | 3 files changed, 3 deletions(-) | ||
9 | |||
10 | diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/tcg/aarch64/tcg-target.h | ||
13 | +++ b/tcg/aarch64/tcg-target.h | ||
14 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
15 | #define TCG_TARGET_HAS_shi_vec 1 | ||
16 | #define TCG_TARGET_HAS_shs_vec 0 | ||
17 | #define TCG_TARGET_HAS_shv_vec 1 | ||
18 | -#define TCG_TARGET_HAS_cmp_vec 1 | ||
19 | #define TCG_TARGET_HAS_mul_vec 1 | ||
20 | #define TCG_TARGET_HAS_sat_vec 1 | ||
21 | #define TCG_TARGET_HAS_minmax_vec 1 | ||
22 | diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/tcg/i386/tcg-target.h | ||
25 | +++ b/tcg/i386/tcg-target.h | ||
26 | @@ -XXX,XX +XXX,XX @@ extern bool have_avx2; | ||
27 | #define TCG_TARGET_HAS_shi_vec 1 | ||
28 | #define TCG_TARGET_HAS_shs_vec 1 | ||
29 | #define TCG_TARGET_HAS_shv_vec have_avx2 | ||
30 | -#define TCG_TARGET_HAS_cmp_vec 1 | ||
31 | #define TCG_TARGET_HAS_mul_vec 1 | ||
32 | #define TCG_TARGET_HAS_sat_vec 1 | ||
33 | #define TCG_TARGET_HAS_minmax_vec 1 | ||
34 | diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h | ||
35 | index XXXXXXX..XXXXXXX 100644 | ||
36 | --- a/tcg/ppc/tcg-target.h | ||
37 | +++ b/tcg/ppc/tcg-target.h | ||
38 | @@ -XXX,XX +XXX,XX @@ extern bool have_vsx; | ||
39 | #define TCG_TARGET_HAS_shi_vec 0 | ||
40 | #define TCG_TARGET_HAS_shs_vec 0 | ||
41 | #define TCG_TARGET_HAS_shv_vec 1 | ||
42 | -#define TCG_TARGET_HAS_cmp_vec 1 | ||
43 | #define TCG_TARGET_HAS_mul_vec 1 | ||
44 | #define TCG_TARGET_HAS_sat_vec 1 | ||
45 | #define TCG_TARGET_HAS_minmax_vec 1 | ||
46 | -- | ||
47 | 2.25.1 | ||
48 | |||
Deleted patch | |||
---|---|---|---|
1 | This will reduce the differences between 32-bit and 64-bit hosts, | ||
2 | allowing full 64-bit constants to be created with the same interface. | ||
3 | 1 | ||
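For illustration (example values only): tcg_target_long is 32 bits wide
on a 32-bit host, so the old field could not hold a full 64-bit
constant, and the print format differed per host:

    int64_t val = 0x123456789abcdef0ll; /* now storable on any host */
    printf("$0x%" PRIx64, val);         /* one format for all hosts */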
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | include/tcg/tcg.h | 2 +- | ||
7 | tcg/tcg.c | 2 +- | ||
8 | 2 files changed, 2 insertions(+), 2 deletions(-) | ||
9 | |||
10 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/include/tcg/tcg.h | ||
13 | +++ b/include/tcg/tcg.h | ||
14 | @@ -XXX,XX +XXX,XX @@ typedef struct TCGTemp { | ||
15 | unsigned int mem_allocated:1; | ||
16 | unsigned int temp_allocated:1; | ||
17 | |||
18 | - tcg_target_long val; | ||
19 | + int64_t val; | ||
20 | struct TCGTemp *mem_base; | ||
21 | intptr_t mem_offset; | ||
22 | const char *name; | ||
23 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/tcg/tcg.c | ||
26 | +++ b/tcg/tcg.c | ||
27 | @@ -XXX,XX +XXX,XX @@ static void dump_regs(TCGContext *s) | ||
28 | tcg_target_reg_names[ts->mem_base->reg]); | ||
29 | break; | ||
30 | case TEMP_VAL_CONST: | ||
31 | - printf("$0x%" TCG_PRIlx, ts->val); | ||
32 | + printf("$0x%" PRIx64, ts->val); | ||
33 | break; | ||
34 | case TEMP_VAL_DEAD: | ||
35 | printf("D"); | ||
36 | -- | ||
37 | 2.25.1 | ||
38 | |||
1 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 1 | The EXTR instructions can use the extract opcodes. |
---|---|---|---|
2 | |||
3 | Reviewed-by: Bastian Koppelmann <kbastian@mail.uni-paderborn.de> | ||
2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
3 | --- | 5 | --- |
4 | tcg/i386/tcg-target.c.inc | 26 +++++++++++++------------- | 6 | target/tricore/translate.c | 20 ++++---------------- |
5 | 1 file changed, 13 insertions(+), 13 deletions(-) | 7 | 1 file changed, 4 insertions(+), 16 deletions(-) |
6 | 8 | ||
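For context, a short sketch of the interface the i386 hunks below
switch to (the signature matches its uses here; treat the sharing
detail as this series' design rather than settled API documentation):

    /* A constant vector temp: read-only, interned per value, and
     * never explicitly freed -- unlike tcg_temp_new_vec plus dupi. */
    TCGv_vec zero = tcg_constant_vec(TCG_TYPE_V128, MO_8, 0);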
7 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | 9 | diff --git a/target/tricore/translate.c b/target/tricore/translate.c |
8 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
9 | --- a/tcg/i386/tcg-target.c.inc | 11 | --- a/target/tricore/translate.c |
10 | +++ b/tcg/i386/tcg-target.c.inc | 12 | +++ b/target/tricore/translate.c |
11 | @@ -XXX,XX +XXX,XX @@ static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0, | 13 | @@ -XXX,XX +XXX,XX @@ static void decode_rrpw_extract_insert(DisasContext *ctx) |
12 | static void expand_vec_mul(TCGType type, unsigned vece, | 14 | switch (op2) { |
13 | TCGv_vec v0, TCGv_vec v1, TCGv_vec v2) | 15 | case OPC2_32_RRPW_EXTR: |
14 | { | 16 | if (width == 0) { |
15 | - TCGv_vec t1, t2, t3, t4; | 17 | - tcg_gen_movi_tl(cpu_gpr_d[r3], 0); |
16 | + TCGv_vec t1, t2, t3, t4, zero; | 18 | - break; |
17 | 19 | - } | |
18 | tcg_debug_assert(vece == MO_8); | 20 | - |
19 | 21 | - if (pos + width <= 32) { | |
20 | @@ -XXX,XX +XXX,XX @@ static void expand_vec_mul(TCGType type, unsigned vece, | 22 | - /* optimize special cases */ |
21 | case TCG_TYPE_V64: | 23 | - if ((pos == 0) && (width == 8)) { |
22 | t1 = tcg_temp_new_vec(TCG_TYPE_V128); | 24 | - tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); |
23 | t2 = tcg_temp_new_vec(TCG_TYPE_V128); | 25 | - } else if ((pos == 0) && (width == 16)) { |
24 | - tcg_gen_dup16i_vec(t2, 0); | 26 | - tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); |
25 | + zero = tcg_constant_vec(TCG_TYPE_V128, MO_8, 0); | 27 | - } else { |
26 | vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8, | 28 | - tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width); |
27 | - tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(t2)); | 29 | - tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width); |
28 | + tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(zero)); | 30 | - } |
29 | vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8, | 31 | + tcg_gen_movi_tl(cpu_gpr_d[r3], 0); |
30 | - tcgv_vec_arg(t2), tcgv_vec_arg(t2), tcgv_vec_arg(v2)); | 32 | + } else if (pos + width <= 32) { |
31 | + tcgv_vec_arg(t2), tcgv_vec_arg(zero), tcgv_vec_arg(v2)); | 33 | + tcg_gen_sextract_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width); |
32 | tcg_gen_mul_vec(MO_16, t1, t1, t2); | 34 | } |
33 | tcg_gen_shri_vec(MO_16, t1, t1, 8); | 35 | break; |
34 | vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8, | 36 | case OPC2_32_RRPW_EXTR_U: |
35 | @@ -XXX,XX +XXX,XX @@ static void expand_vec_mul(TCGType type, unsigned vece, | 37 | if (width == 0) { |
36 | t2 = tcg_temp_new_vec(type); | 38 | tcg_gen_movi_tl(cpu_gpr_d[r3], 0); |
37 | t3 = tcg_temp_new_vec(type); | 39 | } else { |
38 | t4 = tcg_temp_new_vec(type); | 40 | - tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos); |
39 | - tcg_gen_dup16i_vec(t4, 0); | 41 | - tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width)); |
40 | + zero = tcg_constant_vec(TCG_TYPE_V128, MO_8, 0); | 42 | + tcg_gen_extract_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width); |
41 | vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8, | 43 | } |
42 | - tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(t4)); | 44 | break; |
43 | + tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(zero)); | 45 | case OPC2_32_RRPW_IMASK: |
44 | vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8, | ||
45 | - tcgv_vec_arg(t2), tcgv_vec_arg(t4), tcgv_vec_arg(v2)); | ||
46 | + tcgv_vec_arg(t2), tcgv_vec_arg(zero), tcgv_vec_arg(v2)); | ||
47 | vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8, | ||
48 | - tcgv_vec_arg(t3), tcgv_vec_arg(v1), tcgv_vec_arg(t4)); | ||
49 | + tcgv_vec_arg(t3), tcgv_vec_arg(v1), tcgv_vec_arg(zero)); | ||
50 | vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8, | ||
51 | - tcgv_vec_arg(t4), tcgv_vec_arg(t4), tcgv_vec_arg(v2)); | ||
52 | + tcgv_vec_arg(t4), tcgv_vec_arg(zero), tcgv_vec_arg(v2)); | ||
53 | tcg_gen_mul_vec(MO_16, t1, t1, t2); | ||
54 | tcg_gen_mul_vec(MO_16, t3, t3, t4); | ||
55 | tcg_gen_shri_vec(MO_16, t1, t1, 8); | ||
56 | @@ -XXX,XX +XXX,XX @@ static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0, | ||
57 | NEED_UMIN = 8, | ||
58 | NEED_UMAX = 16, | ||
59 | }; | ||
60 | - TCGv_vec t1, t2; | ||
61 | + TCGv_vec t1, t2, t3; | ||
62 | uint8_t fixup; | ||
63 | |||
64 | switch (cond) { | ||
65 | @@ -XXX,XX +XXX,XX @@ static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0, | ||
66 | } else if (fixup & NEED_BIAS) { | ||
67 | t1 = tcg_temp_new_vec(type); | ||
68 | t2 = tcg_temp_new_vec(type); | ||
69 | - tcg_gen_dupi_vec(vece, t2, 1ull << ((8 << vece) - 1)); | ||
70 | - tcg_gen_sub_vec(vece, t1, v1, t2); | ||
71 | - tcg_gen_sub_vec(vece, t2, v2, t2); | ||
72 | + t3 = tcg_constant_vec(type, vece, 1ull << ((8 << vece) - 1)); | ||
73 | + tcg_gen_sub_vec(vece, t1, v1, t3); | ||
74 | + tcg_gen_sub_vec(vece, t2, v2, t3); | ||
75 | v1 = t1; | ||
76 | v2 = t2; | ||
77 | cond = tcg_signed_cond(cond); | ||
78 | -- | 46 | -- |
79 | 2.25.1 | 47 | 2.34.1 |
80 | |||
1 | Fix this name to match our coding style. | 1 | Reviewed-by: Max Filippov <jcmvbkbc@gmail.com> |
---|---|---|---|
2 | |||
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 3 | --- |
7 | tcg/optimize.c | 32 ++++++++++++++++---------------- | 4 | target/xtensa/translate.c | 12 +----------- |
8 | 1 file changed, 16 insertions(+), 16 deletions(-) | 5 | 1 file changed, 1 insertion(+), 11 deletions(-) |
9 | 6 | ||
10 | diff --git a/tcg/optimize.c b/tcg/optimize.c | 7 | diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c |
11 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/tcg/optimize.c | 9 | --- a/target/xtensa/translate.c |
13 | +++ b/tcg/optimize.c | 10 | +++ b/target/xtensa/translate.c |
14 | @@ -XXX,XX +XXX,XX @@ | 11 | @@ -XXX,XX +XXX,XX @@ static void translate_salt(DisasContext *dc, const OpcodeArg arg[], |
15 | glue(glue(case INDEX_op_, x), _i64): \ | 12 | static void translate_sext(DisasContext *dc, const OpcodeArg arg[], |
16 | glue(glue(case INDEX_op_, x), _vec) | 13 | const uint32_t par[]) |
17 | |||
18 | -struct tcg_temp_info { | ||
19 | +typedef struct TempOptInfo { | ||
20 | bool is_const; | ||
21 | TCGTemp *prev_copy; | ||
22 | TCGTemp *next_copy; | ||
23 | tcg_target_ulong val; | ||
24 | tcg_target_ulong mask; | ||
25 | -}; | ||
26 | +} TempOptInfo; | ||
27 | |||
28 | -static inline struct tcg_temp_info *ts_info(TCGTemp *ts) | ||
29 | +static inline TempOptInfo *ts_info(TCGTemp *ts) | ||
30 | { | 14 | { |
31 | return ts->state_ptr; | 15 | - int shift = 31 - arg[2].imm; |
16 | - | ||
17 | - if (shift == 24) { | ||
18 | - tcg_gen_ext8s_i32(arg[0].out, arg[1].in); | ||
19 | - } else if (shift == 16) { | ||
20 | - tcg_gen_ext16s_i32(arg[0].out, arg[1].in); | ||
21 | - } else { | ||
22 | - TCGv_i32 tmp = tcg_temp_new_i32(); | ||
23 | - tcg_gen_shli_i32(tmp, arg[1].in, shift); | ||
24 | - tcg_gen_sari_i32(arg[0].out, tmp, shift); | ||
25 | - } | ||
26 | + tcg_gen_sextract_i32(arg[0].out, arg[1].in, 0, arg[2].imm + 1); | ||
32 | } | 27 | } |
33 | 28 | ||
34 | -static inline struct tcg_temp_info *arg_info(TCGArg arg) | 29 | static uint32_t test_exceptions_simcall(DisasContext *dc, |
35 | +static inline TempOptInfo *arg_info(TCGArg arg) | ||
36 | { | ||
37 | return ts_info(arg_temp(arg)); | ||
38 | } | ||
39 | @@ -XXX,XX +XXX,XX @@ static inline bool ts_is_copy(TCGTemp *ts) | ||
40 | /* Reset TEMP's state, possibly removing the temp for the list of copies. */ | ||
41 | static void reset_ts(TCGTemp *ts) | ||
42 | { | ||
43 | - struct tcg_temp_info *ti = ts_info(ts); | ||
44 | - struct tcg_temp_info *pi = ts_info(ti->prev_copy); | ||
45 | - struct tcg_temp_info *ni = ts_info(ti->next_copy); | ||
46 | + TempOptInfo *ti = ts_info(ts); | ||
47 | + TempOptInfo *pi = ts_info(ti->prev_copy); | ||
48 | + TempOptInfo *ni = ts_info(ti->next_copy); | ||
49 | |||
50 | ni->prev_copy = ti->prev_copy; | ||
51 | pi->next_copy = ti->next_copy; | ||
52 | @@ -XXX,XX +XXX,XX @@ static void reset_temp(TCGArg arg) | ||
53 | } | ||
54 | |||
55 | /* Initialize and activate a temporary. */ | ||
56 | -static void init_ts_info(struct tcg_temp_info *infos, | ||
57 | +static void init_ts_info(TempOptInfo *infos, | ||
58 | TCGTempSet *temps_used, TCGTemp *ts) | ||
59 | { | ||
60 | size_t idx = temp_idx(ts); | ||
61 | if (!test_bit(idx, temps_used->l)) { | ||
62 | - struct tcg_temp_info *ti = &infos[idx]; | ||
63 | + TempOptInfo *ti = &infos[idx]; | ||
64 | |||
65 | ts->state_ptr = ti; | ||
66 | ti->next_copy = ts; | ||
67 | @@ -XXX,XX +XXX,XX @@ static void init_ts_info(struct tcg_temp_info *infos, | ||
68 | } | ||
69 | } | ||
70 | |||
71 | -static void init_arg_info(struct tcg_temp_info *infos, | ||
72 | +static void init_arg_info(TempOptInfo *infos, | ||
73 | TCGTempSet *temps_used, TCGArg arg) | ||
74 | { | ||
75 | init_ts_info(infos, temps_used, arg_temp(arg)); | ||
76 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val) | ||
77 | const TCGOpDef *def; | ||
78 | TCGOpcode new_op; | ||
79 | tcg_target_ulong mask; | ||
80 | - struct tcg_temp_info *di = arg_info(dst); | ||
81 | + TempOptInfo *di = arg_info(dst); | ||
82 | |||
83 | def = &tcg_op_defs[op->opc]; | ||
84 | if (def->flags & TCG_OPF_VECTOR) { | ||
85 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | ||
86 | TCGTemp *dst_ts = arg_temp(dst); | ||
87 | TCGTemp *src_ts = arg_temp(src); | ||
88 | const TCGOpDef *def; | ||
89 | - struct tcg_temp_info *di; | ||
90 | - struct tcg_temp_info *si; | ||
91 | + TempOptInfo *di; | ||
92 | + TempOptInfo *si; | ||
93 | tcg_target_ulong mask; | ||
94 | TCGOpcode new_op; | ||
95 | |||
96 | @@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) | ||
97 | di->mask = mask; | ||
98 | |||
99 | if (src_ts->type == dst_ts->type) { | ||
100 | - struct tcg_temp_info *ni = ts_info(si->next_copy); | ||
101 | + TempOptInfo *ni = ts_info(si->next_copy); | ||
102 | |||
103 | di->next_copy = si->next_copy; | ||
104 | di->prev_copy = src_ts; | ||
105 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
106 | { | ||
107 | int nb_temps, nb_globals; | ||
108 | TCGOp *op, *op_next, *prev_mb = NULL; | ||
109 | - struct tcg_temp_info *infos; | ||
110 | + TempOptInfo *infos; | ||
111 | TCGTempSet temps_used; | ||
112 | |||
113 | /* Array VALS has an element for each temp. | ||
114 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
115 | nb_temps = s->nb_temps; | ||
116 | nb_globals = s->nb_globals; | ||
117 | bitmap_zero(temps_used.l, nb_temps); | ||
118 | - infos = tcg_malloc(sizeof(struct tcg_temp_info) * nb_temps); | ||
119 | + infos = tcg_malloc(sizeof(TempOptInfo) * nb_temps); | ||
120 | |||
121 | QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { | ||
122 | tcg_target_ulong mask, partmask, affected; | ||
123 | -- | 30 | -- |
124 | 2.25.1 | 31 | 2.34.1 |
125 | |||
Deleted patch | |||
---|---|---|---|
1 | The normal movi opcodes are going away. We need something | ||
2 | for TCI to use internally. | ||
3 | 1 | ||
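For reference, the byte stream this implies, reconstructed from the
generator and interpreter hunks below (an inference from the code, not
a separate specification):

    /* tci_movi_i32: [opcode] [dst reg] [32-bit immediate]
     * tci_movi_i64: [opcode] [dst reg] [64-bit immediate] (64-bit hosts)
     * written via tcg_out_op_t / tcg_out_r / tcg_out32 or tcg_out64,
     * consumed via *tb_ptr++ and tci_read_i32 / tci_read_i64. */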
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | include/tcg/tcg-opc.h | 8 ++++++++ | ||
8 | tcg/tci.c | 4 ++-- | ||
9 | tcg/tci/tcg-target.c.inc | 4 ++-- | ||
10 | 3 files changed, 12 insertions(+), 4 deletions(-) | ||
11 | |||
12 | diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/include/tcg/tcg-opc.h | ||
15 | +++ b/include/tcg/tcg-opc.h | ||
16 | @@ -XXX,XX +XXX,XX @@ DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT) | ||
17 | #include "tcg-target.opc.h" | ||
18 | #endif | ||
19 | |||
20 | +#ifdef TCG_TARGET_INTERPRETER | ||
21 | +/* These opcodes are only for use between the tci generator and interpreter. */ | ||
22 | +DEF(tci_movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT) | ||
23 | +#if TCG_TARGET_REG_BITS == 64 | ||
24 | +DEF(tci_movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) | ||
25 | +#endif | ||
26 | +#endif | ||
27 | + | ||
28 | #undef TLADDR_ARGS | ||
29 | #undef DATA64_ARGS | ||
30 | #undef IMPL | ||
31 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/tcg/tci.c | ||
34 | +++ b/tcg/tci.c | ||
35 | @@ -XXX,XX +XXX,XX @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr) | ||
36 | t1 = tci_read_r32(regs, &tb_ptr); | ||
37 | tci_write_reg32(regs, t0, t1); | ||
38 | break; | ||
39 | - case INDEX_op_movi_i32: | ||
40 | + case INDEX_op_tci_movi_i32: | ||
41 | t0 = *tb_ptr++; | ||
42 | t1 = tci_read_i32(&tb_ptr); | ||
43 | tci_write_reg32(regs, t0, t1); | ||
44 | @@ -XXX,XX +XXX,XX @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr) | ||
45 | t1 = tci_read_r64(regs, &tb_ptr); | ||
46 | tci_write_reg64(regs, t0, t1); | ||
47 | break; | ||
48 | - case INDEX_op_movi_i64: | ||
49 | + case INDEX_op_tci_movi_i64: | ||
50 | t0 = *tb_ptr++; | ||
51 | t1 = tci_read_i64(&tb_ptr); | ||
52 | tci_write_reg64(regs, t0, t1); | ||
53 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
54 | index XXXXXXX..XXXXXXX 100644 | ||
55 | --- a/tcg/tci/tcg-target.c.inc | ||
56 | +++ b/tcg/tci/tcg-target.c.inc | ||
57 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, | ||
58 | uint8_t *old_code_ptr = s->code_ptr; | ||
59 | uint32_t arg32 = arg; | ||
60 | if (type == TCG_TYPE_I32 || arg == arg32) { | ||
61 | - tcg_out_op_t(s, INDEX_op_movi_i32); | ||
62 | + tcg_out_op_t(s, INDEX_op_tci_movi_i32); | ||
63 | tcg_out_r(s, t0); | ||
64 | tcg_out32(s, arg32); | ||
65 | } else { | ||
66 | tcg_debug_assert(type == TCG_TYPE_I64); | ||
67 | #if TCG_TARGET_REG_BITS == 64 | ||
68 | - tcg_out_op_t(s, INDEX_op_movi_i64); | ||
69 | + tcg_out_op_t(s, INDEX_op_tci_movi_i64); | ||
70 | tcg_out_r(s, t0); | ||
71 | tcg_out64(s, arg); | ||
72 | #else | ||
73 | -- | ||
74 | 2.25.1 | ||
75 | |||