The following changes since commit 66a6aa8f9a56a6317e074b1f5e269fecdf4ad782:

  Merge tag 'vfio-updates-20230307.1' of https://gitlab.com/alex.williamson/qemu into staging (2023-03-09 15:19:44 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230309

for you to fetch changes up to 29fc660789547ceb5d6565e7fc39d8c1f65dd157:

  tcg: Drop tcg_const_* (2023-03-09 11:55:56 -0800)

----------------------------------------------------------------
accel/tcg: Fix NB_MMU_MODES to 16
Balance of the target/ patchset which eliminates tcg_temp_free
Balance of the target/ patchset which eliminates tcg_const

----------------------------------------------------------------
Anton Johansson (23):
      include/exec: Set default `NB_MMU_MODES` to 16
      target/alpha: Remove `NB_MMU_MODES` define
      target/arm: Remove `NB_MMU_MODES` define
      target/avr: Remove `NB_MMU_MODES` define
      target/cris: Remove `NB_MMU_MODES` define
      target/hexagon: Remove `NB_MMU_MODES` define
      target/hppa: Remove `NB_MMU_MODES` define
      target/i386: Remove `NB_MMU_MODES` define
      target/loongarch: Remove `NB_MMU_MODES` define
      target/m68k: Remove `NB_MMU_MODES` define
      target/microblaze: Remove `NB_MMU_MODES` define
      target/mips: Remove `NB_MMU_MODES` define
      target/nios2: Remove `NB_MMU_MODES` define
      target/openrisc: Remove `NB_MMU_MODES` define
      target/ppc: Remove `NB_MMU_MODES` define
      target/riscv: Remove `NB_MMU_MODES` define
      target/rx: Remove `NB_MMU_MODES` define
      target/s390x: Remove `NB_MMU_MODES` define
      target/sh4: Remove `NB_MMU_MODES` define
      target/sparc: Remove `NB_MMU_MODES` define
      target/tricore: Remove `NB_MMU_MODES` define
      target/xtensa: Remove `NB_MMU_MODES` define
      include/exec: Remove guards around `NB_MMU_MODES`

Richard Henderson (68):
      target/mips: Drop tcg_temp_free from micromips_translate.c.inc
      target/mips: Drop tcg_temp_free from msa_translate.c
      target/mips: Drop tcg_temp_free from mxu_translate.c
      target/mips: Drop tcg_temp_free from nanomips_translate.c.inc
      target/mips: Drop tcg_temp_free from octeon_translate.c
      target/mips: Drop tcg_temp_free from translate_addr_const.c
      target/mips: Drop tcg_temp_free from tx79_translate.c
      target/mips: Drop tcg_temp_free from vr54xx_translate.c
      target/mips: Drop tcg_temp_free from translate.c
      target/s390x: Drop free_compare
      target/s390x: Drop tcg_temp_free from translate_vx.c.inc
      target/s390x: Drop tcg_temp_free from translate.c
      target/s390x: Remove assert vs g_in2
      target/s390x: Remove g_out, g_out2, g_in1, g_in2 from DisasContext
      tcg: Create tcg/tcg-temp-internal.h
      target/avr: Avoid use of tcg_const_i32 in SBIC, SBIS
      target/avr: Avoid use of tcg_const_i32 throughout
      target/cris: Avoid use of tcg_const_i32 throughout
      target/hppa: Avoid tcg_const_i64 in trans_fid_f
      target/hppa: Avoid use of tcg_const_i32 throughout
      target/i386: Avoid use of tcg_const_* throughout
      target/m68k: Avoid tcg_const_i32 when modified
      target/m68k: Avoid tcg_const_i32 in bfop_reg
      target/m68k: Avoid tcg_const_* throughout
      target/mips: Split out gen_lxl
      target/mips: Split out gen_lxr
      target/mips: Avoid tcg_const_tl in gen_r6_ld
      target/mips: Avoid tcg_const_* throughout
      target/ppc: Split out gen_vx_vmul10
      target/ppc: Avoid tcg_const_i64 in do_vector_shift_quad
      target/rx: Use tcg_gen_abs_i32
      target/rx: Use cpu_psw_z as temp in flags computation
      target/rx: Avoid tcg_const_i32 when new temp needed
      target/rx: Avoid tcg_const_i32
      target/s390x: Avoid tcg_const_i64
      target/sh4: Avoid tcg_const_i32 for TAS.B
      target/sh4: Avoid tcg_const_i32
      tcg/sparc: Avoid tcg_const_tl in gen_edge
      target/tricore: Split t_n as constant from temp as variable
      target/tricore: Rename t_off10 and use tcg_constant_i32
      target/tricore: Use setcondi instead of explicit allocation
      target/tricore: Drop some temp initialization
      target/tricore: Avoid tcg_const_i32
      tcg: Replace tcg_const_i64 in tcg-op.c
      target/arm: Use rmode >= 0 for need_rmode
      target/arm: Handle FPROUNDING_ODD in arm_rmode_to_sf
      target/arm: Improve arm_rmode_to_sf
      target/arm: Consistently use ARMFPRounding during translation
      target/arm: Create gen_set_rmode, gen_restore_rmode
      target/arm: Improve trans_BFCI
      target/arm: Avoid tcg_const_ptr in gen_sve_{ldr,str}
      target/arm: Avoid tcg_const_* in translate-mve.c
      target/arm: Avoid tcg_const_ptr in disas_simd_zip_trn
      target/arm: Avoid tcg_const_ptr in handle_vec_simd_sqshrn
      target/arm: Avoid tcg_const_ptr in handle_rev
      target/m68k: Use tcg_constant_i32 in gen_ea_mode
      target/ppc: Avoid tcg_const_i64 in do_vcntmb
      target/ppc: Avoid tcg_const_* in vmx-impl.c.inc
      target/ppc: Avoid tcg_const_* in xxeval
      target/ppc: Avoid tcg_const_* in vsx-impl.c.inc
      target/ppc: Avoid tcg_const_* in fp-impl.c.inc
      target/ppc: Avoid tcg_const_* in power8-pmu-regs.c.inc
      target/ppc: Rewrite trans_ADDG6S
      target/ppc: Fix gen_tlbsx_booke206
      target/ppc: Avoid tcg_const_* in translate.c
      target/tricore: Use min/max for saturate
      tcg: Drop tcg_const_*_vec
      tcg: Drop tcg_const_*

 include/exec/cpu-defs.h                    |   9 +-
 include/tcg/tcg-op.h                       |   4 -
 include/tcg/tcg-temp-internal.h            |  83 +++
 include/tcg/tcg.h                          |  64 ---
 target/alpha/cpu-param.h                   |   2 -
 target/arm/cpu-param.h                     |   2 -
 target/arm/internals.h                     |  12 +-
 target/arm/tcg/translate.h                 |  17 +
 target/avr/cpu-param.h                     |   1 -
 target/cris/cpu-param.h                    |   1 -
 target/hexagon/cpu-param.h                 |   2 -
 target/hppa/cpu-param.h                    |   1 -
 target/i386/cpu-param.h                    |   1 -
 target/loongarch/cpu-param.h               |   1 -
 target/m68k/cpu-param.h                    |   1 -
 target/microblaze/cpu-param.h              |   1 -
 target/microblaze/cpu.h                    |   2 +-
 target/mips/cpu-param.h                    |   1 -
 target/nios2/cpu-param.h                   |   1 -
 target/openrisc/cpu-param.h                |   1 -
 target/ppc/cpu-param.h                     |   1 -
 target/riscv/cpu-param.h                   |   1 -
 target/rx/cpu-param.h                      |   2 -
 target/s390x/cpu-param.h                   |   1 -
 target/sh4/cpu-param.h                     |   1 -
 target/sparc/cpu-param.h                   |   2 -
 target/tricore/cpu-param.h                 |   1 -
 target/xtensa/cpu-param.h                  |   1 -
 accel/tcg/plugin-gen.c                     |   1 +
 target/arm/tcg/translate-a64.c             | 168 +++---
 target/arm/tcg/translate-mve.c             |  56 +-
 target/arm/tcg/translate-sve.c             |  28 +-
 target/arm/tcg/translate-vfp.c             |  26 +-
 target/arm/tcg/translate.c                 |  13 +-
 target/arm/vfp_helper.c                    |  35 +-
 target/avr/translate.c                     |  48 +-
 target/cris/translate.c                    |  46 +-
 target/hppa/translate.c                    |  35 +-
 target/i386/tcg/translate.c                |  83 +--
 target/m68k/translate.c                    | 231 ++++----
 target/mips/tcg/msa_translate.c            |   9 -
 target/mips/tcg/mxu_translate.c            |  55 +-
 target/mips/tcg/octeon_translate.c         |  23 -
 target/mips/tcg/translate.c                | 819 +++------------------
 target/mips/tcg/translate_addr_const.c     |   7 -
 target/mips/tcg/tx79_translate.c           |  45 +-
 target/mips/tcg/vr54xx_translate.c         |   4 -
 target/ppc/translate.c                     | 148 +++---
 target/rx/translate.c                      |  84 ++-
 target/s390x/tcg/translate.c               | 208 +-------
 target/sh4/translate.c                     |  35 +-
 target/sparc/translate.c                   |  14 +-
 target/tricore/translate.c                 | 476 ++++++--------
 tcg/tcg-op-gvec.c                          |   1 +
 tcg/tcg-op-vec.c                           |  35 +-
 tcg/tcg-op.c                               |  13 +-
 tcg/tcg.c                                  |  17 +-
 target/cris/translate_v10.c.inc            |  26 +-
 target/mips/tcg/micromips_translate.c.inc  |  12 +-
 target/mips/tcg/nanomips_translate.c.inc   | 143 +---
 target/ppc/power8-pmu-regs.c.inc           |   4 +-
 target/ppc/translate/fixedpoint-impl.c.inc |  44 +-
 target/ppc/translate/fp-impl.c.inc         |  26 +-
 target/ppc/translate/vmx-impl.c.inc        | 130 ++---
 target/ppc/translate/vsx-impl.c.inc        |  36 +-
 target/s390x/tcg/translate_vx.c.inc        | 143 -----
 tcg/i386/tcg-target.c.inc                  |   9 +-
 67 files changed, 1165 insertions(+), 2388 deletions(-)
 create mode 100644 include/tcg/tcg-temp-internal.h
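As background for the tcg_const theme above: tcg_constant_*() hands back a read-only, deduplicated value that is never freed, whereas the old tcg_const_*() allocated a mutable temporary that each caller had to release. A minimal illustrative sketch of the conversion pattern follows; the emit_addi helpers are hypothetical and are not code from this series, they only show the before/after shape against the TCG translator API.

    #include "tcg/tcg-op.h"

    /* Before: a mutable temporary initialized from a constant,
     * which the translator had to free explicitly. */
    static void emit_addi_old(TCGv_i32 dst, TCGv_i32 src, int32_t imm)
    {
        TCGv_i32 t = tcg_const_i32(imm);
        tcg_gen_add_i32(dst, src, t);
        tcg_temp_free_i32(t);
    }

    /* After: tcg_constant_i32() returns a shared, read-only constant;
     * there is nothing to free and nothing the caller may modify. */
    static void emit_addi_new(TCGv_i32 dst, TCGv_i32 src, int32_t imm)
    {
        tcg_gen_add_i32(dst, src, tcg_constant_i32(imm));
    }
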
Translators are no longer required to free tcg temporaries.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/micromips_translate.c.inc | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/micromips_translate.c.inc
+++ b/target/mips/tcg/micromips_translate.c.inc
@@ -XXX,XX +XXX,XX @@ static void gen_ldst_multiple(DisasContext *ctx, uint32_t opc, int reglist,
         break;
 #endif
     }
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
-    tcg_temp_free_i32(t2);
 }
 
 
@@ -XXX,XX +XXX,XX @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd,
         break;
 #endif
     }
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
 }
 
 static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs)
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs)
 
             gen_load_gpr(t0, rt);
             gen_mtc0(ctx, t0, rs, (ctx->opcode >> 11) & 0x7);
-            tcg_temp_free(t0);
         }
         break;
 #endif
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs)
              * mode.
              */
             ctx->base.is_jmp = DISAS_STOP;
-            tcg_temp_free(t0);
         }
         break;
     case EI:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs)
              */
             gen_save_pc(ctx->base.pc_next + 4);
             ctx->base.is_jmp = DISAS_EXIT;
-            tcg_temp_free(t0);
         }
         break;
     default:
-- 
2.34.1

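For context on why deletions like the above are safe: TCG temporaries are now scoped to the translation block, so a front end simply allocates with tcg_temp_new() and lets the common code reclaim everything when the TB is finalized. A rough sketch of the resulting style (a hypothetical MIPS-flavored helper, not taken from this patch; gen_load_gpr/gen_store_gpr are assumed from the usual target/mips translate environment):

    /* Load two GPRs, OR them, and store the result.  The temporaries
     * are released automatically when the translation block ends, so
     * no tcg_temp_free() calls are needed. */
    static void gen_or_gprs(DisasContext *ctx, int rd, int rs, int rt)
    {
        TCGv t0 = tcg_temp_new();
        TCGv t1 = tcg_temp_new();

        gen_load_gpr(t0, rs);
        gen_load_gpr(t1, rt);
        tcg_gen_or_tl(t0, t0, t1);
        gen_store_gpr(t0, rd);
        /* no explicit frees: the temps go away with the TB */
    }
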
Translators are no longer required to free tcg temporaries.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/msa_translate.c | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/msa_translate.c
+++ b/target/mips/tcg/msa_translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_check_zero_element(TCGv tresult, uint8_t df, uint8_t wt,
     /* if some bit is non-zero then some element is zero */
     tcg_gen_setcondi_i64(cond, t0, t0, 0);
     tcg_gen_trunc_i64_tl(tresult, t0);
-    tcg_temp_free_i64(t0);
-    tcg_temp_free_i64(t1);
 }
 
 static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int sa, TCGCond cond)
@@ -XXX,XX +XXX,XX @@ static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int sa, TCGCond cond)
     tcg_gen_or_i64(t0, msa_wr_d[wt << 1], msa_wr_d[(wt << 1) + 1]);
     tcg_gen_setcondi_i64(cond, t0, t0, 0);
     tcg_gen_trunc_i64_tl(bcond, t0);
-    tcg_temp_free_i64(t0);
 
     ctx->btarget = ctx->base.pc_next + (sa << 2) + 4;
 
@@ -XXX,XX +XXX,XX @@ static bool trans_CTCMSA(DisasContext *ctx, arg_msa_elm *a)
     gen_load_gpr(telm, a->ws);
     gen_helper_msa_ctcmsa(cpu_env, telm, tcg_constant_i32(a->wd));
 
-    tcg_temp_free(telm);
-
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_CFCMSA(DisasContext *ctx, arg_msa_elm *a)
     gen_helper_msa_cfcmsa(telm, cpu_env, tcg_constant_i32(a->ws));
     gen_store_gpr(telm, a->wd);
 
-    tcg_temp_free(telm);
-
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_msa_ldst(DisasContext *ctx, arg_msa_i *a,
     gen_base_offset_addr(ctx, taddr, a->ws, a->sa << a->df);
     gen_msa_ldst(cpu_env, tcg_constant_i32(a->wd), taddr);
 
-    tcg_temp_free(taddr);
-
     return true;
 }
 
-- 
2.34.1

Translators are no longer required to free tcg temporaries.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/mxu_translate.c | 51 ---------------------------------
 1 file changed, 51 deletions(-)

diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/mxu_translate.c
+++ b/target/mips/tcg/mxu_translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_s32i2m(DisasContext *ctx)
     } else if (XRa == 16) {
         gen_store_mxu_cr(t0);
     }
-
-    tcg_temp_free(t0);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_s32m2i(DisasContext *ctx)
     }
 
     gen_store_gpr(t0, Rb);
-
-    tcg_temp_free(t0);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_s8ldd(DisasContext *ctx)
     }
 
     gen_store_mxu_gpr(t0, XRa);
-
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_d16mul(DisasContext *ctx)
     }
     gen_store_mxu_gpr(t3, XRa);
     gen_store_mxu_gpr(t2, XRd);
-
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
-    tcg_temp_free(t2);
-    tcg_temp_free(t3);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_d16mac(DisasContext *ctx)
     }
     gen_store_mxu_gpr(t3, XRa);
     gen_store_mxu_gpr(t2, XRd);
-
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
-    tcg_temp_free(t2);
-    tcg_temp_free(t3);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_q8mul_q8mulsu(DisasContext *ctx)
 
     gen_store_mxu_gpr(t0, XRd);
     gen_store_mxu_gpr(t1, XRa);
-
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
-    tcg_temp_free(t2);
-    tcg_temp_free(t3);
-    tcg_temp_free(t4);
-    tcg_temp_free(t5);
-    tcg_temp_free(t6);
-    tcg_temp_free(t7);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx)
     tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, MO_TESL ^ (sel * MO_BSWAP));
 
     gen_store_mxu_gpr(t1, XRa);
-
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
 }
 
 
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_D16MAX_D16MIN(DisasContext *ctx)
         tcg_gen_shri_i32(t0, t0, 16);
         /* finally update the destination */
         tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
-
-        tcg_temp_free(t1);
-        tcg_temp_free(t0);
     } else if (unlikely(XRb == XRc)) {
         /* both operands same -> just set destination to one of them */
         tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_D16MAX_D16MIN(DisasContext *ctx)
         tcg_gen_shri_i32(t0, t0, 16);
         /* finally update the destination */
         tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
-
-        tcg_temp_free(t1);
-        tcg_temp_free(t0);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_Q8MAX_Q8MIN(DisasContext *ctx)
             /* finally update the destination */
             tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
         }
-
-        tcg_temp_free(t1);
-        tcg_temp_free(t0);
     } else if (unlikely(XRb == XRc)) {
         /* both operands same -> just set destination to one of them */
         tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_Q8MAX_Q8MIN(DisasContext *ctx)
             /* finally update the destination */
             tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
         }
-
-        tcg_temp_free(t1);
-        tcg_temp_free(t0);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_S32ALNI(DisasContext *ctx)
             tcg_gen_shri_i32(t1, t1, 24);
 
             tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
-
-            tcg_temp_free(t1);
-            tcg_temp_free(t0);
         }
         break;
     case MXU_OPTN3_PTN2:
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_S32ALNI(DisasContext *ctx)
             tcg_gen_shri_i32(t1, t1, 16);
 
             tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
-
-            tcg_temp_free(t1);
-            tcg_temp_free(t0);
         }
         break;
     case MXU_OPTN3_PTN3:
@@ -XXX,XX +XXX,XX @@ static void gen_mxu_S32ALNI(DisasContext *ctx)
            tcg_gen_shri_i32(t1, t1, 8);
 
            tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
-
-            tcg_temp_free(t1);
-            tcg_temp_free(t0);
         }
         break;
     case MXU_OPTN3_PTN4:
@@ -XXX,XX +XXX,XX @@ bool decode_ase_mxu(DisasContext *ctx, uint32_t insn)
         }
 
         gen_set_label(l_exit);
-        tcg_temp_free(t_mxu_cr);
     }
 
     return true;
-- 
2.34.1

Translators are no longer required to free tcg temporaries.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/nanomips_translate.c.inc | 127 ++--------------------
 1 file changed, 10 insertions(+), 117 deletions(-)

diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/nanomips_translate.c.inc
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -XXX,XX +XXX,XX @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset,
         tcg_gen_extr_i64_tl(tmp1, tmp2, tval);
     }
     gen_store_gpr(tmp1, reg1);
-    tcg_temp_free(tmp1);
     gen_store_gpr(tmp2, reg2);
-    tcg_temp_free(tmp2);
     tcg_gen_st_i64(tval, cpu_env, offsetof(CPUMIPSState, llval_wp));
-    tcg_temp_free_i64(tval);
     tcg_gen_st_tl(taddr, cpu_env, offsetof(CPUMIPSState, lladdr));
-    tcg_temp_free(taddr);
 }
 
 static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
@@ -XXX,XX +XXX,XX @@ static void gen_save(DisasContext *ctx, uint8_t rt, uint8_t count,
 
     /* adjust stack pointer */
     gen_adjust_sp(ctx, -u);
-
-    tcg_temp_free(t0);
-    tcg_temp_free(va);
 }
 
 static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count,
@@ -XXX,XX +XXX,XX @@ static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count,
 
     /* adjust stack pointer */
     gen_adjust_sp(ctx, u);
-
-    tcg_temp_free(t0);
-    tcg_temp_free(va);
 }
 
 static void gen_compute_branch_nm(DisasContext *ctx, uint32_t opc,
@@ -XXX,XX +XXX,XX @@ static void gen_compute_branch_nm(DisasContext *ctx, uint32_t opc,
     if (insn_bytes == 2) {
         ctx->hflags |= MIPS_HFLAG_B16;
     }
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
 }
 
 static void gen_pool16c_nanomips_insn(DisasContext *ctx)
@@ -XXX,XX +XXX,XX @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
             }
             break;
         }
-        tcg_temp_free(t0);
 #endif
     } else {
         gen_slt(ctx, OPC_SLTU, rd, rs, rt);
@@ -XXX,XX +XXX,XX @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
             /* operands of same sign, result different sign */
             tcg_gen_setcondi_tl(TCG_COND_LT, t0, t1, 0);
             gen_store_gpr(t0, rd);
-
-            tcg_temp_free(t0);
-            tcg_temp_free(t1);
-            tcg_temp_free(t2);
         }
         break;
     case NM_MUL:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
 
             gen_load_gpr(t0, rt);
             gen_mtc0(ctx, t0, rs, extract32(ctx->opcode, 11, 3));
-            tcg_temp_free(t0);
         }
         break;
     case NM_D_E_MT_VPE:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
             }
             break;
         }
-
-        tcg_temp_free(t0);
         }
         break;
     case NM_FORK:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
             gen_load_gpr(t0, rt);
             gen_load_gpr(t1, rs);
             gen_helper_fork(t0, t1);
-            tcg_temp_free(t0);
-            tcg_temp_free(t1);
         }
         break;
     case NM_MFTR:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
             gen_load_gpr(t0, rs);
             gen_helper_yield(t0, cpu_env, t0);
             gen_store_gpr(t0, rt);
-            tcg_temp_free(t0);
         }
         break;
 #endif
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_1_5_nanomips_insn(DisasContext *ctx, uint32_t opc,
         gen_reserved_instruction(ctx);
         break;
     }
-
-    tcg_temp_free_i32(t0);
-
-    tcg_temp_free(v0_t);
-    tcg_temp_free(v1_t);
 }
 
 
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
         gen_reserved_instruction(ctx);
         break;
     }
-
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
-    tcg_temp_free(v0_t);
 }
 
 static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc,
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc,
         gen_reserved_instruction(ctx);
         break;
     }
-
-    tcg_temp_free_i32(t0);
 }
 
 static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
             tcg_gen_mul_i64(t2, t2, t3);
             tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
             tcg_gen_add_i64(t2, t2, t3);
-            tcg_temp_free_i64(t3);
             gen_move_low32(cpu_LO[acc], t2);
             gen_move_high32(cpu_HI[acc], t2);
-            tcg_temp_free_i64(t2);
         }
         break;
     case NM_MULT:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
             tcg_gen_muls2_i32(t2, t3, t2, t3);
             tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
             tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
-            tcg_temp_free_i32(t2);
-            tcg_temp_free_i32(t3);
         }
         break;
     case NM_EXTRV_W:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
             tcg_gen_mul_i64(t2, t2, t3);
             tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
             tcg_gen_add_i64(t2, t2, t3);
-            tcg_temp_free_i64(t3);
             gen_move_low32(cpu_LO[acc], t2);
             gen_move_high32(cpu_HI[acc], t2);
-            tcg_temp_free_i64(t2);
         }
         break;
     case NM_MULTU:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
             tcg_gen_mulu2_i32(t2, t3, t2, t3);
             tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
             tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
-            tcg_temp_free_i32(t2);
-            tcg_temp_free_i32(t3);
         }
         break;
     case NM_EXTRV_R_W:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
             tcg_gen_mul_i64(t2, t2, t3);
             tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
             tcg_gen_sub_i64(t2, t3, t2);
-            tcg_temp_free_i64(t3);
             gen_move_low32(cpu_LO[acc], t2);
             gen_move_high32(cpu_HI[acc], t2);
-            tcg_temp_free_i64(t2);
         }
         break;
     case NM_EXTRV_RS_W:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
             tcg_gen_mul_i64(t2, t2, t3);
             tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
             tcg_gen_sub_i64(t2, t3, t2);
-            tcg_temp_free_i64(t3);
             gen_move_low32(cpu_LO[acc], t2);
             gen_move_high32(cpu_HI[acc], t2);
-            tcg_temp_free_i64(t2);
         }
         break;
     case NM_EXTRV_S_H:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
         gen_reserved_instruction(ctx);
         break;
     }
-
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
-
-    tcg_temp_free(v0_t);
-    tcg_temp_free(v1_t);
 }
 
 static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc,
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc,
             gen_load_gpr(tv0, rt);
             gen_helper_insv(v0_t, cpu_env, v0_t, tv0);
             gen_store_gpr(v0_t, ret);
-            tcg_temp_free(tv0);
         }
         break;
     case NM_RADDU_W_QB:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc,
         gen_reserved_instruction(ctx);
         break;
     }
-
-    tcg_temp_free(v0_t);
-    tcg_temp_free(t0);
 }
 
 static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc,
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc,
         gen_reserved_instruction(ctx);
         break;
     }
-    tcg_temp_free(t0);
-    tcg_temp_free(rs_t);
 }
 
 
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
             gen_store_gpr(t0, rt);
             /* Stop translation as we may have switched the execution mode */
             ctx->base.is_jmp = DISAS_STOP;
-            tcg_temp_free(t0);
         }
         break;
     case NM_EI:
@@ -XXX,XX +XXX,XX @@ static void gen_pool32axf_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
            gen_store_gpr(t0, rt);
            /* Stop translation as we may have switched the execution mode */
            ctx->base.is_jmp = DISAS_STOP;
-           tcg_temp_free(t0);
        }
        break;
    case NM_RDPGPR:
@@ -XXX,XX +XXX,XX @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
             /* Unconditional branch */
         } else if (rt == 0 && imm != 0) {
             /* Treat as NOP */
-            goto out;
+            return;
         } else {
             cond = TCG_COND_EQ;
         }
@@ -XXX,XX +XXX,XX @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
         check_nms(ctx);
         if (imm >= 32 && !(ctx->hflags & MIPS_HFLAG_64)) {
             gen_reserved_instruction(ctx);
-            goto out;
+            return;
         } else if (rt == 0 && opc == NM_BBEQZC) {
             /* Unconditional branch */
         } else if (rt == 0 && opc == NM_BBNEZC) {
             /* Treat as NOP */
-            goto out;
+            return;
         } else {
             tcg_gen_shri_tl(t0, t0, imm);
             tcg_gen_andi_tl(t0, t0, 1);
@@ -XXX,XX +XXX,XX @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
     case NM_BNEIC:
         if (rt == 0 && imm == 0) {
             /* Treat as NOP */
-            goto out;
+            return;
         } else if (rt == 0 && imm != 0) {
             /* Unconditional branch */
         } else {
@@ -XXX,XX +XXX,XX @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
     default:
         MIPS_INVAL("Immediate Value Compact branch");
         gen_reserved_instruction(ctx);
-        goto out;
+        return;
     }
 
     /* branch completion */
@@ -XXX,XX +XXX,XX @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
 
         gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
     }
-
-out:
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
 }
 
 /* P.BALRSC type nanoMIPS R6 branches: BALRSC and BRSC */
@@ -XXX,XX +XXX,XX @@ static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs,
     /* unconditional branch to register */
     tcg_gen_mov_tl(cpu_PC, btarget);
     tcg_gen_lookup_and_goto_ptr();
-
-    tcg_temp_free(t0);
-    tcg_temp_free(t1);
 }
 
 /* nanoMIPS Branches */
@@ -XXX,XX +XXX,XX @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc,
             gen_load_gpr(tbase, rt);
             tcg_gen_movi_tl(toffset, offset);
             gen_op_addr_add(ctx, btarget, tbase, toffset);
-            tcg_temp_free(tbase);
-            tcg_temp_free(toffset);
         }
         break;
     default:
         MIPS_INVAL("Compact branch/jump");
         gen_reserved_instruction(ctx);
-        goto out;
+        return;
     }
 
     if (bcond_compute == 0) {
@@ -XXX,XX +XXX,XX @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc,
339 | default: | ||
340 | MIPS_INVAL("Compact branch/jump"); | ||
341 | gen_reserved_instruction(ctx); | ||
342 | - goto out; | ||
343 | + return; | ||
344 | } | ||
345 | } else { | ||
346 | /* Conditional compact branch */ | ||
347 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc, | ||
348 | default: | ||
349 | MIPS_INVAL("Compact conditional branch/jump"); | ||
350 | gen_reserved_instruction(ctx); | ||
351 | - goto out; | ||
352 | + return; | ||
353 | } | ||
354 | |||
355 | /* branch completion */ | ||
356 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc, | ||
357 | |||
358 | gen_goto_tb(ctx, 0, ctx->base.pc_next + 4); | ||
359 | } | ||
360 | - | ||
361 | -out: | ||
362 | - tcg_temp_free(t0); | ||
363 | - tcg_temp_free(t1); | ||
364 | } | ||
365 | |||
366 | |||
367 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch_cp1_nm(DisasContext *ctx, uint32_t op, | ||
368 | default: | ||
369 | MIPS_INVAL("cp1 cond branch"); | ||
370 | gen_reserved_instruction(ctx); | ||
371 | - goto out; | ||
372 | + return; | ||
373 | } | ||
374 | |||
375 | tcg_gen_trunc_i64_tl(bcond, t0); | ||
376 | |||
377 | ctx->btarget = btarget; | ||
378 | - | ||
379 | -out: | ||
380 | - tcg_temp_free_i64(t0); | ||
381 | } | ||
382 | |||
383 | |||
384 | @@ -XXX,XX +XXX,XX @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) | ||
385 | break; | ||
386 | default: | ||
387 | gen_reserved_instruction(ctx); | ||
388 | - goto out; | ||
389 | + return; | ||
390 | } | ||
391 | } | ||
392 | gen_op_addr_add(ctx, t0, t0, t1); | ||
393 | @@ -XXX,XX +XXX,XX @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) | ||
394 | gen_reserved_instruction(ctx); | ||
395 | break; | ||
396 | } | ||
397 | - | ||
398 | -out: | ||
399 | - tcg_temp_free(t0); | ||
400 | - tcg_temp_free(t1); | ||
401 | } | ||
402 | |||
403 | static void gen_pool32f_nanomips_insn(DisasContext *ctx) | ||
404 | @@ -XXX,XX +XXX,XX @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, | ||
405 | gen_helper_precr_sra_ph_w(v1_t, sa_t, v1_t, | ||
406 | cpu_gpr[rt]); | ||
407 | gen_store_gpr(v1_t, rt); | ||
408 | - tcg_temp_free_i32(sa_t); | ||
409 | } | ||
410 | break; | ||
411 | case 1: | ||
412 | @@ -XXX,XX +XXX,XX @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, | ||
413 | gen_helper_precr_sra_r_ph_w(v1_t, sa_t, v1_t, | ||
414 | cpu_gpr[rt]); | ||
415 | gen_store_gpr(v1_t, rt); | ||
416 | - tcg_temp_free_i32(sa_t); | ||
417 | } | ||
418 | break; | ||
419 | } | ||
420 | @@ -XXX,XX +XXX,XX @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, | ||
421 | tcg_gen_movi_tl(tv0, rd >> 3); | ||
422 | tcg_gen_movi_tl(tv1, imm); | ||
423 | gen_helper_shilo(tv0, tv1, cpu_env); | ||
424 | - tcg_temp_free(tv1); | ||
425 | - tcg_temp_free(tv0); | ||
426 | } | ||
427 | break; | ||
428 | case NM_MULEQ_S_W_PHL: | ||
429 | @@ -XXX,XX +XXX,XX @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, | ||
430 | gen_reserved_instruction(ctx); | ||
431 | break; | ||
432 | } | ||
433 | - | ||
434 | - tcg_temp_free(v2_t); | ||
435 | - tcg_temp_free(v1_t); | ||
436 | - tcg_temp_free(t0); | ||
437 | } | ||
438 | |||
439 | static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
440 | @@ -XXX,XX +XXX,XX @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
441 | |||
442 | tcg_gen_movi_tl(t0, addr); | ||
443 | tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx, MO_TESL); | ||
444 | - tcg_temp_free(t0); | ||
445 | } | ||
446 | break; | ||
447 | case NM_SWPC48: | ||
448 | @@ -XXX,XX +XXX,XX @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
449 | gen_load_gpr(t1, rt); | ||
450 | |||
451 | tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); | ||
452 | - | ||
453 | - tcg_temp_free(t0); | ||
454 | - tcg_temp_free(t1); | ||
455 | } | ||
456 | break; | ||
457 | default: | ||
458 | @@ -XXX,XX +XXX,XX @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
459 | gen_load_gpr(t0, rs); | ||
460 | tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, imm); | ||
461 | gen_store_gpr(t0, rt); | ||
462 | - | ||
463 | - tcg_temp_free(t0); | ||
464 | } | ||
465 | break; | ||
466 | case NM_ADDIUNEG: | ||
467 | @@ -XXX,XX +XXX,XX @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
468 | |||
469 | gen_load_gpr(t0, rs); | ||
470 | gen_helper_rotx(cpu_gpr[rt], t0, shift, shiftx, stripe); | ||
471 | - tcg_temp_free(t0); | ||
472 | - | ||
473 | - tcg_temp_free_i32(shift); | ||
474 | - tcg_temp_free_i32(shiftx); | ||
475 | - tcg_temp_free_i32(stripe); | ||
476 | } | ||
477 | break; | ||
478 | case NM_P_INS: | ||
479 | @@ -XXX,XX +XXX,XX @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
480 | MO_UNALN); | ||
481 | break; | ||
482 | } | ||
483 | - tcg_temp_free(t0); | ||
484 | - tcg_temp_free(t1); | ||
485 | } | ||
486 | break; | ||
487 | case NM_P_LL: | ||
488 | @@ -XXX,XX +XXX,XX @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
489 | } | ||
490 | counter++; | ||
491 | } | ||
492 | - tcg_temp_free(va); | ||
493 | - tcg_temp_free(t1); | ||
494 | } | ||
495 | break; | ||
496 | default: | ||
497 | @@ -XXX,XX +XXX,XX @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
498 | gen_load_gpr(t0, rt); | ||
499 | tcg_gen_mov_tl(cpu_gpr[rd], t0); | ||
500 | gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s); | ||
501 | - tcg_temp_free(t0); | ||
502 | } | ||
503 | break; | ||
504 | case NM_P_BAL: | ||
505 | @@ -XXX,XX +XXX,XX @@ static int decode_isa_nanomips(CPUMIPSState *env, DisasContext *ctx) | ||
506 | if (ctx->base.pc_next & 0x1) { | ||
507 | TCGv tmp = tcg_const_tl(ctx->base.pc_next); | ||
508 | tcg_gen_st_tl(tmp, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); | ||
509 | - tcg_temp_free(tmp); | ||
510 | generate_exception_end(ctx, EXCP_AdEL); | ||
511 | return 2; | ||
512 | } | ||
513 | @@ -XXX,XX +XXX,XX @@ static int decode_isa_nanomips(CPUMIPSState *env, DisasContext *ctx) | ||
514 | gen_load_gpr(t1, rt); | ||
515 | tcg_gen_mov_tl(cpu_gpr[rd], t0); | ||
516 | tcg_gen_mov_tl(cpu_gpr[re], t1); | ||
517 | - tcg_temp_free(t0); | ||
518 | - tcg_temp_free(t1); | ||
519 | } | ||
520 | break; | ||
521 | default: | ||
522 | -- | ||
523 | 2.34.1 | ||
524 | |||
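The nanomips hunks above all follow one shape: since TCG now reclaims temporaries when the translation block is finalized, the shared "out:" cleanup label has nothing left to do, so each "goto out" becomes a plain return and the label plus its tcg_temp_free() calls are dropped. A minimal sketch of that shape, using invented helper names (gen_demo_branch* are not functions in the tree):

    /* Before: every early exit had to reach the cleanup label. */
    static void gen_demo_branch_old(DisasContext *ctx, int rt, int imm)
    {
        TCGv t0 = tcg_temp_new();

        if (rt == 0 && imm == 0) {
            /* Treat as NOP */
            goto out;
        }
        gen_load_gpr(t0, rt);
        tcg_gen_andi_tl(t0, t0, 1);
        /* ... emit the branch using t0 ... */
    out:
        tcg_temp_free(t0);
    }

    /* After: the temporary is reclaimed automatically, so return directly. */
    static void gen_demo_branch(DisasContext *ctx, int rt, int imm)
    {
        TCGv t0 = tcg_temp_new();

        if (rt == 0 && imm == 0) {
            /* Treat as NOP */
            return;
        }
        gen_load_gpr(t0, rt);
        tcg_gen_andi_tl(t0, t0, 1);
        /* ... emit the branch using t0 ... */
    }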
New patch | |||
---|---|---|---|
1 | Translators are no longer required to free tcg temporaries. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
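An illustrative aside on the mechanics (the handler below is invented, not a real Octeon instruction): temporaries allocated with tcg_temp_new() are now released by the TCG core when the translation block is finalized, so the only change needed in each handler is dropping the trailing tcg_temp_free() calls.

    /* Sketch only: trans_DEMO does not exist in the decode tables. */
    static bool trans_DEMO(DisasContext *ctx, arg_r *a)
    {
        TCGv t0 = tcg_temp_new();

        gen_load_gpr(t0, a->rs);
        tcg_gen_ctpop_tl(t0, t0);
        gen_store_gpr(t0, a->rd);

        /* Formerly followed by tcg_temp_free(t0); now unnecessary. */
        return true;
    }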
6 | target/mips/tcg/octeon_translate.c | 23 ----------------------- | ||
7 | 1 file changed, 23 deletions(-) | ||
8 | |||
9 | diff --git a/target/mips/tcg/octeon_translate.c b/target/mips/tcg/octeon_translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/mips/tcg/octeon_translate.c | ||
12 | +++ b/target/mips/tcg/octeon_translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static bool trans_BBIT(DisasContext *ctx, arg_BBIT *a) | ||
14 | ctx->hflags |= MIPS_HFLAG_BC; | ||
15 | ctx->btarget = ctx->base.pc_next + 4 + a->offset * 4; | ||
16 | ctx->hflags |= MIPS_HFLAG_BDS32; | ||
17 | - | ||
18 | - tcg_temp_free(t0); | ||
19 | return true; | ||
20 | } | ||
21 | |||
22 | @@ -XXX,XX +XXX,XX @@ static bool trans_BADDU(DisasContext *ctx, arg_BADDU *a) | ||
23 | |||
24 | tcg_gen_add_tl(t0, t0, t1); | ||
25 | tcg_gen_andi_i64(cpu_gpr[a->rd], t0, 0xff); | ||
26 | - | ||
27 | - tcg_temp_free(t0); | ||
28 | - tcg_temp_free(t1); | ||
29 | - | ||
30 | return true; | ||
31 | } | ||
32 | |||
33 | @@ -XXX,XX +XXX,XX @@ static bool trans_DMUL(DisasContext *ctx, arg_DMUL *a) | ||
34 | gen_load_gpr(t1, a->rt); | ||
35 | |||
36 | tcg_gen_mul_i64(cpu_gpr[a->rd], t0, t1); | ||
37 | - | ||
38 | - tcg_temp_free(t0); | ||
39 | - tcg_temp_free(t1); | ||
40 | - | ||
41 | return true; | ||
42 | } | ||
43 | |||
44 | @@ -XXX,XX +XXX,XX @@ static bool trans_EXTS(DisasContext *ctx, arg_EXTS *a) | ||
45 | gen_load_gpr(t0, a->rs); | ||
46 | tcg_gen_sextract_tl(t0, t0, a->p, a->lenm1 + 1); | ||
47 | gen_store_gpr(t0, a->rt); | ||
48 | - tcg_temp_free(t0); | ||
49 | - | ||
50 | return true; | ||
51 | } | ||
52 | |||
53 | @@ -XXX,XX +XXX,XX @@ static bool trans_CINS(DisasContext *ctx, arg_CINS *a) | ||
54 | gen_load_gpr(t0, a->rs); | ||
55 | tcg_gen_deposit_z_tl(t0, t0, a->p, a->lenm1 + 1); | ||
56 | gen_store_gpr(t0, a->rt); | ||
57 | - tcg_temp_free(t0); | ||
58 | - | ||
59 | return true; | ||
60 | } | ||
61 | |||
62 | @@ -XXX,XX +XXX,XX @@ static bool trans_POP(DisasContext *ctx, arg_POP *a) | ||
63 | } | ||
64 | tcg_gen_ctpop_tl(t0, t0); | ||
65 | gen_store_gpr(t0, a->rd); | ||
66 | - tcg_temp_free(t0); | ||
67 | - | ||
68 | return true; | ||
69 | } | ||
70 | |||
71 | @@ -XXX,XX +XXX,XX @@ static bool trans_SEQNE(DisasContext *ctx, arg_SEQNE *a) | ||
72 | } else { | ||
73 | tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr[a->rd], t1, t0); | ||
74 | } | ||
75 | - | ||
76 | - tcg_temp_free(t0); | ||
77 | - tcg_temp_free(t1); | ||
78 | - | ||
79 | return true; | ||
80 | } | ||
81 | |||
82 | @@ -XXX,XX +XXX,XX @@ static bool trans_SEQNEI(DisasContext *ctx, arg_SEQNEI *a) | ||
83 | } else { | ||
84 | tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr[a->rt], t0, imm); | ||
85 | } | ||
86 | - | ||
87 | - tcg_temp_free(t0); | ||
88 | - | ||
89 | return true; | ||
90 | } | ||
91 | -- | ||
92 | 2.34.1 | ||
93 | |||
New patch | |||
---|---|---|---|
1 | Translators are no longer required to free tcg temporaries. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/mips/tcg/translate_addr_const.c | 7 ------- | ||
7 | 1 file changed, 7 deletions(-) | ||
8 | |||
9 | diff --git a/target/mips/tcg/translate_addr_const.c b/target/mips/tcg/translate_addr_const.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/mips/tcg/translate_addr_const.c | ||
12 | +++ b/target/mips/tcg/translate_addr_const.c | ||
13 | @@ -XXX,XX +XXX,XX @@ bool gen_lsa(DisasContext *ctx, int rd, int rt, int rs, int sa) | ||
14 | tcg_gen_shli_tl(t0, t0, sa + 1); | ||
15 | tcg_gen_add_tl(cpu_gpr[rd], t0, t1); | ||
16 | tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); | ||
17 | - | ||
18 | - tcg_temp_free(t1); | ||
19 | - tcg_temp_free(t0); | ||
20 | - | ||
21 | return true; | ||
22 | } | ||
23 | |||
24 | @@ -XXX,XX +XXX,XX @@ bool gen_dlsa(DisasContext *ctx, int rd, int rt, int rs, int sa) | ||
25 | gen_load_gpr(t1, rt); | ||
26 | tcg_gen_shli_tl(t0, t0, sa + 1); | ||
27 | tcg_gen_add_tl(cpu_gpr[rd], t0, t1); | ||
28 | - tcg_temp_free(t1); | ||
29 | - tcg_temp_free(t0); | ||
30 | - | ||
31 | return true; | ||
32 | } | ||
33 | -- | ||
34 | 2.34.1 | ||
35 | |||
36 | diff view generated by jsdifflib |
1 | Populate this new method for all targets. Always match | 1 | Translators are no longer required to free tcg temporaries. |
---|---|---|---|
2 | the result that would be given by cpu_get_tb_cpu_state, | ||
3 | as we will want these values to correspond in the logs. | ||
4 | 2 | ||
5 | Reviewed-by: Taylor Simpson <tsimpson@quicinc.com> | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
6 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
7 | Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> (target/sparc) | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | --- | 5 | --- |
10 | Cc: Eduardo Habkost <eduardo@habkost.net> (supporter:Machine core) | 6 | target/mips/tcg/tx79_translate.c | 41 -------------------------------- |
11 | Cc: Marcel Apfelbaum <marcel.apfelbaum@gmail.com> (supporter:Machine core) | 7 | 1 file changed, 41 deletions(-) |
12 | Cc: "Philippe Mathieu-Daudé" <f4bug@amsat.org> (reviewer:Machine core) | ||
13 | Cc: Yanan Wang <wangyanan55@huawei.com> (reviewer:Machine core) | ||
14 | Cc: Michael Rolnik <mrolnik@gmail.com> (maintainer:AVR TCG CPUs) | ||
15 | Cc: "Edgar E. Iglesias" <edgar.iglesias@gmail.com> (maintainer:CRIS TCG CPUs) | ||
16 | Cc: Taylor Simpson <tsimpson@quicinc.com> (supporter:Hexagon TCG CPUs) | ||
17 | Cc: Song Gao <gaosong@loongson.cn> (maintainer:LoongArch TCG CPUs) | ||
18 | Cc: Xiaojuan Yang <yangxiaojuan@loongson.cn> (maintainer:LoongArch TCG CPUs) | ||
19 | Cc: Laurent Vivier <laurent@vivier.eu> (maintainer:M68K TCG CPUs) | ||
20 | Cc: Jiaxun Yang <jiaxun.yang@flygoat.com> (reviewer:MIPS TCG CPUs) | ||
21 | Cc: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> (reviewer:MIPS TCG CPUs) | ||
22 | Cc: Chris Wulff <crwulff@gmail.com> (maintainer:NiosII TCG CPUs) | ||
23 | Cc: Marek Vasut <marex@denx.de> (maintainer:NiosII TCG CPUs) | ||
24 | Cc: Stafford Horne <shorne@gmail.com> (odd fixer:OpenRISC TCG CPUs) | ||
25 | Cc: Yoshinori Sato <ysato@users.sourceforge.jp> (reviewer:RENESAS RX CPUs) | ||
26 | Cc: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> (maintainer:SPARC TCG CPUs) | ||
27 | Cc: Bastian Koppelmann <kbastian@mail.uni-paderborn.de> (maintainer:TriCore TCG CPUs) | ||
28 | Cc: Max Filippov <jcmvbkbc@gmail.com> (maintainer:Xtensa TCG CPUs) | ||
29 | Cc: qemu-arm@nongnu.org (open list:ARM TCG CPUs) | ||
30 | Cc: qemu-ppc@nongnu.org (open list:PowerPC TCG CPUs) | ||
31 | Cc: qemu-riscv@nongnu.org (open list:RISC-V TCG CPUs) | ||
32 | Cc: qemu-s390x@nongnu.org (open list:S390 TCG CPUs) | ||
33 | --- | ||
34 | include/hw/core/cpu.h | 3 +++ | ||
35 | target/alpha/cpu.c | 9 +++++++++ | ||
36 | target/arm/cpu.c | 13 +++++++++++++ | ||
37 | target/avr/cpu.c | 8 ++++++++ | ||
38 | target/cris/cpu.c | 8 ++++++++ | ||
39 | target/hexagon/cpu.c | 8 ++++++++ | ||
40 | target/hppa/cpu.c | 8 ++++++++ | ||
41 | target/i386/cpu.c | 9 +++++++++ | ||
42 | target/loongarch/cpu.c | 9 +++++++++ | ||
43 | target/m68k/cpu.c | 8 ++++++++ | ||
44 | target/microblaze/cpu.c | 8 ++++++++ | ||
45 | target/mips/cpu.c | 8 ++++++++ | ||
46 | target/nios2/cpu.c | 9 +++++++++ | ||
47 | target/openrisc/cpu.c | 8 ++++++++ | ||
48 | target/ppc/cpu_init.c | 8 ++++++++ | ||
49 | target/riscv/cpu.c | 13 +++++++++++++ | ||
50 | target/rx/cpu.c | 8 ++++++++ | ||
51 | target/s390x/cpu.c | 8 ++++++++ | ||
52 | target/sh4/cpu.c | 8 ++++++++ | ||
53 | target/sparc/cpu.c | 8 ++++++++ | ||
54 | target/tricore/cpu.c | 9 +++++++++ | ||
55 | target/xtensa/cpu.c | 8 ++++++++ | ||
56 | 22 files changed, 186 insertions(+) | ||
57 | 8 | ||
58 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | 9 | diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c |
59 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
60 | --- a/include/hw/core/cpu.h | 11 | --- a/target/mips/tcg/tx79_translate.c |
61 | +++ b/include/hw/core/cpu.h | 12 | +++ b/target/mips/tcg/tx79_translate.c |
62 | @@ -XXX,XX +XXX,XX @@ struct SysemuCPUOps; | 13 | @@ -XXX,XX +XXX,XX @@ static bool trans_parallel_arith(DisasContext *ctx, arg_r *a, |
63 | * If the target behaviour here is anything other than "set | 14 | gen_load_gpr_hi(ax, a->rs); |
64 | * the PC register to the value passed in" then the target must | 15 | gen_load_gpr_hi(bx, a->rt); |
65 | * also implement the synchronize_from_tb hook. | 16 | gen_logic_i64(cpu_gpr_hi[a->rd], ax, bx); |
66 | + * @get_pc: Callback for getting the Program Counter register. | 17 | - |
67 | + * As above, with the semantics of the target architecture. | 18 | - tcg_temp_free(bx); |
68 | * @gdb_read_register: Callback for letting GDB read a register. | 19 | - tcg_temp_free(ax); |
69 | * @gdb_write_register: Callback for letting GDB write a register. | 20 | - |
70 | * @gdb_adjust_breakpoint: Callback for adjusting the address of a | 21 | return true; |
71 | @@ -XXX,XX +XXX,XX @@ struct CPUClass { | ||
72 | void (*dump_state)(CPUState *cpu, FILE *, int flags); | ||
73 | int64_t (*get_arch_id)(CPUState *cpu); | ||
74 | void (*set_pc)(CPUState *cpu, vaddr value); | ||
75 | + vaddr (*get_pc)(CPUState *cpu); | ||
76 | int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg); | ||
77 | int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg); | ||
78 | vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr); | ||
79 | diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c | ||
80 | index XXXXXXX..XXXXXXX 100644 | ||
81 | --- a/target/alpha/cpu.c | ||
82 | +++ b/target/alpha/cpu.c | ||
83 | @@ -XXX,XX +XXX,XX @@ static void alpha_cpu_set_pc(CPUState *cs, vaddr value) | ||
84 | cpu->env.pc = value; | ||
85 | } | 22 | } |
86 | 23 | ||
87 | +static vaddr alpha_cpu_get_pc(CPUState *cs) | 24 | @@ -XXX,XX +XXX,XX @@ static bool trans_parallel_compare(DisasContext *ctx, arg_r *a, |
88 | +{ | 25 | tcg_gen_movcond_i64(cond, t2, t1, t0, c1, c0); |
89 | + AlphaCPU *cpu = ALPHA_CPU(cs); | 26 | tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], cpu_gpr_hi[a->rd], t2, wlen * i, wlen); |
90 | + | ||
91 | + return cpu->env.pc; | ||
92 | +} | ||
93 | + | ||
94 | + | ||
95 | static bool alpha_cpu_has_work(CPUState *cs) | ||
96 | { | ||
97 | /* Here we are checking to see if the CPU should wake up from HALT. | ||
98 | @@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data) | ||
99 | cc->has_work = alpha_cpu_has_work; | ||
100 | cc->dump_state = alpha_cpu_dump_state; | ||
101 | cc->set_pc = alpha_cpu_set_pc; | ||
102 | + cc->get_pc = alpha_cpu_get_pc; | ||
103 | cc->gdb_read_register = alpha_cpu_gdb_read_register; | ||
104 | cc->gdb_write_register = alpha_cpu_gdb_write_register; | ||
105 | #ifndef CONFIG_USER_ONLY | ||
106 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
107 | index XXXXXXX..XXXXXXX 100644 | ||
108 | --- a/target/arm/cpu.c | ||
109 | +++ b/target/arm/cpu.c | ||
110 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value) | ||
111 | } | 27 | } |
28 | - | ||
29 | - tcg_temp_free(t2); | ||
30 | - tcg_temp_free(t1); | ||
31 | - tcg_temp_free(t0); | ||
32 | - tcg_temp_free(bx); | ||
33 | - tcg_temp_free(ax); | ||
34 | - tcg_temp_free(c1); | ||
35 | - tcg_temp_free(c0); | ||
36 | - | ||
37 | return true; | ||
112 | } | 38 | } |
113 | 39 | ||
114 | +static vaddr arm_cpu_get_pc(CPUState *cs) | 40 | @@ -XXX,XX +XXX,XX @@ static bool trans_LQ(DisasContext *ctx, arg_i *a) |
115 | +{ | 41 | tcg_gen_addi_i64(addr, addr, 8); |
116 | + ARMCPU *cpu = ARM_CPU(cs); | 42 | tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ); |
117 | + CPUARMState *env = &cpu->env; | 43 | gen_store_gpr_hi(t0, a->rt); |
118 | + | 44 | - |
119 | + if (is_a64(env)) { | 45 | - tcg_temp_free(t0); |
120 | + return env->pc; | 46 | - tcg_temp_free(addr); |
121 | + } else { | 47 | - |
122 | + return env->regs[15]; | 48 | return true; |
123 | + } | ||
124 | +} | ||
125 | + | ||
126 | #ifdef CONFIG_TCG | ||
127 | void arm_cpu_synchronize_from_tb(CPUState *cs, | ||
128 | const TranslationBlock *tb) | ||
129 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) | ||
130 | cc->has_work = arm_cpu_has_work; | ||
131 | cc->dump_state = arm_cpu_dump_state; | ||
132 | cc->set_pc = arm_cpu_set_pc; | ||
133 | + cc->get_pc = arm_cpu_get_pc; | ||
134 | cc->gdb_read_register = arm_cpu_gdb_read_register; | ||
135 | cc->gdb_write_register = arm_cpu_gdb_write_register; | ||
136 | #ifndef CONFIG_USER_ONLY | ||
137 | diff --git a/target/avr/cpu.c b/target/avr/cpu.c | ||
138 | index XXXXXXX..XXXXXXX 100644 | ||
139 | --- a/target/avr/cpu.c | ||
140 | +++ b/target/avr/cpu.c | ||
141 | @@ -XXX,XX +XXX,XX @@ static void avr_cpu_set_pc(CPUState *cs, vaddr value) | ||
142 | cpu->env.pc_w = value / 2; /* internally PC points to words */ | ||
143 | } | 49 | } |
144 | 50 | ||
145 | +static vaddr avr_cpu_get_pc(CPUState *cs) | 51 | @@ -XXX,XX +XXX,XX @@ static bool trans_SQ(DisasContext *ctx, arg_i *a) |
146 | +{ | 52 | tcg_gen_addi_i64(addr, addr, 8); |
147 | + AVRCPU *cpu = AVR_CPU(cs); | 53 | gen_load_gpr_hi(t0, a->rt); |
148 | + | 54 | tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ); |
149 | + return cpu->env.pc_w * 2; | 55 | - |
150 | +} | 56 | - tcg_temp_free(addr); |
151 | + | 57 | - tcg_temp_free(t0); |
152 | static bool avr_cpu_has_work(CPUState *cs) | 58 | - |
153 | { | 59 | return true; |
154 | AVRCPU *cpu = AVR_CPU(cs); | ||
155 | @@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data) | ||
156 | cc->has_work = avr_cpu_has_work; | ||
157 | cc->dump_state = avr_cpu_dump_state; | ||
158 | cc->set_pc = avr_cpu_set_pc; | ||
159 | + cc->get_pc = avr_cpu_get_pc; | ||
160 | dc->vmsd = &vms_avr_cpu; | ||
161 | cc->sysemu_ops = &avr_sysemu_ops; | ||
162 | cc->disas_set_info = avr_cpu_disas_set_info; | ||
163 | diff --git a/target/cris/cpu.c b/target/cris/cpu.c | ||
164 | index XXXXXXX..XXXXXXX 100644 | ||
165 | --- a/target/cris/cpu.c | ||
166 | +++ b/target/cris/cpu.c | ||
167 | @@ -XXX,XX +XXX,XX @@ static void cris_cpu_set_pc(CPUState *cs, vaddr value) | ||
168 | cpu->env.pc = value; | ||
169 | } | 60 | } |
170 | 61 | ||
171 | +static vaddr cris_cpu_get_pc(CPUState *cs) | 62 | @@ -XXX,XX +XXX,XX @@ static bool trans_PPACW(DisasContext *ctx, arg_r *a) |
172 | +{ | 63 | |
173 | + CRISCPU *cpu = CRIS_CPU(cs); | 64 | gen_load_gpr_hi(t0, a->rs); /* a1 */ |
174 | + | 65 | tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], a0, t0, 32, 32); |
175 | + return cpu->env.pc; | 66 | - |
176 | +} | 67 | - tcg_temp_free(t0); |
177 | + | 68 | - tcg_temp_free(b0); |
178 | static bool cris_cpu_has_work(CPUState *cs) | 69 | - tcg_temp_free(a0); |
179 | { | 70 | - |
180 | return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); | 71 | return true; |
181 | @@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data) | ||
182 | cc->has_work = cris_cpu_has_work; | ||
183 | cc->dump_state = cris_cpu_dump_state; | ||
184 | cc->set_pc = cris_cpu_set_pc; | ||
185 | + cc->get_pc = cris_cpu_get_pc; | ||
186 | cc->gdb_read_register = cris_cpu_gdb_read_register; | ||
187 | cc->gdb_write_register = cris_cpu_gdb_write_register; | ||
188 | #ifndef CONFIG_USER_ONLY | ||
189 | diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c | ||
190 | index XXXXXXX..XXXXXXX 100644 | ||
191 | --- a/target/hexagon/cpu.c | ||
192 | +++ b/target/hexagon/cpu.c | ||
193 | @@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_set_pc(CPUState *cs, vaddr value) | ||
194 | env->gpr[HEX_REG_PC] = value; | ||
195 | } | 72 | } |
196 | 73 | ||
197 | +static vaddr hexagon_cpu_get_pc(CPUState *cs) | 74 | @@ -XXX,XX +XXX,XX @@ static bool trans_PEXTLx(DisasContext *ctx, arg_r *a, unsigned wlen) |
198 | +{ | 75 | tcg_gen_shri_i64(bx, bx, wlen); |
199 | + HexagonCPU *cpu = HEXAGON_CPU(cs); | 76 | tcg_gen_shri_i64(ax, ax, wlen); |
200 | + CPUHexagonState *env = &cpu->env; | 77 | } |
201 | + return env->gpr[HEX_REG_PC]; | 78 | - |
202 | +} | 79 | - tcg_temp_free(bx); |
203 | + | 80 | - tcg_temp_free(ax); |
204 | static void hexagon_cpu_synchronize_from_tb(CPUState *cs, | 81 | - |
205 | const TranslationBlock *tb) | 82 | return true; |
206 | { | ||
207 | @@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data) | ||
208 | cc->has_work = hexagon_cpu_has_work; | ||
209 | cc->dump_state = hexagon_dump_state; | ||
210 | cc->set_pc = hexagon_cpu_set_pc; | ||
211 | + cc->get_pc = hexagon_cpu_get_pc; | ||
212 | cc->gdb_read_register = hexagon_gdb_read_register; | ||
213 | cc->gdb_write_register = hexagon_gdb_write_register; | ||
214 | cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS + NUM_VREGS + NUM_QREGS; | ||
215 | diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c | ||
216 | index XXXXXXX..XXXXXXX 100644 | ||
217 | --- a/target/hppa/cpu.c | ||
218 | +++ b/target/hppa/cpu.c | ||
219 | @@ -XXX,XX +XXX,XX @@ static void hppa_cpu_set_pc(CPUState *cs, vaddr value) | ||
220 | cpu->env.iaoq_b = value + 4; | ||
221 | } | 83 | } |
222 | 84 | ||
223 | +static vaddr hppa_cpu_get_pc(CPUState *cs) | 85 | @@ -XXX,XX +XXX,XX @@ static bool trans_PEXTLW(DisasContext *ctx, arg_r *a) |
224 | +{ | 86 | gen_load_gpr(ax, a->rs); |
225 | + HPPACPU *cpu = HPPA_CPU(cs); | 87 | gen_load_gpr(bx, a->rt); |
226 | + | 88 | gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx); |
227 | + return cpu->env.iaoq_f; | 89 | - |
228 | +} | 90 | - tcg_temp_free(bx); |
229 | + | 91 | - tcg_temp_free(ax); |
230 | static void hppa_cpu_synchronize_from_tb(CPUState *cs, | 92 | - |
231 | const TranslationBlock *tb) | 93 | return true; |
232 | { | ||
233 | @@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data) | ||
234 | cc->has_work = hppa_cpu_has_work; | ||
235 | cc->dump_state = hppa_cpu_dump_state; | ||
236 | cc->set_pc = hppa_cpu_set_pc; | ||
237 | + cc->get_pc = hppa_cpu_get_pc; | ||
238 | cc->gdb_read_register = hppa_cpu_gdb_read_register; | ||
239 | cc->gdb_write_register = hppa_cpu_gdb_write_register; | ||
240 | #ifndef CONFIG_USER_ONLY | ||
241 | diff --git a/target/i386/cpu.c b/target/i386/cpu.c | ||
242 | index XXXXXXX..XXXXXXX 100644 | ||
243 | --- a/target/i386/cpu.c | ||
244 | +++ b/target/i386/cpu.c | ||
245 | @@ -XXX,XX +XXX,XX @@ static void x86_cpu_set_pc(CPUState *cs, vaddr value) | ||
246 | cpu->env.eip = value; | ||
247 | } | 94 | } |
248 | 95 | ||
249 | +static vaddr x86_cpu_get_pc(CPUState *cs) | 96 | @@ -XXX,XX +XXX,XX @@ static bool trans_PEXTUW(DisasContext *ctx, arg_r *a) |
250 | +{ | 97 | gen_load_gpr_hi(ax, a->rs); |
251 | + X86CPU *cpu = X86_CPU(cs); | 98 | gen_load_gpr_hi(bx, a->rt); |
252 | + | 99 | gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx); |
253 | + /* Match cpu_get_tb_cpu_state. */ | 100 | - |
254 | + return cpu->env.eip + cpu->env.segs[R_CS].base; | 101 | - tcg_temp_free(bx); |
255 | +} | 102 | - tcg_temp_free(ax); |
256 | + | 103 | - |
257 | int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) | 104 | return true; |
258 | { | ||
259 | X86CPU *cpu = X86_CPU(cs); | ||
260 | @@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) | ||
261 | cc->has_work = x86_cpu_has_work; | ||
262 | cc->dump_state = x86_cpu_dump_state; | ||
263 | cc->set_pc = x86_cpu_set_pc; | ||
264 | + cc->get_pc = x86_cpu_get_pc; | ||
265 | cc->gdb_read_register = x86_cpu_gdb_read_register; | ||
266 | cc->gdb_write_register = x86_cpu_gdb_write_register; | ||
267 | cc->get_arch_id = x86_cpu_get_arch_id; | ||
268 | diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c | ||
269 | index XXXXXXX..XXXXXXX 100644 | ||
270 | --- a/target/loongarch/cpu.c | ||
271 | +++ b/target/loongarch/cpu.c | ||
272 | @@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_set_pc(CPUState *cs, vaddr value) | ||
273 | env->pc = value; | ||
274 | } | 105 | } |
275 | 106 | ||
276 | +static vaddr loongarch_cpu_get_pc(CPUState *cs) | 107 | @@ -XXX,XX +XXX,XX @@ static bool trans_PROT3W(DisasContext *ctx, arg_r *a) |
277 | +{ | 108 | |
278 | + LoongArchCPU *cpu = LOONGARCH_CPU(cs); | 109 | tcg_gen_deposit_i64(cpu_gpr[a->rd], cpu_gpr[a->rt], ax, 0, 32); |
279 | + CPULoongArchState *env = &cpu->env; | 110 | tcg_gen_rotri_i64(cpu_gpr[a->rd], cpu_gpr[a->rd], 32); |
280 | + | 111 | - |
281 | + return env->pc; | 112 | - tcg_temp_free(ax); |
282 | +} | 113 | - |
283 | + | 114 | return true; |
284 | #ifndef CONFIG_USER_ONLY | ||
285 | #include "hw/loongarch/virt.h" | ||
286 | |||
287 | @@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_class_init(ObjectClass *c, void *data) | ||
288 | cc->has_work = loongarch_cpu_has_work; | ||
289 | cc->dump_state = loongarch_cpu_dump_state; | ||
290 | cc->set_pc = loongarch_cpu_set_pc; | ||
291 | + cc->get_pc = loongarch_cpu_get_pc; | ||
292 | #ifndef CONFIG_USER_ONLY | ||
293 | dc->vmsd = &vmstate_loongarch_cpu; | ||
294 | cc->sysemu_ops = &loongarch_sysemu_ops; | ||
295 | diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c | ||
296 | index XXXXXXX..XXXXXXX 100644 | ||
297 | --- a/target/m68k/cpu.c | ||
298 | +++ b/target/m68k/cpu.c | ||
299 | @@ -XXX,XX +XXX,XX @@ static void m68k_cpu_set_pc(CPUState *cs, vaddr value) | ||
300 | cpu->env.pc = value; | ||
301 | } | 115 | } |
302 | |||
303 | +static vaddr m68k_cpu_get_pc(CPUState *cs) | ||
304 | +{ | ||
305 | + M68kCPU *cpu = M68K_CPU(cs); | ||
306 | + | ||
307 | + return cpu->env.pc; | ||
308 | +} | ||
309 | + | ||
310 | static bool m68k_cpu_has_work(CPUState *cs) | ||
311 | { | ||
312 | return cs->interrupt_request & CPU_INTERRUPT_HARD; | ||
313 | @@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data) | ||
314 | cc->has_work = m68k_cpu_has_work; | ||
315 | cc->dump_state = m68k_cpu_dump_state; | ||
316 | cc->set_pc = m68k_cpu_set_pc; | ||
317 | + cc->get_pc = m68k_cpu_get_pc; | ||
318 | cc->gdb_read_register = m68k_cpu_gdb_read_register; | ||
319 | cc->gdb_write_register = m68k_cpu_gdb_write_register; | ||
320 | #if defined(CONFIG_SOFTMMU) | ||
321 | diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c | ||
322 | index XXXXXXX..XXXXXXX 100644 | ||
323 | --- a/target/microblaze/cpu.c | ||
324 | +++ b/target/microblaze/cpu.c | ||
325 | @@ -XXX,XX +XXX,XX @@ static void mb_cpu_set_pc(CPUState *cs, vaddr value) | ||
326 | cpu->env.iflags = 0; | ||
327 | } | ||
328 | |||
329 | +static vaddr mb_cpu_get_pc(CPUState *cs) | ||
330 | +{ | ||
331 | + MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); | ||
332 | + | ||
333 | + return cpu->env.pc; | ||
334 | +} | ||
335 | + | ||
336 | static void mb_cpu_synchronize_from_tb(CPUState *cs, | ||
337 | const TranslationBlock *tb) | ||
338 | { | ||
339 | @@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data) | ||
340 | |||
341 | cc->dump_state = mb_cpu_dump_state; | ||
342 | cc->set_pc = mb_cpu_set_pc; | ||
343 | + cc->get_pc = mb_cpu_get_pc; | ||
344 | cc->gdb_read_register = mb_cpu_gdb_read_register; | ||
345 | cc->gdb_write_register = mb_cpu_gdb_write_register; | ||
346 | |||
347 | diff --git a/target/mips/cpu.c b/target/mips/cpu.c | ||
348 | index XXXXXXX..XXXXXXX 100644 | ||
349 | --- a/target/mips/cpu.c | ||
350 | +++ b/target/mips/cpu.c | ||
351 | @@ -XXX,XX +XXX,XX @@ static void mips_cpu_set_pc(CPUState *cs, vaddr value) | ||
352 | mips_env_set_pc(&cpu->env, value); | ||
353 | } | ||
354 | |||
355 | +static vaddr mips_cpu_get_pc(CPUState *cs) | ||
356 | +{ | ||
357 | + MIPSCPU *cpu = MIPS_CPU(cs); | ||
358 | + | ||
359 | + return cpu->env.active_tc.PC; | ||
360 | +} | ||
361 | + | ||
362 | static bool mips_cpu_has_work(CPUState *cs) | ||
363 | { | ||
364 | MIPSCPU *cpu = MIPS_CPU(cs); | ||
365 | @@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data) | ||
366 | cc->has_work = mips_cpu_has_work; | ||
367 | cc->dump_state = mips_cpu_dump_state; | ||
368 | cc->set_pc = mips_cpu_set_pc; | ||
369 | + cc->get_pc = mips_cpu_get_pc; | ||
370 | cc->gdb_read_register = mips_cpu_gdb_read_register; | ||
371 | cc->gdb_write_register = mips_cpu_gdb_write_register; | ||
372 | #ifndef CONFIG_USER_ONLY | ||
373 | diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c | ||
374 | index XXXXXXX..XXXXXXX 100644 | ||
375 | --- a/target/nios2/cpu.c | ||
376 | +++ b/target/nios2/cpu.c | ||
377 | @@ -XXX,XX +XXX,XX @@ static void nios2_cpu_set_pc(CPUState *cs, vaddr value) | ||
378 | env->pc = value; | ||
379 | } | ||
380 | |||
381 | +static vaddr nios2_cpu_get_pc(CPUState *cs) | ||
382 | +{ | ||
383 | + Nios2CPU *cpu = NIOS2_CPU(cs); | ||
384 | + CPUNios2State *env = &cpu->env; | ||
385 | + | ||
386 | + return env->pc; | ||
387 | +} | ||
388 | + | ||
389 | static bool nios2_cpu_has_work(CPUState *cs) | ||
390 | { | ||
391 | return cs->interrupt_request & CPU_INTERRUPT_HARD; | ||
392 | @@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data) | ||
393 | cc->has_work = nios2_cpu_has_work; | ||
394 | cc->dump_state = nios2_cpu_dump_state; | ||
395 | cc->set_pc = nios2_cpu_set_pc; | ||
396 | + cc->get_pc = nios2_cpu_get_pc; | ||
397 | cc->disas_set_info = nios2_cpu_disas_set_info; | ||
398 | #ifndef CONFIG_USER_ONLY | ||
399 | cc->sysemu_ops = &nios2_sysemu_ops; | ||
400 | diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c | ||
401 | index XXXXXXX..XXXXXXX 100644 | ||
402 | --- a/target/openrisc/cpu.c | ||
403 | +++ b/target/openrisc/cpu.c | ||
404 | @@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_set_pc(CPUState *cs, vaddr value) | ||
405 | cpu->env.dflag = 0; | ||
406 | } | ||
407 | |||
408 | +static vaddr openrisc_cpu_get_pc(CPUState *cs) | ||
409 | +{ | ||
410 | + OpenRISCCPU *cpu = OPENRISC_CPU(cs); | ||
411 | + | ||
412 | + return cpu->env.pc; | ||
413 | +} | ||
414 | + | ||
415 | static void openrisc_cpu_synchronize_from_tb(CPUState *cs, | ||
416 | const TranslationBlock *tb) | ||
417 | { | ||
418 | @@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data) | ||
419 | cc->has_work = openrisc_cpu_has_work; | ||
420 | cc->dump_state = openrisc_cpu_dump_state; | ||
421 | cc->set_pc = openrisc_cpu_set_pc; | ||
422 | + cc->get_pc = openrisc_cpu_get_pc; | ||
423 | cc->gdb_read_register = openrisc_cpu_gdb_read_register; | ||
424 | cc->gdb_write_register = openrisc_cpu_gdb_write_register; | ||
425 | #ifndef CONFIG_USER_ONLY | ||
426 | diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c | ||
427 | index XXXXXXX..XXXXXXX 100644 | ||
428 | --- a/target/ppc/cpu_init.c | ||
429 | +++ b/target/ppc/cpu_init.c | ||
430 | @@ -XXX,XX +XXX,XX @@ static void ppc_cpu_set_pc(CPUState *cs, vaddr value) | ||
431 | cpu->env.nip = value; | ||
432 | } | ||
433 | |||
434 | +static vaddr ppc_cpu_get_pc(CPUState *cs) | ||
435 | +{ | ||
436 | + PowerPCCPU *cpu = POWERPC_CPU(cs); | ||
437 | + | ||
438 | + return cpu->env.nip; | ||
439 | +} | ||
440 | + | ||
441 | static bool ppc_cpu_has_work(CPUState *cs) | ||
442 | { | ||
443 | PowerPCCPU *cpu = POWERPC_CPU(cs); | ||
444 | @@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data) | ||
445 | cc->has_work = ppc_cpu_has_work; | ||
446 | cc->dump_state = ppc_cpu_dump_state; | ||
447 | cc->set_pc = ppc_cpu_set_pc; | ||
448 | + cc->get_pc = ppc_cpu_get_pc; | ||
449 | cc->gdb_read_register = ppc_cpu_gdb_read_register; | ||
450 | cc->gdb_write_register = ppc_cpu_gdb_write_register; | ||
451 | #ifndef CONFIG_USER_ONLY | ||
452 | diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c | ||
453 | index XXXXXXX..XXXXXXX 100644 | ||
454 | --- a/target/riscv/cpu.c | ||
455 | +++ b/target/riscv/cpu.c | ||
456 | @@ -XXX,XX +XXX,XX @@ static void riscv_cpu_set_pc(CPUState *cs, vaddr value) | ||
457 | } | ||
458 | } | ||
459 | |||
460 | +static vaddr riscv_cpu_get_pc(CPUState *cs) | ||
461 | +{ | ||
462 | + RISCVCPU *cpu = RISCV_CPU(cs); | ||
463 | + CPURISCVState *env = &cpu->env; | ||
464 | + | ||
465 | + /* Match cpu_get_tb_cpu_state. */ | ||
466 | + if (env->xl == MXL_RV32) { | ||
467 | + return env->pc & UINT32_MAX; | ||
468 | + } | ||
469 | + return env->pc; | ||
470 | +} | ||
471 | + | ||
472 | static void riscv_cpu_synchronize_from_tb(CPUState *cs, | ||
473 | const TranslationBlock *tb) | ||
474 | { | ||
475 | @@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data) | ||
476 | cc->has_work = riscv_cpu_has_work; | ||
477 | cc->dump_state = riscv_cpu_dump_state; | ||
478 | cc->set_pc = riscv_cpu_set_pc; | ||
479 | + cc->get_pc = riscv_cpu_get_pc; | ||
480 | cc->gdb_read_register = riscv_cpu_gdb_read_register; | ||
481 | cc->gdb_write_register = riscv_cpu_gdb_write_register; | ||
482 | cc->gdb_num_core_regs = 33; | ||
483 | diff --git a/target/rx/cpu.c b/target/rx/cpu.c | ||
484 | index XXXXXXX..XXXXXXX 100644 | ||
485 | --- a/target/rx/cpu.c | ||
486 | +++ b/target/rx/cpu.c | ||
487 | @@ -XXX,XX +XXX,XX @@ static void rx_cpu_set_pc(CPUState *cs, vaddr value) | ||
488 | cpu->env.pc = value; | ||
489 | } | ||
490 | |||
491 | +static vaddr rx_cpu_get_pc(CPUState *cs) | ||
492 | +{ | ||
493 | + RXCPU *cpu = RX_CPU(cs); | ||
494 | + | ||
495 | + return cpu->env.pc; | ||
496 | +} | ||
497 | + | ||
498 | static void rx_cpu_synchronize_from_tb(CPUState *cs, | ||
499 | const TranslationBlock *tb) | ||
500 | { | ||
501 | @@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data) | ||
502 | cc->has_work = rx_cpu_has_work; | ||
503 | cc->dump_state = rx_cpu_dump_state; | ||
504 | cc->set_pc = rx_cpu_set_pc; | ||
505 | + cc->get_pc = rx_cpu_get_pc; | ||
506 | |||
507 | #ifndef CONFIG_USER_ONLY | ||
508 | cc->sysemu_ops = &rx_sysemu_ops; | ||
509 | diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c | ||
510 | index XXXXXXX..XXXXXXX 100644 | ||
511 | --- a/target/s390x/cpu.c | ||
512 | +++ b/target/s390x/cpu.c | ||
513 | @@ -XXX,XX +XXX,XX @@ static void s390_cpu_set_pc(CPUState *cs, vaddr value) | ||
514 | cpu->env.psw.addr = value; | ||
515 | } | ||
516 | |||
517 | +static vaddr s390_cpu_get_pc(CPUState *cs) | ||
518 | +{ | ||
519 | + S390CPU *cpu = S390_CPU(cs); | ||
520 | + | ||
521 | + return cpu->env.psw.addr; | ||
522 | +} | ||
523 | + | ||
524 | static bool s390_cpu_has_work(CPUState *cs) | ||
525 | { | ||
526 | S390CPU *cpu = S390_CPU(cs); | ||
527 | @@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data) | ||
528 | cc->has_work = s390_cpu_has_work; | ||
529 | cc->dump_state = s390_cpu_dump_state; | ||
530 | cc->set_pc = s390_cpu_set_pc; | ||
531 | + cc->get_pc = s390_cpu_get_pc; | ||
532 | cc->gdb_read_register = s390_cpu_gdb_read_register; | ||
533 | cc->gdb_write_register = s390_cpu_gdb_write_register; | ||
534 | #ifndef CONFIG_USER_ONLY | ||
535 | diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c | ||
536 | index XXXXXXX..XXXXXXX 100644 | ||
537 | --- a/target/sh4/cpu.c | ||
538 | +++ b/target/sh4/cpu.c | ||
539 | @@ -XXX,XX +XXX,XX @@ static void superh_cpu_set_pc(CPUState *cs, vaddr value) | ||
540 | cpu->env.pc = value; | ||
541 | } | ||
542 | |||
543 | +static vaddr superh_cpu_get_pc(CPUState *cs) | ||
544 | +{ | ||
545 | + SuperHCPU *cpu = SUPERH_CPU(cs); | ||
546 | + | ||
547 | + return cpu->env.pc; | ||
548 | +} | ||
549 | + | ||
550 | static void superh_cpu_synchronize_from_tb(CPUState *cs, | ||
551 | const TranslationBlock *tb) | ||
552 | { | ||
553 | @@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data) | ||
554 | cc->has_work = superh_cpu_has_work; | ||
555 | cc->dump_state = superh_cpu_dump_state; | ||
556 | cc->set_pc = superh_cpu_set_pc; | ||
557 | + cc->get_pc = superh_cpu_get_pc; | ||
558 | cc->gdb_read_register = superh_cpu_gdb_read_register; | ||
559 | cc->gdb_write_register = superh_cpu_gdb_write_register; | ||
560 | #ifndef CONFIG_USER_ONLY | ||
561 | diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c | ||
562 | index XXXXXXX..XXXXXXX 100644 | ||
563 | --- a/target/sparc/cpu.c | ||
564 | +++ b/target/sparc/cpu.c | ||
565 | @@ -XXX,XX +XXX,XX @@ static void sparc_cpu_set_pc(CPUState *cs, vaddr value) | ||
566 | cpu->env.npc = value + 4; | ||
567 | } | ||
568 | |||
569 | +static vaddr sparc_cpu_get_pc(CPUState *cs) | ||
570 | +{ | ||
571 | + SPARCCPU *cpu = SPARC_CPU(cs); | ||
572 | + | ||
573 | + return cpu->env.pc; | ||
574 | +} | ||
575 | + | ||
576 | static void sparc_cpu_synchronize_from_tb(CPUState *cs, | ||
577 | const TranslationBlock *tb) | ||
578 | { | ||
579 | @@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data) | ||
580 | cc->memory_rw_debug = sparc_cpu_memory_rw_debug; | ||
581 | #endif | ||
582 | cc->set_pc = sparc_cpu_set_pc; | ||
583 | + cc->get_pc = sparc_cpu_get_pc; | ||
584 | cc->gdb_read_register = sparc_cpu_gdb_read_register; | ||
585 | cc->gdb_write_register = sparc_cpu_gdb_write_register; | ||
586 | #ifndef CONFIG_USER_ONLY | ||
587 | diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c | ||
588 | index XXXXXXX..XXXXXXX 100644 | ||
589 | --- a/target/tricore/cpu.c | ||
590 | +++ b/target/tricore/cpu.c | ||
591 | @@ -XXX,XX +XXX,XX @@ static void tricore_cpu_set_pc(CPUState *cs, vaddr value) | ||
592 | env->PC = value & ~(target_ulong)1; | ||
593 | } | ||
594 | |||
595 | +static vaddr tricore_cpu_get_pc(CPUState *cs) | ||
596 | +{ | ||
597 | + TriCoreCPU *cpu = TRICORE_CPU(cs); | ||
598 | + CPUTriCoreState *env = &cpu->env; | ||
599 | + | ||
600 | + return env->PC; | ||
601 | +} | ||
602 | + | ||
603 | static void tricore_cpu_synchronize_from_tb(CPUState *cs, | ||
604 | const TranslationBlock *tb) | ||
605 | { | ||
606 | @@ -XXX,XX +XXX,XX @@ static void tricore_cpu_class_init(ObjectClass *c, void *data) | ||
607 | |||
608 | cc->dump_state = tricore_cpu_dump_state; | ||
609 | cc->set_pc = tricore_cpu_set_pc; | ||
610 | + cc->get_pc = tricore_cpu_get_pc; | ||
611 | cc->sysemu_ops = &tricore_sysemu_ops; | ||
612 | cc->tcg_ops = &tricore_tcg_ops; | ||
613 | } | ||
614 | diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c | ||
615 | index XXXXXXX..XXXXXXX 100644 | ||
616 | --- a/target/xtensa/cpu.c | ||
617 | +++ b/target/xtensa/cpu.c | ||
618 | @@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_set_pc(CPUState *cs, vaddr value) | ||
619 | cpu->env.pc = value; | ||
620 | } | ||
621 | |||
622 | +static vaddr xtensa_cpu_get_pc(CPUState *cs) | ||
623 | +{ | ||
624 | + XtensaCPU *cpu = XTENSA_CPU(cs); | ||
625 | + | ||
626 | + return cpu->env.pc; | ||
627 | +} | ||
628 | + | ||
629 | static bool xtensa_cpu_has_work(CPUState *cs) | ||
630 | { | ||
631 | #ifndef CONFIG_USER_ONLY | ||
632 | @@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data) | ||
633 | cc->has_work = xtensa_cpu_has_work; | ||
634 | cc->dump_state = xtensa_cpu_dump_state; | ||
635 | cc->set_pc = xtensa_cpu_set_pc; | ||
636 | + cc->get_pc = xtensa_cpu_get_pc; | ||
637 | cc->gdb_read_register = xtensa_cpu_gdb_read_register; | ||
638 | cc->gdb_write_register = xtensa_cpu_gdb_write_register; | ||
639 | cc->gdb_stop_before_watchpoint = true; | ||
640 | -- | 116 | -- |
641 | 2.34.1 | 117 | 2.34.1 |
642 | 118 | ||
New patch | |||
---|---|---|---|
1 | Translators are no longer required to free tcg temporaries. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/mips/tcg/vr54xx_translate.c | 4 ---- | ||
7 | 1 file changed, 4 deletions(-) | ||
8 | |||
9 | diff --git a/target/mips/tcg/vr54xx_translate.c b/target/mips/tcg/vr54xx_translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/mips/tcg/vr54xx_translate.c | ||
12 | +++ b/target/mips/tcg/vr54xx_translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static bool trans_mult_acc(DisasContext *ctx, arg_r *a, | ||
14 | gen_helper_mult_acc(t0, cpu_env, t0, t1); | ||
15 | |||
16 | gen_store_gpr(t0, a->rd); | ||
17 | - | ||
18 | - tcg_temp_free(t0); | ||
19 | - tcg_temp_free(t1); | ||
20 | - | ||
21 | return true; | ||
22 | } | ||
23 | |||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | Translators are no longer required to free tcg temporaries. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
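The same rule applies to every temporary flavour (tcg_temp_new_i32(), tcg_temp_new_i64(), tcg_temp_new_ptr()), which is why this patch can also drop the _i32/_i64/_ptr frees inside helpers and inside the FOP_CONDS-style macros. A small sketch with an invented helper name (gen_demo_fpr_widen is not in the tree):

    /* Load a single-precision FPR value, widen it, store it back as 64-bit. */
    static void gen_demo_fpr_widen(DisasContext *ctx, int reg)
    {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        gen_load_fpr32(ctx, fp0, reg);
        tcg_gen_extu_i32_i64(t64, fp0);
        gen_store_fpr64(ctx, t64, reg);

        /*
         * Previously required here:
         *     tcg_temp_free_i32(fp0);
         *     tcg_temp_free_i64(t64);
         * Both temporaries are now reclaimed when the TB is finalized.
         */
    }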
6 | target/mips/tcg/translate.c | 537 +----------------------------------- | ||
7 | 1 file changed, 14 insertions(+), 523 deletions(-) | ||
8 | |||
9 | diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/mips/tcg/translate.c | ||
12 | +++ b/target/mips/tcg/translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static inline void gen_load_srsgpr(int from, int to) | ||
14 | tcg_gen_add_ptr(addr, cpu_env, addr); | ||
15 | |||
16 | tcg_gen_ld_tl(t0, addr, sizeof(target_ulong) * from); | ||
17 | - tcg_temp_free_ptr(addr); | ||
18 | - tcg_temp_free_i32(t2); | ||
19 | } | ||
20 | gen_store_gpr(t0, to); | ||
21 | - tcg_temp_free(t0); | ||
22 | } | ||
23 | |||
24 | static inline void gen_store_srsgpr(int from, int to) | ||
25 | @@ -XXX,XX +XXX,XX @@ static inline void gen_store_srsgpr(int from, int to) | ||
26 | tcg_gen_add_ptr(addr, cpu_env, addr); | ||
27 | |||
28 | tcg_gen_st_tl(t0, addr, sizeof(target_ulong) * to); | ||
29 | - tcg_temp_free_ptr(addr); | ||
30 | - tcg_temp_free_i32(t2); | ||
31 | - tcg_temp_free(t0); | ||
32 | } | ||
33 | } | ||
34 | |||
35 | @@ -XXX,XX +XXX,XX @@ void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg) | ||
36 | t64 = tcg_temp_new_i64(); | ||
37 | tcg_gen_extu_i32_i64(t64, t); | ||
38 | tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 0, 32); | ||
39 | - tcg_temp_free_i64(t64); | ||
40 | } | ||
41 | |||
42 | static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) | ||
43 | @@ -XXX,XX +XXX,XX @@ static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) | ||
44 | TCGv_i64 t64 = tcg_temp_new_i64(); | ||
45 | tcg_gen_extu_i32_i64(t64, t); | ||
46 | tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32); | ||
47 | - tcg_temp_free_i64(t64); | ||
48 | } else { | ||
49 | gen_store_fpr32(ctx, t, reg | 1); | ||
50 | } | ||
51 | @@ -XXX,XX +XXX,XX @@ void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) | ||
52 | t0 = tcg_temp_new_i64(); | ||
53 | tcg_gen_shri_i64(t0, t, 32); | ||
54 | tcg_gen_deposit_i64(fpu_f64[reg | 1], fpu_f64[reg | 1], t0, 0, 32); | ||
55 | - tcg_temp_free_i64(t0); | ||
56 | } | ||
57 | } | ||
58 | |||
59 | @@ -XXX,XX +XXX,XX @@ static inline void gen_cmp ## type ## _ ## fmt(DisasContext *ctx, int n, \ | ||
60 | default: \ | ||
61 | abort(); \ | ||
62 | } \ | ||
63 | - tcg_temp_free_i##bits(fp0); \ | ||
64 | - tcg_temp_free_i##bits(fp1); \ | ||
65 | } | ||
66 | |||
67 | FOP_CONDS(, 0, d, FMT_D, 64) | ||
68 | @@ -XXX,XX +XXX,XX @@ static inline void gen_r6_cmp_ ## fmt(DisasContext *ctx, int n, \ | ||
69 | abort(); \ | ||
70 | } \ | ||
71 | STORE; \ | ||
72 | - tcg_temp_free_i ## bits(fp0); \ | ||
73 | - tcg_temp_free_i ## bits(fp1); \ | ||
74 | } | ||
75 | |||
76 | FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd)) | ||
77 | @@ -XXX,XX +XXX,XX @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ | ||
78 | tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \ | ||
79 | tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \ | ||
80 | tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \ | ||
81 | - tcg_temp_free(t0); \ | ||
82 | } | ||
83 | #else | ||
84 | #define OP_LD_ATOMIC(insn, fname) \ | ||
85 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
86 | tcg_gen_shl_tl(t2, t2, t1); | ||
87 | gen_load_gpr(t1, rt); | ||
88 | tcg_gen_andc_tl(t1, t1, t2); | ||
89 | - tcg_temp_free(t2); | ||
90 | tcg_gen_or_tl(t0, t0, t1); | ||
91 | - tcg_temp_free(t1); | ||
92 | gen_store_gpr(t0, rt); | ||
93 | break; | ||
94 | case OPC_LDR: | ||
95 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
96 | tcg_gen_shl_tl(t2, t2, t1); | ||
97 | gen_load_gpr(t1, rt); | ||
98 | tcg_gen_and_tl(t1, t1, t2); | ||
99 | - tcg_temp_free(t2); | ||
100 | tcg_gen_or_tl(t0, t0, t1); | ||
101 | - tcg_temp_free(t1); | ||
102 | gen_store_gpr(t0, rt); | ||
103 | break; | ||
104 | case OPC_LDPC: | ||
105 | t1 = tcg_const_tl(pc_relative_pc(ctx)); | ||
106 | gen_op_addr_add(ctx, t0, t0, t1); | ||
107 | - tcg_temp_free(t1); | ||
108 | tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); | ||
109 | gen_store_gpr(t0, rt); | ||
110 | break; | ||
111 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
112 | case OPC_LWPC: | ||
113 | t1 = tcg_const_tl(pc_relative_pc(ctx)); | ||
114 | gen_op_addr_add(ctx, t0, t0, t1); | ||
115 | - tcg_temp_free(t1); | ||
116 | tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL); | ||
117 | gen_store_gpr(t0, rt); | ||
118 | break; | ||
119 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
120 | tcg_gen_shl_tl(t2, t2, t1); | ||
121 | gen_load_gpr(t1, rt); | ||
122 | tcg_gen_andc_tl(t1, t1, t2); | ||
123 | - tcg_temp_free(t2); | ||
124 | tcg_gen_or_tl(t0, t0, t1); | ||
125 | - tcg_temp_free(t1); | ||
126 | tcg_gen_ext32s_tl(t0, t0); | ||
127 | gen_store_gpr(t0, rt); | ||
128 | break; | ||
129 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
130 | tcg_gen_shl_tl(t2, t2, t1); | ||
131 | gen_load_gpr(t1, rt); | ||
132 | tcg_gen_and_tl(t1, t1, t2); | ||
133 | - tcg_temp_free(t2); | ||
134 | tcg_gen_or_tl(t0, t0, t1); | ||
135 | - tcg_temp_free(t1); | ||
136 | tcg_gen_ext32s_tl(t0, t0); | ||
137 | gen_store_gpr(t0, rt); | ||
138 | break; | ||
139 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
140 | gen_store_gpr(t0, rt); | ||
141 | break; | ||
142 | } | ||
143 | - tcg_temp_free(t0); | ||
144 | } | ||
145 | |||
146 | /* Store */ | ||
147 | @@ -XXX,XX +XXX,XX @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, | ||
148 | gen_helper_0e2i(swr, t1, t0, mem_idx); | ||
149 | break; | ||
150 | } | ||
151 | - tcg_temp_free(t0); | ||
152 | - tcg_temp_free(t1); | ||
153 | } | ||
154 | |||
155 | |||
156 | @@ -XXX,XX +XXX,XX @@ static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, | ||
157 | /* compare the address against that of the preceding LL */ | ||
158 | gen_base_offset_addr(ctx, addr, base, offset); | ||
159 | tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1); | ||
160 | - tcg_temp_free(addr); | ||
161 | tcg_gen_movi_tl(t0, 0); | ||
162 | gen_store_gpr(t0, rt); | ||
163 | tcg_gen_br(done); | ||
164 | @@ -XXX,XX +XXX,XX @@ static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, | ||
165 | eva ? MIPS_HFLAG_UM : ctx->mem_idx, tcg_mo); | ||
166 | tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_llval); | ||
167 | gen_store_gpr(t0, rt); | ||
168 | - tcg_temp_free(val); | ||
169 | |||
170 | gen_set_label(done); | ||
171 | - tcg_temp_free(t0); | ||
172 | } | ||
173 | |||
174 | /* Load and store */ | ||
175 | @@ -XXX,XX +XXX,XX @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, | ||
176 | tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL | | ||
177 | ctx->default_tcg_memop_mask); | ||
178 | gen_store_fpr32(ctx, fp0, ft); | ||
179 | - tcg_temp_free_i32(fp0); | ||
180 | } | ||
181 | break; | ||
182 | case OPC_SWC1: | ||
183 | @@ -XXX,XX +XXX,XX @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, | ||
184 | gen_load_fpr32(ctx, fp0, ft); | ||
185 | tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL | | ||
186 | ctx->default_tcg_memop_mask); | ||
187 | - tcg_temp_free_i32(fp0); | ||
188 | } | ||
189 | break; | ||
190 | case OPC_LDC1: | ||
191 | @@ -XXX,XX +XXX,XX @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, | ||
192 | tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ | | ||
193 | ctx->default_tcg_memop_mask); | ||
194 | gen_store_fpr64(ctx, fp0, ft); | ||
195 | - tcg_temp_free_i64(fp0); | ||
196 | } | ||
197 | break; | ||
198 | case OPC_SDC1: | ||
199 | @@ -XXX,XX +XXX,XX @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, | ||
200 | gen_load_fpr64(ctx, fp0, ft); | ||
201 | tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ | | ||
202 | ctx->default_tcg_memop_mask); | ||
203 | - tcg_temp_free_i64(fp0); | ||
204 | } | ||
205 | break; | ||
206 | default: | ||
207 | @@ -XXX,XX +XXX,XX @@ static void gen_cop1_ldst(DisasContext *ctx, uint32_t op, int rt, | ||
208 | } else { | ||
209 | generate_exception_err(ctx, EXCP_CpU, 1); | ||
210 | } | ||
211 | - tcg_temp_free(t0); | ||
212 | } | ||
213 | |||
214 | /* Arithmetic with immediate operand */ | ||
215 | @@ -XXX,XX +XXX,XX @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc, | ||
216 | tcg_gen_xori_tl(t1, t1, ~uimm); | ||
217 | tcg_gen_xori_tl(t2, t0, uimm); | ||
218 | tcg_gen_and_tl(t1, t1, t2); | ||
219 | - tcg_temp_free(t2); | ||
220 | tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); | ||
221 | - tcg_temp_free(t1); | ||
222 | /* operands of same sign, result different sign */ | ||
223 | generate_exception(ctx, EXCP_OVERFLOW); | ||
224 | gen_set_label(l1); | ||
225 | tcg_gen_ext32s_tl(t0, t0); | ||
226 | gen_store_gpr(t0, rt); | ||
227 | - tcg_temp_free(t0); | ||
228 | } | ||
229 | break; | ||
230 | case OPC_ADDIU: | ||
231 | @@ -XXX,XX +XXX,XX @@ static void gen_arith_imm(DisasContext *ctx, uint32_t opc, | ||
232 | tcg_gen_xori_tl(t1, t1, ~uimm); | ||
233 | tcg_gen_xori_tl(t2, t0, uimm); | ||
234 | tcg_gen_and_tl(t1, t1, t2); | ||
235 | - tcg_temp_free(t2); | ||
236 | tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); | ||
237 | - tcg_temp_free(t1); | ||
238 | /* operands of same sign, result different sign */ | ||
239 | generate_exception(ctx, EXCP_OVERFLOW); | ||
240 | gen_set_label(l1); | ||
241 | gen_store_gpr(t0, rt); | ||
242 | - tcg_temp_free(t0); | ||
243 | } | ||
244 | break; | ||
245 | case OPC_DADDIU: | ||
246 | @@ -XXX,XX +XXX,XX @@ static void gen_slt_imm(DisasContext *ctx, uint32_t opc, | ||
247 | tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, uimm); | ||
248 | break; | ||
249 | } | ||
250 | - tcg_temp_free(t0); | ||
251 | } | ||
252 | |||
253 | /* Shifts with immediate operand */ | ||
254 | @@ -XXX,XX +XXX,XX @@ static void gen_shift_imm(DisasContext *ctx, uint32_t opc, | ||
255 | tcg_gen_trunc_tl_i32(t1, t0); | ||
256 | tcg_gen_rotri_i32(t1, t1, uimm); | ||
257 | tcg_gen_ext_i32_tl(cpu_gpr[rt], t1); | ||
258 | - tcg_temp_free_i32(t1); | ||
259 | } else { | ||
260 | tcg_gen_ext32s_tl(cpu_gpr[rt], t0); | ||
261 | } | ||
262 | @@ -XXX,XX +XXX,XX @@ static void gen_shift_imm(DisasContext *ctx, uint32_t opc, | ||
263 | break; | ||
264 | #endif | ||
265 | } | ||
266 | - tcg_temp_free(t0); | ||
267 | } | ||
268 | |||
269 | /* Arithmetic */ | ||
270 | @@ -XXX,XX +XXX,XX @@ static void gen_arith(DisasContext *ctx, uint32_t opc, | ||
271 | tcg_gen_xor_tl(t1, t1, t2); | ||
272 | tcg_gen_xor_tl(t2, t0, t2); | ||
273 | tcg_gen_andc_tl(t1, t2, t1); | ||
274 | - tcg_temp_free(t2); | ||
275 | tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); | ||
276 | - tcg_temp_free(t1); | ||
277 | /* operands of same sign, result different sign */ | ||
278 | generate_exception(ctx, EXCP_OVERFLOW); | ||
279 | gen_set_label(l1); | ||
280 | gen_store_gpr(t0, rd); | ||
281 | - tcg_temp_free(t0); | ||
282 | } | ||
283 | break; | ||
284 | case OPC_ADDU: | ||
285 | @@ -XXX,XX +XXX,XX @@ static void gen_arith(DisasContext *ctx, uint32_t opc, | ||
286 | tcg_gen_xor_tl(t2, t1, t2); | ||
287 | tcg_gen_xor_tl(t1, t0, t1); | ||
288 | tcg_gen_and_tl(t1, t1, t2); | ||
289 | - tcg_temp_free(t2); | ||
290 | tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); | ||
291 | - tcg_temp_free(t1); | ||
292 | /* | ||
293 | * operands of different sign, first operand and the result | ||
294 | * of different sign | ||
295 | @@ -XXX,XX +XXX,XX @@ static void gen_arith(DisasContext *ctx, uint32_t opc, | ||
296 | generate_exception(ctx, EXCP_OVERFLOW); | ||
297 | gen_set_label(l1); | ||
298 | gen_store_gpr(t0, rd); | ||
299 | - tcg_temp_free(t0); | ||
300 | } | ||
301 | break; | ||
302 | case OPC_SUBU: | ||
303 | @@ -XXX,XX +XXX,XX @@ static void gen_arith(DisasContext *ctx, uint32_t opc, | ||
304 | tcg_gen_xor_tl(t1, t1, t2); | ||
305 | tcg_gen_xor_tl(t2, t0, t2); | ||
306 | tcg_gen_andc_tl(t1, t2, t1); | ||
307 | - tcg_temp_free(t2); | ||
308 | tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); | ||
309 | - tcg_temp_free(t1); | ||
310 | /* operands of same sign, result different sign */ | ||
311 | generate_exception(ctx, EXCP_OVERFLOW); | ||
312 | gen_set_label(l1); | ||
313 | gen_store_gpr(t0, rd); | ||
314 | - tcg_temp_free(t0); | ||
315 | } | ||
316 | break; | ||
317 | case OPC_DADDU: | ||
318 | @@ -XXX,XX +XXX,XX @@ static void gen_arith(DisasContext *ctx, uint32_t opc, | ||
319 | tcg_gen_xor_tl(t2, t1, t2); | ||
320 | tcg_gen_xor_tl(t1, t0, t1); | ||
321 | tcg_gen_and_tl(t1, t1, t2); | ||
322 | - tcg_temp_free(t2); | ||
323 | tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); | ||
324 | - tcg_temp_free(t1); | ||
325 | /* | ||
326 | * Operands of different sign, first operand and result different | ||
327 | * sign. | ||
328 | @@ -XXX,XX +XXX,XX @@ static void gen_arith(DisasContext *ctx, uint32_t opc, | ||
329 | generate_exception(ctx, EXCP_OVERFLOW); | ||
330 | gen_set_label(l1); | ||
331 | gen_store_gpr(t0, rd); | ||
332 | - tcg_temp_free(t0); | ||
333 | } | ||
334 | break; | ||
335 | case OPC_DSUBU: | ||
336 | @@ -XXX,XX +XXX,XX @@ static void gen_cond_move(DisasContext *ctx, uint32_t opc, | ||
337 | tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], t0, t1, t2, t1); | ||
338 | break; | ||
339 | } | ||
340 | - tcg_temp_free(t2); | ||
341 | - tcg_temp_free(t1); | ||
342 | - tcg_temp_free(t0); | ||
343 | } | ||
344 | |||
345 | /* Logic */ | ||
346 | @@ -XXX,XX +XXX,XX @@ static void gen_slt(DisasContext *ctx, uint32_t opc, | ||
347 | tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], t0, t1); | ||
348 | break; | ||
349 | } | ||
350 | - tcg_temp_free(t0); | ||
351 | - tcg_temp_free(t1); | ||
352 | } | ||
353 | |||
354 | /* Shifts */ | ||
355 | @@ -XXX,XX +XXX,XX @@ static void gen_shift(DisasContext *ctx, uint32_t opc, | ||
356 | tcg_gen_andi_i32(t2, t2, 0x1f); | ||
357 | tcg_gen_rotr_i32(t2, t3, t2); | ||
358 | tcg_gen_ext_i32_tl(cpu_gpr[rd], t2); | ||
359 | - tcg_temp_free_i32(t2); | ||
360 | - tcg_temp_free_i32(t3); | ||
361 | } | ||
362 | break; | ||
363 | #if defined(TARGET_MIPS64) | ||
364 | @@ -XXX,XX +XXX,XX @@ static void gen_shift(DisasContext *ctx, uint32_t opc, | ||
365 | break; | ||
366 | #endif | ||
367 | } | ||
368 | - tcg_temp_free(t0); | ||
369 | - tcg_temp_free(t1); | ||
370 | } | ||
371 | |||
372 | /* Arithmetic on HI/LO registers */ | ||
373 | @@ -XXX,XX +XXX,XX @@ static inline void gen_r6_ld(target_long addr, int reg, int memidx, | ||
374 | TCGv t0 = tcg_const_tl(addr); | ||
375 | tcg_gen_qemu_ld_tl(t0, t0, memidx, memop); | ||
376 | gen_store_gpr(t0, reg); | ||
377 | - tcg_temp_free(t0); | ||
378 | } | ||
379 | |||
380 | static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc, | ||
381 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
382 | tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); | ||
383 | tcg_gen_div_tl(cpu_gpr[rd], t0, t1); | ||
384 | tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); | ||
385 | - tcg_temp_free(t3); | ||
386 | - tcg_temp_free(t2); | ||
387 | } | ||
388 | break; | ||
389 | case R6_OPC_MOD: | ||
390 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
391 | tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); | ||
392 | tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); | ||
393 | tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); | ||
394 | - tcg_temp_free(t3); | ||
395 | - tcg_temp_free(t2); | ||
396 | } | ||
397 | break; | ||
398 | case R6_OPC_DIVU: | ||
399 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
400 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
401 | tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); | ||
402 | tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); | ||
403 | - tcg_temp_free(t3); | ||
404 | - tcg_temp_free(t2); | ||
405 | } | ||
406 | break; | ||
407 | case R6_OPC_MODU: | ||
408 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
409 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
410 | tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); | ||
411 | tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); | ||
412 | - tcg_temp_free(t3); | ||
413 | - tcg_temp_free(t2); | ||
414 | } | ||
415 | break; | ||
416 | case R6_OPC_MUL: | ||
417 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
418 | tcg_gen_trunc_tl_i32(t3, t1); | ||
419 | tcg_gen_mul_i32(t2, t2, t3); | ||
420 | tcg_gen_ext_i32_tl(cpu_gpr[rd], t2); | ||
421 | - tcg_temp_free_i32(t2); | ||
422 | - tcg_temp_free_i32(t3); | ||
423 | } | ||
424 | break; | ||
425 | case R6_OPC_MUH: | ||
426 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
427 | tcg_gen_trunc_tl_i32(t3, t1); | ||
428 | tcg_gen_muls2_i32(t2, t3, t2, t3); | ||
429 | tcg_gen_ext_i32_tl(cpu_gpr[rd], t3); | ||
430 | - tcg_temp_free_i32(t2); | ||
431 | - tcg_temp_free_i32(t3); | ||
432 | } | ||
433 | break; | ||
434 | case R6_OPC_MULU: | ||
435 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
436 | tcg_gen_trunc_tl_i32(t3, t1); | ||
437 | tcg_gen_mul_i32(t2, t2, t3); | ||
438 | tcg_gen_ext_i32_tl(cpu_gpr[rd], t2); | ||
439 | - tcg_temp_free_i32(t2); | ||
440 | - tcg_temp_free_i32(t3); | ||
441 | } | ||
442 | break; | ||
443 | case R6_OPC_MUHU: | ||
444 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
445 | tcg_gen_trunc_tl_i32(t3, t1); | ||
446 | tcg_gen_mulu2_i32(t2, t3, t2, t3); | ||
447 | tcg_gen_ext_i32_tl(cpu_gpr[rd], t3); | ||
448 | - tcg_temp_free_i32(t2); | ||
449 | - tcg_temp_free_i32(t3); | ||
450 | } | ||
451 | break; | ||
452 | #if defined(TARGET_MIPS64) | ||
453 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
454 | tcg_gen_movi_tl(t3, 0); | ||
455 | tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); | ||
456 | tcg_gen_div_tl(cpu_gpr[rd], t0, t1); | ||
457 | - tcg_temp_free(t3); | ||
458 | - tcg_temp_free(t2); | ||
459 | } | ||
460 | break; | ||
461 | case R6_OPC_DMOD: | ||
462 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
463 | tcg_gen_movi_tl(t3, 0); | ||
464 | tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); | ||
465 | tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); | ||
466 | - tcg_temp_free(t3); | ||
467 | - tcg_temp_free(t2); | ||
468 | } | ||
469 | break; | ||
470 | case R6_OPC_DDIVU: | ||
471 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
472 | TCGv t3 = tcg_const_tl(1); | ||
473 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
474 | tcg_gen_divu_i64(cpu_gpr[rd], t0, t1); | ||
475 | - tcg_temp_free(t3); | ||
476 | - tcg_temp_free(t2); | ||
477 | } | ||
478 | break; | ||
479 | case R6_OPC_DMODU: | ||
480 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
481 | TCGv t3 = tcg_const_tl(1); | ||
482 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
483 | tcg_gen_remu_i64(cpu_gpr[rd], t0, t1); | ||
484 | - tcg_temp_free(t3); | ||
485 | - tcg_temp_free(t2); | ||
486 | } | ||
487 | break; | ||
488 | case R6_OPC_DMUL: | ||
489 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
490 | { | ||
491 | TCGv t2 = tcg_temp_new(); | ||
492 | tcg_gen_muls2_i64(t2, cpu_gpr[rd], t0, t1); | ||
493 | - tcg_temp_free(t2); | ||
494 | } | ||
495 | break; | ||
496 | case R6_OPC_DMULU: | ||
497 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
498 | { | ||
499 | TCGv t2 = tcg_temp_new(); | ||
500 | tcg_gen_mulu2_i64(t2, cpu_gpr[rd], t0, t1); | ||
501 | - tcg_temp_free(t2); | ||
502 | } | ||
503 | break; | ||
504 | #endif | ||
505 | default: | ||
506 | MIPS_INVAL("r6 mul/div"); | ||
507 | gen_reserved_instruction(ctx); | ||
508 | - goto out; | ||
509 | + break; | ||
510 | } | ||
511 | - out: | ||
512 | - tcg_temp_free(t0); | ||
513 | - tcg_temp_free(t1); | ||
514 | } | ||
515 | |||
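The hunks above all share one shape: now that TCG releases translator temporaries automatically when the translation block ends, the explicit tcg_temp_free() calls go away, and error paths that only existed to reach a shared cleanup label can break (or return) directly. A minimal before/after sketch of that pattern, using a made-up gen_example() helper and OPC_EXAMPLE opcode rather than anything in this file:

    /* Before: temporaries freed by hand; the invalid-opcode path jumps to
     * a shared "out:" cleanup label. */
    static void gen_example_before(DisasContext *ctx, uint32_t opc,
                                   int rd, int rs, int rt)
    {
        TCGv t0 = tcg_temp_new();
        TCGv t1 = tcg_temp_new();

        gen_load_gpr(t0, rs);
        gen_load_gpr(t1, rt);
        switch (opc) {
        case OPC_EXAMPLE:                     /* hypothetical opcode */
            tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
            break;
        default:
            gen_reserved_instruction(ctx);
            goto out;
        }
     out:
        tcg_temp_free(t0);
        tcg_temp_free(t1);
    }

    /* After: nothing left to clean up, so the label disappears and the
     * default case simply breaks. */
    static void gen_example_after(DisasContext *ctx, uint32_t opc,
                                  int rd, int rs, int rt)
    {
        TCGv t0 = tcg_temp_new();
        TCGv t1 = tcg_temp_new();

        gen_load_gpr(t0, rs);
        gen_load_gpr(t1, rt);
        switch (opc) {
        case OPC_EXAMPLE:                     /* hypothetical opcode */
            tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
            break;
        default:
            gen_reserved_instruction(ctx);
            break;
        }
    }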
516 | #if defined(TARGET_MIPS64) | ||
517 | @@ -XXX,XX +XXX,XX @@ static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt) | ||
518 | tcg_gen_rem_tl(cpu_HI[1], t0, t1); | ||
519 | tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]); | ||
520 | tcg_gen_ext32s_tl(cpu_HI[1], cpu_HI[1]); | ||
521 | - tcg_temp_free(t3); | ||
522 | - tcg_temp_free(t2); | ||
523 | } | ||
524 | break; | ||
525 | case MMI_OPC_DIVU1: | ||
526 | @@ -XXX,XX +XXX,XX @@ static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt) | ||
527 | tcg_gen_remu_tl(cpu_HI[1], t0, t1); | ||
528 | tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]); | ||
529 | tcg_gen_ext32s_tl(cpu_HI[1], cpu_HI[1]); | ||
530 | - tcg_temp_free(t3); | ||
531 | - tcg_temp_free(t2); | ||
532 | } | ||
533 | break; | ||
534 | default: | ||
535 | MIPS_INVAL("div1 TX79"); | ||
536 | gen_reserved_instruction(ctx); | ||
537 | - goto out; | ||
538 | + break; | ||
539 | } | ||
540 | - out: | ||
541 | - tcg_temp_free(t0); | ||
542 | - tcg_temp_free(t1); | ||
543 | } | ||
544 | #endif | ||
545 | |||
546 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
547 | tcg_gen_rem_tl(cpu_HI[acc], t0, t1); | ||
548 | tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]); | ||
549 | tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]); | ||
550 | - tcg_temp_free(t3); | ||
551 | - tcg_temp_free(t2); | ||
552 | } | ||
553 | break; | ||
554 | case OPC_DIVU: | ||
555 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
556 | tcg_gen_remu_tl(cpu_HI[acc], t0, t1); | ||
557 | tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]); | ||
558 | tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]); | ||
559 | - tcg_temp_free(t3); | ||
560 | - tcg_temp_free(t2); | ||
561 | } | ||
562 | break; | ||
563 | case OPC_MULT: | ||
564 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
565 | tcg_gen_muls2_i32(t2, t3, t2, t3); | ||
566 | tcg_gen_ext_i32_tl(cpu_LO[acc], t2); | ||
567 | tcg_gen_ext_i32_tl(cpu_HI[acc], t3); | ||
568 | - tcg_temp_free_i32(t2); | ||
569 | - tcg_temp_free_i32(t3); | ||
570 | } | ||
571 | break; | ||
572 | case OPC_MULTU: | ||
573 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
574 | tcg_gen_mulu2_i32(t2, t3, t2, t3); | ||
575 | tcg_gen_ext_i32_tl(cpu_LO[acc], t2); | ||
576 | tcg_gen_ext_i32_tl(cpu_HI[acc], t3); | ||
577 | - tcg_temp_free_i32(t2); | ||
578 | - tcg_temp_free_i32(t3); | ||
579 | } | ||
580 | break; | ||
581 | #if defined(TARGET_MIPS64) | ||
582 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
583 | tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); | ||
584 | tcg_gen_div_tl(cpu_LO[acc], t0, t1); | ||
585 | tcg_gen_rem_tl(cpu_HI[acc], t0, t1); | ||
586 | - tcg_temp_free(t3); | ||
587 | - tcg_temp_free(t2); | ||
588 | } | ||
589 | break; | ||
590 | case OPC_DDIVU: | ||
591 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
592 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
593 | tcg_gen_divu_i64(cpu_LO[acc], t0, t1); | ||
594 | tcg_gen_remu_i64(cpu_HI[acc], t0, t1); | ||
595 | - tcg_temp_free(t3); | ||
596 | - tcg_temp_free(t2); | ||
597 | } | ||
598 | break; | ||
599 | case OPC_DMULT: | ||
600 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
601 | tcg_gen_mul_i64(t2, t2, t3); | ||
602 | tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); | ||
603 | tcg_gen_add_i64(t2, t2, t3); | ||
604 | - tcg_temp_free_i64(t3); | ||
605 | gen_move_low32(cpu_LO[acc], t2); | ||
606 | gen_move_high32(cpu_HI[acc], t2); | ||
607 | - tcg_temp_free_i64(t2); | ||
608 | } | ||
609 | break; | ||
610 | case OPC_MADDU: | ||
611 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
612 | tcg_gen_mul_i64(t2, t2, t3); | ||
613 | tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); | ||
614 | tcg_gen_add_i64(t2, t2, t3); | ||
615 | - tcg_temp_free_i64(t3); | ||
616 | gen_move_low32(cpu_LO[acc], t2); | ||
617 | gen_move_high32(cpu_HI[acc], t2); | ||
618 | - tcg_temp_free_i64(t2); | ||
619 | } | ||
620 | break; | ||
621 | case OPC_MSUB: | ||
622 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
623 | tcg_gen_mul_i64(t2, t2, t3); | ||
624 | tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); | ||
625 | tcg_gen_sub_i64(t2, t3, t2); | ||
626 | - tcg_temp_free_i64(t3); | ||
627 | gen_move_low32(cpu_LO[acc], t2); | ||
628 | gen_move_high32(cpu_HI[acc], t2); | ||
629 | - tcg_temp_free_i64(t2); | ||
630 | } | ||
631 | break; | ||
632 | case OPC_MSUBU: | ||
633 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
634 | tcg_gen_mul_i64(t2, t2, t3); | ||
635 | tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); | ||
636 | tcg_gen_sub_i64(t2, t3, t2); | ||
637 | - tcg_temp_free_i64(t3); | ||
638 | gen_move_low32(cpu_LO[acc], t2); | ||
639 | gen_move_high32(cpu_HI[acc], t2); | ||
640 | - tcg_temp_free_i64(t2); | ||
641 | } | ||
642 | break; | ||
643 | default: | ||
644 | MIPS_INVAL("mul/div"); | ||
645 | gen_reserved_instruction(ctx); | ||
646 | - goto out; | ||
647 | + break; | ||
648 | } | ||
649 | - out: | ||
650 | - tcg_temp_free(t0); | ||
651 | - tcg_temp_free(t1); | ||
652 | } | ||
653 | |||
654 | /* | ||
655 | @@ -XXX,XX +XXX,XX @@ static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, | ||
656 | } | ||
657 | tcg_gen_ext_i32_tl(cpu_LO[acc], t2); | ||
658 | tcg_gen_ext_i32_tl(cpu_HI[acc], t3); | ||
659 | - tcg_temp_free_i32(t2); | ||
660 | - tcg_temp_free_i32(t3); | ||
661 | } | ||
662 | break; | ||
663 | case MMI_OPC_MULTU1: | ||
664 | @@ -XXX,XX +XXX,XX @@ static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, | ||
665 | } | ||
666 | tcg_gen_ext_i32_tl(cpu_LO[acc], t2); | ||
667 | tcg_gen_ext_i32_tl(cpu_HI[acc], t3); | ||
668 | - tcg_temp_free_i32(t2); | ||
669 | - tcg_temp_free_i32(t3); | ||
670 | } | ||
671 | break; | ||
672 | case MMI_OPC_MADD1: | ||
673 | @@ -XXX,XX +XXX,XX @@ static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, | ||
674 | tcg_gen_mul_i64(t2, t2, t3); | ||
675 | tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); | ||
676 | tcg_gen_add_i64(t2, t2, t3); | ||
677 | - tcg_temp_free_i64(t3); | ||
678 | gen_move_low32(cpu_LO[acc], t2); | ||
679 | gen_move_high32(cpu_HI[acc], t2); | ||
680 | if (rd) { | ||
681 | gen_move_low32(cpu_gpr[rd], t2); | ||
682 | } | ||
683 | - tcg_temp_free_i64(t2); | ||
684 | } | ||
685 | break; | ||
686 | case MMI_OPC_MADDU1: | ||
687 | @@ -XXX,XX +XXX,XX @@ static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, | ||
688 | tcg_gen_mul_i64(t2, t2, t3); | ||
689 | tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); | ||
690 | tcg_gen_add_i64(t2, t2, t3); | ||
691 | - tcg_temp_free_i64(t3); | ||
692 | gen_move_low32(cpu_LO[acc], t2); | ||
693 | gen_move_high32(cpu_HI[acc], t2); | ||
694 | if (rd) { | ||
695 | gen_move_low32(cpu_gpr[rd], t2); | ||
696 | } | ||
697 | - tcg_temp_free_i64(t2); | ||
698 | } | ||
699 | break; | ||
700 | default: | ||
701 | MIPS_INVAL("mul/madd TXx9"); | ||
702 | gen_reserved_instruction(ctx); | ||
703 | - goto out; | ||
704 | + break; | ||
705 | } | ||
706 | - | ||
707 | - out: | ||
708 | - tcg_temp_free(t0); | ||
709 | - tcg_temp_free(t1); | ||
710 | } | ||
711 | |||
712 | static void gen_cl(DisasContext *ctx, uint32_t opc, | ||
713 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, | ||
714 | break; | ||
715 | #endif | ||
716 | } | ||
717 | - | ||
718 | - tcg_temp_free(t0); | ||
719 | - tcg_temp_free(t1); | ||
720 | } | ||
721 | |||
722 | /* Loongson multimedia instructions */ | ||
723 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) | ||
724 | tcg_gen_xor_i64(t1, t1, t2); | ||
725 | tcg_gen_xor_i64(t2, t2, t0); | ||
726 | tcg_gen_andc_i64(t1, t2, t1); | ||
727 | - tcg_temp_free_i64(t2); | ||
728 | tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab); | ||
729 | generate_exception(ctx, EXCP_OVERFLOW); | ||
730 | gen_set_label(lab); | ||
731 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) | ||
732 | tcg_gen_xor_i64(t1, t1, t2); | ||
733 | tcg_gen_xor_i64(t2, t2, t0); | ||
734 | tcg_gen_and_i64(t1, t1, t2); | ||
735 | - tcg_temp_free_i64(t2); | ||
736 | tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab); | ||
737 | generate_exception(ctx, EXCP_OVERFLOW); | ||
738 | gen_set_label(lab); | ||
739 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) | ||
740 | tcg_gen_extrl_i64_i32(t32, t64); | ||
741 | tcg_gen_deposit_i32(fpu_fcr31, fpu_fcr31, t32, | ||
742 | get_fp_bit(cc), 1); | ||
743 | - | ||
744 | - tcg_temp_free_i32(t32); | ||
745 | - tcg_temp_free_i64(t64); | ||
746 | } | ||
747 | - goto no_rd; | ||
748 | - break; | ||
749 | + return; | ||
750 | default: | ||
751 | MIPS_INVAL("loongson_cp2"); | ||
752 | gen_reserved_instruction(ctx); | ||
753 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) | ||
754 | } | ||
755 | |||
756 | gen_store_fpr64(ctx, t0, rd); | ||
757 | - | ||
758 | -no_rd: | ||
759 | - tcg_temp_free_i64(t0); | ||
760 | - tcg_temp_free_i64(t1); | ||
761 | } | ||
762 | |||
763 | static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
764 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
765 | ctx->default_tcg_memop_mask); | ||
766 | gen_store_gpr(t1, rt); | ||
767 | gen_store_gpr(t0, lsq_rt1); | ||
768 | - tcg_temp_free(t1); | ||
769 | break; | ||
770 | case OPC_GSLQC1: | ||
771 | check_cp1_enabled(ctx); | ||
772 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
773 | ctx->default_tcg_memop_mask); | ||
774 | gen_store_fpr64(ctx, t1, rt); | ||
775 | gen_store_fpr64(ctx, t0, lsq_rt1); | ||
776 | - tcg_temp_free(t1); | ||
777 | break; | ||
778 | case OPC_GSSQ: | ||
779 | t1 = tcg_temp_new(); | ||
780 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
781 | gen_load_gpr(t1, lsq_rt1); | ||
782 | tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | | ||
783 | ctx->default_tcg_memop_mask); | ||
784 | - tcg_temp_free(t1); | ||
785 | break; | ||
786 | case OPC_GSSQC1: | ||
787 | check_cp1_enabled(ctx); | ||
788 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
789 | gen_load_fpr64(ctx, t1, lsq_rt1); | ||
790 | tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | | ||
791 | ctx->default_tcg_memop_mask); | ||
792 | - tcg_temp_free(t1); | ||
793 | break; | ||
794 | #endif | ||
795 | case OPC_GSSHFL: | ||
796 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
797 | gen_load_fpr32(ctx, fp0, rt); | ||
798 | tcg_gen_ext_i32_tl(t1, fp0); | ||
799 | tcg_gen_andc_tl(t1, t1, t2); | ||
800 | - tcg_temp_free(t2); | ||
801 | tcg_gen_or_tl(t0, t0, t1); | ||
802 | - tcg_temp_free(t1); | ||
803 | #if defined(TARGET_MIPS64) | ||
804 | tcg_gen_extrl_i64_i32(fp0, t0); | ||
805 | #else | ||
806 | tcg_gen_ext32s_tl(fp0, t0); | ||
807 | #endif | ||
808 | gen_store_fpr32(ctx, fp0, rt); | ||
809 | - tcg_temp_free_i32(fp0); | ||
810 | break; | ||
811 | case OPC_GSLWRC1: | ||
812 | check_cp1_enabled(ctx); | ||
813 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
814 | gen_load_fpr32(ctx, fp0, rt); | ||
815 | tcg_gen_ext_i32_tl(t1, fp0); | ||
816 | tcg_gen_and_tl(t1, t1, t2); | ||
817 | - tcg_temp_free(t2); | ||
818 | tcg_gen_or_tl(t0, t0, t1); | ||
819 | - tcg_temp_free(t1); | ||
820 | #if defined(TARGET_MIPS64) | ||
821 | tcg_gen_extrl_i64_i32(fp0, t0); | ||
822 | #else | ||
823 | tcg_gen_ext32s_tl(fp0, t0); | ||
824 | #endif | ||
825 | gen_store_fpr32(ctx, fp0, rt); | ||
826 | - tcg_temp_free_i32(fp0); | ||
827 | break; | ||
828 | #if defined(TARGET_MIPS64) | ||
829 | case OPC_GSLDLC1: | ||
830 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
831 | tcg_gen_shl_tl(t2, t2, t1); | ||
832 | gen_load_fpr64(ctx, t1, rt); | ||
833 | tcg_gen_andc_tl(t1, t1, t2); | ||
834 | - tcg_temp_free(t2); | ||
835 | tcg_gen_or_tl(t0, t0, t1); | ||
836 | - tcg_temp_free(t1); | ||
837 | gen_store_fpr64(ctx, t0, rt); | ||
838 | break; | ||
839 | case OPC_GSLDRC1: | ||
840 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
841 | tcg_gen_shl_tl(t2, t2, t1); | ||
842 | gen_load_fpr64(ctx, t1, rt); | ||
843 | tcg_gen_and_tl(t1, t1, t2); | ||
844 | - tcg_temp_free(t2); | ||
845 | tcg_gen_or_tl(t0, t0, t1); | ||
846 | - tcg_temp_free(t1); | ||
847 | gen_store_fpr64(ctx, t0, rt); | ||
848 | break; | ||
849 | #endif | ||
850 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
851 | gen_load_fpr32(ctx, fp0, rt); | ||
852 | tcg_gen_ext_i32_tl(t1, fp0); | ||
853 | gen_helper_0e2i(swl, t1, t0, ctx->mem_idx); | ||
854 | - tcg_temp_free_i32(fp0); | ||
855 | - tcg_temp_free(t1); | ||
856 | break; | ||
857 | case OPC_GSSWRC1: | ||
858 | check_cp1_enabled(ctx); | ||
859 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
860 | gen_load_fpr32(ctx, fp0, rt); | ||
861 | tcg_gen_ext_i32_tl(t1, fp0); | ||
862 | gen_helper_0e2i(swr, t1, t0, ctx->mem_idx); | ||
863 | - tcg_temp_free_i32(fp0); | ||
864 | - tcg_temp_free(t1); | ||
865 | break; | ||
866 | #if defined(TARGET_MIPS64) | ||
867 | case OPC_GSSDLC1: | ||
868 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
869 | gen_base_offset_addr(ctx, t0, rs, shf_offset); | ||
870 | gen_load_fpr64(ctx, t1, rt); | ||
871 | gen_helper_0e2i(sdl, t1, t0, ctx->mem_idx); | ||
872 | - tcg_temp_free(t1); | ||
873 | break; | ||
874 | case OPC_GSSDRC1: | ||
875 | check_cp1_enabled(ctx); | ||
876 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
877 | gen_base_offset_addr(ctx, t0, rs, shf_offset); | ||
878 | gen_load_fpr64(ctx, t1, rt); | ||
879 | gen_helper_0e2i(sdr, t1, t0, ctx->mem_idx); | ||
880 | - tcg_temp_free(t1); | ||
881 | break; | ||
882 | #endif | ||
883 | default: | ||
884 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
885 | gen_reserved_instruction(ctx); | ||
886 | break; | ||
887 | } | ||
888 | - tcg_temp_free(t0); | ||
889 | } | ||
890 | |||
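The straight-line cases are even simpler: a temporary (or a value built with tcg_const_tl()) is now just left alive after its last use instead of being freed on the spot. A sketch of the typical load-and-store shape, with a hypothetical gen_example_ldc1() that does not correspond to any function in this file:

    /* After this series there is no trailing tcg_temp_free_i32()/tcg_temp_free();
     * the temporaries are reclaimed when translation of the block finishes. */
    static void gen_example_ldc1(DisasContext *ctx, int ft, int base, int offset)
    {
        TCGv t0 = tcg_temp_new();
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_base_offset_addr(ctx, t0, base, offset);
        tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx,
                            MO_TESL | ctx->default_tcg_memop_mask);
        gen_store_fpr32(ctx, fp0, ft);
        /* previously: tcg_temp_free_i32(fp0); tcg_temp_free(t0); */
    }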
891 | /* Loongson EXT LDC2/SDC2 */ | ||
892 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, | ||
893 | tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL | | ||
894 | ctx->default_tcg_memop_mask); | ||
895 | gen_store_fpr32(ctx, fp0, rt); | ||
896 | - tcg_temp_free_i32(fp0); | ||
897 | break; | ||
898 | #if defined(TARGET_MIPS64) | ||
899 | case OPC_GSLDXC1: | ||
900 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, | ||
901 | t1 = tcg_temp_new(); | ||
902 | gen_load_gpr(t1, rt); | ||
903 | tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_SB); | ||
904 | - tcg_temp_free(t1); | ||
905 | break; | ||
906 | case OPC_GSSHX: | ||
907 | t1 = tcg_temp_new(); | ||
908 | gen_load_gpr(t1, rt); | ||
909 | tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW | | ||
910 | ctx->default_tcg_memop_mask); | ||
911 | - tcg_temp_free(t1); | ||
912 | break; | ||
913 | case OPC_GSSWX: | ||
914 | t1 = tcg_temp_new(); | ||
915 | gen_load_gpr(t1, rt); | ||
916 | tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | | ||
917 | ctx->default_tcg_memop_mask); | ||
918 | - tcg_temp_free(t1); | ||
919 | break; | ||
920 | #if defined(TARGET_MIPS64) | ||
921 | case OPC_GSSDX: | ||
922 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, | ||
923 | gen_load_gpr(t1, rt); | ||
924 | tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | | ||
925 | ctx->default_tcg_memop_mask); | ||
926 | - tcg_temp_free(t1); | ||
927 | break; | ||
928 | #endif | ||
929 | case OPC_GSSWXC1: | ||
930 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, | ||
931 | gen_load_fpr32(ctx, fp0, rt); | ||
932 | tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL | | ||
933 | ctx->default_tcg_memop_mask); | ||
934 | - tcg_temp_free_i32(fp0); | ||
935 | break; | ||
936 | #if defined(TARGET_MIPS64) | ||
937 | case OPC_GSSDXC1: | ||
938 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, | ||
939 | gen_load_fpr64(ctx, t1, rt); | ||
940 | tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ | | ||
941 | ctx->default_tcg_memop_mask); | ||
942 | - tcg_temp_free(t1); | ||
943 | break; | ||
944 | #endif | ||
945 | default: | ||
946 | break; | ||
947 | } | ||
948 | - | ||
949 | - tcg_temp_free(t0); | ||
950 | } | ||
951 | |||
952 | /* Traps */ | ||
953 | @@ -XXX,XX +XXX,XX @@ static void gen_trap(DisasContext *ctx, uint32_t opc, | ||
954 | generate_exception(ctx, EXCP_TRAP); | ||
955 | gen_set_label(l1); | ||
956 | } | ||
957 | - tcg_temp_free(t0); | ||
958 | - tcg_temp_free(t1); | ||
959 | } | ||
960 | |||
961 | static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) | ||
962 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, | ||
963 | if (insn_bytes == 2) { | ||
964 | ctx->hflags |= MIPS_HFLAG_B16; | ||
965 | } | ||
966 | - tcg_temp_free(t0); | ||
967 | - tcg_temp_free(t1); | ||
968 | } | ||
969 | |||
970 | |||
971 | @@ -XXX,XX +XXX,XX @@ static void gen_bitops(DisasContext *ctx, uint32_t opc, int rt, | ||
972 | fail: | ||
973 | MIPS_INVAL("bitops"); | ||
974 | gen_reserved_instruction(ctx); | ||
975 | - tcg_temp_free(t0); | ||
976 | - tcg_temp_free(t1); | ||
977 | return; | ||
978 | } | ||
979 | gen_store_gpr(t0, rt); | ||
980 | - tcg_temp_free(t0); | ||
981 | - tcg_temp_free(t1); | ||
982 | } | ||
983 | |||
984 | static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) | ||
985 | @@ -XXX,XX +XXX,XX @@ static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) | ||
986 | tcg_gen_and_tl(t0, t0, t2); | ||
987 | tcg_gen_shli_tl(t0, t0, 8); | ||
988 | tcg_gen_or_tl(t0, t0, t1); | ||
989 | - tcg_temp_free(t2); | ||
990 | - tcg_temp_free(t1); | ||
991 | tcg_gen_ext32s_tl(cpu_gpr[rd], t0); | ||
992 | } | ||
993 | break; | ||
994 | @@ -XXX,XX +XXX,XX @@ static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) | ||
995 | tcg_gen_and_tl(t0, t0, t2); | ||
996 | tcg_gen_shli_tl(t0, t0, 8); | ||
997 | tcg_gen_or_tl(cpu_gpr[rd], t0, t1); | ||
998 | - tcg_temp_free(t2); | ||
999 | - tcg_temp_free(t1); | ||
1000 | } | ||
1001 | break; | ||
1002 | case OPC_DSHD: | ||
1003 | @@ -XXX,XX +XXX,XX @@ static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) | ||
1004 | tcg_gen_shri_tl(t1, t0, 32); | ||
1005 | tcg_gen_shli_tl(t0, t0, 32); | ||
1006 | tcg_gen_or_tl(cpu_gpr[rd], t0, t1); | ||
1007 | - tcg_temp_free(t2); | ||
1008 | - tcg_temp_free(t1); | ||
1009 | } | ||
1010 | break; | ||
1011 | #endif | ||
1012 | default: | ||
1013 | MIPS_INVAL("bsfhl"); | ||
1014 | gen_reserved_instruction(ctx); | ||
1015 | - tcg_temp_free(t0); | ||
1016 | return; | ||
1017 | } | ||
1018 | - tcg_temp_free(t0); | ||
1019 | } | ||
1020 | |||
1021 | static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs, | ||
1022 | @@ -XXX,XX +XXX,XX @@ static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs, | ||
1023 | tcg_gen_concat_tl_i64(t2, t1, t0); | ||
1024 | tcg_gen_shri_i64(t2, t2, 32 - bits); | ||
1025 | gen_move_low32(cpu_gpr[rd], t2); | ||
1026 | - tcg_temp_free_i64(t2); | ||
1027 | } | ||
1028 | break; | ||
1029 | #if defined(TARGET_MIPS64) | ||
1030 | @@ -XXX,XX +XXX,XX @@ static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs, | ||
1031 | break; | ||
1032 | #endif | ||
1033 | } | ||
1034 | - tcg_temp_free(t1); | ||
1035 | } | ||
1036 | - | ||
1037 | - tcg_temp_free(t0); | ||
1038 | } | ||
1039 | |||
1040 | void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt, int bp) | ||
1041 | @@ -XXX,XX +XXX,XX @@ static void gen_bitswap(DisasContext *ctx, int opc, int rd, int rt) | ||
1042 | break; | ||
1043 | #endif | ||
1044 | } | ||
1045 | - tcg_temp_free(t0); | ||
1046 | } | ||
1047 | |||
1048 | #ifndef CONFIG_USER_ONLY | ||
1049 | @@ -XXX,XX +XXX,XX @@ static inline void gen_mthc0_entrylo(TCGv arg, target_ulong off) | ||
1050 | tcg_gen_concat32_i64(t1, t1, t0); | ||
1051 | #endif | ||
1052 | tcg_gen_st_i64(t1, cpu_env, off); | ||
1053 | - tcg_temp_free_i64(t1); | ||
1054 | - tcg_temp_free_i64(t0); | ||
1055 | } | ||
1056 | |||
1057 | static inline void gen_mthc0_store64(TCGv arg, target_ulong off) | ||
1058 | @@ -XXX,XX +XXX,XX @@ static inline void gen_mthc0_store64(TCGv arg, target_ulong off) | ||
1059 | tcg_gen_ld_i64(t1, cpu_env, off); | ||
1060 | tcg_gen_concat32_i64(t1, t1, t0); | ||
1061 | tcg_gen_st_i64(t1, cpu_env, off); | ||
1062 | - tcg_temp_free_i64(t1); | ||
1063 | - tcg_temp_free_i64(t0); | ||
1064 | } | ||
1065 | |||
1066 | static inline void gen_mfhc0_entrylo(TCGv arg, target_ulong off) | ||
1067 | @@ -XXX,XX +XXX,XX @@ static inline void gen_mfhc0_entrylo(TCGv arg, target_ulong off) | ||
1068 | tcg_gen_shri_i64(t0, t0, 32); | ||
1069 | #endif | ||
1070 | gen_move_low32(arg, t0); | ||
1071 | - tcg_temp_free_i64(t0); | ||
1072 | } | ||
1073 | |||
1074 | static inline void gen_mfhc0_load64(TCGv arg, target_ulong off, int shift) | ||
1075 | @@ -XXX,XX +XXX,XX @@ static inline void gen_mfhc0_load64(TCGv arg, target_ulong off, int shift) | ||
1076 | tcg_gen_ld_i64(t0, cpu_env, off); | ||
1077 | tcg_gen_shri_i64(t0, t0, 32 + shift); | ||
1078 | gen_move_low32(arg, t0); | ||
1079 | - tcg_temp_free_i64(t0); | ||
1080 | } | ||
1081 | |||
1082 | static inline void gen_mfc0_load32(TCGv arg, target_ulong off) | ||
1083 | @@ -XXX,XX +XXX,XX @@ static inline void gen_mfc0_load32(TCGv arg, target_ulong off) | ||
1084 | |||
1085 | tcg_gen_ld_i32(t0, cpu_env, off); | ||
1086 | tcg_gen_ext_i32_tl(arg, t0); | ||
1087 | - tcg_temp_free_i32(t0); | ||
1088 | } | ||
1089 | |||
1090 | static inline void gen_mfc0_load64(TCGv arg, target_ulong off) | ||
1091 | @@ -XXX,XX +XXX,XX @@ static inline void gen_mtc0_store32(TCGv arg, target_ulong off) | ||
1092 | |||
1093 | tcg_gen_trunc_tl_i32(t0, arg); | ||
1094 | tcg_gen_st_i32(t0, cpu_env, off); | ||
1095 | - tcg_temp_free_i32(t0); | ||
1096 | } | ||
1097 | |||
1098 | #define CP0_CHECK(c) \ | ||
1099 | @@ -XXX,XX +XXX,XX @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) | ||
1100 | } | ||
1101 | #endif | ||
1102 | gen_move_low32(arg, tmp); | ||
1103 | - tcg_temp_free_i64(tmp); | ||
1104 | } | ||
1105 | register_name = "EntryLo0"; | ||
1106 | break; | ||
1107 | @@ -XXX,XX +XXX,XX @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) | ||
1108 | } | ||
1109 | #endif | ||
1110 | gen_move_low32(arg, tmp); | ||
1111 | - tcg_temp_free_i64(tmp); | ||
1112 | } | ||
1113 | register_name = "EntryLo1"; | ||
1114 | break; | ||
1115 | @@ -XXX,XX +XXX,XX @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) | ||
1116 | TCGv_i64 tmp = tcg_temp_new_i64(); | ||
1117 | tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUMIPSState, CP0_TagLo)); | ||
1118 | gen_move_low32(arg, tmp); | ||
1119 | - tcg_temp_free_i64(tmp); | ||
1120 | } | ||
1121 | register_name = "TagLo"; | ||
1122 | break; | ||
1123 | @@ -XXX,XX +XXX,XX @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, | ||
1124 | |||
1125 | gen_load_fpr32(ctx, fp0, rt); | ||
1126 | tcg_gen_ext_i32_tl(t0, fp0); | ||
1127 | - tcg_temp_free_i32(fp0); | ||
1128 | } else { | ||
1129 | TCGv_i32 fp0 = tcg_temp_new_i32(); | ||
1130 | |||
1131 | gen_load_fpr32h(ctx, fp0, rt); | ||
1132 | tcg_gen_ext_i32_tl(t0, fp0); | ||
1133 | - tcg_temp_free_i32(fp0); | ||
1134 | } | ||
1135 | break; | ||
1136 | case 3: | ||
1137 | @@ -XXX,XX +XXX,XX @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, | ||
1138 | } | ||
1139 | trace_mips_translate_tr("mftr", rt, u, sel, h); | ||
1140 | gen_store_gpr(t0, rd); | ||
1141 | - tcg_temp_free(t0); | ||
1142 | return; | ||
1143 | |||
1144 | die: | ||
1145 | - tcg_temp_free(t0); | ||
1146 | LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h); | ||
1147 | gen_reserved_instruction(ctx); | ||
1148 | } | ||
1149 | @@ -XXX,XX +XXX,XX @@ static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt, | ||
1150 | |||
1151 | tcg_gen_trunc_tl_i32(fp0, t0); | ||
1152 | gen_store_fpr32(ctx, fp0, rd); | ||
1153 | - tcg_temp_free_i32(fp0); | ||
1154 | } else { | ||
1155 | TCGv_i32 fp0 = tcg_temp_new_i32(); | ||
1156 | |||
1157 | tcg_gen_trunc_tl_i32(fp0, t0); | ||
1158 | gen_store_fpr32h(ctx, fp0, rd); | ||
1159 | - tcg_temp_free_i32(fp0); | ||
1160 | } | ||
1161 | break; | ||
1162 | case 3: | ||
1163 | @@ -XXX,XX +XXX,XX @@ static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt, | ||
1164 | } | ||
1165 | } | ||
1166 | trace_mips_translate_tr("mttr", rd, u, sel, h); | ||
1167 | - tcg_temp_free(t0); | ||
1168 | return; | ||
1169 | |||
1170 | die: | ||
1171 | - tcg_temp_free(t0); | ||
1172 | LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h); | ||
1173 | gen_reserved_instruction(ctx); | ||
1174 | } | ||
1175 | @@ -XXX,XX +XXX,XX @@ static void gen_cp0(CPUMIPSState *env, DisasContext *ctx, uint32_t opc, | ||
1176 | |||
1177 | gen_load_gpr(t0, rt); | ||
1178 | gen_mtc0(ctx, t0, rd, ctx->opcode & 0x7); | ||
1179 | - tcg_temp_free(t0); | ||
1180 | } | ||
1181 | opn = "mtc0"; | ||
1182 | break; | ||
1183 | @@ -XXX,XX +XXX,XX @@ static void gen_cp0(CPUMIPSState *env, DisasContext *ctx, uint32_t opc, | ||
1184 | |||
1185 | gen_load_gpr(t0, rt); | ||
1186 | gen_dmtc0(ctx, t0, rd, ctx->opcode & 0x7); | ||
1187 | - tcg_temp_free(t0); | ||
1188 | } | ||
1189 | opn = "dmtc0"; | ||
1190 | break; | ||
1191 | @@ -XXX,XX +XXX,XX @@ static void gen_cp0(CPUMIPSState *env, DisasContext *ctx, uint32_t opc, | ||
1192 | TCGv t0 = tcg_temp_new(); | ||
1193 | gen_load_gpr(t0, rt); | ||
1194 | gen_mthc0(ctx, t0, rd, ctx->opcode & 0x7); | ||
1195 | - tcg_temp_free(t0); | ||
1196 | } | ||
1197 | opn = "mthc0"; | ||
1198 | break; | ||
1199 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, | ||
1200 | |||
1201 | if ((ctx->insn_flags & ISA_MIPS_R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) { | ||
1202 | gen_reserved_instruction(ctx); | ||
1203 | - goto out; | ||
1204 | + return; | ||
1205 | } | ||
1206 | |||
1207 | if (cc != 0) { | ||
1208 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, | ||
1209 | tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc)); | ||
1210 | tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1)); | ||
1211 | tcg_gen_nand_i32(t0, t0, t1); | ||
1212 | - tcg_temp_free_i32(t1); | ||
1213 | tcg_gen_andi_i32(t0, t0, 1); | ||
1214 | tcg_gen_extu_i32_tl(bcond, t0); | ||
1215 | } | ||
1216 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, | ||
1217 | tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc)); | ||
1218 | tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1)); | ||
1219 | tcg_gen_or_i32(t0, t0, t1); | ||
1220 | - tcg_temp_free_i32(t1); | ||
1221 | tcg_gen_andi_i32(t0, t0, 1); | ||
1222 | tcg_gen_extu_i32_tl(bcond, t0); | ||
1223 | } | ||
1224 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, | ||
1225 | tcg_gen_and_i32(t0, t0, t1); | ||
1226 | tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 3)); | ||
1227 | tcg_gen_nand_i32(t0, t0, t1); | ||
1228 | - tcg_temp_free_i32(t1); | ||
1229 | tcg_gen_andi_i32(t0, t0, 1); | ||
1230 | tcg_gen_extu_i32_tl(bcond, t0); | ||
1231 | } | ||
1232 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, | ||
1233 | tcg_gen_or_i32(t0, t0, t1); | ||
1234 | tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 3)); | ||
1235 | tcg_gen_or_i32(t0, t0, t1); | ||
1236 | - tcg_temp_free_i32(t1); | ||
1237 | tcg_gen_andi_i32(t0, t0, 1); | ||
1238 | tcg_gen_extu_i32_tl(bcond, t0); | ||
1239 | } | ||
1240 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch1(DisasContext *ctx, uint32_t op, | ||
1241 | default: | ||
1242 | MIPS_INVAL("cp1 cond branch"); | ||
1243 | gen_reserved_instruction(ctx); | ||
1244 | - goto out; | ||
1245 | + return; | ||
1246 | } | ||
1247 | ctx->btarget = btarget; | ||
1248 | ctx->hflags |= MIPS_HFLAG_BDS32; | ||
1249 | - out: | ||
1250 | - tcg_temp_free_i32(t0); | ||
1251 | } | ||
1252 | |||
1253 | /* R6 CP1 Branches */ | ||
1254 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, | ||
1255 | "\n", ctx->base.pc_next); | ||
1256 | #endif | ||
1257 | gen_reserved_instruction(ctx); | ||
1258 | - goto out; | ||
1259 | + return; | ||
1260 | } | ||
1261 | |||
1262 | gen_load_fpr64(ctx, t0, ft); | ||
1263 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, | ||
1264 | default: | ||
1265 | MIPS_INVAL("cp1 cond branch"); | ||
1266 | gen_reserved_instruction(ctx); | ||
1267 | - goto out; | ||
1268 | + return; | ||
1269 | } | ||
1270 | |||
1271 | tcg_gen_trunc_i64_tl(bcond, t0); | ||
1272 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, | ||
1273 | ctx->hflags |= MIPS_HFLAG_BDS32; | ||
1274 | break; | ||
1275 | } | ||
1276 | - | ||
1277 | -out: | ||
1278 | - tcg_temp_free_i64(t0); | ||
1279 | } | ||
1280 | |||
1281 | /* Coprocessor 1 (FPU) */ | ||
1282 | @@ -XXX,XX +XXX,XX @@ static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) | ||
1283 | |||
1284 | gen_load_fpr32(ctx, fp0, fs); | ||
1285 | tcg_gen_ext_i32_tl(t0, fp0); | ||
1286 | - tcg_temp_free_i32(fp0); | ||
1287 | } | ||
1288 | gen_store_gpr(t0, rt); | ||
1289 | break; | ||
1290 | @@ -XXX,XX +XXX,XX @@ static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) | ||
1291 | |||
1292 | tcg_gen_trunc_tl_i32(fp0, t0); | ||
1293 | gen_store_fpr32(ctx, fp0, fs); | ||
1294 | - tcg_temp_free_i32(fp0); | ||
1295 | } | ||
1296 | break; | ||
1297 | case OPC_CFC1: | ||
1298 | @@ -XXX,XX +XXX,XX @@ static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) | ||
1299 | |||
1300 | gen_load_fpr32h(ctx, fp0, fs); | ||
1301 | tcg_gen_ext_i32_tl(t0, fp0); | ||
1302 | - tcg_temp_free_i32(fp0); | ||
1303 | } | ||
1304 | gen_store_gpr(t0, rt); | ||
1305 | break; | ||
1306 | @@ -XXX,XX +XXX,XX @@ static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) | ||
1307 | |||
1308 | tcg_gen_trunc_tl_i32(fp0, t0); | ||
1309 | gen_store_fpr32h(ctx, fp0, fs); | ||
1310 | - tcg_temp_free_i32(fp0); | ||
1311 | } | ||
1312 | break; | ||
1313 | default: | ||
1314 | MIPS_INVAL("cp1 move"); | ||
1315 | gen_reserved_instruction(ctx); | ||
1316 | - goto out; | ||
1317 | + return; | ||
1318 | } | ||
1319 | - | ||
1320 | - out: | ||
1321 | - tcg_temp_free(t0); | ||
1322 | } | ||
1323 | |||
1324 | static void gen_movci(DisasContext *ctx, int rd, int rs, int cc, int tf) | ||
1325 | @@ -XXX,XX +XXX,XX @@ static void gen_movci(DisasContext *ctx, int rd, int rs, int cc, int tf) | ||
1326 | t0 = tcg_temp_new_i32(); | ||
1327 | tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc)); | ||
1328 | tcg_gen_brcondi_i32(cond, t0, 0, l1); | ||
1329 | - tcg_temp_free_i32(t0); | ||
1330 | gen_load_gpr(cpu_gpr[rd], rs); | ||
1331 | gen_set_label(l1); | ||
1332 | } | ||
1333 | @@ -XXX,XX +XXX,XX @@ static inline void gen_movcf_s(DisasContext *ctx, int fs, int fd, int cc, | ||
1334 | gen_load_fpr32(ctx, t0, fs); | ||
1335 | gen_store_fpr32(ctx, t0, fd); | ||
1336 | gen_set_label(l1); | ||
1337 | - tcg_temp_free_i32(t0); | ||
1338 | } | ||
1339 | |||
1340 | static inline void gen_movcf_d(DisasContext *ctx, int fs, int fd, int cc, | ||
1341 | @@ -XXX,XX +XXX,XX @@ static inline void gen_movcf_d(DisasContext *ctx, int fs, int fd, int cc, | ||
1342 | |||
1343 | tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc)); | ||
1344 | tcg_gen_brcondi_i32(cond, t0, 0, l1); | ||
1345 | - tcg_temp_free_i32(t0); | ||
1346 | fp0 = tcg_temp_new_i64(); | ||
1347 | gen_load_fpr64(ctx, fp0, fs); | ||
1348 | gen_store_fpr64(ctx, fp0, fd); | ||
1349 | - tcg_temp_free_i64(fp0); | ||
1350 | gen_set_label(l1); | ||
1351 | } | ||
1352 | |||
1353 | @@ -XXX,XX +XXX,XX @@ static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd, | ||
1354 | tcg_gen_brcondi_i32(cond, t0, 0, l2); | ||
1355 | gen_load_fpr32h(ctx, t0, fs); | ||
1356 | gen_store_fpr32h(ctx, t0, fd); | ||
1357 | - tcg_temp_free_i32(t0); | ||
1358 | gen_set_label(l2); | ||
1359 | } | ||
1360 | |||
1361 | @@ -XXX,XX +XXX,XX @@ static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft, | ||
1362 | } | ||
1363 | |||
1364 | gen_store_fpr32(ctx, fp0, fd); | ||
1365 | - tcg_temp_free_i32(fp2); | ||
1366 | - tcg_temp_free_i32(fp1); | ||
1367 | - tcg_temp_free_i32(fp0); | ||
1368 | - tcg_temp_free_i32(t1); | ||
1369 | } | ||
1370 | |||
1371 | static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft, | ||
1372 | @@ -XXX,XX +XXX,XX @@ static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft, | ||
1373 | } | ||
1374 | |||
1375 | gen_store_fpr64(ctx, fp0, fd); | ||
1376 | - tcg_temp_free_i64(fp2); | ||
1377 | - tcg_temp_free_i64(fp1); | ||
1378 | - tcg_temp_free_i64(fp0); | ||
1379 | - tcg_temp_free_i64(t1); | ||
1380 | } | ||
1381 | |||
1382 | static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1383 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1384 | gen_load_fpr32(ctx, fp0, fs); | ||
1385 | gen_load_fpr32(ctx, fp1, ft); | ||
1386 | gen_helper_float_add_s(fp0, cpu_env, fp0, fp1); | ||
1387 | - tcg_temp_free_i32(fp1); | ||
1388 | gen_store_fpr32(ctx, fp0, fd); | ||
1389 | - tcg_temp_free_i32(fp0); | ||
1390 | } | ||
1391 | break; | ||
1392 | case OPC_SUB_S: | ||
1393 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1394 | gen_load_fpr32(ctx, fp0, fs); | ||
1395 | gen_load_fpr32(ctx, fp1, ft); | ||
1396 | gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1); | ||
1397 | - tcg_temp_free_i32(fp1); | ||
1398 | gen_store_fpr32(ctx, fp0, fd); | ||
1399 | - tcg_temp_free_i32(fp0); | ||
1400 | } | ||
1401 | break; | ||
1402 | case OPC_MUL_S: | ||
1403 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1404 | gen_load_fpr32(ctx, fp0, fs); | ||
1405 | gen_load_fpr32(ctx, fp1, ft); | ||
1406 | gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1); | ||
1407 | - tcg_temp_free_i32(fp1); | ||
1408 | gen_store_fpr32(ctx, fp0, fd); | ||
1409 | - tcg_temp_free_i32(fp0); | ||
1410 | } | ||
1411 | break; | ||
1412 | case OPC_DIV_S: | ||
1413 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1414 | gen_load_fpr32(ctx, fp0, fs); | ||
1415 | gen_load_fpr32(ctx, fp1, ft); | ||
1416 | gen_helper_float_div_s(fp0, cpu_env, fp0, fp1); | ||
1417 | - tcg_temp_free_i32(fp1); | ||
1418 | gen_store_fpr32(ctx, fp0, fd); | ||
1419 | - tcg_temp_free_i32(fp0); | ||
1420 | } | ||
1421 | break; | ||
1422 | case OPC_SQRT_S: | ||
1423 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1424 | gen_load_fpr32(ctx, fp0, fs); | ||
1425 | gen_helper_float_sqrt_s(fp0, cpu_env, fp0); | ||
1426 | gen_store_fpr32(ctx, fp0, fd); | ||
1427 | - tcg_temp_free_i32(fp0); | ||
1428 | } | ||
1429 | break; | ||
1430 | case OPC_ABS_S: | ||
1431 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1432 | gen_helper_float_abs_s(fp0, fp0); | ||
1433 | } | ||
1434 | gen_store_fpr32(ctx, fp0, fd); | ||
1435 | - tcg_temp_free_i32(fp0); | ||
1436 | } | ||
1437 | break; | ||
1438 | case OPC_MOV_S: | ||
1439 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1440 | |||
1441 | gen_load_fpr32(ctx, fp0, fs); | ||
1442 | gen_store_fpr32(ctx, fp0, fd); | ||
1443 | - tcg_temp_free_i32(fp0); | ||
1444 | } | ||
1445 | break; | ||
1446 | case OPC_NEG_S: | ||
1447 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1448 | gen_helper_float_chs_s(fp0, fp0); | ||
1449 | } | ||
1450 | gen_store_fpr32(ctx, fp0, fd); | ||
1451 | - tcg_temp_free_i32(fp0); | ||
1452 | } | ||
1453 | break; | ||
1454 | case OPC_ROUND_L_S: | ||
1455 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1456 | } else { | ||
1457 | gen_helper_float_round_l_s(fp64, cpu_env, fp32); | ||
1458 | } | ||
1459 | - tcg_temp_free_i32(fp32); | ||
1460 | gen_store_fpr64(ctx, fp64, fd); | ||
1461 | - tcg_temp_free_i64(fp64); | ||
1462 | } | ||
1463 | break; | ||
1464 | case OPC_TRUNC_L_S: | ||
1465 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1466 | } else { | ||
1467 | gen_helper_float_trunc_l_s(fp64, cpu_env, fp32); | ||
1468 | } | ||
1469 | - tcg_temp_free_i32(fp32); | ||
1470 | gen_store_fpr64(ctx, fp64, fd); | ||
1471 | - tcg_temp_free_i64(fp64); | ||
1472 | } | ||
1473 | break; | ||
1474 | case OPC_CEIL_L_S: | ||
1475 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1476 | } else { | ||
1477 | gen_helper_float_ceil_l_s(fp64, cpu_env, fp32); | ||
1478 | } | ||
1479 | - tcg_temp_free_i32(fp32); | ||
1480 | gen_store_fpr64(ctx, fp64, fd); | ||
1481 | - tcg_temp_free_i64(fp64); | ||
1482 | } | ||
1483 | break; | ||
1484 | case OPC_FLOOR_L_S: | ||
1485 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1486 | } else { | ||
1487 | gen_helper_float_floor_l_s(fp64, cpu_env, fp32); | ||
1488 | } | ||
1489 | - tcg_temp_free_i32(fp32); | ||
1490 | gen_store_fpr64(ctx, fp64, fd); | ||
1491 | - tcg_temp_free_i64(fp64); | ||
1492 | } | ||
1493 | break; | ||
1494 | case OPC_ROUND_W_S: | ||
1495 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1496 | gen_helper_float_round_w_s(fp0, cpu_env, fp0); | ||
1497 | } | ||
1498 | gen_store_fpr32(ctx, fp0, fd); | ||
1499 | - tcg_temp_free_i32(fp0); | ||
1500 | } | ||
1501 | break; | ||
1502 | case OPC_TRUNC_W_S: | ||
1503 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1504 | gen_helper_float_trunc_w_s(fp0, cpu_env, fp0); | ||
1505 | } | ||
1506 | gen_store_fpr32(ctx, fp0, fd); | ||
1507 | - tcg_temp_free_i32(fp0); | ||
1508 | } | ||
1509 | break; | ||
1510 | case OPC_CEIL_W_S: | ||
1511 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1512 | gen_helper_float_ceil_w_s(fp0, cpu_env, fp0); | ||
1513 | } | ||
1514 | gen_store_fpr32(ctx, fp0, fd); | ||
1515 | - tcg_temp_free_i32(fp0); | ||
1516 | } | ||
1517 | break; | ||
1518 | case OPC_FLOOR_W_S: | ||
1519 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1520 | gen_helper_float_floor_w_s(fp0, cpu_env, fp0); | ||
1521 | } | ||
1522 | gen_store_fpr32(ctx, fp0, fd); | ||
1523 | - tcg_temp_free_i32(fp0); | ||
1524 | } | ||
1525 | break; | ||
1526 | case OPC_SEL_S: | ||
1527 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1528 | fp0 = tcg_temp_new_i32(); | ||
1529 | gen_load_fpr32(ctx, fp0, fs); | ||
1530 | gen_store_fpr32(ctx, fp0, fd); | ||
1531 | - tcg_temp_free_i32(fp0); | ||
1532 | gen_set_label(l1); | ||
1533 | } | ||
1534 | break; | ||
1535 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1536 | fp0 = tcg_temp_new_i32(); | ||
1537 | gen_load_fpr32(ctx, fp0, fs); | ||
1538 | gen_store_fpr32(ctx, fp0, fd); | ||
1539 | - tcg_temp_free_i32(fp0); | ||
1540 | gen_set_label(l1); | ||
1541 | } | ||
1542 | } | ||
1543 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1544 | gen_load_fpr32(ctx, fp0, fs); | ||
1545 | gen_helper_float_recip_s(fp0, cpu_env, fp0); | ||
1546 | gen_store_fpr32(ctx, fp0, fd); | ||
1547 | - tcg_temp_free_i32(fp0); | ||
1548 | } | ||
1549 | break; | ||
1550 | case OPC_RSQRT_S: | ||
1551 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1552 | gen_load_fpr32(ctx, fp0, fs); | ||
1553 | gen_helper_float_rsqrt_s(fp0, cpu_env, fp0); | ||
1554 | gen_store_fpr32(ctx, fp0, fd); | ||
1555 | - tcg_temp_free_i32(fp0); | ||
1556 | } | ||
1557 | break; | ||
1558 | case OPC_MADDF_S: | ||
1559 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1560 | gen_load_fpr32(ctx, fp2, fd); | ||
1561 | gen_helper_float_maddf_s(fp2, cpu_env, fp0, fp1, fp2); | ||
1562 | gen_store_fpr32(ctx, fp2, fd); | ||
1563 | - tcg_temp_free_i32(fp2); | ||
1564 | - tcg_temp_free_i32(fp1); | ||
1565 | - tcg_temp_free_i32(fp0); | ||
1566 | } | ||
1567 | break; | ||
1568 | case OPC_MSUBF_S: | ||
1569 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1570 | gen_load_fpr32(ctx, fp2, fd); | ||
1571 | gen_helper_float_msubf_s(fp2, cpu_env, fp0, fp1, fp2); | ||
1572 | gen_store_fpr32(ctx, fp2, fd); | ||
1573 | - tcg_temp_free_i32(fp2); | ||
1574 | - tcg_temp_free_i32(fp1); | ||
1575 | - tcg_temp_free_i32(fp0); | ||
1576 | } | ||
1577 | break; | ||
1578 | case OPC_RINT_S: | ||
1579 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1580 | gen_load_fpr32(ctx, fp0, fs); | ||
1581 | gen_helper_float_rint_s(fp0, cpu_env, fp0); | ||
1582 | gen_store_fpr32(ctx, fp0, fd); | ||
1583 | - tcg_temp_free_i32(fp0); | ||
1584 | } | ||
1585 | break; | ||
1586 | case OPC_CLASS_S: | ||
1587 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1588 | gen_load_fpr32(ctx, fp0, fs); | ||
1589 | gen_helper_float_class_s(fp0, cpu_env, fp0); | ||
1590 | gen_store_fpr32(ctx, fp0, fd); | ||
1591 | - tcg_temp_free_i32(fp0); | ||
1592 | } | ||
1593 | break; | ||
1594 | case OPC_MIN_S: /* OPC_RECIP2_S */ | ||
1595 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1596 | gen_load_fpr32(ctx, fp1, ft); | ||
1597 | gen_helper_float_min_s(fp2, cpu_env, fp0, fp1); | ||
1598 | gen_store_fpr32(ctx, fp2, fd); | ||
1599 | - tcg_temp_free_i32(fp2); | ||
1600 | - tcg_temp_free_i32(fp1); | ||
1601 | - tcg_temp_free_i32(fp0); | ||
1602 | } else { | ||
1603 | /* OPC_RECIP2_S */ | ||
1604 | check_cp1_64bitmode(ctx); | ||
1605 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1606 | gen_load_fpr32(ctx, fp0, fs); | ||
1607 | gen_load_fpr32(ctx, fp1, ft); | ||
1608 | gen_helper_float_recip2_s(fp0, cpu_env, fp0, fp1); | ||
1609 | - tcg_temp_free_i32(fp1); | ||
1610 | gen_store_fpr32(ctx, fp0, fd); | ||
1611 | - tcg_temp_free_i32(fp0); | ||
1612 | } | ||
1613 | } | ||
1614 | break; | ||
1615 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1616 | gen_load_fpr32(ctx, fp1, ft); | ||
1617 | gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1); | ||
1618 | gen_store_fpr32(ctx, fp2, fd); | ||
1619 | - tcg_temp_free_i32(fp2); | ||
1620 | - tcg_temp_free_i32(fp1); | ||
1621 | - tcg_temp_free_i32(fp0); | ||
1622 | } else { | ||
1623 | /* OPC_RECIP1_S */ | ||
1624 | check_cp1_64bitmode(ctx); | ||
1625 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1626 | gen_load_fpr32(ctx, fp0, fs); | ||
1627 | gen_helper_float_recip1_s(fp0, cpu_env, fp0); | ||
1628 | gen_store_fpr32(ctx, fp0, fd); | ||
1629 | - tcg_temp_free_i32(fp0); | ||
1630 | } | ||
1631 | } | ||
1632 | break; | ||
1633 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1634 | gen_load_fpr32(ctx, fp1, ft); | ||
1635 | gen_helper_float_max_s(fp1, cpu_env, fp0, fp1); | ||
1636 | gen_store_fpr32(ctx, fp1, fd); | ||
1637 | - tcg_temp_free_i32(fp1); | ||
1638 | - tcg_temp_free_i32(fp0); | ||
1639 | } else { | ||
1640 | /* OPC_RSQRT1_S */ | ||
1641 | check_cp1_64bitmode(ctx); | ||
1642 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1643 | gen_load_fpr32(ctx, fp0, fs); | ||
1644 | gen_helper_float_rsqrt1_s(fp0, cpu_env, fp0); | ||
1645 | gen_store_fpr32(ctx, fp0, fd); | ||
1646 | - tcg_temp_free_i32(fp0); | ||
1647 | } | ||
1648 | } | ||
1649 | break; | ||
1650 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1651 | gen_load_fpr32(ctx, fp1, ft); | ||
1652 | gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1); | ||
1653 | gen_store_fpr32(ctx, fp1, fd); | ||
1654 | - tcg_temp_free_i32(fp1); | ||
1655 | - tcg_temp_free_i32(fp0); | ||
1656 | } else { | ||
1657 | /* OPC_RSQRT2_S */ | ||
1658 | check_cp1_64bitmode(ctx); | ||
1659 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1660 | gen_load_fpr32(ctx, fp0, fs); | ||
1661 | gen_load_fpr32(ctx, fp1, ft); | ||
1662 | gen_helper_float_rsqrt2_s(fp0, cpu_env, fp0, fp1); | ||
1663 | - tcg_temp_free_i32(fp1); | ||
1664 | gen_store_fpr32(ctx, fp0, fd); | ||
1665 | - tcg_temp_free_i32(fp0); | ||
1666 | } | ||
1667 | } | ||
1668 | break; | ||
1669 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1670 | |||
1671 | gen_load_fpr32(ctx, fp32, fs); | ||
1672 | gen_helper_float_cvtd_s(fp64, cpu_env, fp32); | ||
1673 | - tcg_temp_free_i32(fp32); | ||
1674 | gen_store_fpr64(ctx, fp64, fd); | ||
1675 | - tcg_temp_free_i64(fp64); | ||
1676 | } | ||
1677 | break; | ||
1678 | case OPC_CVT_W_S: | ||
1679 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1680 | gen_helper_float_cvt_w_s(fp0, cpu_env, fp0); | ||
1681 | } | ||
1682 | gen_store_fpr32(ctx, fp0, fd); | ||
1683 | - tcg_temp_free_i32(fp0); | ||
1684 | } | ||
1685 | break; | ||
1686 | case OPC_CVT_L_S: | ||
1687 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1688 | } else { | ||
1689 | gen_helper_float_cvt_l_s(fp64, cpu_env, fp32); | ||
1690 | } | ||
1691 | - tcg_temp_free_i32(fp32); | ||
1692 | gen_store_fpr64(ctx, fp64, fd); | ||
1693 | - tcg_temp_free_i64(fp64); | ||
1694 | } | ||
1695 | break; | ||
1696 | case OPC_CVT_PS_S: | ||
1697 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1698 | gen_load_fpr32(ctx, fp32_0, fs); | ||
1699 | gen_load_fpr32(ctx, fp32_1, ft); | ||
1700 | tcg_gen_concat_i32_i64(fp64, fp32_1, fp32_0); | ||
1701 | - tcg_temp_free_i32(fp32_1); | ||
1702 | - tcg_temp_free_i32(fp32_0); | ||
1703 | gen_store_fpr64(ctx, fp64, fd); | ||
1704 | - tcg_temp_free_i64(fp64); | ||
1705 | } | ||
1706 | break; | ||
1707 | case OPC_CMP_F_S: | ||
1708 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1709 | gen_load_fpr64(ctx, fp0, fs); | ||
1710 | gen_load_fpr64(ctx, fp1, ft); | ||
1711 | gen_helper_float_add_d(fp0, cpu_env, fp0, fp1); | ||
1712 | - tcg_temp_free_i64(fp1); | ||
1713 | gen_store_fpr64(ctx, fp0, fd); | ||
1714 | - tcg_temp_free_i64(fp0); | ||
1715 | } | ||
1716 | break; | ||
1717 | case OPC_SUB_D: | ||
1718 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1719 | gen_load_fpr64(ctx, fp0, fs); | ||
1720 | gen_load_fpr64(ctx, fp1, ft); | ||
1721 | gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1); | ||
1722 | - tcg_temp_free_i64(fp1); | ||
1723 | gen_store_fpr64(ctx, fp0, fd); | ||
1724 | - tcg_temp_free_i64(fp0); | ||
1725 | } | ||
1726 | break; | ||
1727 | case OPC_MUL_D: | ||
1728 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1729 | gen_load_fpr64(ctx, fp0, fs); | ||
1730 | gen_load_fpr64(ctx, fp1, ft); | ||
1731 | gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1); | ||
1732 | - tcg_temp_free_i64(fp1); | ||
1733 | gen_store_fpr64(ctx, fp0, fd); | ||
1734 | - tcg_temp_free_i64(fp0); | ||
1735 | } | ||
1736 | break; | ||
1737 | case OPC_DIV_D: | ||
1738 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1739 | gen_load_fpr64(ctx, fp0, fs); | ||
1740 | gen_load_fpr64(ctx, fp1, ft); | ||
1741 | gen_helper_float_div_d(fp0, cpu_env, fp0, fp1); | ||
1742 | - tcg_temp_free_i64(fp1); | ||
1743 | gen_store_fpr64(ctx, fp0, fd); | ||
1744 | - tcg_temp_free_i64(fp0); | ||
1745 | } | ||
1746 | break; | ||
1747 | case OPC_SQRT_D: | ||
1748 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1749 | gen_load_fpr64(ctx, fp0, fs); | ||
1750 | gen_helper_float_sqrt_d(fp0, cpu_env, fp0); | ||
1751 | gen_store_fpr64(ctx, fp0, fd); | ||
1752 | - tcg_temp_free_i64(fp0); | ||
1753 | } | ||
1754 | break; | ||
1755 | case OPC_ABS_D: | ||
1756 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1757 | gen_helper_float_abs_d(fp0, fp0); | ||
1758 | } | ||
1759 | gen_store_fpr64(ctx, fp0, fd); | ||
1760 | - tcg_temp_free_i64(fp0); | ||
1761 | } | ||
1762 | break; | ||
1763 | case OPC_MOV_D: | ||
1764 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1765 | |||
1766 | gen_load_fpr64(ctx, fp0, fs); | ||
1767 | gen_store_fpr64(ctx, fp0, fd); | ||
1768 | - tcg_temp_free_i64(fp0); | ||
1769 | } | ||
1770 | break; | ||
1771 | case OPC_NEG_D: | ||
1772 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1773 | gen_helper_float_chs_d(fp0, fp0); | ||
1774 | } | ||
1775 | gen_store_fpr64(ctx, fp0, fd); | ||
1776 | - tcg_temp_free_i64(fp0); | ||
1777 | } | ||
1778 | break; | ||
1779 | case OPC_ROUND_L_D: | ||
1780 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1781 | gen_helper_float_round_l_d(fp0, cpu_env, fp0); | ||
1782 | } | ||
1783 | gen_store_fpr64(ctx, fp0, fd); | ||
1784 | - tcg_temp_free_i64(fp0); | ||
1785 | } | ||
1786 | break; | ||
1787 | case OPC_TRUNC_L_D: | ||
1788 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1789 | gen_helper_float_trunc_l_d(fp0, cpu_env, fp0); | ||
1790 | } | ||
1791 | gen_store_fpr64(ctx, fp0, fd); | ||
1792 | - tcg_temp_free_i64(fp0); | ||
1793 | } | ||
1794 | break; | ||
1795 | case OPC_CEIL_L_D: | ||
1796 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1797 | gen_helper_float_ceil_l_d(fp0, cpu_env, fp0); | ||
1798 | } | ||
1799 | gen_store_fpr64(ctx, fp0, fd); | ||
1800 | - tcg_temp_free_i64(fp0); | ||
1801 | } | ||
1802 | break; | ||
1803 | case OPC_FLOOR_L_D: | ||
1804 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1805 | gen_helper_float_floor_l_d(fp0, cpu_env, fp0); | ||
1806 | } | ||
1807 | gen_store_fpr64(ctx, fp0, fd); | ||
1808 | - tcg_temp_free_i64(fp0); | ||
1809 | } | ||
1810 | break; | ||
1811 | case OPC_ROUND_W_D: | ||
1812 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1813 | } else { | ||
1814 | gen_helper_float_round_w_d(fp32, cpu_env, fp64); | ||
1815 | } | ||
1816 | - tcg_temp_free_i64(fp64); | ||
1817 | gen_store_fpr32(ctx, fp32, fd); | ||
1818 | - tcg_temp_free_i32(fp32); | ||
1819 | } | ||
1820 | break; | ||
1821 | case OPC_TRUNC_W_D: | ||
1822 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1823 | } else { | ||
1824 | gen_helper_float_trunc_w_d(fp32, cpu_env, fp64); | ||
1825 | } | ||
1826 | - tcg_temp_free_i64(fp64); | ||
1827 | gen_store_fpr32(ctx, fp32, fd); | ||
1828 | - tcg_temp_free_i32(fp32); | ||
1829 | } | ||
1830 | break; | ||
1831 | case OPC_CEIL_W_D: | ||
1832 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1833 | } else { | ||
1834 | gen_helper_float_ceil_w_d(fp32, cpu_env, fp64); | ||
1835 | } | ||
1836 | - tcg_temp_free_i64(fp64); | ||
1837 | gen_store_fpr32(ctx, fp32, fd); | ||
1838 | - tcg_temp_free_i32(fp32); | ||
1839 | } | ||
1840 | break; | ||
1841 | case OPC_FLOOR_W_D: | ||
1842 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1843 | } else { | ||
1844 | gen_helper_float_floor_w_d(fp32, cpu_env, fp64); | ||
1845 | } | ||
1846 | - tcg_temp_free_i64(fp64); | ||
1847 | gen_store_fpr32(ctx, fp32, fd); | ||
1848 | - tcg_temp_free_i32(fp32); | ||
1849 | } | ||
1850 | break; | ||
1851 | case OPC_SEL_D: | ||
1852 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1853 | fp0 = tcg_temp_new_i64(); | ||
1854 | gen_load_fpr64(ctx, fp0, fs); | ||
1855 | gen_store_fpr64(ctx, fp0, fd); | ||
1856 | - tcg_temp_free_i64(fp0); | ||
1857 | gen_set_label(l1); | ||
1858 | } | ||
1859 | break; | ||
1860 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1861 | fp0 = tcg_temp_new_i64(); | ||
1862 | gen_load_fpr64(ctx, fp0, fs); | ||
1863 | gen_store_fpr64(ctx, fp0, fd); | ||
1864 | - tcg_temp_free_i64(fp0); | ||
1865 | gen_set_label(l1); | ||
1866 | } | ||
1867 | } | ||
1868 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1869 | gen_load_fpr64(ctx, fp0, fs); | ||
1870 | gen_helper_float_recip_d(fp0, cpu_env, fp0); | ||
1871 | gen_store_fpr64(ctx, fp0, fd); | ||
1872 | - tcg_temp_free_i64(fp0); | ||
1873 | } | ||
1874 | break; | ||
1875 | case OPC_RSQRT_D: | ||
1876 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1877 | gen_load_fpr64(ctx, fp0, fs); | ||
1878 | gen_helper_float_rsqrt_d(fp0, cpu_env, fp0); | ||
1879 | gen_store_fpr64(ctx, fp0, fd); | ||
1880 | - tcg_temp_free_i64(fp0); | ||
1881 | } | ||
1882 | break; | ||
1883 | case OPC_MADDF_D: | ||
1884 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1885 | gen_load_fpr64(ctx, fp2, fd); | ||
1886 | gen_helper_float_maddf_d(fp2, cpu_env, fp0, fp1, fp2); | ||
1887 | gen_store_fpr64(ctx, fp2, fd); | ||
1888 | - tcg_temp_free_i64(fp2); | ||
1889 | - tcg_temp_free_i64(fp1); | ||
1890 | - tcg_temp_free_i64(fp0); | ||
1891 | } | ||
1892 | break; | ||
1893 | case OPC_MSUBF_D: | ||
1894 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1895 | gen_load_fpr64(ctx, fp2, fd); | ||
1896 | gen_helper_float_msubf_d(fp2, cpu_env, fp0, fp1, fp2); | ||
1897 | gen_store_fpr64(ctx, fp2, fd); | ||
1898 | - tcg_temp_free_i64(fp2); | ||
1899 | - tcg_temp_free_i64(fp1); | ||
1900 | - tcg_temp_free_i64(fp0); | ||
1901 | } | ||
1902 | break; | ||
1903 | case OPC_RINT_D: | ||
1904 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1905 | gen_load_fpr64(ctx, fp0, fs); | ||
1906 | gen_helper_float_rint_d(fp0, cpu_env, fp0); | ||
1907 | gen_store_fpr64(ctx, fp0, fd); | ||
1908 | - tcg_temp_free_i64(fp0); | ||
1909 | } | ||
1910 | break; | ||
1911 | case OPC_CLASS_D: | ||
1912 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1913 | gen_load_fpr64(ctx, fp0, fs); | ||
1914 | gen_helper_float_class_d(fp0, cpu_env, fp0); | ||
1915 | gen_store_fpr64(ctx, fp0, fd); | ||
1916 | - tcg_temp_free_i64(fp0); | ||
1917 | } | ||
1918 | break; | ||
1919 | case OPC_MIN_D: /* OPC_RECIP2_D */ | ||
1920 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1921 | gen_load_fpr64(ctx, fp1, ft); | ||
1922 | gen_helper_float_min_d(fp1, cpu_env, fp0, fp1); | ||
1923 | gen_store_fpr64(ctx, fp1, fd); | ||
1924 | - tcg_temp_free_i64(fp1); | ||
1925 | - tcg_temp_free_i64(fp0); | ||
1926 | } else { | ||
1927 | /* OPC_RECIP2_D */ | ||
1928 | check_cp1_64bitmode(ctx); | ||
1929 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1930 | gen_load_fpr64(ctx, fp0, fs); | ||
1931 | gen_load_fpr64(ctx, fp1, ft); | ||
1932 | gen_helper_float_recip2_d(fp0, cpu_env, fp0, fp1); | ||
1933 | - tcg_temp_free_i64(fp1); | ||
1934 | gen_store_fpr64(ctx, fp0, fd); | ||
1935 | - tcg_temp_free_i64(fp0); | ||
1936 | } | ||
1937 | } | ||
1938 | break; | ||
1939 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1940 | gen_load_fpr64(ctx, fp1, ft); | ||
1941 | gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1); | ||
1942 | gen_store_fpr64(ctx, fp1, fd); | ||
1943 | - tcg_temp_free_i64(fp1); | ||
1944 | - tcg_temp_free_i64(fp0); | ||
1945 | } else { | ||
1946 | /* OPC_RECIP1_D */ | ||
1947 | check_cp1_64bitmode(ctx); | ||
1948 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1949 | gen_load_fpr64(ctx, fp0, fs); | ||
1950 | gen_helper_float_recip1_d(fp0, cpu_env, fp0); | ||
1951 | gen_store_fpr64(ctx, fp0, fd); | ||
1952 | - tcg_temp_free_i64(fp0); | ||
1953 | } | ||
1954 | } | ||
1955 | break; | ||
1956 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1957 | gen_load_fpr64(ctx, fp1, ft); | ||
1958 | gen_helper_float_max_d(fp1, cpu_env, fp0, fp1); | ||
1959 | gen_store_fpr64(ctx, fp1, fd); | ||
1960 | - tcg_temp_free_i64(fp1); | ||
1961 | - tcg_temp_free_i64(fp0); | ||
1962 | } else { | ||
1963 | /* OPC_RSQRT1_D */ | ||
1964 | check_cp1_64bitmode(ctx); | ||
1965 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1966 | gen_load_fpr64(ctx, fp0, fs); | ||
1967 | gen_helper_float_rsqrt1_d(fp0, cpu_env, fp0); | ||
1968 | gen_store_fpr64(ctx, fp0, fd); | ||
1969 | - tcg_temp_free_i64(fp0); | ||
1970 | } | ||
1971 | } | ||
1972 | break; | ||
1973 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1974 | gen_load_fpr64(ctx, fp1, ft); | ||
1975 | gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1); | ||
1976 | gen_store_fpr64(ctx, fp1, fd); | ||
1977 | - tcg_temp_free_i64(fp1); | ||
1978 | - tcg_temp_free_i64(fp0); | ||
1979 | } else { | ||
1980 | /* OPC_RSQRT2_D */ | ||
1981 | check_cp1_64bitmode(ctx); | ||
1982 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1983 | gen_load_fpr64(ctx, fp0, fs); | ||
1984 | gen_load_fpr64(ctx, fp1, ft); | ||
1985 | gen_helper_float_rsqrt2_d(fp0, cpu_env, fp0, fp1); | ||
1986 | - tcg_temp_free_i64(fp1); | ||
1987 | gen_store_fpr64(ctx, fp0, fd); | ||
1988 | - tcg_temp_free_i64(fp0); | ||
1989 | } | ||
1990 | } | ||
1991 | break; | ||
1992 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
1993 | |||
1994 | gen_load_fpr64(ctx, fp64, fs); | ||
1995 | gen_helper_float_cvts_d(fp32, cpu_env, fp64); | ||
1996 | - tcg_temp_free_i64(fp64); | ||
1997 | gen_store_fpr32(ctx, fp32, fd); | ||
1998 | - tcg_temp_free_i32(fp32); | ||
1999 | } | ||
2000 | break; | ||
2001 | case OPC_CVT_W_D: | ||
2002 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2003 | } else { | ||
2004 | gen_helper_float_cvt_w_d(fp32, cpu_env, fp64); | ||
2005 | } | ||
2006 | - tcg_temp_free_i64(fp64); | ||
2007 | gen_store_fpr32(ctx, fp32, fd); | ||
2008 | - tcg_temp_free_i32(fp32); | ||
2009 | } | ||
2010 | break; | ||
2011 | case OPC_CVT_L_D: | ||
2012 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2013 | gen_helper_float_cvt_l_d(fp0, cpu_env, fp0); | ||
2014 | } | ||
2015 | gen_store_fpr64(ctx, fp0, fd); | ||
2016 | - tcg_temp_free_i64(fp0); | ||
2017 | } | ||
2018 | break; | ||
2019 | case OPC_CVT_S_W: | ||
2020 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2021 | gen_load_fpr32(ctx, fp0, fs); | ||
2022 | gen_helper_float_cvts_w(fp0, cpu_env, fp0); | ||
2023 | gen_store_fpr32(ctx, fp0, fd); | ||
2024 | - tcg_temp_free_i32(fp0); | ||
2025 | } | ||
2026 | break; | ||
2027 | case OPC_CVT_D_W: | ||
2028 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2029 | |||
2030 | gen_load_fpr32(ctx, fp32, fs); | ||
2031 | gen_helper_float_cvtd_w(fp64, cpu_env, fp32); | ||
2032 | - tcg_temp_free_i32(fp32); | ||
2033 | gen_store_fpr64(ctx, fp64, fd); | ||
2034 | - tcg_temp_free_i64(fp64); | ||
2035 | } | ||
2036 | break; | ||
2037 | case OPC_CVT_S_L: | ||
2038 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2039 | |||
2040 | gen_load_fpr64(ctx, fp64, fs); | ||
2041 | gen_helper_float_cvts_l(fp32, cpu_env, fp64); | ||
2042 | - tcg_temp_free_i64(fp64); | ||
2043 | gen_store_fpr32(ctx, fp32, fd); | ||
2044 | - tcg_temp_free_i32(fp32); | ||
2045 | } | ||
2046 | break; | ||
2047 | case OPC_CVT_D_L: | ||
2048 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2049 | gen_load_fpr64(ctx, fp0, fs); | ||
2050 | gen_helper_float_cvtd_l(fp0, cpu_env, fp0); | ||
2051 | gen_store_fpr64(ctx, fp0, fd); | ||
2052 | - tcg_temp_free_i64(fp0); | ||
2053 | } | ||
2054 | break; | ||
2055 | case OPC_CVT_PS_PW: | ||
2056 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2057 | gen_load_fpr64(ctx, fp0, fs); | ||
2058 | gen_helper_float_cvtps_pw(fp0, cpu_env, fp0); | ||
2059 | gen_store_fpr64(ctx, fp0, fd); | ||
2060 | - tcg_temp_free_i64(fp0); | ||
2061 | } | ||
2062 | break; | ||
2063 | case OPC_ADD_PS: | ||
2064 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2065 | gen_load_fpr64(ctx, fp0, fs); | ||
2066 | gen_load_fpr64(ctx, fp1, ft); | ||
2067 | gen_helper_float_add_ps(fp0, cpu_env, fp0, fp1); | ||
2068 | - tcg_temp_free_i64(fp1); | ||
2069 | gen_store_fpr64(ctx, fp0, fd); | ||
2070 | - tcg_temp_free_i64(fp0); | ||
2071 | } | ||
2072 | break; | ||
2073 | case OPC_SUB_PS: | ||
2074 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2075 | gen_load_fpr64(ctx, fp0, fs); | ||
2076 | gen_load_fpr64(ctx, fp1, ft); | ||
2077 | gen_helper_float_sub_ps(fp0, cpu_env, fp0, fp1); | ||
2078 | - tcg_temp_free_i64(fp1); | ||
2079 | gen_store_fpr64(ctx, fp0, fd); | ||
2080 | - tcg_temp_free_i64(fp0); | ||
2081 | } | ||
2082 | break; | ||
2083 | case OPC_MUL_PS: | ||
2084 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2085 | gen_load_fpr64(ctx, fp0, fs); | ||
2086 | gen_load_fpr64(ctx, fp1, ft); | ||
2087 | gen_helper_float_mul_ps(fp0, cpu_env, fp0, fp1); | ||
2088 | - tcg_temp_free_i64(fp1); | ||
2089 | gen_store_fpr64(ctx, fp0, fd); | ||
2090 | - tcg_temp_free_i64(fp0); | ||
2091 | } | ||
2092 | break; | ||
2093 | case OPC_ABS_PS: | ||
2094 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2095 | gen_load_fpr64(ctx, fp0, fs); | ||
2096 | gen_helper_float_abs_ps(fp0, fp0); | ||
2097 | gen_store_fpr64(ctx, fp0, fd); | ||
2098 | - tcg_temp_free_i64(fp0); | ||
2099 | } | ||
2100 | break; | ||
2101 | case OPC_MOV_PS: | ||
2102 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2103 | |||
2104 | gen_load_fpr64(ctx, fp0, fs); | ||
2105 | gen_store_fpr64(ctx, fp0, fd); | ||
2106 | - tcg_temp_free_i64(fp0); | ||
2107 | } | ||
2108 | break; | ||
2109 | case OPC_NEG_PS: | ||
2110 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2111 | gen_load_fpr64(ctx, fp0, fs); | ||
2112 | gen_helper_float_chs_ps(fp0, fp0); | ||
2113 | gen_store_fpr64(ctx, fp0, fd); | ||
2114 | - tcg_temp_free_i64(fp0); | ||
2115 | } | ||
2116 | break; | ||
2117 | case OPC_MOVCF_PS: | ||
2118 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2119 | fp0 = tcg_temp_new_i64(); | ||
2120 | gen_load_fpr64(ctx, fp0, fs); | ||
2121 | gen_store_fpr64(ctx, fp0, fd); | ||
2122 | - tcg_temp_free_i64(fp0); | ||
2123 | gen_set_label(l1); | ||
2124 | } | ||
2125 | break; | ||
2126 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2127 | fp0 = tcg_temp_new_i64(); | ||
2128 | gen_load_fpr64(ctx, fp0, fs); | ||
2129 | gen_store_fpr64(ctx, fp0, fd); | ||
2130 | - tcg_temp_free_i64(fp0); | ||
2131 | gen_set_label(l1); | ||
2132 | } | ||
2133 | } | ||
2134 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2135 | gen_load_fpr64(ctx, fp0, ft); | ||
2136 | gen_load_fpr64(ctx, fp1, fs); | ||
2137 | gen_helper_float_addr_ps(fp0, cpu_env, fp0, fp1); | ||
2138 | - tcg_temp_free_i64(fp1); | ||
2139 | gen_store_fpr64(ctx, fp0, fd); | ||
2140 | - tcg_temp_free_i64(fp0); | ||
2141 | } | ||
2142 | break; | ||
2143 | case OPC_MULR_PS: | ||
2144 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2145 | gen_load_fpr64(ctx, fp0, ft); | ||
2146 | gen_load_fpr64(ctx, fp1, fs); | ||
2147 | gen_helper_float_mulr_ps(fp0, cpu_env, fp0, fp1); | ||
2148 | - tcg_temp_free_i64(fp1); | ||
2149 | gen_store_fpr64(ctx, fp0, fd); | ||
2150 | - tcg_temp_free_i64(fp0); | ||
2151 | } | ||
2152 | break; | ||
2153 | case OPC_RECIP2_PS: | ||
2154 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2155 | gen_load_fpr64(ctx, fp0, fs); | ||
2156 | gen_load_fpr64(ctx, fp1, ft); | ||
2157 | gen_helper_float_recip2_ps(fp0, cpu_env, fp0, fp1); | ||
2158 | - tcg_temp_free_i64(fp1); | ||
2159 | gen_store_fpr64(ctx, fp0, fd); | ||
2160 | - tcg_temp_free_i64(fp0); | ||
2161 | } | ||
2162 | break; | ||
2163 | case OPC_RECIP1_PS: | ||
2164 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2165 | gen_load_fpr64(ctx, fp0, fs); | ||
2166 | gen_helper_float_recip1_ps(fp0, cpu_env, fp0); | ||
2167 | gen_store_fpr64(ctx, fp0, fd); | ||
2168 | - tcg_temp_free_i64(fp0); | ||
2169 | } | ||
2170 | break; | ||
2171 | case OPC_RSQRT1_PS: | ||
2172 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2173 | gen_load_fpr64(ctx, fp0, fs); | ||
2174 | gen_helper_float_rsqrt1_ps(fp0, cpu_env, fp0); | ||
2175 | gen_store_fpr64(ctx, fp0, fd); | ||
2176 | - tcg_temp_free_i64(fp0); | ||
2177 | } | ||
2178 | break; | ||
2179 | case OPC_RSQRT2_PS: | ||
2180 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2181 | gen_load_fpr64(ctx, fp0, fs); | ||
2182 | gen_load_fpr64(ctx, fp1, ft); | ||
2183 | gen_helper_float_rsqrt2_ps(fp0, cpu_env, fp0, fp1); | ||
2184 | - tcg_temp_free_i64(fp1); | ||
2185 | gen_store_fpr64(ctx, fp0, fd); | ||
2186 | - tcg_temp_free_i64(fp0); | ||
2187 | } | ||
2188 | break; | ||
2189 | case OPC_CVT_S_PU: | ||
2190 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2191 | gen_load_fpr32h(ctx, fp0, fs); | ||
2192 | gen_helper_float_cvts_pu(fp0, cpu_env, fp0); | ||
2193 | gen_store_fpr32(ctx, fp0, fd); | ||
2194 | - tcg_temp_free_i32(fp0); | ||
2195 | } | ||
2196 | break; | ||
2197 | case OPC_CVT_PW_PS: | ||
2198 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2199 | gen_load_fpr64(ctx, fp0, fs); | ||
2200 | gen_helper_float_cvtpw_ps(fp0, cpu_env, fp0); | ||
2201 | gen_store_fpr64(ctx, fp0, fd); | ||
2202 | - tcg_temp_free_i64(fp0); | ||
2203 | } | ||
2204 | break; | ||
2205 | case OPC_CVT_S_PL: | ||
2206 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2207 | gen_load_fpr32(ctx, fp0, fs); | ||
2208 | gen_helper_float_cvts_pl(fp0, cpu_env, fp0); | ||
2209 | gen_store_fpr32(ctx, fp0, fd); | ||
2210 | - tcg_temp_free_i32(fp0); | ||
2211 | } | ||
2212 | break; | ||
2213 | case OPC_PLL_PS: | ||
2214 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2215 | gen_load_fpr32(ctx, fp1, ft); | ||
2216 | gen_store_fpr32h(ctx, fp0, fd); | ||
2217 | gen_store_fpr32(ctx, fp1, fd); | ||
2218 | - tcg_temp_free_i32(fp0); | ||
2219 | - tcg_temp_free_i32(fp1); | ||
2220 | } | ||
2221 | break; | ||
2222 | case OPC_PLU_PS: | ||
2223 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2224 | gen_load_fpr32h(ctx, fp1, ft); | ||
2225 | gen_store_fpr32(ctx, fp1, fd); | ||
2226 | gen_store_fpr32h(ctx, fp0, fd); | ||
2227 | - tcg_temp_free_i32(fp0); | ||
2228 | - tcg_temp_free_i32(fp1); | ||
2229 | } | ||
2230 | break; | ||
2231 | case OPC_PUL_PS: | ||
2232 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2233 | gen_load_fpr32(ctx, fp1, ft); | ||
2234 | gen_store_fpr32(ctx, fp1, fd); | ||
2235 | gen_store_fpr32h(ctx, fp0, fd); | ||
2236 | - tcg_temp_free_i32(fp0); | ||
2237 | - tcg_temp_free_i32(fp1); | ||
2238 | } | ||
2239 | break; | ||
2240 | case OPC_PUU_PS: | ||
2241 | @@ -XXX,XX +XXX,XX @@ static void gen_farith(DisasContext *ctx, enum fopcode op1, | ||
2242 | gen_load_fpr32h(ctx, fp1, ft); | ||
2243 | gen_store_fpr32(ctx, fp1, fd); | ||
2244 | gen_store_fpr32h(ctx, fp0, fd); | ||
2245 | - tcg_temp_free_i32(fp0); | ||
2246 | - tcg_temp_free_i32(fp1); | ||
2247 | } | ||
2248 | break; | ||
2249 | case OPC_CMP_F_PS: | ||
2250 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, | ||
2251 | tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL); | ||
2252 | tcg_gen_trunc_tl_i32(fp0, t0); | ||
2253 | gen_store_fpr32(ctx, fp0, fd); | ||
2254 | - tcg_temp_free_i32(fp0); | ||
2255 | } | ||
2256 | break; | ||
2257 | case OPC_LDXC1: | ||
2258 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, | ||
2259 | TCGv_i64 fp0 = tcg_temp_new_i64(); | ||
2260 | tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); | ||
2261 | gen_store_fpr64(ctx, fp0, fd); | ||
2262 | - tcg_temp_free_i64(fp0); | ||
2263 | } | ||
2264 | break; | ||
2265 | case OPC_LUXC1: | ||
2266 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, | ||
2267 | |||
2268 | tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); | ||
2269 | gen_store_fpr64(ctx, fp0, fd); | ||
2270 | - tcg_temp_free_i64(fp0); | ||
2271 | } | ||
2272 | break; | ||
2273 | case OPC_SWXC1: | ||
2274 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, | ||
2275 | TCGv_i32 fp0 = tcg_temp_new_i32(); | ||
2276 | gen_load_fpr32(ctx, fp0, fs); | ||
2277 | tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL); | ||
2278 | - tcg_temp_free_i32(fp0); | ||
2279 | } | ||
2280 | break; | ||
2281 | case OPC_SDXC1: | ||
2282 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, | ||
2283 | TCGv_i64 fp0 = tcg_temp_new_i64(); | ||
2284 | gen_load_fpr64(ctx, fp0, fs); | ||
2285 | tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); | ||
2286 | - tcg_temp_free_i64(fp0); | ||
2287 | } | ||
2288 | break; | ||
2289 | case OPC_SUXC1: | ||
2290 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, | ||
2291 | TCGv_i64 fp0 = tcg_temp_new_i64(); | ||
2292 | gen_load_fpr64(ctx, fp0, fs); | ||
2293 | tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); | ||
2294 | - tcg_temp_free_i64(fp0); | ||
2295 | } | ||
2296 | break; | ||
2297 | } | ||
2298 | - tcg_temp_free(t0); | ||
2299 | } | ||
2300 | |||
2301 | static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2302 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2303 | tcg_gen_br(l2); | ||
2304 | gen_set_label(l1); | ||
2305 | tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2); | ||
2306 | - tcg_temp_free(t0); | ||
2307 | if (cpu_is_bigendian(ctx)) { | ||
2308 | gen_load_fpr32(ctx, fp, fs); | ||
2309 | gen_load_fpr32h(ctx, fph, ft); | ||
2310 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2311 | gen_store_fpr32h(ctx, fp, fd); | ||
2312 | } | ||
2313 | gen_set_label(l2); | ||
2314 | - tcg_temp_free_i32(fp); | ||
2315 | - tcg_temp_free_i32(fph); | ||
2316 | } | ||
2317 | break; | ||
2318 | case OPC_MADD_S: | ||
2319 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2320 | gen_load_fpr32(ctx, fp1, ft); | ||
2321 | gen_load_fpr32(ctx, fp2, fr); | ||
2322 | gen_helper_float_madd_s(fp2, cpu_env, fp0, fp1, fp2); | ||
2323 | - tcg_temp_free_i32(fp0); | ||
2324 | - tcg_temp_free_i32(fp1); | ||
2325 | gen_store_fpr32(ctx, fp2, fd); | ||
2326 | - tcg_temp_free_i32(fp2); | ||
2327 | } | ||
2328 | break; | ||
2329 | case OPC_MADD_D: | ||
2330 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2331 | gen_load_fpr64(ctx, fp1, ft); | ||
2332 | gen_load_fpr64(ctx, fp2, fr); | ||
2333 | gen_helper_float_madd_d(fp2, cpu_env, fp0, fp1, fp2); | ||
2334 | - tcg_temp_free_i64(fp0); | ||
2335 | - tcg_temp_free_i64(fp1); | ||
2336 | gen_store_fpr64(ctx, fp2, fd); | ||
2337 | - tcg_temp_free_i64(fp2); | ||
2338 | } | ||
2339 | break; | ||
2340 | case OPC_MADD_PS: | ||
2341 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2342 | gen_load_fpr64(ctx, fp1, ft); | ||
2343 | gen_load_fpr64(ctx, fp2, fr); | ||
2344 | gen_helper_float_madd_ps(fp2, cpu_env, fp0, fp1, fp2); | ||
2345 | - tcg_temp_free_i64(fp0); | ||
2346 | - tcg_temp_free_i64(fp1); | ||
2347 | gen_store_fpr64(ctx, fp2, fd); | ||
2348 | - tcg_temp_free_i64(fp2); | ||
2349 | } | ||
2350 | break; | ||
2351 | case OPC_MSUB_S: | ||
2352 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2353 | gen_load_fpr32(ctx, fp1, ft); | ||
2354 | gen_load_fpr32(ctx, fp2, fr); | ||
2355 | gen_helper_float_msub_s(fp2, cpu_env, fp0, fp1, fp2); | ||
2356 | - tcg_temp_free_i32(fp0); | ||
2357 | - tcg_temp_free_i32(fp1); | ||
2358 | gen_store_fpr32(ctx, fp2, fd); | ||
2359 | - tcg_temp_free_i32(fp2); | ||
2360 | } | ||
2361 | break; | ||
2362 | case OPC_MSUB_D: | ||
2363 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2364 | gen_load_fpr64(ctx, fp1, ft); | ||
2365 | gen_load_fpr64(ctx, fp2, fr); | ||
2366 | gen_helper_float_msub_d(fp2, cpu_env, fp0, fp1, fp2); | ||
2367 | - tcg_temp_free_i64(fp0); | ||
2368 | - tcg_temp_free_i64(fp1); | ||
2369 | gen_store_fpr64(ctx, fp2, fd); | ||
2370 | - tcg_temp_free_i64(fp2); | ||
2371 | } | ||
2372 | break; | ||
2373 | case OPC_MSUB_PS: | ||
2374 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2375 | gen_load_fpr64(ctx, fp1, ft); | ||
2376 | gen_load_fpr64(ctx, fp2, fr); | ||
2377 | gen_helper_float_msub_ps(fp2, cpu_env, fp0, fp1, fp2); | ||
2378 | - tcg_temp_free_i64(fp0); | ||
2379 | - tcg_temp_free_i64(fp1); | ||
2380 | gen_store_fpr64(ctx, fp2, fd); | ||
2381 | - tcg_temp_free_i64(fp2); | ||
2382 | } | ||
2383 | break; | ||
2384 | case OPC_NMADD_S: | ||
2385 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2386 | gen_load_fpr32(ctx, fp1, ft); | ||
2387 | gen_load_fpr32(ctx, fp2, fr); | ||
2388 | gen_helper_float_nmadd_s(fp2, cpu_env, fp0, fp1, fp2); | ||
2389 | - tcg_temp_free_i32(fp0); | ||
2390 | - tcg_temp_free_i32(fp1); | ||
2391 | gen_store_fpr32(ctx, fp2, fd); | ||
2392 | - tcg_temp_free_i32(fp2); | ||
2393 | } | ||
2394 | break; | ||
2395 | case OPC_NMADD_D: | ||
2396 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2397 | gen_load_fpr64(ctx, fp1, ft); | ||
2398 | gen_load_fpr64(ctx, fp2, fr); | ||
2399 | gen_helper_float_nmadd_d(fp2, cpu_env, fp0, fp1, fp2); | ||
2400 | - tcg_temp_free_i64(fp0); | ||
2401 | - tcg_temp_free_i64(fp1); | ||
2402 | gen_store_fpr64(ctx, fp2, fd); | ||
2403 | - tcg_temp_free_i64(fp2); | ||
2404 | } | ||
2405 | break; | ||
2406 | case OPC_NMADD_PS: | ||
2407 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2408 | gen_load_fpr64(ctx, fp1, ft); | ||
2409 | gen_load_fpr64(ctx, fp2, fr); | ||
2410 | gen_helper_float_nmadd_ps(fp2, cpu_env, fp0, fp1, fp2); | ||
2411 | - tcg_temp_free_i64(fp0); | ||
2412 | - tcg_temp_free_i64(fp1); | ||
2413 | gen_store_fpr64(ctx, fp2, fd); | ||
2414 | - tcg_temp_free_i64(fp2); | ||
2415 | } | ||
2416 | break; | ||
2417 | case OPC_NMSUB_S: | ||
2418 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2419 | gen_load_fpr32(ctx, fp1, ft); | ||
2420 | gen_load_fpr32(ctx, fp2, fr); | ||
2421 | gen_helper_float_nmsub_s(fp2, cpu_env, fp0, fp1, fp2); | ||
2422 | - tcg_temp_free_i32(fp0); | ||
2423 | - tcg_temp_free_i32(fp1); | ||
2424 | gen_store_fpr32(ctx, fp2, fd); | ||
2425 | - tcg_temp_free_i32(fp2); | ||
2426 | } | ||
2427 | break; | ||
2428 | case OPC_NMSUB_D: | ||
2429 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2430 | gen_load_fpr64(ctx, fp1, ft); | ||
2431 | gen_load_fpr64(ctx, fp2, fr); | ||
2432 | gen_helper_float_nmsub_d(fp2, cpu_env, fp0, fp1, fp2); | ||
2433 | - tcg_temp_free_i64(fp0); | ||
2434 | - tcg_temp_free_i64(fp1); | ||
2435 | gen_store_fpr64(ctx, fp2, fd); | ||
2436 | - tcg_temp_free_i64(fp2); | ||
2437 | } | ||
2438 | break; | ||
2439 | case OPC_NMSUB_PS: | ||
2440 | @@ -XXX,XX +XXX,XX @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, | ||
2441 | gen_load_fpr64(ctx, fp1, ft); | ||
2442 | gen_load_fpr64(ctx, fp2, fr); | ||
2443 | gen_helper_float_nmsub_ps(fp2, cpu_env, fp0, fp1, fp2); | ||
2444 | - tcg_temp_free_i64(fp0); | ||
2445 | - tcg_temp_free_i64(fp1); | ||
2446 | gen_store_fpr64(ctx, fp2, fd); | ||
2447 | - tcg_temp_free_i64(fp2); | ||
2448 | } | ||
2449 | break; | ||
2450 | default: | ||
2451 | @@ -XXX,XX +XXX,XX @@ void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel) | ||
2452 | gen_reserved_instruction(ctx); | ||
2453 | break; | ||
2454 | } | ||
2455 | - tcg_temp_free(t0); | ||
2456 | } | ||
2457 | |||
2458 | static inline void clear_branch_hflags(DisasContext *ctx) | ||
2459 | @@ -XXX,XX +XXX,XX @@ static void gen_branch(DisasContext *ctx, int insn_bytes) | ||
2460 | |||
2461 | tcg_gen_andi_tl(t0, btarget, 0x1); | ||
2462 | tcg_gen_trunc_tl_i32(t1, t0); | ||
2463 | - tcg_temp_free(t0); | ||
2464 | tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16); | ||
2465 | tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT); | ||
2466 | tcg_gen_or_i32(hflags, hflags, t1); | ||
2467 | - tcg_temp_free_i32(t1); | ||
2468 | |||
2469 | tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1); | ||
2470 | } else { | ||
2471 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, | ||
2472 | "\n", ctx->base.pc_next); | ||
2473 | #endif | ||
2474 | gen_reserved_instruction(ctx); | ||
2475 | - goto out; | ||
2476 | + return; | ||
2477 | } | ||
2478 | |||
2479 | /* Load needed operands and calculate btarget */ | ||
2480 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, | ||
2481 | |||
2482 | gen_load_gpr(tbase, rt); | ||
2483 | gen_op_addr_add(ctx, btarget, tbase, toffset); | ||
2484 | - tcg_temp_free(tbase); | ||
2485 | } | ||
2486 | break; | ||
2487 | default: | ||
2488 | MIPS_INVAL("Compact branch/jump"); | ||
2489 | gen_reserved_instruction(ctx); | ||
2490 | - goto out; | ||
2491 | + return; | ||
2492 | } | ||
2493 | |||
2494 | if (bcond_compute == 0) { | ||
2495 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, | ||
2496 | default: | ||
2497 | MIPS_INVAL("Compact branch/jump"); | ||
2498 | gen_reserved_instruction(ctx); | ||
2499 | - goto out; | ||
2500 | + return; | ||
2501 | } | ||
2502 | |||
2503 | /* Generating branch here as compact branches don't have delay slot */ | ||
2504 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, | ||
2505 | /* OPC_BNVC */ | ||
2506 | tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t4, 0, fs); | ||
2507 | } | ||
2508 | - tcg_temp_free(input_overflow); | ||
2509 | - tcg_temp_free(t4); | ||
2510 | - tcg_temp_free(t3); | ||
2511 | - tcg_temp_free(t2); | ||
2512 | } else if (rs < rt && rs == 0) { | ||
2513 | /* OPC_BEQZALC, OPC_BNEZALC */ | ||
2514 | if (opc == OPC_BEQZALC) { | ||
2515 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, | ||
2516 | default: | ||
2517 | MIPS_INVAL("Compact conditional branch/jump"); | ||
2518 | gen_reserved_instruction(ctx); | ||
2519 | - goto out; | ||
2520 | + return; | ||
2521 | } | ||
2522 | |||
2523 | /* Generating branch here as compact branches don't have delay slot */ | ||
2524 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, | ||
2525 | |||
2526 | ctx->hflags |= MIPS_HFLAG_FBNSLOT; | ||
2527 | } | ||
2528 | - | ||
2529 | -out: | ||
2530 | - tcg_temp_free(t0); | ||
2531 | - tcg_temp_free(t1); | ||
2532 | } | ||
2533 | |||
2534 | void gen_addiupc(DisasContext *ctx, int rx, int imm, | ||
2535 | @@ -XXX,XX +XXX,XX @@ void gen_addiupc(DisasContext *ctx, int rx, int imm, | ||
2536 | if (!is_64_bit) { | ||
2537 | tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]); | ||
2538 | } | ||
2539 | - | ||
2540 | - tcg_temp_free(t0); | ||
2541 | } | ||
2542 | |||
2543 | static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base, | ||
2544 | @@ -XXX,XX +XXX,XX @@ static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base, | ||
2545 | TCGv t1 = tcg_temp_new(); | ||
2546 | gen_base_offset_addr(ctx, t1, base, offset); | ||
2547 | gen_helper_cache(cpu_env, t1, t0); | ||
2548 | - tcg_temp_free(t1); | ||
2549 | - tcg_temp_free_i32(t0); | ||
2550 | } | ||
2551 | |||
2552 | static inline bool is_uhi(DisasContext *ctx, int sdbbp_code) | ||
2553 | @@ -XXX,XX +XXX,XX @@ void gen_ldxs(DisasContext *ctx, int base, int index, int rd) | ||
2554 | |||
2555 | tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL); | ||
2556 | gen_store_gpr(t1, rd); | ||
2557 | - | ||
2558 | - tcg_temp_free(t0); | ||
2559 | - tcg_temp_free(t1); | ||
2560 | } | ||
2561 | |||
2562 | static void gen_sync(int stype) | ||
2563 | @@ -XXX,XX +XXX,XX @@ static void gen_mips_lx(DisasContext *ctx, uint32_t opc, | ||
2564 | break; | ||
2565 | #endif | ||
2566 | } | ||
2567 | - tcg_temp_free(t0); | ||
2568 | } | ||
2569 | |||
2570 | static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2571 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2572 | TCGv_i32 sa_t = tcg_const_i32(v2); | ||
2573 | gen_helper_precr_sra_ph_w(cpu_gpr[ret], sa_t, v1_t, | ||
2574 | cpu_gpr[ret]); | ||
2575 | - tcg_temp_free_i32(sa_t); | ||
2576 | break; | ||
2577 | } | ||
2578 | case OPC_PRECR_SRA_R_PH_W: | ||
2579 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2580 | TCGv_i32 sa_t = tcg_const_i32(v2); | ||
2581 | gen_helper_precr_sra_r_ph_w(cpu_gpr[ret], sa_t, v1_t, | ||
2582 | cpu_gpr[ret]); | ||
2583 | - tcg_temp_free_i32(sa_t); | ||
2584 | break; | ||
2585 | } | ||
2586 | case OPC_PRECRQ_PH_W: | ||
2587 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2588 | { | ||
2589 | TCGv_i32 ret_t = tcg_const_i32(ret); | ||
2590 | gen_helper_precr_sra_qh_pw(v2_t, v1_t, v2_t, ret_t); | ||
2591 | - tcg_temp_free_i32(ret_t); | ||
2592 | break; | ||
2593 | } | ||
2594 | case OPC_PRECR_SRA_R_QH_PW: | ||
2595 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2596 | { | ||
2597 | TCGv_i32 sa_v = tcg_const_i32(ret); | ||
2598 | gen_helper_precr_sra_r_qh_pw(v2_t, v1_t, v2_t, sa_v); | ||
2599 | - tcg_temp_free_i32(sa_v); | ||
2600 | break; | ||
2601 | } | ||
2602 | case OPC_PRECRQ_OB_QH: | ||
2603 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2604 | break; | ||
2605 | #endif | ||
2606 | } | ||
2607 | - | ||
2608 | - tcg_temp_free(v1_t); | ||
2609 | - tcg_temp_free(v2_t); | ||
2610 | } | ||
2611 | |||
2612 | static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, | ||
2613 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, | ||
2614 | break; | ||
2615 | #endif | ||
2616 | } | ||
2617 | - | ||
2618 | - tcg_temp_free(t0); | ||
2619 | - tcg_temp_free(v1_t); | ||
2620 | - tcg_temp_free(v2_t); | ||
2621 | } | ||
2622 | |||
2623 | static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2624 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2625 | break; | ||
2626 | #endif | ||
2627 | } | ||
2628 | - | ||
2629 | - tcg_temp_free_i32(t0); | ||
2630 | - tcg_temp_free(v1_t); | ||
2631 | - tcg_temp_free(v2_t); | ||
2632 | } | ||
2633 | |||
2634 | static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2635 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2636 | break; | ||
2637 | #endif | ||
2638 | } | ||
2639 | - tcg_temp_free(t0); | ||
2640 | - tcg_temp_free(val_t); | ||
2641 | } | ||
2642 | |||
2643 | static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, | ||
2644 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, | ||
2645 | break; | ||
2646 | #endif | ||
2647 | } | ||
2648 | - | ||
2649 | - tcg_temp_free(t1); | ||
2650 | - tcg_temp_free(v1_t); | ||
2651 | - tcg_temp_free(v2_t); | ||
2652 | } | ||
2653 | |||
2654 | static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx, | ||
2655 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx, | ||
2656 | break; | ||
2657 | #endif | ||
2658 | } | ||
2659 | - tcg_temp_free(t0); | ||
2660 | } | ||
2661 | |||
2662 | static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2663 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
2664 | break; | ||
2665 | #endif | ||
2666 | } | ||
2667 | - | ||
2668 | - tcg_temp_free(t0); | ||
2669 | - tcg_temp_free(t1); | ||
2670 | - tcg_temp_free(v1_t); | ||
2671 | } | ||
2672 | |||
2673 | /* End MIPSDSP functions. */ | ||
2674 | @@ -XXX,XX +XXX,XX @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) | ||
2675 | gen_load_gpr(t1, rs); | ||
2676 | |||
2677 | gen_helper_insv(cpu_gpr[rt], cpu_env, t1, t0); | ||
2678 | - | ||
2679 | - tcg_temp_free(t0); | ||
2680 | - tcg_temp_free(t1); | ||
2681 | break; | ||
2682 | } | ||
2683 | default: /* Invalid */ | ||
2684 | @@ -XXX,XX +XXX,XX @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) | ||
2685 | gen_load_gpr(t1, rs); | ||
2686 | |||
2687 | gen_helper_dinsv(cpu_gpr[rt], cpu_env, t1, t0); | ||
2688 | - | ||
2689 | - tcg_temp_free(t0); | ||
2690 | - tcg_temp_free(t1); | ||
2691 | break; | ||
2692 | } | ||
2693 | default: /* Invalid */ | ||
2694 | @@ -XXX,XX +XXX,XX @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) | ||
2695 | gen_load_gpr(t0, rt); | ||
2696 | gen_load_gpr(t1, rs); | ||
2697 | gen_helper_fork(t0, t1); | ||
2698 | - tcg_temp_free(t0); | ||
2699 | - tcg_temp_free(t1); | ||
2700 | } | ||
2701 | break; | ||
2702 | case OPC_YIELD: | ||
2703 | @@ -XXX,XX +XXX,XX @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) | ||
2704 | gen_load_gpr(t0, rs); | ||
2705 | gen_helper_yield(t0, cpu_env, t0); | ||
2706 | gen_store_gpr(t0, rd); | ||
2707 | - tcg_temp_free(t0); | ||
2708 | } | ||
2709 | break; | ||
2710 | default: | ||
2711 | @@ -XXX,XX +XXX,XX @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) | ||
2712 | gen_reserved_instruction(ctx); | ||
2713 | break; | ||
2714 | } | ||
2715 | - tcg_temp_free(t0); | ||
2716 | } | ||
2717 | #endif /* !CONFIG_USER_ONLY */ | ||
2718 | break; | ||
2719 | @@ -XXX,XX +XXX,XX @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) | ||
2720 | TCGv t0 = tcg_temp_new(); | ||
2721 | gen_load_gpr(t0, rs); | ||
2722 | tcg_gen_addi_tl(cpu_gpr[rt], t0, imm << 16); | ||
2723 | - tcg_temp_free(t0); | ||
2724 | } | ||
2725 | #else | ||
2726 | gen_reserved_instruction(ctx); | ||
2727 | -- | ||
2728 | 2.34.1 | ||
2729 | |||
New patch | |||
---|---|---|---|
1 | 1 | Translators are no longer required to free tcg temporaries. | |
2 | Remove the g1 and g2 members of DisasCompare, as they were | ||
3 | used to track which temps needed to be freed. | ||
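
As an illustrative aside (not part of the patch), the practical effect on callers is that a DisasCompare may now mix fresh temporaries, globals and constants without recording ownership anywhere. A minimal sketch using names from the hunks below; the helper name itself is hypothetical:

    /* Sketch only: temporaries are reclaimed automatically at the end of
     * the translation block, so no tcg_temp_free_*() call and no g1/g2
     * bookkeeping is needed. */
    static void cmp_reg_minus_one(DisasCompare *c, TCGv_i64 reg)
    {
        TCGv_i64 t = tcg_temp_new_i64();      /* freed by the TCG core */

        tcg_gen_subi_i64(t, reg, 1);
        c->cond = TCG_COND_NE;
        c->is_64 = true;
        c->u.s64.a = t;                       /* fresh temp ... */
        c->u.s64.b = tcg_constant_i64(0);     /* ... or a constant: same rule */
    }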
4 | |||
5 | Acked-by: David Hildenbrand <david@redhat.com> | ||
6 | Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com> | ||
7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/s390x/tcg/translate.c | 46 +----------------------------------- | ||
11 | 1 file changed, 1 insertion(+), 45 deletions(-) | ||
12 | |||
13 | diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/s390x/tcg/translate.c | ||
16 | +++ b/target/s390x/tcg/translate.c | ||
17 | @@ -XXX,XX +XXX,XX @@ struct DisasContext { | ||
18 | typedef struct { | ||
19 | TCGCond cond:8; | ||
20 | bool is_64; | ||
21 | - bool g1; | ||
22 | - bool g2; | ||
23 | union { | ||
24 | struct { TCGv_i64 a, b; } s64; | ||
25 | struct { TCGv_i32 a, b; } s32; | ||
26 | @@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) | ||
27 | c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER); | ||
28 | c->u.s32.a = cc_op; | ||
29 | c->u.s32.b = cc_op; | ||
30 | - c->g1 = c->g2 = true; | ||
31 | c->is_64 = false; | ||
32 | return; | ||
33 | } | ||
34 | @@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) | ||
35 | |||
36 | /* Load up the arguments of the comparison. */ | ||
37 | c->is_64 = true; | ||
38 | - c->g1 = c->g2 = false; | ||
39 | switch (old_cc_op) { | ||
40 | case CC_OP_LTGT0_32: | ||
41 | c->is_64 = false; | ||
42 | @@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) | ||
43 | case CC_OP_FLOGR: | ||
44 | c->u.s64.a = cc_dst; | ||
45 | c->u.s64.b = tcg_constant_i64(0); | ||
46 | - c->g1 = true; | ||
47 | break; | ||
48 | case CC_OP_LTGT_64: | ||
49 | case CC_OP_LTUGTU_64: | ||
50 | c->u.s64.a = cc_src; | ||
51 | c->u.s64.b = cc_dst; | ||
52 | - c->g1 = c->g2 = true; | ||
53 | break; | ||
54 | |||
55 | case CC_OP_TM_32: | ||
56 | @@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) | ||
57 | case CC_OP_SUBU: | ||
58 | c->is_64 = true; | ||
59 | c->u.s64.b = tcg_constant_i64(0); | ||
60 | - c->g1 = true; | ||
61 | switch (mask) { | ||
62 | case 8 | 2: | ||
63 | case 4 | 1: /* result */ | ||
64 | @@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) | ||
65 | case CC_OP_STATIC: | ||
66 | c->is_64 = false; | ||
67 | c->u.s32.a = cc_op; | ||
68 | - c->g1 = true; | ||
69 | switch (mask) { | ||
70 | case 0x8 | 0x4 | 0x2: /* cc != 3 */ | ||
71 | cond = TCG_COND_NE; | ||
72 | @@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) | ||
73 | break; | ||
74 | case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */ | ||
75 | cond = TCG_COND_EQ; | ||
76 | - c->g1 = false; | ||
77 | c->u.s32.a = tcg_temp_new_i32(); | ||
78 | c->u.s32.b = tcg_constant_i32(0); | ||
79 | tcg_gen_andi_i32(c->u.s32.a, cc_op, 1); | ||
80 | @@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) | ||
81 | break; | ||
82 | case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */ | ||
83 | cond = TCG_COND_NE; | ||
84 | - c->g1 = false; | ||
85 | c->u.s32.a = tcg_temp_new_i32(); | ||
86 | c->u.s32.b = tcg_constant_i32(0); | ||
87 | tcg_gen_andi_i32(c->u.s32.a, cc_op, 1); | ||
88 | @@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) | ||
89 | default: | ||
90 | /* CC is masked by something else: (8 >> cc) & mask. */ | ||
91 | cond = TCG_COND_NE; | ||
92 | - c->g1 = false; | ||
93 | c->u.s32.a = tcg_temp_new_i32(); | ||
94 | c->u.s32.b = tcg_constant_i32(0); | ||
95 | tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op); | ||
96 | @@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) | ||
97 | c->cond = cond; | ||
98 | } | ||
99 | |||
100 | -static void free_compare(DisasCompare *c) | ||
101 | -{ | ||
102 | - if (!c->g1) { | ||
103 | - if (c->is_64) { | ||
104 | - tcg_temp_free_i64(c->u.s64.a); | ||
105 | - } else { | ||
106 | - tcg_temp_free_i32(c->u.s32.a); | ||
107 | - } | ||
108 | - } | ||
109 | - if (!c->g2) { | ||
110 | - if (c->is_64) { | ||
111 | - tcg_temp_free_i64(c->u.s64.b); | ||
112 | - } else { | ||
113 | - tcg_temp_free_i32(c->u.s32.b); | ||
114 | - } | ||
115 | - } | ||
116 | -} | ||
117 | - | ||
118 | /* ====================================================================== */ | ||
119 | /* Define the insn format enumeration. */ | ||
120 | #define F0(N) FMT_##N, | ||
121 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, | ||
122 | } | ||
123 | |||
124 | egress: | ||
125 | - free_compare(c); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) | ||
130 | |||
131 | c.cond = TCG_COND_NE; | ||
132 | c.is_64 = false; | ||
133 | - c.g1 = false; | ||
134 | - c.g2 = false; | ||
135 | |||
136 | t = tcg_temp_new_i64(); | ||
137 | tcg_gen_subi_i64(t, regs[r1], 1); | ||
138 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) | ||
139 | |||
140 | c.cond = TCG_COND_NE; | ||
141 | c.is_64 = false; | ||
142 | - c.g1 = false; | ||
143 | - c.g2 = false; | ||
144 | |||
145 | t = tcg_temp_new_i64(); | ||
146 | tcg_gen_shri_i64(t, regs[r1], 32); | ||
147 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) | ||
148 | |||
149 | c.cond = TCG_COND_NE; | ||
150 | c.is_64 = true; | ||
151 | - c.g1 = true; | ||
152 | - c.g2 = false; | ||
153 | |||
154 | tcg_gen_subi_i64(regs[r1], regs[r1], 1); | ||
155 | c.u.s64.a = regs[r1]; | ||
156 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) | ||
157 | |||
158 | c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); | ||
159 | c.is_64 = false; | ||
160 | - c.g1 = false; | ||
161 | - c.g2 = false; | ||
162 | |||
163 | t = tcg_temp_new_i64(); | ||
164 | tcg_gen_add_i64(t, regs[r1], regs[r3]); | ||
165 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) | ||
166 | |||
167 | if (r1 == (r3 | 1)) { | ||
168 | c.u.s64.b = load_reg(r3 | 1); | ||
169 | - c.g2 = false; | ||
170 | } else { | ||
171 | c.u.s64.b = regs[r3 | 1]; | ||
172 | - c.g2 = true; | ||
173 | } | ||
174 | |||
175 | tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]); | ||
176 | c.u.s64.a = regs[r1]; | ||
177 | - c.g1 = true; | ||
178 | |||
179 | return help_branch(s, &c, is_imm, imm, o->in2); | ||
180 | } | ||
181 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_cj(DisasContext *s, DisasOps *o) | ||
182 | if (s->insn->data) { | ||
183 | c.cond = tcg_unsigned_cond(c.cond); | ||
184 | } | ||
185 | - c.is_64 = c.g1 = c.g2 = true; | ||
186 | + c.is_64 = true; | ||
187 | c.u.s64.a = o->in1; | ||
188 | c.u.s64.b = o->in2; | ||
189 | |||
190 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o) | ||
191 | if (c.is_64) { | ||
192 | tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b, | ||
193 | o->in2, o->in1); | ||
194 | - free_compare(&c); | ||
195 | } else { | ||
196 | TCGv_i32 t32 = tcg_temp_new_i32(); | ||
197 | TCGv_i64 t, z; | ||
198 | |||
199 | tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b); | ||
200 | - free_compare(&c); | ||
201 | |||
202 | t = tcg_temp_new_i64(); | ||
203 | tcg_gen_extu_i32_i64(t, t32); | ||
204 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o) | ||
205 | } else { | ||
206 | tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab); | ||
207 | } | ||
208 | - free_compare(&c); | ||
209 | |||
210 | r1 = get_field(s, r1); | ||
211 | a = get_address(s, 0, get_field(s, b2), get_field(s, d2)); | ||
212 | -- | ||
213 | 2.34.1 | ||
214 | |||
New patch | |||
---|---|---|---|
1 | Translators are no longer required to free tcg temporaries. | ||
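
As a hedged illustration (not part of the diff), the resulting pattern in the vector translators is simply to drop the trailing free; for example, mirroring the op_vst hunk:

    /* Sketch only: tmp is reclaimed automatically at end of translation. */
    TCGv_i64 tmp = tcg_temp_new_i64();

    read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
    /* previously followed by tcg_temp_free_i64(tmp); now omitted */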
1 | 2 | ||
3 | Acked-by: David Hildenbrand <david@redhat.com> | ||
4 | Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/s390x/tcg/translate_vx.c.inc | 143 ---------------------------- | ||
9 | 1 file changed, 143 deletions(-) | ||
10 | |||
11 | diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/s390x/tcg/translate_vx.c.inc | ||
14 | +++ b/target/s390x/tcg/translate_vx.c.inc | ||
15 | @@ -XXX,XX +XXX,XX @@ static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr, | ||
16 | /* generate the final ptr by adding cpu_env */ | ||
17 | tcg_gen_trunc_i64_ptr(ptr, tmp); | ||
18 | tcg_gen_add_ptr(ptr, ptr, cpu_env); | ||
19 | - | ||
20 | - tcg_temp_free_i64(tmp); | ||
21 | } | ||
22 | |||
23 | #define gen_gvec_2(v1, v2, gen) \ | ||
24 | @@ -XXX,XX +XXX,XX @@ static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a, | ||
25 | fn(dl, dh, al, ah, bl, bh); | ||
26 | write_vec_element_i64(dh, d, 0, ES_64); | ||
27 | write_vec_element_i64(dl, d, 1, ES_64); | ||
28 | - | ||
29 | - tcg_temp_free_i64(dh); | ||
30 | - tcg_temp_free_i64(dl); | ||
31 | - tcg_temp_free_i64(ah); | ||
32 | - tcg_temp_free_i64(al); | ||
33 | - tcg_temp_free_i64(bh); | ||
34 | - tcg_temp_free_i64(bl); | ||
35 | } | ||
36 | |||
37 | typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, | ||
38 | @@ -XXX,XX +XXX,XX @@ static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a, | ||
39 | fn(dl, dh, al, ah, bl, bh, cl, ch); | ||
40 | write_vec_element_i64(dh, d, 0, ES_64); | ||
41 | write_vec_element_i64(dl, d, 1, ES_64); | ||
42 | - | ||
43 | - tcg_temp_free_i64(dh); | ||
44 | - tcg_temp_free_i64(dl); | ||
45 | - tcg_temp_free_i64(ah); | ||
46 | - tcg_temp_free_i64(al); | ||
47 | - tcg_temp_free_i64(bh); | ||
48 | - tcg_temp_free_i64(bl); | ||
49 | - tcg_temp_free_i64(ch); | ||
50 | - tcg_temp_free_i64(cl); | ||
51 | } | ||
52 | |||
53 | static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, | ||
54 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vge(DisasContext *s, DisasOps *o) | ||
55 | |||
56 | tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); | ||
57 | write_vec_element_i64(tmp, get_field(s, v1), enr, es); | ||
58 | - tcg_temp_free_i64(tmp); | ||
59 | return DISAS_NEXT; | ||
60 | } | ||
61 | |||
62 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o) | ||
63 | write_vec_element_i64(t, get_field(s, v1), 0, ES_64); | ||
64 | tcg_gen_movi_i64(t, generate_byte_mask(i2)); | ||
65 | write_vec_element_i64(t, get_field(s, v1), 1, ES_64); | ||
66 | - tcg_temp_free_i64(t); | ||
67 | } | ||
68 | return DISAS_NEXT; | ||
69 | } | ||
70 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vl(DisasContext *s, DisasOps *o) | ||
71 | tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ); | ||
72 | write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); | ||
73 | write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); | ||
74 | - tcg_temp_free(t0); | ||
75 | - tcg_temp_free(t1); | ||
76 | return DISAS_NEXT; | ||
77 | } | ||
78 | |||
79 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o) | ||
80 | tmp = tcg_temp_new_i64(); | ||
81 | tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); | ||
82 | gen_gvec_dup_i64(es, get_field(s, v1), tmp); | ||
83 | - tcg_temp_free_i64(tmp); | ||
84 | return DISAS_NEXT; | ||
85 | } | ||
86 | |||
87 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vlebr(DisasContext *s, DisasOps *o) | ||
88 | tmp = tcg_temp_new_i64(); | ||
89 | tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es); | ||
90 | write_vec_element_i64(tmp, get_field(s, v1), enr, es); | ||
91 | - tcg_temp_free_i64(tmp); | ||
92 | return DISAS_NEXT; | ||
93 | } | ||
94 | |||
95 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vlbrrep(DisasContext *s, DisasOps *o) | ||
96 | tmp = tcg_temp_new_i64(); | ||
97 | tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es); | ||
98 | gen_gvec_dup_i64(es, get_field(s, v1), tmp); | ||
99 | - tcg_temp_free_i64(tmp); | ||
100 | return DISAS_NEXT; | ||
101 | } | ||
102 | |||
103 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vllebrz(DisasContext *s, DisasOps *o) | ||
104 | |||
105 | write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); | ||
106 | write_vec_element_i64(tcg_constant_i64(0), get_field(s, v1), 1, ES_64); | ||
107 | - tcg_temp_free_i64(tmp); | ||
108 | return DISAS_NEXT; | ||
109 | } | ||
110 | |||
111 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vlbr(DisasContext *s, DisasOps *o) | ||
112 | write: | ||
113 | write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); | ||
114 | write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); | ||
115 | - | ||
116 | - tcg_temp_free(t0); | ||
117 | - tcg_temp_free(t1); | ||
118 | return DISAS_NEXT; | ||
119 | } | ||
120 | |||
121 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vle(DisasContext *s, DisasOps *o) | ||
122 | tmp = tcg_temp_new_i64(); | ||
123 | tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); | ||
124 | write_vec_element_i64(tmp, get_field(s, v1), enr, es); | ||
125 | - tcg_temp_free_i64(tmp); | ||
126 | return DISAS_NEXT; | ||
127 | } | ||
128 | |||
129 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vler(DisasContext *s, DisasOps *o) | ||
130 | |||
131 | write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); | ||
132 | write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); | ||
133 | - tcg_temp_free(t0); | ||
134 | - tcg_temp_free(t1); | ||
135 | return DISAS_NEXT; | ||
136 | } | ||
137 | |||
138 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o) | ||
139 | default: | ||
140 | g_assert_not_reached(); | ||
141 | } | ||
142 | - tcg_temp_free_ptr(ptr); | ||
143 | - | ||
144 | return DISAS_NEXT; | ||
145 | } | ||
146 | |||
147 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vllez(DisasContext *s, DisasOps *o) | ||
148 | tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es); | ||
149 | gen_gvec_dup_imm(es, get_field(s, v1), 0); | ||
150 | write_vec_element_i64(t, get_field(s, v1), enr, es); | ||
151 | - tcg_temp_free_i64(t); | ||
152 | return DISAS_NEXT; | ||
153 | } | ||
154 | |||
155 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vlm(DisasContext *s, DisasOps *o) | ||
156 | |||
157 | /* Store the last element, loaded first */ | ||
158 | write_vec_element_i64(t0, v1, 1, ES_64); | ||
159 | - | ||
160 | - tcg_temp_free_i64(t0); | ||
161 | - tcg_temp_free_i64(t1); | ||
162 | return DISAS_NEXT; | ||
163 | } | ||
164 | |||
165 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o) | ||
166 | |||
167 | tcg_gen_addi_ptr(a0, cpu_env, v1_offs); | ||
168 | gen_helper_vll(cpu_env, a0, o->addr1, bytes); | ||
169 | - tcg_temp_free_i64(bytes); | ||
170 | - tcg_temp_free_ptr(a0); | ||
171 | return DISAS_NEXT; | ||
172 | } | ||
173 | |||
174 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o) | ||
175 | default: | ||
176 | g_assert_not_reached(); | ||
177 | } | ||
178 | - tcg_temp_free_ptr(ptr); | ||
179 | - | ||
180 | return DISAS_NEXT; | ||
181 | } | ||
182 | |||
183 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vll(DisasContext *s, DisasOps *o) | ||
184 | tcg_gen_addi_i64(o->in2, o->in2, 1); | ||
185 | tcg_gen_addi_ptr(a0, cpu_env, v1_offs); | ||
186 | gen_helper_vll(cpu_env, a0, o->addr1, o->in2); | ||
187 | - tcg_temp_free_ptr(a0); | ||
188 | return DISAS_NEXT; | ||
189 | } | ||
190 | |||
191 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vmr(DisasContext *s, DisasOps *o) | ||
192 | write_vec_element_i64(tmp, v1, dst_idx, es); | ||
193 | } | ||
194 | } | ||
195 | - tcg_temp_free_i64(tmp); | ||
196 | return DISAS_NEXT; | ||
197 | } | ||
198 | |||
199 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) | ||
200 | } | ||
201 | write_vec_element_i64(tmp, v1, dst_idx, dst_es); | ||
202 | } | ||
203 | - tcg_temp_free_i64(tmp); | ||
204 | } else { | ||
205 | gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]); | ||
206 | } | ||
207 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o) | ||
208 | read_vec_element_i64(t1, get_field(s, v3), i3, ES_64); | ||
209 | write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); | ||
210 | write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); | ||
211 | - tcg_temp_free_i64(t0); | ||
212 | - tcg_temp_free_i64(t1); | ||
213 | return DISAS_NEXT; | ||
214 | } | ||
215 | |||
216 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vsce(DisasContext *s, DisasOps *o) | ||
217 | |||
218 | read_vec_element_i64(tmp, get_field(s, v1), enr, es); | ||
219 | tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); | ||
220 | - tcg_temp_free_i64(tmp); | ||
221 | return DISAS_NEXT; | ||
222 | } | ||
223 | |||
224 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vseg(DisasContext *s, DisasOps *o) | ||
225 | write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); | ||
226 | read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN); | ||
227 | write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); | ||
228 | - tcg_temp_free_i64(tmp); | ||
229 | return DISAS_NEXT; | ||
230 | } | ||
231 | |||
232 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vst(DisasContext *s, DisasOps *o) | ||
233 | gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); | ||
234 | read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); | ||
235 | tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ); | ||
236 | - tcg_temp_free_i64(tmp); | ||
237 | return DISAS_NEXT; | ||
238 | } | ||
239 | |||
240 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vstebr(DisasContext *s, DisasOps *o) | ||
241 | tmp = tcg_temp_new_i64(); | ||
242 | read_vec_element_i64(tmp, get_field(s, v1), enr, es); | ||
243 | tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es); | ||
244 | - tcg_temp_free_i64(tmp); | ||
245 | return DISAS_NEXT; | ||
246 | } | ||
247 | |||
248 | @@ -XXX,XX +XXX,XX @@ write: | ||
249 | tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ); | ||
250 | gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); | ||
251 | tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ); | ||
252 | - | ||
253 | - tcg_temp_free(t0); | ||
254 | - tcg_temp_free(t1); | ||
255 | return DISAS_NEXT; | ||
256 | } | ||
257 | |||
258 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vste(DisasContext *s, DisasOps *o) | ||
259 | tmp = tcg_temp_new_i64(); | ||
260 | read_vec_element_i64(tmp, get_field(s, v1), enr, es); | ||
261 | tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); | ||
262 | - tcg_temp_free_i64(tmp); | ||
263 | return DISAS_NEXT; | ||
264 | } | ||
265 | |||
266 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vster(DisasContext *s, DisasOps *o) | ||
267 | tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_TEUQ); | ||
268 | gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); | ||
269 | tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ); | ||
270 | - | ||
271 | - tcg_temp_free(t0); | ||
272 | - tcg_temp_free(t1); | ||
273 | return DISAS_NEXT; | ||
274 | } | ||
275 | |||
276 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vstm(DisasContext *s, DisasOps *o) | ||
277 | } | ||
278 | gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); | ||
279 | } | ||
280 | - tcg_temp_free_i64(tmp); | ||
281 | return DISAS_NEXT; | ||
282 | } | ||
283 | |||
284 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vstl(DisasContext *s, DisasOps *o) | ||
285 | tcg_gen_addi_i64(o->in2, o->in2, 1); | ||
286 | tcg_gen_addi_ptr(a0, cpu_env, v1_offs); | ||
287 | gen_helper_vstl(cpu_env, a0, o->addr1, o->in2); | ||
288 | - tcg_temp_free_ptr(a0); | ||
289 | return DISAS_NEXT; | ||
290 | } | ||
291 | |||
292 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vup(DisasContext *s, DisasOps *o) | ||
293 | write_vec_element_i64(tmp, v1, dst_idx, dst_es); | ||
294 | } | ||
295 | } | ||
296 | - tcg_temp_free_i64(tmp); | ||
297 | return DISAS_NEXT; | ||
298 | } | ||
299 | |||
300 | @@ -XXX,XX +XXX,XX @@ static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es) | ||
301 | /* Isolate and shift the carry into position */ | ||
302 | tcg_gen_and_i64(d, d, msb_mask); | ||
303 | tcg_gen_shri_i64(d, d, msb_bit_nr); | ||
304 | - | ||
305 | - tcg_temp_free_i64(t1); | ||
306 | - tcg_temp_free_i64(t2); | ||
307 | - tcg_temp_free_i64(t3); | ||
308 | } | ||
309 | |||
310 | static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
311 | @@ -XXX,XX +XXX,XX @@ static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
312 | |||
313 | tcg_gen_add_i32(t, a, b); | ||
314 | tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b); | ||
315 | - tcg_temp_free_i32(t); | ||
316 | } | ||
317 | |||
318 | static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
319 | @@ -XXX,XX +XXX,XX @@ static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) | ||
320 | |||
321 | tcg_gen_add_i64(t, a, b); | ||
322 | tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b); | ||
323 | - tcg_temp_free_i64(t); | ||
324 | } | ||
325 | |||
326 | static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, | ||
327 | @@ -XXX,XX +XXX,XX @@ static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, | ||
328 | tcg_gen_add2_i64(tl, th, th, zero, ah, zero); | ||
329 | tcg_gen_add2_i64(tl, dl, tl, th, bh, zero); | ||
330 | tcg_gen_mov_i64(dh, zero); | ||
331 | - | ||
332 | - tcg_temp_free_i64(th); | ||
333 | - tcg_temp_free_i64(tl); | ||
334 | } | ||
335 | |||
336 | static DisasJumpType op_vacc(DisasContext *s, DisasOps *o) | ||
337 | @@ -XXX,XX +XXX,XX @@ static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, | ||
338 | tcg_gen_extract_i64(tl, cl, 0, 1); | ||
339 | tcg_gen_add2_i64(dl, dh, al, ah, bl, bh); | ||
340 | tcg_gen_add2_i64(dl, dh, dl, dh, tl, zero); | ||
341 | - | ||
342 | - tcg_temp_free_i64(tl); | ||
343 | } | ||
344 | |||
345 | static DisasJumpType op_vac(DisasContext *s, DisasOps *o) | ||
346 | @@ -XXX,XX +XXX,XX @@ static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, | ||
347 | tcg_gen_add2_i64(tl, th, th, zero, ah, zero); | ||
348 | tcg_gen_add2_i64(tl, dl, tl, th, bh, zero); | ||
349 | tcg_gen_mov_i64(dh, zero); | ||
350 | - | ||
351 | - tcg_temp_free_i64(tl); | ||
352 | - tcg_temp_free_i64(th); | ||
353 | } | ||
354 | |||
355 | static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o) | ||
356 | @@ -XXX,XX +XXX,XX @@ static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
357 | tcg_gen_addi_i64(t0, t0, 1); | ||
358 | tcg_gen_shri_i64(t0, t0, 1); | ||
359 | tcg_gen_extrl_i64_i32(d, t0); | ||
360 | - | ||
361 | - tcg_temp_free(t0); | ||
362 | - tcg_temp_free(t1); | ||
363 | } | ||
364 | |||
365 | static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) | ||
366 | @@ -XXX,XX +XXX,XX @@ static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) | ||
367 | tcg_gen_add2_i64(dl, dh, al, ah, bl, bh); | ||
368 | gen_addi2_i64(dl, dh, dl, dh, 1); | ||
369 | tcg_gen_extract2_i64(dl, dl, dh, 1); | ||
370 | - | ||
371 | - tcg_temp_free_i64(dh); | ||
372 | - tcg_temp_free_i64(ah); | ||
373 | - tcg_temp_free_i64(bh); | ||
374 | } | ||
375 | |||
376 | static DisasJumpType op_vavg(DisasContext *s, DisasOps *o) | ||
377 | @@ -XXX,XX +XXX,XX @@ static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
378 | tcg_gen_addi_i64(t0, t0, 1); | ||
379 | tcg_gen_shri_i64(t0, t0, 1); | ||
380 | tcg_gen_extrl_i64_i32(d, t0); | ||
381 | - | ||
382 | - tcg_temp_free(t0); | ||
383 | - tcg_temp_free(t1); | ||
384 | } | ||
385 | |||
386 | static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) | ||
387 | @@ -XXX,XX +XXX,XX @@ static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) | ||
388 | tcg_gen_add2_i64(dl, dh, al, zero, bl, zero); | ||
389 | gen_addi2_i64(dl, dh, dl, dh, 1); | ||
390 | tcg_gen_extract2_i64(dl, dl, dh, 1); | ||
391 | - | ||
392 | - tcg_temp_free_i64(dh); | ||
393 | } | ||
394 | |||
395 | static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o) | ||
396 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o) | ||
397 | } | ||
398 | gen_gvec_dup_imm(ES_32, get_field(s, v1), 0); | ||
399 | write_vec_element_i32(sum, get_field(s, v1), 1, ES_32); | ||
400 | - | ||
401 | - tcg_temp_free_i32(tmp); | ||
402 | - tcg_temp_free_i32(sum); | ||
403 | return DISAS_NEXT; | ||
404 | } | ||
405 | |||
406 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vc(DisasContext *s, DisasOps *o) | ||
407 | read_vec_element_i64(high, get_field(s, v1), 0, ES_64); | ||
408 | read_vec_element_i64(low, get_field(s, v1), 1, ES_64); | ||
409 | gen_op_update2_cc_i64(s, CC_OP_VC, low, high); | ||
410 | - | ||
411 | - tcg_temp_free_i64(low); | ||
412 | - tcg_temp_free_i64(high); | ||
413 | } | ||
414 | return DISAS_NEXT; | ||
415 | } | ||
416 | @@ -XXX,XX +XXX,XX @@ static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) | ||
417 | |||
418 | tcg_gen_mul_i32(t0, a, b); | ||
419 | tcg_gen_add_i32(d, t0, c); | ||
420 | - | ||
421 | - tcg_temp_free_i32(t0); | ||
422 | } | ||
423 | |||
424 | static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) | ||
425 | @@ -XXX,XX +XXX,XX @@ static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) | ||
426 | tcg_gen_mul_i64(t0, t0, t1); | ||
427 | tcg_gen_add_i64(t0, t0, t2); | ||
428 | tcg_gen_extrh_i64_i32(d, t0); | ||
429 | - | ||
430 | - tcg_temp_free(t0); | ||
431 | - tcg_temp_free(t1); | ||
432 | - tcg_temp_free(t2); | ||
433 | } | ||
434 | |||
435 | static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) | ||
436 | @@ -XXX,XX +XXX,XX @@ static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) | ||
437 | tcg_gen_mul_i64(t0, t0, t1); | ||
438 | tcg_gen_add_i64(t0, t0, t2); | ||
439 | tcg_gen_extrh_i64_i32(d, t0); | ||
440 | - | ||
441 | - tcg_temp_free(t0); | ||
442 | - tcg_temp_free(t1); | ||
443 | - tcg_temp_free(t2); | ||
444 | } | ||
445 | |||
446 | static DisasJumpType op_vma(DisasContext *s, DisasOps *o) | ||
447 | @@ -XXX,XX +XXX,XX @@ static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
448 | TCGv_i32 t = tcg_temp_new_i32(); | ||
449 | |||
450 | tcg_gen_muls2_i32(t, d, a, b); | ||
451 | - tcg_temp_free_i32(t); | ||
452 | } | ||
453 | |||
454 | static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
455 | @@ -XXX,XX +XXX,XX @@ static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) | ||
456 | TCGv_i32 t = tcg_temp_new_i32(); | ||
457 | |||
458 | tcg_gen_mulu2_i32(t, d, a, b); | ||
459 | - tcg_temp_free_i32(t); | ||
460 | } | ||
461 | |||
462 | static DisasJumpType op_vm(DisasContext *s, DisasOps *o) | ||
463 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o) | ||
464 | /* Store final result into v1. */ | ||
465 | write_vec_element_i64(h1, get_field(s, v1), 0, ES_64); | ||
466 | write_vec_element_i64(l1, get_field(s, v1), 1, ES_64); | ||
467 | - | ||
468 | - tcg_temp_free_i64(l1); | ||
469 | - tcg_temp_free_i64(h1); | ||
470 | - tcg_temp_free_i64(l2); | ||
471 | - tcg_temp_free_i64(h2); | ||
472 | return DISAS_NEXT; | ||
473 | } | ||
474 | |||
475 | @@ -XXX,XX +XXX,XX @@ static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c) | ||
476 | tcg_gen_and_i32(t, t, b); | ||
477 | tcg_gen_andc_i32(d, d, b); | ||
478 | tcg_gen_or_i32(d, d, t); | ||
479 | - | ||
480 | - tcg_temp_free_i32(t); | ||
481 | } | ||
482 | |||
483 | static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c) | ||
484 | @@ -XXX,XX +XXX,XX @@ static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c) | ||
485 | tcg_gen_and_i64(t, t, b); | ||
486 | tcg_gen_andc_i64(d, d, b); | ||
487 | tcg_gen_or_i64(d, d, t); | ||
488 | - | ||
489 | - tcg_temp_free_i64(t); | ||
490 | } | ||
491 | |||
492 | static DisasJumpType op_verim(DisasContext *s, DisasOps *o) | ||
493 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_ves(DisasContext *s, DisasOps *o) | ||
494 | default: | ||
495 | g_assert_not_reached(); | ||
496 | } | ||
497 | - tcg_temp_free_i32(shift); | ||
498 | } | ||
499 | return DISAS_NEXT; | ||
500 | } | ||
501 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType gen_vsh_by_byte(DisasContext *s, DisasOps *o, | ||
502 | read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); | ||
503 | tcg_gen_andi_i64(shift, shift, byte ? 0x78 : 7); | ||
504 | gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), shift, 0, gen); | ||
505 | - tcg_temp_free_i64(shift); | ||
506 | } | ||
507 | return DISAS_NEXT; | ||
508 | } | ||
509 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vsld(DisasContext *s, DisasOps *o) | ||
510 | |||
511 | write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); | ||
512 | write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); | ||
513 | - | ||
514 | - tcg_temp_free(t0); | ||
515 | - tcg_temp_free(t1); | ||
516 | - tcg_temp_free(t2); | ||
517 | return DISAS_NEXT; | ||
518 | } | ||
519 | |||
520 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vsrd(DisasContext *s, DisasOps *o) | ||
521 | |||
522 | write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); | ||
523 | write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); | ||
524 | - | ||
525 | - tcg_temp_free(t0); | ||
526 | - tcg_temp_free(t1); | ||
527 | - tcg_temp_free(t2); | ||
528 | return DISAS_NEXT; | ||
529 | } | ||
530 | |||
531 | @@ -XXX,XX +XXX,XX @@ static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, | ||
532 | /* "invert" the result: -1 -> 0; 0 -> 1 */ | ||
533 | tcg_gen_addi_i64(dl, th, 1); | ||
534 | tcg_gen_mov_i64(dh, zero); | ||
535 | - | ||
536 | - tcg_temp_free_i64(th); | ||
537 | - tcg_temp_free_i64(tl); | ||
538 | } | ||
539 | |||
540 | static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o) | ||
541 | @@ -XXX,XX +XXX,XX @@ static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, | ||
542 | tcg_gen_not_i64(tl, bl); | ||
543 | tcg_gen_not_i64(th, bh); | ||
544 | gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch); | ||
545 | - tcg_temp_free_i64(tl); | ||
546 | - tcg_temp_free_i64(th); | ||
547 | } | ||
548 | |||
549 | static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o) | ||
550 | @@ -XXX,XX +XXX,XX @@ static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, | ||
551 | tcg_gen_not_i64(tl, bl); | ||
552 | tcg_gen_not_i64(th, bh); | ||
553 | gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch); | ||
554 | - | ||
555 | - tcg_temp_free_i64(tl); | ||
556 | - tcg_temp_free_i64(th); | ||
557 | } | ||
558 | |||
559 | static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o) | ||
560 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o) | ||
561 | } | ||
562 | write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64); | ||
563 | } | ||
564 | - tcg_temp_free_i64(sum); | ||
565 | - tcg_temp_free_i64(tmp); | ||
566 | return DISAS_NEXT; | ||
567 | } | ||
568 | |||
569 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o) | ||
570 | } | ||
571 | write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64); | ||
572 | write_vec_element_i64(suml, get_field(s, v1), 1, ES_64); | ||
573 | - | ||
574 | - tcg_temp_free_i64(sumh); | ||
575 | - tcg_temp_free_i64(suml); | ||
576 | - tcg_temp_free_i64(tmpl); | ||
577 | return DISAS_NEXT; | ||
578 | } | ||
579 | |||
580 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vsum(DisasContext *s, DisasOps *o) | ||
581 | } | ||
582 | write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32); | ||
583 | } | ||
584 | - tcg_temp_free_i32(sum); | ||
585 | - tcg_temp_free_i32(tmp); | ||
586 | return DISAS_NEXT; | ||
587 | } | ||
588 | |||
589 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o) | ||
590 | read_vec_element_i64(tmp, v2, 1, ES_64); | ||
591 | write_vec_element_i64(tmp, v1, 1, ES_64); | ||
592 | } | ||
593 | - | ||
594 | - tcg_temp_free_i64(tmp); | ||
595 | - | ||
596 | return DISAS_NEXT; | ||
597 | } | ||
598 | |||
599 | -- | ||
600 | 2.34.1 | ||
601 | |||
New patch | |||
---|---|---|---|
1 | Translators are no longer required to free tcg temporaries. | ||
1 | 2 | ||
3 | Acked-by: David Hildenbrand <david@redhat.com> | ||
4 | Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/s390x/tcg/translate.c | 105 ----------------------------------- | ||
9 | 1 file changed, 105 deletions(-) | ||
10 | |||
11 | diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/s390x/tcg/translate.c | ||
14 | +++ b/target/s390x/tcg/translate.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static TCGv_i128 load_freg_128(int reg) | ||
16 | TCGv_i128 r = tcg_temp_new_i128(); | ||
17 | |||
18 | tcg_gen_concat_i64_i128(r, l, h); | ||
19 | - tcg_temp_free_i64(h); | ||
20 | - tcg_temp_free_i64(l); | ||
21 | return r; | ||
22 | } | ||
23 | |||
24 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, | ||
25 | TCGv_i64 z = tcg_constant_i64(0); | ||
26 | tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b); | ||
27 | tcg_gen_extu_i32_i64(t1, t0); | ||
28 | - tcg_temp_free_i32(t0); | ||
29 | tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next); | ||
30 | per_branch_cond(s, TCG_COND_NE, t1, z); | ||
31 | - tcg_temp_free_i64(t1); | ||
32 | } | ||
33 | |||
34 | ret = DISAS_PC_UPDATED; | ||
35 | @@ -XXX,XX +XXX,XX @@ static void save_link_info(DisasContext *s, DisasOps *o) | ||
36 | tcg_gen_extu_i32_i64(t, cc_op); | ||
37 | tcg_gen_shli_i64(t, t, 28); | ||
38 | tcg_gen_or_i64(o->out, o->out, t); | ||
39 | - tcg_temp_free_i64(t); | ||
40 | } | ||
41 | |||
42 | static DisasJumpType op_bal(DisasContext *s, DisasOps *o) | ||
43 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) | ||
44 | c.u.s32.a = tcg_temp_new_i32(); | ||
45 | c.u.s32.b = tcg_constant_i32(0); | ||
46 | tcg_gen_extrl_i64_i32(c.u.s32.a, t); | ||
47 | - tcg_temp_free_i64(t); | ||
48 | |||
49 | return help_branch(s, &c, is_imm, imm, o->in2); | ||
50 | } | ||
51 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) | ||
52 | c.u.s32.a = tcg_temp_new_i32(); | ||
53 | c.u.s32.b = tcg_constant_i32(0); | ||
54 | tcg_gen_extrl_i64_i32(c.u.s32.a, t); | ||
55 | - tcg_temp_free_i64(t); | ||
56 | |||
57 | return help_branch(s, &c, 1, imm, o->in2); | ||
58 | } | ||
59 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) | ||
60 | tcg_gen_extrl_i64_i32(c.u.s32.a, t); | ||
61 | tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]); | ||
62 | store_reg32_i64(r1, t); | ||
63 | - tcg_temp_free_i64(t); | ||
64 | |||
65 | return help_branch(s, &c, is_imm, imm, o->in2); | ||
66 | } | ||
67 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_cksm(DisasContext *s, DisasOps *o) | ||
68 | gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]); | ||
69 | set_cc_static(s); | ||
70 | tcg_gen_extr_i128_i64(o->out, len, pair); | ||
71 | - tcg_temp_free_i128(pair); | ||
72 | |||
73 | tcg_gen_add_i64(regs[r2], regs[r2], len); | ||
74 | tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len); | ||
75 | - tcg_temp_free_i64(len); | ||
76 | |||
77 | return DISAS_NEXT; | ||
78 | } | ||
79 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_clm(DisasContext *s, DisasOps *o) | ||
80 | tcg_gen_extrl_i64_i32(t1, o->in1); | ||
81 | gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2); | ||
82 | set_cc_static(s); | ||
83 | - tcg_temp_free_i32(t1); | ||
84 | return DISAS_NEXT; | ||
85 | } | ||
86 | |||
87 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_clst(DisasContext *s, DisasOps *o) | ||
88 | |||
89 | gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2); | ||
90 | tcg_gen_extr_i128_i64(o->in2, o->in1, pair); | ||
91 | - tcg_temp_free_i128(pair); | ||
92 | |||
93 | set_cc_static(s); | ||
94 | return DISAS_NEXT; | ||
95 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_cps(DisasContext *s, DisasOps *o) | ||
96 | tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull); | ||
97 | tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull); | ||
98 | tcg_gen_or_i64(o->out, o->out, t); | ||
99 | - tcg_temp_free_i64(t); | ||
100 | return DISAS_NEXT; | ||
101 | } | ||
102 | |||
103 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_cs(DisasContext *s, DisasOps *o) | ||
104 | addr = get_address(s, 0, b2, d2); | ||
105 | tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1, | ||
106 | get_mem_index(s), s->insn->data | MO_ALIGN); | ||
107 | - tcg_temp_free_i64(addr); | ||
108 | |||
109 | /* Are the memory and expected values (un)equal? Note that this setcond | ||
110 | produces the output CC value, thus the NE sense of the test. */ | ||
111 | cc = tcg_temp_new_i64(); | ||
112 | tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out); | ||
113 | tcg_gen_extrl_i64_i32(cc_op, cc); | ||
114 | - tcg_temp_free_i64(cc); | ||
115 | set_cc_static(s); | ||
116 | |||
117 | return DISAS_NEXT; | ||
118 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_csp(DisasContext *s, DisasOps *o) | ||
119 | tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE)); | ||
120 | tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2, | ||
121 | get_mem_index(s), mop | MO_ALIGN); | ||
122 | - tcg_temp_free_i64(addr); | ||
123 | |||
124 | /* Are the memory and expected values (un)equal? */ | ||
125 | cc = tcg_temp_new_i64(); | ||
126 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_csp(DisasContext *s, DisasOps *o) | ||
127 | } else { | ||
128 | tcg_gen_mov_i64(o->out, old); | ||
129 | } | ||
130 | - tcg_temp_free_i64(old); | ||
131 | |||
132 | /* If the comparison was equal, and the LSB of R2 was set, | ||
133 | then we need to flush the TLB (for all cpus). */ | ||
134 | tcg_gen_xori_i64(cc, cc, 1); | ||
135 | tcg_gen_and_i64(cc, cc, o->in2); | ||
136 | tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab); | ||
137 | - tcg_temp_free_i64(cc); | ||
138 | |||
139 | gen_helper_purge(cpu_env); | ||
140 | gen_set_label(lab); | ||
141 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) | ||
142 | TCGv_i32 t2 = tcg_temp_new_i32(); | ||
143 | tcg_gen_extrl_i64_i32(t2, o->in1); | ||
144 | gen_helper_cvd(t1, t2); | ||
145 | - tcg_temp_free_i32(t2); | ||
146 | tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s)); | ||
147 | - tcg_temp_free_i64(t1); | ||
148 | return DISAS_NEXT; | ||
149 | } | ||
150 | |||
151 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_divs64(DisasContext *s, DisasOps *o) | ||
152 | |||
153 | gen_helper_divs64(t, cpu_env, o->in1, o->in2); | ||
154 | tcg_gen_extr_i128_i64(o->out2, o->out, t); | ||
155 | - tcg_temp_free_i128(t); | ||
156 | return DISAS_NEXT; | ||
157 | } | ||
158 | |||
159 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_divu64(DisasContext *s, DisasOps *o) | ||
160 | |||
161 | gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2); | ||
162 | tcg_gen_extr_i128_i64(o->out2, o->out, t); | ||
163 | - tcg_temp_free_i128(t); | ||
164 | return DISAS_NEXT; | ||
165 | } | ||
166 | |||
167 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_epsw(DisasContext *s, DisasOps *o) | ||
168 | if (r2 != 0) { | ||
169 | store_reg32_i64(r2, psw_mask); | ||
170 | } | ||
171 | - | ||
172 | - tcg_temp_free_i64(t); | ||
173 | return DISAS_NEXT; | ||
174 | } | ||
175 | |||
176 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o) | ||
177 | |||
178 | tcg_gen_movi_i64(tmp, ccm); | ||
179 | gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out); | ||
180 | - tcg_temp_free_i64(tmp); | ||
181 | return DISAS_NEXT; | ||
182 | } | ||
183 | |||
184 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_ipm(DisasContext *s, DisasOps *o) | ||
185 | tcg_gen_extu_i32_i64(t2, cc_op); | ||
186 | tcg_gen_deposit_i64(t1, t1, t2, 4, 60); | ||
187 | tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8); | ||
188 | - tcg_temp_free_i64(t1); | ||
189 | - tcg_temp_free_i64(t2); | ||
190 | return DISAS_NEXT; | ||
191 | } | ||
192 | |||
193 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o) | ||
194 | |||
195 | t = tcg_temp_new_i64(); | ||
196 | tcg_gen_extu_i32_i64(t, t32); | ||
197 | - tcg_temp_free_i32(t32); | ||
198 | |||
199 | z = tcg_constant_i64(0); | ||
200 | tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1); | ||
201 | - tcg_temp_free_i64(t); | ||
202 | } | ||
203 | |||
204 | return DISAS_NEXT; | ||
205 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o) | ||
206 | /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */ | ||
207 | tcg_gen_shli_i64(t1, t1, 32); | ||
208 | gen_helper_load_psw(cpu_env, t1, t2); | ||
209 | - tcg_temp_free_i64(t1); | ||
210 | - tcg_temp_free_i64(t2); | ||
211 | return DISAS_NORETURN; | ||
212 | } | ||
213 | |||
214 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o) | ||
215 | tcg_gen_addi_i64(o->in2, o->in2, 8); | ||
216 | tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s)); | ||
217 | gen_helper_load_psw(cpu_env, t1, t2); | ||
218 | - tcg_temp_free_i64(t1); | ||
219 | - tcg_temp_free_i64(t2); | ||
220 | return DISAS_NORETURN; | ||
221 | } | ||
222 | #endif | ||
223 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) | ||
224 | if (unlikely(r1 == r3)) { | ||
225 | tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); | ||
226 | store_reg32_i64(r1, t1); | ||
227 | - tcg_temp_free(t1); | ||
228 | return DISAS_NEXT; | ||
229 | } | ||
230 | |||
231 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) | ||
232 | |||
233 | /* Only two registers to read. */ | ||
234 | if (((r1 + 1) & 15) == r3) { | ||
235 | - tcg_temp_free(t2); | ||
236 | - tcg_temp_free(t1); | ||
237 | return DISAS_NEXT; | ||
238 | } | ||
239 | |||
240 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) | ||
241 | tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); | ||
242 | store_reg32_i64(r1, t1); | ||
243 | } | ||
244 | - tcg_temp_free(t2); | ||
245 | - tcg_temp_free(t1); | ||
246 | - | ||
247 | return DISAS_NEXT; | ||
248 | } | ||
249 | |||
250 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) | ||
251 | if (unlikely(r1 == r3)) { | ||
252 | tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); | ||
253 | store_reg32h_i64(r1, t1); | ||
254 | - tcg_temp_free(t1); | ||
255 | return DISAS_NEXT; | ||
256 | } | ||
257 | |||
258 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) | ||
259 | |||
260 | /* Only two registers to read. */ | ||
261 | if (((r1 + 1) & 15) == r3) { | ||
262 | - tcg_temp_free(t2); | ||
263 | - tcg_temp_free(t1); | ||
264 | return DISAS_NEXT; | ||
265 | } | ||
266 | |||
267 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) | ||
268 | tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); | ||
269 | store_reg32h_i64(r1, t1); | ||
270 | } | ||
271 | - tcg_temp_free(t2); | ||
272 | - tcg_temp_free(t1); | ||
273 | - | ||
274 | return DISAS_NEXT; | ||
275 | } | ||
276 | |||
277 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) | ||
278 | tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15)); | ||
279 | tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s)); | ||
280 | tcg_gen_mov_i64(regs[r1], t1); | ||
281 | - tcg_temp_free(t2); | ||
282 | |||
283 | /* Only two registers to read. */ | ||
284 | if (((r1 + 1) & 15) == r3) { | ||
285 | - tcg_temp_free(t1); | ||
286 | return DISAS_NEXT; | ||
287 | } | ||
288 | |||
289 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) | ||
290 | tcg_gen_add_i64(o->in2, o->in2, t1); | ||
291 | tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); | ||
292 | } | ||
293 | - tcg_temp_free(t1); | ||
294 | - | ||
295 | return DISAS_NEXT; | ||
296 | } | ||
297 | |||
298 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) | ||
299 | a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2)); | ||
300 | tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN); | ||
301 | tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN); | ||
302 | - tcg_temp_free_i64(a1); | ||
303 | - tcg_temp_free_i64(a2); | ||
304 | |||
305 | /* ... and indicate that we performed them while interlocked. */ | ||
306 | gen_op_movi_cc(s, 0); | ||
307 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) | ||
308 | } | ||
309 | |||
310 | tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1])); | ||
311 | - tcg_temp_free_i64(ar1); | ||
312 | - | ||
313 | return DISAS_NEXT; | ||
314 | } | ||
315 | |||
316 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_maeb(DisasContext *s, DisasOps *o) | ||
317 | { | ||
318 | TCGv_i64 r3 = load_freg32_i64(get_field(s, r3)); | ||
319 | gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3); | ||
320 | - tcg_temp_free_i64(r3); | ||
321 | return DISAS_NEXT; | ||
322 | } | ||
323 | |||
324 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_madb(DisasContext *s, DisasOps *o) | ||
325 | { | ||
326 | TCGv_i64 r3 = load_freg(get_field(s, r3)); | ||
327 | gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3); | ||
328 | - tcg_temp_free_i64(r3); | ||
329 | return DISAS_NEXT; | ||
330 | } | ||
331 | |||
332 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_mseb(DisasContext *s, DisasOps *o) | ||
333 | { | ||
334 | TCGv_i64 r3 = load_freg32_i64(get_field(s, r3)); | ||
335 | gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3); | ||
336 | - tcg_temp_free_i64(r3); | ||
337 | return DISAS_NEXT; | ||
338 | } | ||
339 | |||
340 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_msdb(DisasContext *s, DisasOps *o) | ||
341 | { | ||
342 | TCGv_i64 r3 = load_freg(get_field(s, r3)); | ||
343 | gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3); | ||
344 | - tcg_temp_free_i64(r3); | ||
345 | return DISAS_NEXT; | ||
346 | } | ||
347 | |||
348 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_nabs(DisasContext *s, DisasOps *o) | ||
349 | |||
350 | tcg_gen_neg_i64(n, o->in2); | ||
351 | tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2); | ||
352 | - tcg_temp_free_i64(n); | ||
353 | return DISAS_NEXT; | ||
354 | } | ||
355 | |||
356 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_rll32(DisasContext *s, DisasOps *o) | ||
357 | tcg_gen_extrl_i64_i32(t2, o->in2); | ||
358 | tcg_gen_rotl_i32(to, t1, t2); | ||
359 | tcg_gen_extu_i32_i64(o->out, to); | ||
360 | - tcg_temp_free_i32(t1); | ||
361 | - tcg_temp_free_i32(t2); | ||
362 | - tcg_temp_free_i32(to); | ||
363 | return DISAS_NEXT; | ||
364 | } | ||
365 | |||
366 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o) | ||
367 | h = tcg_temp_new_i64(); | ||
368 | tcg_gen_shri_i64(h, regs[r1], 32); | ||
369 | tcg_gen_qemu_st32(h, a, get_mem_index(s)); | ||
370 | - tcg_temp_free_i64(h); | ||
371 | break; | ||
372 | default: | ||
373 | g_assert_not_reached(); | ||
374 | } | ||
375 | - tcg_temp_free_i64(a); | ||
376 | |||
377 | gen_set_label(lab); | ||
378 | return DISAS_NEXT; | ||
379 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_sla(DisasContext *s, DisasOps *o) | ||
380 | t = o->in1; | ||
381 | } | ||
382 | gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2); | ||
383 | - if (s->insn->data == 31) { | ||
384 | - tcg_temp_free_i64(t); | ||
385 | - } | ||
386 | tcg_gen_shl_i64(o->out, o->in1, o->in2); | ||
387 | /* The arithmetic left shift is curious in that it does not affect | ||
388 | the sign bit. Copy that over from the source unchanged. */ | ||
389 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o) | ||
390 | tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc)); | ||
391 | tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3); | ||
392 | tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc)); | ||
393 | - | ||
394 | - tcg_temp_free_i64(tmp); | ||
395 | return DISAS_NEXT; | ||
396 | } | ||
397 | |||
398 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_ectg(DisasContext *s, DisasOps *o) | ||
399 | |||
400 | /* store second operand in GR1 */ | ||
401 | tcg_gen_mov_i64(regs[1], o->in2); | ||
402 | - | ||
403 | - tcg_temp_free_i64(tmp); | ||
404 | return DISAS_NEXT; | ||
405 | } | ||
406 | |||
407 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcke(DisasContext *s, DisasOps *o) | ||
408 | tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s)); | ||
409 | tcg_gen_addi_i64(o->in2, o->in2, 8); | ||
410 | tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s)); | ||
411 | - tcg_temp_free_i64(c1); | ||
412 | - tcg_temp_free_i64(c2); | ||
413 | - tcg_temp_free_i64(todpr); | ||
414 | /* ??? We don't implement clock states. */ | ||
415 | gen_op_movi_cc(s, 0); | ||
416 | return DISAS_NEXT; | ||
417 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) | ||
418 | t = tcg_temp_new_i64(); | ||
419 | tcg_gen_shri_i64(t, psw_mask, 56); | ||
420 | tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s)); | ||
421 | - tcg_temp_free_i64(t); | ||
422 | |||
423 | if (s->fields.op == 0xac) { | ||
424 | tcg_gen_andi_i64(psw_mask, psw_mask, | ||
425 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) | ||
426 | } | ||
427 | break; | ||
428 | } | ||
429 | - tcg_temp_free_i64(tmp); | ||
430 | return DISAS_NEXT; | ||
431 | } | ||
432 | |||
433 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) | ||
434 | tcg_gen_add_i64(o->in2, o->in2, t4); | ||
435 | r1 = (r1 + 1) & 15; | ||
436 | } | ||
437 | - | ||
438 | - tcg_temp_free_i64(t); | ||
439 | return DISAS_NEXT; | ||
440 | } | ||
441 | |||
442 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_tre(DisasContext *s, DisasOps *o) | ||
443 | |||
444 | gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2); | ||
445 | tcg_gen_extr_i128_i64(o->out2, o->out, pair); | ||
446 | - tcg_temp_free_i128(pair); | ||
447 | set_cc_static(s); | ||
448 | return DISAS_NEXT; | ||
449 | } | ||
450 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_trXX(DisasContext *s, DisasOps *o) | ||
451 | } | ||
452 | gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes); | ||
453 | |||
454 | - tcg_temp_free_i32(tst); | ||
455 | set_cc_static(s); | ||
456 | return DISAS_NEXT; | ||
457 | } | ||
458 | @@ -XXX,XX +XXX,XX @@ static void wout_r1_D32(DisasContext *s, DisasOps *o) | ||
459 | store_reg32_i64(r1 + 1, o->out); | ||
460 | tcg_gen_shri_i64(t, o->out, 32); | ||
461 | store_reg32_i64(r1, t); | ||
462 | - tcg_temp_free_i64(t); | ||
463 | } | ||
464 | #define SPEC_wout_r1_D32 SPEC_r1_even | ||
465 | |||
466 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) | ||
467 | } | ||
468 | } | ||
469 | |||
470 | - /* Free any temporaries created by the helpers. */ | ||
471 | - if (o.out && !o.g_out) { | ||
472 | - tcg_temp_free_i64(o.out); | ||
473 | - } | ||
474 | - if (o.out2 && !o.g_out2) { | ||
475 | - tcg_temp_free_i64(o.out2); | ||
476 | - } | ||
477 | - if (o.in1 && !o.g_in1) { | ||
478 | - tcg_temp_free_i64(o.in1); | ||
479 | - } | ||
480 | - if (o.in2 && !o.g_in2) { | ||
481 | - tcg_temp_free_i64(o.in2); | ||
482 | - } | ||
483 | - if (o.addr1) { | ||
484 | - tcg_temp_free_i64(o.addr1); | ||
485 | - } | ||
486 | - if (o.out_128) { | ||
487 | - tcg_temp_free_i128(o.out_128); | ||
488 | - } | ||
489 | - if (o.in1_128) { | ||
490 | - tcg_temp_free_i128(o.in1_128); | ||
491 | - } | ||
492 | - if (o.in2_128) { | ||
493 | - tcg_temp_free_i128(o.in2_128); | ||
494 | - } | ||
495 | /* io should be the last instruction in tb when icount is enabled */ | ||
496 | if (unlikely(icount && ret == DISAS_NEXT)) { | ||
497 | ret = DISAS_TOO_MANY; | ||
498 | -- | ||
499 | 2.34.1 | ||
500 | |||
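The patch above only deletes code, so for context here is a minimal sketch, not taken from the series, of what an s390x-style translator helper looks like once the explicit frees are gone. The helper name gen_example_avg_step is invented for illustration; the TCG calls are the standard tcg_gen_* interface. Temporaries from tcg_temp_new_i64() now live until the translation block is finalized and are reclaimed by the TCG core, so the helper simply stops calling tcg_temp_free_i64():

    /* Minimal sketch, in the style of target/s390x/tcg/translate.c:
     * compute (a + b) >> 1 into d, ignoring overflow into bit 64,
     * using a scratch temporary. */
    static void gen_example_avg_step(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
    {
        TCGv_i64 t = tcg_temp_new_i64();    /* TB-lifetime temporary */

        tcg_gen_add_i64(t, a, b);
        tcg_gen_shri_i64(t, t, 1);
        tcg_gen_mov_i64(d, t);
        /* No tcg_temp_free_i64(t): the TCG core now releases translator
         * temporaries automatically when the TB is finalized. */
    }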
New patch | |||
---|---|---|---|
1 | These were trying to determine if o->in2 was available for | ||
2 | use as a temporary. It's better to just allocate a new one. | ||
1 | 3 | ||
4 | Acked-by: David Hildenbrand <david@redhat.com> | ||
5 | Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | --- | ||
9 | target/s390x/tcg/translate.c | 20 ++++++++++---------- | ||
10 | 1 file changed, 10 insertions(+), 10 deletions(-) | ||
11 | |||
12 | diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/target/s390x/tcg/translate.c | ||
15 | +++ b/target/s390x/tcg/translate.c | ||
16 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_andi(DisasContext *s, DisasOps *o) | ||
17 | int shift = s->insn->data & 0xff; | ||
18 | int size = s->insn->data >> 8; | ||
19 | uint64_t mask = ((1ull << size) - 1) << shift; | ||
20 | + TCGv_i64 t = tcg_temp_new_i64(); | ||
21 | |||
22 | - assert(!o->g_in2); | ||
23 | - tcg_gen_shli_i64(o->in2, o->in2, shift); | ||
24 | - tcg_gen_ori_i64(o->in2, o->in2, ~mask); | ||
25 | - tcg_gen_and_i64(o->out, o->in1, o->in2); | ||
26 | + tcg_gen_shli_i64(t, o->in2, shift); | ||
27 | + tcg_gen_ori_i64(t, t, ~mask); | ||
28 | + tcg_gen_and_i64(o->out, o->in1, t); | ||
29 | |||
30 | /* Produce the CC from only the bits manipulated. */ | ||
31 | tcg_gen_andi_i64(cc_dst, o->out, mask); | ||
32 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_ori(DisasContext *s, DisasOps *o) | ||
33 | int shift = s->insn->data & 0xff; | ||
34 | int size = s->insn->data >> 8; | ||
35 | uint64_t mask = ((1ull << size) - 1) << shift; | ||
36 | + TCGv_i64 t = tcg_temp_new_i64(); | ||
37 | |||
38 | - assert(!o->g_in2); | ||
39 | - tcg_gen_shli_i64(o->in2, o->in2, shift); | ||
40 | - tcg_gen_or_i64(o->out, o->in1, o->in2); | ||
41 | + tcg_gen_shli_i64(t, o->in2, shift); | ||
42 | + tcg_gen_or_i64(o->out, o->in1, t); | ||
43 | |||
44 | /* Produce the CC from only the bits manipulated. */ | ||
45 | tcg_gen_andi_i64(cc_dst, o->out, mask); | ||
46 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_xori(DisasContext *s, DisasOps *o) | ||
47 | int shift = s->insn->data & 0xff; | ||
48 | int size = s->insn->data >> 8; | ||
49 | uint64_t mask = ((1ull << size) - 1) << shift; | ||
50 | + TCGv_i64 t = tcg_temp_new_i64(); | ||
51 | |||
52 | - assert(!o->g_in2); | ||
53 | - tcg_gen_shli_i64(o->in2, o->in2, shift); | ||
54 | - tcg_gen_xor_i64(o->out, o->in1, o->in2); | ||
55 | + tcg_gen_shli_i64(t, o->in2, shift); | ||
56 | + tcg_gen_xor_i64(o->out, o->in1, t); | ||
57 | |||
58 | /* Produce the CC from only the bits manipulated. */ | ||
59 | tcg_gen_andi_i64(cc_dst, o->out, mask); | ||
60 | -- | ||
61 | 2.34.1 | ||
62 | |||
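For readers less familiar with the DisasOps convention touched above, a minimal sketch follows; op_example_ori is a hypothetical handler, not part of the patch. The old assert(!o->g_in2) existed because the operand decoder can wire o->in2 directly to a guest register, which must not be modified in place; writing into a freshly allocated temporary avoids the question entirely:

    /* Hypothetical handler in the style of the three converted above:
     * OR the first operand with an immediate-shifted second operand,
     * using a scratch temporary instead of modifying o->in2. */
    static DisasJumpType op_example_ori(DisasContext *s, DisasOps *o)
    {
        int shift = s->insn->data & 0xff;
        TCGv_i64 t = tcg_temp_new_i64();    /* scratch; in2 stays intact */

        tcg_gen_shli_i64(t, o->in2, shift);
        tcg_gen_or_i64(o->out, o->in1, t);
        return DISAS_NEXT;
    }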
New patch | |||
---|---|---|---|
1 | These fields are no longer read, so remove them and the writes. | ||
1 | 2 | ||
3 | Acked-by: David Hildenbrand <david@redhat.com> | ||
4 | Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/s390x/tcg/translate.c | 17 ----------------- | ||
9 | 1 file changed, 17 deletions(-) | ||
10 | |||
11 | diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/s390x/tcg/translate.c | ||
14 | +++ b/target/s390x/tcg/translate.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static const DisasFormatInfo format_info[] = { | ||
16 | them, and store them back. See the "in1", "in2", "prep", "wout" sets | ||
17 | of routines below for more details. */ | ||
18 | typedef struct { | ||
19 | - bool g_out, g_out2, g_in1, g_in2; | ||
20 | TCGv_i64 out, out2, in1, in2; | ||
21 | TCGv_i64 addr1; | ||
22 | TCGv_i128 out_128, in1_128, in2_128; | ||
23 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_mc(DisasContext *s, DisasOps *o) | ||
24 | static DisasJumpType op_mov2(DisasContext *s, DisasOps *o) | ||
25 | { | ||
26 | o->out = o->in2; | ||
27 | - o->g_out = o->g_in2; | ||
28 | o->in2 = NULL; | ||
29 | - o->g_in2 = false; | ||
30 | return DISAS_NEXT; | ||
31 | } | ||
32 | |||
33 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) | ||
34 | TCGv ar1 = tcg_temp_new_i64(); | ||
35 | |||
36 | o->out = o->in2; | ||
37 | - o->g_out = o->g_in2; | ||
38 | o->in2 = NULL; | ||
39 | - o->g_in2 = false; | ||
40 | |||
41 | switch (s->base.tb->flags & FLAG_MASK_ASC) { | ||
42 | case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT: | ||
43 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_movx(DisasContext *s, DisasOps *o) | ||
44 | { | ||
45 | o->out = o->in1; | ||
46 | o->out2 = o->in2; | ||
47 | - o->g_out = o->g_in1; | ||
48 | - o->g_out2 = o->g_in2; | ||
49 | o->in1 = NULL; | ||
50 | o->in2 = NULL; | ||
51 | - o->g_in1 = o->g_in2 = false; | ||
52 | return DISAS_NEXT; | ||
53 | } | ||
54 | |||
55 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o) | ||
56 | /* If this is a test-only form, arrange to discard the result. */ | ||
57 | if (i3 & 0x80) { | ||
58 | o->out = tcg_temp_new_i64(); | ||
59 | - o->g_out = false; | ||
60 | } | ||
61 | |||
62 | i3 &= 63; | ||
63 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_zero2(DisasContext *s, DisasOps *o) | ||
64 | { | ||
65 | o->out = tcg_const_i64(0); | ||
66 | o->out2 = o->out; | ||
67 | - o->g_out2 = true; | ||
68 | return DISAS_NEXT; | ||
69 | } | ||
70 | |||
71 | @@ -XXX,XX +XXX,XX @@ static void prep_new_x(DisasContext *s, DisasOps *o) | ||
72 | static void prep_r1(DisasContext *s, DisasOps *o) | ||
73 | { | ||
74 | o->out = regs[get_field(s, r1)]; | ||
75 | - o->g_out = true; | ||
76 | } | ||
77 | #define SPEC_prep_r1 0 | ||
78 | |||
79 | @@ -XXX,XX +XXX,XX @@ static void prep_r1_P(DisasContext *s, DisasOps *o) | ||
80 | int r1 = get_field(s, r1); | ||
81 | o->out = regs[r1]; | ||
82 | o->out2 = regs[r1 + 1]; | ||
83 | - o->g_out = o->g_out2 = true; | ||
84 | } | ||
85 | #define SPEC_prep_r1_P SPEC_r1_even | ||
86 | |||
87 | @@ -XXX,XX +XXX,XX @@ static void in1_r1(DisasContext *s, DisasOps *o) | ||
88 | static void in1_r1_o(DisasContext *s, DisasOps *o) | ||
89 | { | ||
90 | o->in1 = regs[get_field(s, r1)]; | ||
91 | - o->g_in1 = true; | ||
92 | } | ||
93 | #define SPEC_in1_r1_o 0 | ||
94 | |||
95 | @@ -XXX,XX +XXX,XX @@ static void in1_r1p1(DisasContext *s, DisasOps *o) | ||
96 | static void in1_r1p1_o(DisasContext *s, DisasOps *o) | ||
97 | { | ||
98 | o->in1 = regs[get_field(s, r1) + 1]; | ||
99 | - o->g_in1 = true; | ||
100 | } | ||
101 | #define SPEC_in1_r1p1_o SPEC_r1_even | ||
102 | |||
103 | @@ -XXX,XX +XXX,XX @@ static void in1_r3(DisasContext *s, DisasOps *o) | ||
104 | static void in1_r3_o(DisasContext *s, DisasOps *o) | ||
105 | { | ||
106 | o->in1 = regs[get_field(s, r3)]; | ||
107 | - o->g_in1 = true; | ||
108 | } | ||
109 | #define SPEC_in1_r3_o 0 | ||
110 | |||
111 | @@ -XXX,XX +XXX,XX @@ static void in1_m1_64(DisasContext *s, DisasOps *o) | ||
112 | static void in2_r1_o(DisasContext *s, DisasOps *o) | ||
113 | { | ||
114 | o->in2 = regs[get_field(s, r1)]; | ||
115 | - o->g_in2 = true; | ||
116 | } | ||
117 | #define SPEC_in2_r1_o 0 | ||
118 | |||
119 | @@ -XXX,XX +XXX,XX @@ static void in2_r2(DisasContext *s, DisasOps *o) | ||
120 | static void in2_r2_o(DisasContext *s, DisasOps *o) | ||
121 | { | ||
122 | o->in2 = regs[get_field(s, r2)]; | ||
123 | - o->g_in2 = true; | ||
124 | } | ||
125 | #define SPEC_in2_r2_o 0 | ||
126 | |||
127 | -- | ||
128 | 2.34.1 | ||
129 | |||
1 | Use the pc coming from db->pc_first rather than the TB. | 1 | Move the tcg_temp_free_* and tcg_temp_ebb_new_* declarations |
---|---|---|---|
2 | 2 | and inlines to the new header. These are private to the | |
3 | Use the cached host_addr rather than re-computing for the | 3 | implementation, and will prevent tcg_temp_free_* from creeping |
4 | first page. We still need a separate lookup for the second | 4 | back into the guest front ends. |
5 | page because it won't be computed for DisasContextBase until | ||
6 | the translator actually performs a read from the page. | ||
7 | 5 | ||
8 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 6 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> |
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 10 | --- |
11 | include/exec/plugin-gen.h | 7 ++++--- | 11 | include/tcg/tcg-temp-internal.h | 83 +++++++++++++++++++++++++++++++++ |
12 | accel/tcg/plugin-gen.c | 22 +++++++++++----------- | 12 | include/tcg/tcg.h | 54 --------------------- |
13 | accel/tcg/translator.c | 2 +- | 13 | accel/tcg/plugin-gen.c | 1 + |
14 | 3 files changed, 16 insertions(+), 15 deletions(-) | 14 | tcg/tcg-op-gvec.c | 1 + |
15 | 15 | tcg/tcg-op-vec.c | 1 + | |
16 | diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h | 16 | tcg/tcg-op.c | 1 + |
17 | index XXXXXXX..XXXXXXX 100644 | 17 | tcg/tcg.c | 1 + |
18 | --- a/include/exec/plugin-gen.h | 18 | 7 files changed, 88 insertions(+), 54 deletions(-) |
19 | +++ b/include/exec/plugin-gen.h | 19 | create mode 100644 include/tcg/tcg-temp-internal.h |
20 | @@ -XXX,XX +XXX,XX @@ struct DisasContextBase; | 20 | |
21 | 21 | diff --git a/include/tcg/tcg-temp-internal.h b/include/tcg/tcg-temp-internal.h | |
22 | #ifdef CONFIG_PLUGIN | 22 | new file mode 100644 |
23 | 23 | index XXXXXXX..XXXXXXX | |
24 | -bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool supress); | 24 | --- /dev/null |
25 | +bool plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db, | 25 | +++ b/include/tcg/tcg-temp-internal.h |
26 | + bool supress); | 26 | @@ -XXX,XX +XXX,XX @@ |
27 | void plugin_gen_tb_end(CPUState *cpu); | 27 | +/* |
28 | void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db); | 28 | + * TCG internals related to TCG temp allocation |
29 | void plugin_gen_insn_end(void); | 29 | + * |
30 | @@ -XXX,XX +XXX,XX @@ static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size) | 30 | + * Copyright (c) 2008 Fabrice Bellard |
31 | 31 | + * | |
32 | #else /* !CONFIG_PLUGIN */ | 32 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
33 | 33 | + * of this software and associated documentation files (the "Software"), to deal | |
34 | -static inline | 34 | + * in the Software without restriction, including without limitation the rights |
35 | -bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool supress) | 35 | + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
36 | +static inline bool | 36 | + * copies of the Software, and to permit persons to whom the Software is |
37 | +plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db, bool sup) | 37 | + * furnished to do so, subject to the following conditions: |
38 | { | 38 | + * |
39 | return false; | 39 | + * The above copyright notice and this permission notice shall be included in |
40 | } | 40 | + * all copies or substantial portions of the Software. |
41 | + * | ||
42 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
43 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
44 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
45 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
46 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
47 | + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
48 | + * THE SOFTWARE. | ||
49 | + */ | ||
50 | + | ||
51 | +#ifndef TCG_TEMP_INTERNAL_H | ||
52 | +#define TCG_TEMP_INTERNAL_H | ||
53 | + | ||
54 | +/* | ||
55 | + * Allocation and freeing of EBB temps is reserved to TCG internals | ||
56 | + */ | ||
57 | + | ||
58 | +void tcg_temp_free_internal(TCGTemp *); | ||
59 | + | ||
60 | +static inline void tcg_temp_free_i32(TCGv_i32 arg) | ||
61 | +{ | ||
62 | + tcg_temp_free_internal(tcgv_i32_temp(arg)); | ||
63 | +} | ||
64 | + | ||
65 | +static inline void tcg_temp_free_i64(TCGv_i64 arg) | ||
66 | +{ | ||
67 | + tcg_temp_free_internal(tcgv_i64_temp(arg)); | ||
68 | +} | ||
69 | + | ||
70 | +static inline void tcg_temp_free_i128(TCGv_i128 arg) | ||
71 | +{ | ||
72 | + tcg_temp_free_internal(tcgv_i128_temp(arg)); | ||
73 | +} | ||
74 | + | ||
75 | +static inline void tcg_temp_free_ptr(TCGv_ptr arg) | ||
76 | +{ | ||
77 | + tcg_temp_free_internal(tcgv_ptr_temp(arg)); | ||
78 | +} | ||
79 | + | ||
80 | +static inline void tcg_temp_free_vec(TCGv_vec arg) | ||
81 | +{ | ||
82 | + tcg_temp_free_internal(tcgv_vec_temp(arg)); | ||
83 | +} | ||
84 | + | ||
85 | +static inline TCGv_i32 tcg_temp_ebb_new_i32(void) | ||
86 | +{ | ||
87 | + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB); | ||
88 | + return temp_tcgv_i32(t); | ||
89 | +} | ||
90 | + | ||
91 | +static inline TCGv_i64 tcg_temp_ebb_new_i64(void) | ||
92 | +{ | ||
93 | + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB); | ||
94 | + return temp_tcgv_i64(t); | ||
95 | +} | ||
96 | + | ||
97 | +static inline TCGv_i128 tcg_temp_ebb_new_i128(void) | ||
98 | +{ | ||
99 | + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB); | ||
100 | + return temp_tcgv_i128(t); | ||
101 | +} | ||
102 | + | ||
103 | +static inline TCGv_ptr tcg_temp_ebb_new_ptr(void) | ||
104 | +{ | ||
105 | + TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB); | ||
106 | + return temp_tcgv_ptr(t); | ||
107 | +} | ||
108 | + | ||
109 | +#endif /* TCG_TEMP_FREE_H */ | ||
110 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | ||
111 | index XXXXXXX..XXXXXXX 100644 | ||
112 | --- a/include/tcg/tcg.h | ||
113 | +++ b/include/tcg/tcg.h | ||
114 | @@ -XXX,XX +XXX,XX @@ void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size); | ||
115 | TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr, | ||
116 | intptr_t, const char *); | ||
117 | TCGTemp *tcg_temp_new_internal(TCGType, TCGTempKind); | ||
118 | -void tcg_temp_free_internal(TCGTemp *); | ||
119 | TCGv_vec tcg_temp_new_vec(TCGType type); | ||
120 | TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match); | ||
121 | |||
122 | -static inline void tcg_temp_free_i32(TCGv_i32 arg) | ||
123 | -{ | ||
124 | - tcg_temp_free_internal(tcgv_i32_temp(arg)); | ||
125 | -} | ||
126 | - | ||
127 | -static inline void tcg_temp_free_i64(TCGv_i64 arg) | ||
128 | -{ | ||
129 | - tcg_temp_free_internal(tcgv_i64_temp(arg)); | ||
130 | -} | ||
131 | - | ||
132 | -static inline void tcg_temp_free_i128(TCGv_i128 arg) | ||
133 | -{ | ||
134 | - tcg_temp_free_internal(tcgv_i128_temp(arg)); | ||
135 | -} | ||
136 | - | ||
137 | -static inline void tcg_temp_free_ptr(TCGv_ptr arg) | ||
138 | -{ | ||
139 | - tcg_temp_free_internal(tcgv_ptr_temp(arg)); | ||
140 | -} | ||
141 | - | ||
142 | -static inline void tcg_temp_free_vec(TCGv_vec arg) | ||
143 | -{ | ||
144 | - tcg_temp_free_internal(tcgv_vec_temp(arg)); | ||
145 | -} | ||
146 | - | ||
147 | static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset, | ||
148 | const char *name) | ||
149 | { | ||
150 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset, | ||
151 | return temp_tcgv_i32(t); | ||
152 | } | ||
153 | |||
154 | -/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */ | ||
155 | -static inline TCGv_i32 tcg_temp_ebb_new_i32(void) | ||
156 | -{ | ||
157 | - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB); | ||
158 | - return temp_tcgv_i32(t); | ||
159 | -} | ||
160 | - | ||
161 | static inline TCGv_i32 tcg_temp_new_i32(void) | ||
162 | { | ||
163 | TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_TB); | ||
164 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset, | ||
165 | return temp_tcgv_i64(t); | ||
166 | } | ||
167 | |||
168 | -/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */ | ||
169 | -static inline TCGv_i64 tcg_temp_ebb_new_i64(void) | ||
170 | -{ | ||
171 | - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB); | ||
172 | - return temp_tcgv_i64(t); | ||
173 | -} | ||
174 | - | ||
175 | static inline TCGv_i64 tcg_temp_new_i64(void) | ||
176 | { | ||
177 | TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_TB); | ||
178 | return temp_tcgv_i64(t); | ||
179 | } | ||
180 | |||
181 | -/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */ | ||
182 | -static inline TCGv_i128 tcg_temp_ebb_new_i128(void) | ||
183 | -{ | ||
184 | - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB); | ||
185 | - return temp_tcgv_i128(t); | ||
186 | -} | ||
187 | - | ||
188 | static inline TCGv_i128 tcg_temp_new_i128(void) | ||
189 | { | ||
190 | TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_TB); | ||
191 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset, | ||
192 | return temp_tcgv_ptr(t); | ||
193 | } | ||
194 | |||
195 | -/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */ | ||
196 | -static inline TCGv_ptr tcg_temp_ebb_new_ptr(void) | ||
197 | -{ | ||
198 | - TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB); | ||
199 | - return temp_tcgv_ptr(t); | ||
200 | -} | ||
201 | - | ||
202 | static inline TCGv_ptr tcg_temp_new_ptr(void) | ||
203 | { | ||
204 | TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_TB); | ||
41 | diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c | 205 | diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c |
42 | index XXXXXXX..XXXXXXX 100644 | 206 | index XXXXXXX..XXXXXXX 100644 |
43 | --- a/accel/tcg/plugin-gen.c | 207 | --- a/accel/tcg/plugin-gen.c |
44 | +++ b/accel/tcg/plugin-gen.c | 208 | +++ b/accel/tcg/plugin-gen.c |
45 | @@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb) | 209 | @@ -XXX,XX +XXX,XX @@ |
46 | pr_ops(); | 210 | */ |
47 | } | 211 | #include "qemu/osdep.h" |
48 | 212 | #include "tcg/tcg.h" | |
49 | -bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only) | 213 | +#include "tcg/tcg-temp-internal.h" |
50 | +bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db, | 214 | #include "tcg/tcg-op.h" |
51 | + bool mem_only) | 215 | #include "exec/exec-all.h" |
52 | { | 216 | #include "exec/plugin-gen.h" |
53 | bool ret = false; | 217 | diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c |
54 | 218 | index XXXXXXX..XXXXXXX 100644 | |
55 | @@ -XXX,XX +XXX,XX @@ bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_onl | 219 | --- a/tcg/tcg-op-gvec.c |
56 | 220 | +++ b/tcg/tcg-op-gvec.c | |
57 | ret = true; | 221 | @@ -XXX,XX +XXX,XX @@ |
58 | 222 | ||
59 | - ptb->vaddr = tb->pc; | 223 | #include "qemu/osdep.h" |
60 | + ptb->vaddr = db->pc_first; | 224 | #include "tcg/tcg.h" |
61 | ptb->vaddr2 = -1; | 225 | +#include "tcg/tcg-temp-internal.h" |
62 | - get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1); | 226 | #include "tcg/tcg-op.h" |
63 | + ptb->haddr1 = db->host_addr[0]; | 227 | #include "tcg/tcg-op-gvec.h" |
64 | ptb->haddr2 = NULL; | 228 | #include "tcg/tcg-gvec-desc.h" |
65 | ptb->mem_only = mem_only; | 229 | diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c |
66 | 230 | index XXXXXXX..XXXXXXX 100644 | |
67 | @@ -XXX,XX +XXX,XX @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db) | 231 | --- a/tcg/tcg-op-vec.c |
68 | * Note that we skip this when haddr1 == NULL, e.g. when we're | 232 | +++ b/tcg/tcg-op-vec.c |
69 | * fetching instructions from a region not backed by RAM. | 233 | @@ -XXX,XX +XXX,XX @@ |
70 | */ | 234 | |
71 | - if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) && | 235 | #include "qemu/osdep.h" |
72 | - unlikely((db->pc_next & TARGET_PAGE_MASK) != | 236 | #include "tcg/tcg.h" |
73 | - (db->pc_first & TARGET_PAGE_MASK))) { | 237 | +#include "tcg/tcg-temp-internal.h" |
74 | - get_page_addr_code_hostp(cpu->env_ptr, db->pc_next, | 238 | #include "tcg/tcg-op.h" |
75 | - &ptb->haddr2); | 239 | #include "tcg/tcg-mo.h" |
76 | - ptb->vaddr2 = db->pc_next; | 240 | #include "tcg-internal.h" |
77 | - } | 241 | diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c |
78 | - if (likely(ptb->vaddr2 == -1)) { | 242 | index XXXXXXX..XXXXXXX 100644 |
79 | + if (ptb->haddr1 == NULL) { | 243 | --- a/tcg/tcg-op.c |
80 | + pinsn->haddr = NULL; | 244 | +++ b/tcg/tcg-op.c |
81 | + } else if (is_same_page(db, db->pc_next)) { | 245 | @@ -XXX,XX +XXX,XX @@ |
82 | pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr; | 246 | #include "qemu/osdep.h" |
83 | } else { | 247 | #include "exec/exec-all.h" |
84 | + if (ptb->vaddr2 == -1) { | 248 | #include "tcg/tcg.h" |
85 | + ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first); | 249 | +#include "tcg/tcg-temp-internal.h" |
86 | + get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2); | 250 | #include "tcg/tcg-op.h" |
87 | + } | 251 | #include "tcg/tcg-mo.h" |
88 | pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2; | 252 | #include "exec/plugin-gen.h" |
89 | } | 253 | diff --git a/tcg/tcg.c b/tcg/tcg.c |
90 | } | 254 | index XXXXXXX..XXXXXXX 100644 |
91 | diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c | 255 | --- a/tcg/tcg.c |
92 | index XXXXXXX..XXXXXXX 100644 | 256 | +++ b/tcg/tcg.c |
93 | --- a/accel/tcg/translator.c | 257 | @@ -XXX,XX +XXX,XX @@ |
94 | +++ b/accel/tcg/translator.c | 258 | #include "elf.h" |
95 | @@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns, | 259 | #include "exec/log.h" |
96 | ops->tb_start(db, cpu); | 260 | #include "tcg/tcg-ldst.h" |
97 | tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ | 261 | +#include "tcg/tcg-temp-internal.h" |
98 | 262 | #include "tcg-internal.h" | |
99 | - plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY); | 263 | #include "accel/tcg/perf.h" |
100 | + plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY); | 264 | |
101 | |||
102 | while (true) { | ||
103 | db->num_insns++; | ||
104 | -- | 265 | -- |
105 | 2.34.1 | 266 | 2.34.1 |
106 | 267 | ||
1 | Prepare for targets to be able to produce TBs that can | 1 | From: Anton Johansson <anjo@rev.ng> |
---|---|---|---|
2 | run in more than one virtual context. | ||
3 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> |
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-2-anjo@rev.ng> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 9 | --- |
7 | accel/tcg/internal.h | 4 +++ | 10 | include/exec/cpu-defs.h | 11 ++++++++--- |
8 | accel/tcg/tb-jmp-cache.h | 41 +++++++++++++++++++++++++ | 11 | 1 file changed, 8 insertions(+), 3 deletions(-) |
9 | include/exec/cpu-defs.h | 3 ++ | ||
10 | include/exec/exec-all.h | 32 ++++++++++++++++++-- | ||
11 | accel/tcg/cpu-exec.c | 16 ++++++---- | ||
12 | accel/tcg/translate-all.c | 64 ++++++++++++++++++++++++++------------- | ||
13 | 6 files changed, 131 insertions(+), 29 deletions(-) | ||
14 | 12 | ||
15 | diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/accel/tcg/internal.h | ||
18 | +++ b/accel/tcg/internal.h | ||
19 | @@ -XXX,XX +XXX,XX @@ void tb_htable_init(void); | ||
20 | /* Return the current PC from CPU, which may be cached in TB. */ | ||
21 | static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb) | ||
22 | { | ||
23 | +#if TARGET_TB_PCREL | ||
24 | + return cpu->cc->get_pc(cpu); | ||
25 | +#else | ||
26 | return tb_pc(tb); | ||
27 | +#endif | ||
28 | } | ||
29 | |||
30 | #endif /* ACCEL_TCG_INTERNAL_H */ | ||
31 | diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/accel/tcg/tb-jmp-cache.h | ||
34 | +++ b/accel/tcg/tb-jmp-cache.h | ||
35 | @@ -XXX,XX +XXX,XX @@ | ||
36 | |||
37 | /* | ||
38 | * Accessed in parallel; all accesses to 'tb' must be atomic. | ||
39 | + * For TARGET_TB_PCREL, accesses to 'pc' must be protected by | ||
40 | + * a load_acquire/store_release to 'tb'. | ||
41 | */ | ||
42 | struct CPUJumpCache { | ||
43 | struct { | ||
44 | TranslationBlock *tb; | ||
45 | +#if TARGET_TB_PCREL | ||
46 | + target_ulong pc; | ||
47 | +#endif | ||
48 | } array[TB_JMP_CACHE_SIZE]; | ||
49 | }; | ||
50 | |||
51 | +static inline TranslationBlock * | ||
52 | +tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash) | ||
53 | +{ | ||
54 | +#if TARGET_TB_PCREL | ||
55 | + /* Use acquire to ensure current load of pc from jc. */ | ||
56 | + return qatomic_load_acquire(&jc->array[hash].tb); | ||
57 | +#else | ||
58 | + /* Use rcu_read to ensure current load of pc from *tb. */ | ||
59 | + return qatomic_rcu_read(&jc->array[hash].tb); | ||
60 | +#endif | ||
61 | +} | ||
62 | + | ||
63 | +static inline target_ulong | ||
64 | +tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb) | ||
65 | +{ | ||
66 | +#if TARGET_TB_PCREL | ||
67 | + return jc->array[hash].pc; | ||
68 | +#else | ||
69 | + return tb_pc(tb); | ||
70 | +#endif | ||
71 | +} | ||
72 | + | ||
73 | +static inline void | ||
74 | +tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash, | ||
75 | + TranslationBlock *tb, target_ulong pc) | ||
76 | +{ | ||
77 | +#if TARGET_TB_PCREL | ||
78 | + jc->array[hash].pc = pc; | ||
79 | + /* Use store_release on tb to ensure pc is written first. */ | ||
80 | + qatomic_store_release(&jc->array[hash].tb, tb); | ||
81 | +#else | ||
82 | + /* Use the pc value already stored in tb->pc. */ | ||
83 | + qatomic_set(&jc->array[hash].tb, tb); | ||
84 | +#endif | ||
85 | +} | ||
86 | + | ||
87 | #endif /* ACCEL_TCG_TB_JMP_CACHE_H */ | ||
88 | diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h | 13 | diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h |
89 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
90 | --- a/include/exec/cpu-defs.h | 15 | --- a/include/exec/cpu-defs.h |
91 | +++ b/include/exec/cpu-defs.h | 16 | +++ b/include/exec/cpu-defs.h |
92 | @@ -XXX,XX +XXX,XX @@ | 17 | @@ -XXX,XX +XXX,XX @@ |
93 | # error TARGET_PAGE_BITS must be defined in cpu-param.h | 18 | #ifndef TARGET_LONG_BITS |
19 | # error TARGET_LONG_BITS must be defined in cpu-param.h | ||
20 | #endif | ||
21 | -#ifndef NB_MMU_MODES | ||
22 | -# error NB_MMU_MODES must be defined in cpu-param.h | ||
23 | -#endif | ||
24 | #ifndef TARGET_PHYS_ADDR_SPACE_BITS | ||
25 | # error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h | ||
26 | #endif | ||
27 | @@ -XXX,XX +XXX,XX @@ | ||
94 | # endif | 28 | # endif |
95 | #endif | 29 | #endif |
96 | +#ifndef TARGET_TB_PCREL | 30 | |
97 | +# define TARGET_TB_PCREL 0 | 31 | +/* |
98 | +#endif | 32 | + * Fix the number of mmu modes to 16, which is also the maximum |
99 | 33 | + * supported by the softmmu tlb api. | |
100 | #define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) | 34 | + */ |
101 | 35 | +#ifndef NB_MMU_MODES | |
102 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | 36 | +#define NB_MMU_MODES 16 |
103 | index XXXXXXX..XXXXXXX 100644 | ||
104 | --- a/include/exec/exec-all.h | ||
105 | +++ b/include/exec/exec-all.h | ||
106 | @@ -XXX,XX +XXX,XX @@ struct tb_tc { | ||
107 | }; | ||
108 | |||
109 | struct TranslationBlock { | ||
110 | - target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */ | ||
111 | - target_ulong cs_base; /* CS base for this block */ | ||
112 | +#if !TARGET_TB_PCREL | ||
113 | + /* | ||
114 | + * Guest PC corresponding to this block. This must be the true | ||
115 | + * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and | ||
116 | + * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or | ||
117 | + * privilege, must store those bits elsewhere. | ||
118 | + * | ||
119 | + * If TARGET_TB_PCREL, the opcodes for the TranslationBlock are | ||
120 | + * written such that the TB is associated only with the physical | ||
121 | + * page and may be run in any virtual address context. In this case, | ||
122 | + * PC must always be taken from ENV in a target-specific manner. | ||
123 | + * Unwind information is taken as offsets from the page, to be | ||
124 | + * deposited into the "current" PC. | ||
125 | + */ | ||
126 | + target_ulong pc; | ||
127 | +#endif | 37 | +#endif |
128 | + | 38 | + |
129 | + /* | 39 | #define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) |
130 | + * Target-specific data associated with the TranslationBlock, e.g.: | 40 | |
131 | + * x86: the original user, the Code Segment virtual base, | 41 | /* target_ulong is the type of a virtual address */ |
132 | + * arm: an extension of tb->flags, | ||
133 | + * s390x: instruction data for EXECUTE, | ||
134 | + * sparc: the next pc of the instruction queue (for delay slots). | ||
135 | + */ | ||
136 | + target_ulong cs_base; | ||
137 | + | ||
138 | uint32_t flags; /* flags defining in which context the code was generated */ | ||
139 | uint32_t cflags; /* compile flags */ | ||
140 | |||
141 | @@ -XXX,XX +XXX,XX @@ struct TranslationBlock { | ||
142 | /* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */ | ||
143 | static inline target_ulong tb_pc(const TranslationBlock *tb) | ||
144 | { | ||
145 | +#if TARGET_TB_PCREL | ||
146 | + qemu_build_not_reached(); | ||
147 | +#else | ||
148 | return tb->pc; | ||
149 | +#endif | ||
150 | } | ||
151 | |||
152 | /* Hide the qatomic_read to make code a little easier on the eyes */ | ||
153 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
154 | index XXXXXXX..XXXXXXX 100644 | ||
155 | --- a/accel/tcg/cpu-exec.c | ||
156 | +++ b/accel/tcg/cpu-exec.c | ||
157 | @@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||
158 | const TranslationBlock *tb = p; | ||
159 | const struct tb_desc *desc = d; | ||
160 | |||
161 | - if (tb_pc(tb) == desc->pc && | ||
162 | + if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) && | ||
163 | tb->page_addr[0] == desc->page_addr0 && | ||
164 | tb->cs_base == desc->cs_base && | ||
165 | tb->flags == desc->flags && | ||
166 | @@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||
167 | return NULL; | ||
168 | } | ||
169 | desc.page_addr0 = phys_pc; | ||
170 | - h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate); | ||
171 | + h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc), | ||
172 | + flags, cflags, *cpu->trace_dstate); | ||
173 | return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); | ||
174 | } | ||
175 | |||
176 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | ||
177 | uint32_t flags, uint32_t cflags) | ||
178 | { | ||
179 | TranslationBlock *tb; | ||
180 | + CPUJumpCache *jc; | ||
181 | uint32_t hash; | ||
182 | |||
183 | /* we should never be trying to look up an INVALID tb */ | ||
184 | tcg_debug_assert(!(cflags & CF_INVALID)); | ||
185 | |||
186 | hash = tb_jmp_cache_hash_func(pc); | ||
187 | - tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb); | ||
188 | + jc = cpu->tb_jmp_cache; | ||
189 | + tb = tb_jmp_cache_get_tb(jc, hash); | ||
190 | |||
191 | if (likely(tb && | ||
192 | - tb->pc == pc && | ||
193 | + tb_jmp_cache_get_pc(jc, hash, tb) == pc && | ||
194 | tb->cs_base == cs_base && | ||
195 | tb->flags == flags && | ||
196 | tb->trace_vcpu_dstate == *cpu->trace_dstate && | ||
197 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | ||
198 | if (tb == NULL) { | ||
199 | return NULL; | ||
200 | } | ||
201 | - qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb); | ||
202 | + tb_jmp_cache_set(jc, hash, tb, pc); | ||
203 | return tb; | ||
204 | } | ||
205 | |||
206 | @@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) | ||
207 | if (cc->tcg_ops->synchronize_from_tb) { | ||
208 | cc->tcg_ops->synchronize_from_tb(cpu, last_tb); | ||
209 | } else { | ||
210 | + assert(!TARGET_TB_PCREL); | ||
211 | assert(cc->set_pc); | ||
212 | cc->set_pc(cpu, tb_pc(last_tb)); | ||
213 | } | ||
214 | @@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu) | ||
215 | * for the fast lookup | ||
216 | */ | ||
217 | h = tb_jmp_cache_hash_func(pc); | ||
218 | - qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb); | ||
219 | + tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc); | ||
220 | } | ||
221 | |||
222 | #ifndef CONFIG_USER_ONLY | ||
223 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | ||
224 | index XXXXXXX..XXXXXXX 100644 | ||
225 | --- a/accel/tcg/translate-all.c | ||
226 | +++ b/accel/tcg/translate-all.c | ||
227 | @@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block) | ||
228 | |||
229 | for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { | ||
230 | if (i == 0) { | ||
231 | - prev = (j == 0 ? tb_pc(tb) : 0); | ||
232 | + prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0); | ||
233 | } else { | ||
234 | prev = tcg_ctx->gen_insn_data[i - 1][j]; | ||
235 | } | ||
236 | @@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block) | ||
237 | static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, | ||
238 | uintptr_t searched_pc, bool reset_icount) | ||
239 | { | ||
240 | - target_ulong data[TARGET_INSN_START_WORDS] = { tb_pc(tb) }; | ||
241 | + target_ulong data[TARGET_INSN_START_WORDS]; | ||
242 | uintptr_t host_pc = (uintptr_t)tb->tc.ptr; | ||
243 | CPUArchState *env = cpu->env_ptr; | ||
244 | const uint8_t *p = tb->tc.ptr + tb->tc.size; | ||
245 | @@ -XXX,XX +XXX,XX @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, | ||
246 | return -1; | ||
247 | } | ||
248 | |||
249 | + memset(data, 0, sizeof(data)); | ||
250 | + if (!TARGET_TB_PCREL) { | ||
251 | + data[0] = tb_pc(tb); | ||
252 | + } | ||
253 | + | ||
254 | /* Reconstruct the stored insn data while looking for the point at | ||
255 | which the end of the insn exceeds the searched_pc. */ | ||
256 | for (i = 0; i < num_insns; ++i) { | ||
257 | @@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp) | ||
258 | const TranslationBlock *a = ap; | ||
259 | const TranslationBlock *b = bp; | ||
260 | |||
261 | - return tb_pc(a) == tb_pc(b) && | ||
262 | - a->cs_base == b->cs_base && | ||
263 | - a->flags == b->flags && | ||
264 | - (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && | ||
265 | - a->trace_vcpu_dstate == b->trace_vcpu_dstate && | ||
266 | - a->page_addr[0] == b->page_addr[0] && | ||
267 | - a->page_addr[1] == b->page_addr[1]; | ||
268 | + return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) && | ||
269 | + a->cs_base == b->cs_base && | ||
270 | + a->flags == b->flags && | ||
271 | + (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && | ||
272 | + a->trace_vcpu_dstate == b->trace_vcpu_dstate && | ||
273 | + a->page_addr[0] == b->page_addr[0] && | ||
274 | + a->page_addr[1] == b->page_addr[1]); | ||
275 | } | ||
276 | |||
277 | void tb_htable_init(void) | ||
278 | @@ -XXX,XX +XXX,XX @@ static inline void tb_jmp_unlink(TranslationBlock *dest) | ||
279 | qemu_spin_unlock(&dest->jmp_lock); | ||
280 | } | ||
281 | |||
282 | +static void tb_jmp_cache_inval_tb(TranslationBlock *tb) | ||
283 | +{ | ||
284 | + CPUState *cpu; | ||
285 | + | ||
286 | + if (TARGET_TB_PCREL) { | ||
287 | + /* A TB may be at any virtual address */ | ||
288 | + CPU_FOREACH(cpu) { | ||
289 | + tcg_flush_jmp_cache(cpu); | ||
290 | + } | ||
291 | + } else { | ||
292 | + uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb)); | ||
293 | + | ||
294 | + CPU_FOREACH(cpu) { | ||
295 | + CPUJumpCache *jc = cpu->tb_jmp_cache; | ||
296 | + | ||
297 | + if (qatomic_read(&jc->array[h].tb) == tb) { | ||
298 | + qatomic_set(&jc->array[h].tb, NULL); | ||
299 | + } | ||
300 | + } | ||
301 | + } | ||
302 | +} | ||
303 | + | ||
304 | /* | ||
305 | * In user-mode, call with mmap_lock held. | ||
306 | * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' | ||
307 | @@ -XXX,XX +XXX,XX @@ static inline void tb_jmp_unlink(TranslationBlock *dest) | ||
308 | */ | ||
309 | static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
310 | { | ||
311 | - CPUState *cpu; | ||
312 | PageDesc *p; | ||
313 | uint32_t h; | ||
314 | tb_page_addr_t phys_pc; | ||
315 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
316 | |||
317 | /* remove the TB from the hash list */ | ||
318 | phys_pc = tb->page_addr[0]; | ||
319 | - h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, orig_cflags, | ||
320 | - tb->trace_vcpu_dstate); | ||
321 | + h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)), | ||
322 | + tb->flags, orig_cflags, tb->trace_vcpu_dstate); | ||
323 | if (!qht_remove(&tb_ctx.htable, tb, h)) { | ||
324 | return; | ||
325 | } | ||
326 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
327 | } | ||
328 | |||
329 | /* remove the TB from the hash list */ | ||
330 | - h = tb_jmp_cache_hash_func(tb->pc); | ||
331 | - CPU_FOREACH(cpu) { | ||
332 | - CPUJumpCache *jc = cpu->tb_jmp_cache; | ||
333 | - if (qatomic_read(&jc->array[h].tb) == tb) { | ||
334 | - qatomic_set(&jc->array[h].tb, NULL); | ||
335 | - } | ||
336 | - } | ||
337 | + tb_jmp_cache_inval_tb(tb); | ||
338 | |||
339 | /* suppress this TB from the two jump lists */ | ||
340 | tb_remove_from_jmp_list(tb, 0); | ||
341 | @@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | ||
342 | } | ||
343 | |||
344 | /* add in the hash table */ | ||
345 | - h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, tb->cflags, | ||
346 | - tb->trace_vcpu_dstate); | ||
347 | + h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)), | ||
348 | + tb->flags, tb->cflags, tb->trace_vcpu_dstate); | ||
349 | qht_insert(&tb_ctx.htable, tb, h, &existing_tb); | ||
350 | |||
351 | /* remove TB from the page(s) if we couldn't insert it */ | ||
352 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
353 | |||
354 | gen_code_buf = tcg_ctx->code_gen_ptr; | ||
355 | tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf); | ||
356 | +#if !TARGET_TB_PCREL | ||
357 | tb->pc = pc; | ||
358 | +#endif | ||
359 | tb->cs_base = cs_base; | ||
360 | tb->flags = flags; | ||
361 | tb->cflags = cflags; | ||
362 | -- | 42 | -- |
363 | 2.34.1 | 43 | 2.34.1 |
364 | 44 | ||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-3-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/alpha/cpu-param.h | 2 -- | ||
11 | 1 file changed, 2 deletions(-) | ||
12 | |||
13 | diff --git a/target/alpha/cpu-param.h b/target/alpha/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/alpha/cpu-param.h | ||
16 | +++ b/target/alpha/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_PHYS_ADDR_SPACE_BITS 44 | ||
19 | #define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS) | ||
20 | |||
21 | -#define NB_MMU_MODES 3 | ||
22 | - | ||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-4-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/arm/cpu-param.h | 2 -- | ||
11 | 1 file changed, 2 deletions(-) | ||
12 | |||
13 | diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/arm/cpu-param.h | ||
16 | +++ b/target/arm/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | bool guarded; | ||
19 | #endif | ||
20 | |||
21 | -#define NB_MMU_MODES 12 | ||
22 | - | ||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-5-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/avr/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/avr/cpu-param.h b/target/avr/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/avr/cpu-param.h | ||
16 | +++ b/target/avr/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_PAGE_BITS 8 | ||
19 | #define TARGET_PHYS_ADDR_SPACE_BITS 24 | ||
20 | #define TARGET_VIRT_ADDR_SPACE_BITS 24 | ||
21 | -#define NB_MMU_MODES 2 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-6-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/cris/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/cris/cpu-param.h b/target/cris/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/cris/cpu-param.h | ||
16 | +++ b/target/cris/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_PAGE_BITS 13 | ||
19 | #define TARGET_PHYS_ADDR_SPACE_BITS 32 | ||
20 | #define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
21 | -#define NB_MMU_MODES 2 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-7-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/hexagon/cpu-param.h | 2 -- | ||
11 | 1 file changed, 2 deletions(-) | ||
12 | |||
13 | diff --git a/target/hexagon/cpu-param.h b/target/hexagon/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/hexagon/cpu-param.h | ||
16 | +++ b/target/hexagon/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_PHYS_ADDR_SPACE_BITS 36 | ||
19 | #define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
20 | |||
21 | -#define NB_MMU_MODES 1 | ||
22 | - | ||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-8-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/hppa/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/hppa/cpu-param.h | ||
16 | +++ b/target/hppa/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | # define TARGET_PHYS_ADDR_SPACE_BITS 32 | ||
19 | #endif | ||
20 | #define TARGET_PAGE_BITS 12 | ||
21 | -#define NB_MMU_MODES 5 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-9-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/i386/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/i386/cpu-param.h b/target/i386/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/i386/cpu-param.h | ||
16 | +++ b/target/i386/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | # define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
19 | #endif | ||
20 | #define TARGET_PAGE_BITS 12 | ||
21 | -#define NB_MMU_MODES 5 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-10-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/loongarch/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/loongarch/cpu-param.h b/target/loongarch/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/loongarch/cpu-param.h | ||
16 | +++ b/target/loongarch/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_VIRT_ADDR_SPACE_BITS 48 | ||
19 | |||
20 | #define TARGET_PAGE_BITS 14 | ||
21 | -#define NB_MMU_MODES 5 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-11-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/m68k/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/m68k/cpu-param.h b/target/m68k/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/m68k/cpu-param.h | ||
16 | +++ b/target/m68k/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_PAGE_BITS 12 | ||
19 | #define TARGET_PHYS_ADDR_SPACE_BITS 32 | ||
20 | #define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
21 | -#define NB_MMU_MODES 2 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-12-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/microblaze/cpu-param.h | 1 - | ||
11 | target/microblaze/cpu.h | 2 +- | ||
12 | 2 files changed, 1 insertion(+), 2 deletions(-) | ||
13 | |||
14 | diff --git a/target/microblaze/cpu-param.h b/target/microblaze/cpu-param.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/target/microblaze/cpu-param.h | ||
17 | +++ b/target/microblaze/cpu-param.h | ||
18 | @@ -XXX,XX +XXX,XX @@ | ||
19 | |||
20 | /* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */ | ||
21 | #define TARGET_PAGE_BITS 12 | ||
22 | -#define NB_MMU_MODES 3 | ||
23 | |||
24 | #endif | ||
25 | diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h | ||
26 | index XXXXXXX..XXXXXXX 100644 | ||
27 | --- a/target/microblaze/cpu.h | ||
28 | +++ b/target/microblaze/cpu.h | ||
29 | @@ -XXX,XX +XXX,XX @@ void mb_tcg_init(void); | ||
30 | #define MMU_NOMMU_IDX 0 | ||
31 | #define MMU_KERNEL_IDX 1 | ||
32 | #define MMU_USER_IDX 2 | ||
33 | -/* See NB_MMU_MODES further up the file. */ | ||
34 | +/* See NB_MMU_MODES in cpu-defs.h. */ | ||
35 | |||
36 | #include "exec/cpu-all.h" | ||
37 | |||
38 | -- | ||
39 | 2.34.1 | ||
40 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-13-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/mips/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/mips/cpu-param.h b/target/mips/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/mips/cpu-param.h | ||
16 | +++ b/target/mips/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_PAGE_BITS_VARY | ||
19 | #define TARGET_PAGE_BITS_MIN 12 | ||
20 | #endif | ||
21 | -#define NB_MMU_MODES 4 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-14-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/nios2/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/nios2/cpu-param.h b/target/nios2/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/nios2/cpu-param.h | ||
16 | +++ b/target/nios2/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #else | ||
19 | # define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
20 | #endif | ||
21 | -#define NB_MMU_MODES 2 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-15-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/openrisc/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/openrisc/cpu-param.h b/target/openrisc/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/openrisc/cpu-param.h | ||
16 | +++ b/target/openrisc/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_PAGE_BITS 13 | ||
19 | #define TARGET_PHYS_ADDR_SPACE_BITS 32 | ||
20 | #define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
21 | -#define NB_MMU_MODES 3 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-16-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/ppc/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/ppc/cpu-param.h b/target/ppc/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/ppc/cpu-param.h | ||
16 | +++ b/target/ppc/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | # define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
19 | #endif | ||
20 | #define TARGET_PAGE_BITS 12 | ||
21 | -#define NB_MMU_MODES 10 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-17-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/riscv/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/riscv/cpu-param.h | ||
16 | +++ b/target/riscv/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | * - S mode HLV/HLVX/HSV 0b101 | ||
19 | * - M mode HLV/HLVX/HSV 0b111 | ||
20 | */ | ||
21 | -#define NB_MMU_MODES 8 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-18-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/rx/cpu-param.h | 2 -- | ||
11 | 1 file changed, 2 deletions(-) | ||
12 | |||
13 | diff --git a/target/rx/cpu-param.h b/target/rx/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/rx/cpu-param.h | ||
16 | +++ b/target/rx/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_PHYS_ADDR_SPACE_BITS 32 | ||
19 | #define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
20 | |||
21 | -#define NB_MMU_MODES 1 | ||
22 | - | ||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-19-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/s390x/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/s390x/cpu-param.h b/target/s390x/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/s390x/cpu-param.h | ||
16 | +++ b/target/s390x/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_PAGE_BITS 12 | ||
19 | #define TARGET_PHYS_ADDR_SPACE_BITS 64 | ||
20 | #define TARGET_VIRT_ADDR_SPACE_BITS 64 | ||
21 | -#define NB_MMU_MODES 4 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-20-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/sh4/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/sh4/cpu-param.h b/target/sh4/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/sh4/cpu-param.h | ||
16 | +++ b/target/sh4/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #else | ||
19 | # define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
20 | #endif | ||
21 | -#define NB_MMU_MODES 2 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-21-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/sparc/cpu-param.h | 2 -- | ||
11 | 1 file changed, 2 deletions(-) | ||
12 | |||
13 | diff --git a/target/sparc/cpu-param.h b/target/sparc/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/sparc/cpu-param.h | ||
16 | +++ b/target/sparc/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | # else | ||
19 | # define TARGET_VIRT_ADDR_SPACE_BITS 44 | ||
20 | # endif | ||
21 | -# define NB_MMU_MODES 6 | ||
22 | #else | ||
23 | # define TARGET_LONG_BITS 32 | ||
24 | # define TARGET_PAGE_BITS 12 /* 4k */ | ||
25 | # define TARGET_PHYS_ADDR_SPACE_BITS 36 | ||
26 | # define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
27 | -# define NB_MMU_MODES 3 | ||
28 | #endif | ||
29 | |||
30 | #endif | ||
31 | -- | ||
32 | 2.34.1 | ||
33 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-22-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/tricore/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/tricore/cpu-param.h b/target/tricore/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/tricore/cpu-param.h | ||
16 | +++ b/target/tricore/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #define TARGET_PAGE_BITS 14 | ||
19 | #define TARGET_PHYS_ADDR_SPACE_BITS 32 | ||
20 | #define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
21 | -#define NB_MMU_MODES 3 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
New patch | |||
---|---|---|---|
1 | From: Anton Johansson <anjo@rev.ng> | ||
1 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20230306175230.7110-23-anjo@rev.ng> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/xtensa/cpu-param.h | 1 - | ||
11 | 1 file changed, 1 deletion(-) | ||
12 | |||
13 | diff --git a/target/xtensa/cpu-param.h b/target/xtensa/cpu-param.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/xtensa/cpu-param.h | ||
16 | +++ b/target/xtensa/cpu-param.h | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | #else | ||
19 | #define TARGET_VIRT_ADDR_SPACE_BITS 32 | ||
20 | #endif | ||
21 | -#define NB_MMU_MODES 4 | ||
22 | |||
23 | #endif | ||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
1 | Allow the target to cache items from the guest page tables. | 1 | From: Anton Johansson <anjo@rev.ng> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Anton Johansson <anjo@rev.ng> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> |
4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 7 | Message-Id: <20230306175230.7110-24-anjo@rev.ng> |
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 9 | --- |
8 | include/exec/cpu-defs.h | 9 +++++++++ | 10 | include/exec/cpu-defs.h | 2 -- |
9 | 1 file changed, 9 insertions(+) | 11 | 1 file changed, 2 deletions(-) |
10 | 12 | ||
11 | diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h | 13 | diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h |
12 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/include/exec/cpu-defs.h | 15 | --- a/include/exec/cpu-defs.h |
14 | +++ b/include/exec/cpu-defs.h | 16 | +++ b/include/exec/cpu-defs.h |
15 | @@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntryFull { | 17 | @@ -XXX,XX +XXX,XX @@ |
16 | 18 | * Fix the number of mmu modes to 16, which is also the maximum | |
17 | /* @lg_page_size contains the log2 of the page size. */ | 19 | * supported by the softmmu tlb api. |
18 | uint8_t lg_page_size; | 20 | */ |
19 | + | 21 | -#ifndef NB_MMU_MODES |
20 | + /* | 22 | #define NB_MMU_MODES 16 |
21 | + * Allow target-specific additions to this structure. | 23 | -#endif |
22 | + * This may be used to cache items from the guest cpu | 24 | |
23 | + * page tables for later use by the implementation. | 25 | #define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) |
24 | + */ | 26 | |
25 | +#ifdef TARGET_PAGE_ENTRY_EXTRA | ||
26 | + TARGET_PAGE_ENTRY_EXTRA | ||
27 | +#endif | ||
28 | } CPUTLBEntryFull; | ||
29 | |||
30 | /* | ||
31 | -- | 27 | -- |
32 | 2.34.1 | 28 | 2.34.1 |
33 | 29 | ||
New patch | |||
---|---|---|---|
1 | The use of separate data/port variables is existing | ||
2 | practice elsewhere, e.g. SBI, CBI. | ||
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/avr/translate.c | 18 ++++++++++-------- | ||
8 | 1 file changed, 10 insertions(+), 8 deletions(-) | ||
9 | |||
10 | diff --git a/target/avr/translate.c b/target/avr/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/avr/translate.c | ||
13 | +++ b/target/avr/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static bool trans_SBRS(DisasContext *ctx, arg_SBRS *a) | ||
15 | */ | ||
16 | static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a) | ||
17 | { | ||
18 | - TCGv temp = tcg_const_i32(a->reg); | ||
19 | + TCGv data = tcg_temp_new_i32(); | ||
20 | + TCGv port = tcg_constant_i32(a->reg); | ||
21 | |||
22 | - gen_helper_inb(temp, cpu_env, temp); | ||
23 | - tcg_gen_andi_tl(temp, temp, 1 << a->bit); | ||
24 | + gen_helper_inb(data, cpu_env, port); | ||
25 | + tcg_gen_andi_tl(data, data, 1 << a->bit); | ||
26 | ctx->skip_cond = TCG_COND_EQ; | ||
27 | - ctx->skip_var0 = temp; | ||
28 | + ctx->skip_var0 = data; | ||
29 | |||
30 | return true; | ||
31 | } | ||
32 | @@ -XXX,XX +XXX,XX @@ static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a) | ||
33 | */ | ||
34 | static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a) | ||
35 | { | ||
36 | - TCGv temp = tcg_const_i32(a->reg); | ||
37 | + TCGv data = tcg_temp_new_i32(); | ||
38 | + TCGv port = tcg_constant_i32(a->reg); | ||
39 | |||
40 | - gen_helper_inb(temp, cpu_env, temp); | ||
41 | - tcg_gen_andi_tl(temp, temp, 1 << a->bit); | ||
42 | + gen_helper_inb(data, cpu_env, port); | ||
43 | + tcg_gen_andi_tl(data, data, 1 << a->bit); | ||
44 | ctx->skip_cond = TCG_COND_NE; | ||
45 | - ctx->skip_var0 = temp; | ||
46 | + ctx->skip_var0 = data; | ||
47 | |||
48 | return true; | ||
49 | } | ||
50 | -- | ||
51 | 2.34.1 | ||
52 | |||
New patch | |||
---|---|---|---|
1 | All remaining uses are strictly read-only. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/avr/translate.c | 30 +++++++++++++++--------------- | ||
7 | 1 file changed, 15 insertions(+), 15 deletions(-) | ||
8 | |||
9 | diff --git a/target/avr/translate.c b/target/avr/translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/avr/translate.c | ||
12 | +++ b/target/avr/translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static bool trans_SUB(DisasContext *ctx, arg_SUB *a) | ||
14 | static bool trans_SUBI(DisasContext *ctx, arg_SUBI *a) | ||
15 | { | ||
16 | TCGv Rd = cpu_r[a->rd]; | ||
17 | - TCGv Rr = tcg_const_i32(a->imm); | ||
18 | + TCGv Rr = tcg_constant_i32(a->imm); | ||
19 | TCGv R = tcg_temp_new_i32(); | ||
20 | |||
21 | tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Imm */ | ||
22 | @@ -XXX,XX +XXX,XX @@ static bool trans_SBC(DisasContext *ctx, arg_SBC *a) | ||
23 | TCGv Rd = cpu_r[a->rd]; | ||
24 | TCGv Rr = cpu_r[a->rr]; | ||
25 | TCGv R = tcg_temp_new_i32(); | ||
26 | - TCGv zero = tcg_const_i32(0); | ||
27 | + TCGv zero = tcg_constant_i32(0); | ||
28 | |||
29 | tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */ | ||
30 | tcg_gen_sub_tl(R, R, cpu_Cf); | ||
31 | @@ -XXX,XX +XXX,XX @@ static bool trans_SBC(DisasContext *ctx, arg_SBC *a) | ||
32 | static bool trans_SBCI(DisasContext *ctx, arg_SBCI *a) | ||
33 | { | ||
34 | TCGv Rd = cpu_r[a->rd]; | ||
35 | - TCGv Rr = tcg_const_i32(a->imm); | ||
36 | + TCGv Rr = tcg_constant_i32(a->imm); | ||
37 | TCGv R = tcg_temp_new_i32(); | ||
38 | - TCGv zero = tcg_const_i32(0); | ||
39 | + TCGv zero = tcg_constant_i32(0); | ||
40 | |||
41 | tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */ | ||
42 | tcg_gen_sub_tl(R, R, cpu_Cf); | ||
43 | @@ -XXX,XX +XXX,XX @@ static bool trans_COM(DisasContext *ctx, arg_COM *a) | ||
44 | static bool trans_NEG(DisasContext *ctx, arg_NEG *a) | ||
45 | { | ||
46 | TCGv Rd = cpu_r[a->rd]; | ||
47 | - TCGv t0 = tcg_const_i32(0); | ||
48 | + TCGv t0 = tcg_constant_i32(0); | ||
49 | TCGv R = tcg_temp_new_i32(); | ||
50 | |||
51 | tcg_gen_sub_tl(R, t0, Rd); /* R = 0 - Rd */ | ||
52 | @@ -XXX,XX +XXX,XX @@ static void gen_jmp_z(DisasContext *ctx) | ||
53 | static void gen_push_ret(DisasContext *ctx, int ret) | ||
54 | { | ||
55 | if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) { | ||
56 | - TCGv t0 = tcg_const_i32((ret & 0x0000ff)); | ||
57 | + TCGv t0 = tcg_constant_i32(ret & 0x0000ff); | ||
58 | |||
59 | tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_UB); | ||
60 | tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); | ||
61 | } else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) { | ||
62 | - TCGv t0 = tcg_const_i32((ret & 0x00ffff)); | ||
63 | + TCGv t0 = tcg_constant_i32(ret & 0x00ffff); | ||
64 | |||
65 | tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); | ||
66 | tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_BEUW); | ||
67 | tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); | ||
68 | } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) { | ||
69 | - TCGv lo = tcg_const_i32((ret & 0x0000ff)); | ||
70 | - TCGv hi = tcg_const_i32((ret & 0xffff00) >> 8); | ||
71 | + TCGv lo = tcg_constant_i32(ret & 0x0000ff); | ||
72 | + TCGv hi = tcg_constant_i32((ret & 0xffff00) >> 8); | ||
73 | |||
74 | tcg_gen_qemu_st_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB); | ||
75 | tcg_gen_subi_tl(cpu_sp, cpu_sp, 2); | ||
76 | @@ -XXX,XX +XXX,XX @@ static bool trans_CPC(DisasContext *ctx, arg_CPC *a) | ||
77 | TCGv Rd = cpu_r[a->rd]; | ||
78 | TCGv Rr = cpu_r[a->rr]; | ||
79 | TCGv R = tcg_temp_new_i32(); | ||
80 | - TCGv zero = tcg_const_i32(0); | ||
81 | + TCGv zero = tcg_constant_i32(0); | ||
82 | |||
83 | tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */ | ||
84 | tcg_gen_sub_tl(R, R, cpu_Cf); | ||
85 | @@ -XXX,XX +XXX,XX @@ static bool trans_CPI(DisasContext *ctx, arg_CPI *a) | ||
86 | { | ||
87 | TCGv Rd = cpu_r[a->rd]; | ||
88 | int Imm = a->imm; | ||
89 | - TCGv Rr = tcg_const_i32(Imm); | ||
90 | + TCGv Rr = tcg_constant_i32(Imm); | ||
91 | TCGv R = tcg_temp_new_i32(); | ||
92 | |||
93 | tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */ | ||
94 | @@ -XXX,XX +XXX,XX @@ static bool trans_SPMX(DisasContext *ctx, arg_SPMX *a) | ||
95 | static bool trans_IN(DisasContext *ctx, arg_IN *a) | ||
96 | { | ||
97 | TCGv Rd = cpu_r[a->rd]; | ||
98 | - TCGv port = tcg_const_i32(a->imm); | ||
99 | + TCGv port = tcg_constant_i32(a->imm); | ||
100 | |||
101 | gen_helper_inb(Rd, cpu_env, port); | ||
102 | return true; | ||
103 | @@ -XXX,XX +XXX,XX @@ static bool trans_IN(DisasContext *ctx, arg_IN *a) | ||
104 | static bool trans_OUT(DisasContext *ctx, arg_OUT *a) | ||
105 | { | ||
106 | TCGv Rd = cpu_r[a->rd]; | ||
107 | - TCGv port = tcg_const_i32(a->imm); | ||
108 | + TCGv port = tcg_constant_i32(a->imm); | ||
109 | |||
110 | gen_helper_outb(cpu_env, port, Rd); | ||
111 | return true; | ||
112 | @@ -XXX,XX +XXX,XX @@ static bool trans_SWAP(DisasContext *ctx, arg_SWAP *a) | ||
113 | static bool trans_SBI(DisasContext *ctx, arg_SBI *a) | ||
114 | { | ||
115 | TCGv data = tcg_temp_new_i32(); | ||
116 | - TCGv port = tcg_const_i32(a->reg); | ||
117 | + TCGv port = tcg_constant_i32(a->reg); | ||
118 | |||
119 | gen_helper_inb(data, cpu_env, port); | ||
120 | tcg_gen_ori_tl(data, data, 1 << a->bit); | ||
121 | @@ -XXX,XX +XXX,XX @@ static bool trans_SBI(DisasContext *ctx, arg_SBI *a) | ||
122 | static bool trans_CBI(DisasContext *ctx, arg_CBI *a) | ||
123 | { | ||
124 | TCGv data = tcg_temp_new_i32(); | ||
125 | - TCGv port = tcg_const_i32(a->reg); | ||
126 | + TCGv port = tcg_constant_i32(a->reg); | ||
127 | |||
128 | gen_helper_inb(data, cpu_env, port); | ||
129 | tcg_gen_andi_tl(data, data, ~(1 << a->bit)); | ||
130 | -- | ||
131 | 2.34.1 | ||
132 | |||
New patch | |||
---|---|---|---|
1 | All remaining uses are strictly read-only. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/cris/translate.c | 46 +++++++++++++++------------------ | ||
7 | target/cris/translate_v10.c.inc | 26 +++++++++---------- | ||
8 | 2 files changed, 34 insertions(+), 38 deletions(-) | ||
9 | |||
10 | diff --git a/target/cris/translate.c b/target/cris/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/cris/translate.c | ||
13 | +++ b/target/cris/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static const int preg_sizes[] = { | ||
15 | #define t_gen_mov_env_TN(member, tn) \ | ||
16 | tcg_gen_st_tl(tn, cpu_env, offsetof(CPUCRISState, member)) | ||
17 | #define t_gen_movi_env_TN(member, c) \ | ||
18 | - do { \ | ||
19 | - TCGv tc = tcg_const_tl(c); \ | ||
20 | - t_gen_mov_env_TN(member, tc); \ | ||
21 | - } while (0) | ||
22 | + t_gen_mov_env_TN(member, tcg_constant_tl(c)) | ||
23 | |||
24 | static inline void t_gen_mov_TN_preg(TCGv tn, int r) | ||
25 | { | ||
26 | @@ -XXX,XX +XXX,XX @@ static void cris_lock_irq(DisasContext *dc) | ||
27 | |||
28 | static inline void t_gen_raise_exception(uint32_t index) | ||
29 | { | ||
30 | - TCGv_i32 tmp = tcg_const_i32(index); | ||
31 | - gen_helper_raise_exception(cpu_env, tmp); | ||
32 | + gen_helper_raise_exception(cpu_env, tcg_constant_i32(index)); | ||
33 | } | ||
34 | |||
35 | static void t_gen_lsl(TCGv d, TCGv a, TCGv b) | ||
36 | @@ -XXX,XX +XXX,XX @@ static void t_gen_lsl(TCGv d, TCGv a, TCGv b) | ||
37 | TCGv t0, t_31; | ||
38 | |||
39 | t0 = tcg_temp_new(); | ||
40 | - t_31 = tcg_const_tl(31); | ||
41 | + t_31 = tcg_constant_tl(31); | ||
42 | tcg_gen_shl_tl(d, a, b); | ||
43 | |||
44 | tcg_gen_sub_tl(t0, t_31, b); | ||
45 | @@ -XXX,XX +XXX,XX @@ static int dec_addq(CPUCRISState *env, DisasContext *dc) | ||
46 | |||
47 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
48 | |||
49 | - c = tcg_const_tl(dc->op1); | ||
50 | + c = tcg_constant_tl(dc->op1); | ||
51 | cris_alu(dc, CC_OP_ADD, | ||
52 | cpu_R[dc->op2], cpu_R[dc->op2], c, 4); | ||
53 | return 2; | ||
54 | @@ -XXX,XX +XXX,XX @@ static int dec_subq(CPUCRISState *env, DisasContext *dc) | ||
55 | LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2); | ||
56 | |||
57 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
58 | - c = tcg_const_tl(dc->op1); | ||
59 | + c = tcg_constant_tl(dc->op1); | ||
60 | cris_alu(dc, CC_OP_SUB, | ||
61 | cpu_R[dc->op2], cpu_R[dc->op2], c, 4); | ||
62 | return 2; | ||
63 | @@ -XXX,XX +XXX,XX @@ static int dec_cmpq(CPUCRISState *env, DisasContext *dc) | ||
64 | LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2); | ||
65 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
66 | |||
67 | - c = tcg_const_tl(imm); | ||
68 | + c = tcg_constant_tl(imm); | ||
69 | cris_alu(dc, CC_OP_CMP, | ||
70 | cpu_R[dc->op2], cpu_R[dc->op2], c, 4); | ||
71 | return 2; | ||
72 | @@ -XXX,XX +XXX,XX @@ static int dec_andq(CPUCRISState *env, DisasContext *dc) | ||
73 | LOG_DIS("andq %d, $r%d\n", imm, dc->op2); | ||
74 | cris_cc_mask(dc, CC_MASK_NZ); | ||
75 | |||
76 | - c = tcg_const_tl(imm); | ||
77 | + c = tcg_constant_tl(imm); | ||
78 | cris_alu(dc, CC_OP_AND, | ||
79 | cpu_R[dc->op2], cpu_R[dc->op2], c, 4); | ||
80 | return 2; | ||
81 | @@ -XXX,XX +XXX,XX @@ static int dec_orq(CPUCRISState *env, DisasContext *dc) | ||
82 | LOG_DIS("orq %d, $r%d\n", imm, dc->op2); | ||
83 | cris_cc_mask(dc, CC_MASK_NZ); | ||
84 | |||
85 | - c = tcg_const_tl(imm); | ||
86 | + c = tcg_constant_tl(imm); | ||
87 | cris_alu(dc, CC_OP_OR, | ||
88 | cpu_R[dc->op2], cpu_R[dc->op2], c, 4); | ||
89 | return 2; | ||
90 | @@ -XXX,XX +XXX,XX @@ static int dec_btstq(CPUCRISState *env, DisasContext *dc) | ||
91 | LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2); | ||
92 | |||
93 | cris_cc_mask(dc, CC_MASK_NZ); | ||
94 | - c = tcg_const_tl(dc->op1); | ||
95 | + c = tcg_constant_tl(dc->op1); | ||
96 | cris_evaluate_flags(dc); | ||
97 | gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2], | ||
98 | c, cpu_PR[PR_CCS]); | ||
99 | @@ -XXX,XX +XXX,XX @@ static int dec_move_rs(CPUCRISState *env, DisasContext *dc) | ||
100 | { | ||
101 | TCGv c2, c1; | ||
102 | LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2); | ||
103 | - c1 = tcg_const_tl(dc->op1); | ||
104 | - c2 = tcg_const_tl(dc->op2); | ||
105 | + c1 = tcg_constant_tl(dc->op1); | ||
106 | + c2 = tcg_constant_tl(dc->op2); | ||
107 | cris_cc_mask(dc, 0); | ||
108 | gen_helper_movl_sreg_reg(cpu_env, c2, c1); | ||
109 | return 2; | ||
110 | @@ -XXX,XX +XXX,XX @@ static int dec_move_sr(CPUCRISState *env, DisasContext *dc) | ||
111 | { | ||
112 | TCGv c2, c1; | ||
113 | LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1); | ||
114 | - c1 = tcg_const_tl(dc->op1); | ||
115 | - c2 = tcg_const_tl(dc->op2); | ||
116 | + c1 = tcg_constant_tl(dc->op1); | ||
117 | + c2 = tcg_constant_tl(dc->op2); | ||
118 | cris_cc_mask(dc, 0); | ||
119 | gen_helper_movl_reg_sreg(cpu_env, c1, c2); | ||
120 | return 2; | ||
121 | @@ -XXX,XX +XXX,XX @@ static int dec_test_m(CPUCRISState *env, DisasContext *dc) | ||
122 | cris_cc_mask(dc, CC_MASK_NZ); | ||
123 | tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3); | ||
124 | |||
125 | - c = tcg_const_tl(0); | ||
126 | + c = tcg_constant_tl(0); | ||
127 | cris_alu(dc, CC_OP_CMP, | ||
128 | cpu_R[dc->op2], t[1], c, memsize_zz(dc)); | ||
129 | do_postinc(dc, memsize); | ||
130 | @@ -XXX,XX +XXX,XX @@ static int dec_jas_r(CPUCRISState *env, DisasContext *dc) | ||
131 | if (dc->op2 > 15) { | ||
132 | abort(); | ||
133 | } | ||
134 | - c = tcg_const_tl(dc->pc + 4); | ||
135 | + c = tcg_constant_tl(dc->pc + 4); | ||
136 | t_gen_mov_preg_TN(dc, dc->op2, c); | ||
137 | |||
138 | cris_prepare_jmp(dc, JMP_INDIRECT); | ||
139 | @@ -XXX,XX +XXX,XX @@ static int dec_jas_im(CPUCRISState *env, DisasContext *dc) | ||
140 | |||
141 | LOG_DIS("jas 0x%x\n", imm); | ||
142 | cris_cc_mask(dc, 0); | ||
143 | - c = tcg_const_tl(dc->pc + 8); | ||
144 | + c = tcg_constant_tl(dc->pc + 8); | ||
145 | /* Store the return address in Pd. */ | ||
146 | t_gen_mov_preg_TN(dc, dc->op2, c); | ||
147 | |||
148 | @@ -XXX,XX +XXX,XX @@ static int dec_jasc_im(CPUCRISState *env, DisasContext *dc) | ||
149 | |||
150 | LOG_DIS("jasc 0x%x\n", imm); | ||
151 | cris_cc_mask(dc, 0); | ||
152 | - c = tcg_const_tl(dc->pc + 8 + 4); | ||
153 | + c = tcg_constant_tl(dc->pc + 8 + 4); | ||
154 | /* Store the return address in Pd. */ | ||
155 | t_gen_mov_preg_TN(dc, dc->op2, c); | ||
156 | |||
157 | @@ -XXX,XX +XXX,XX @@ static int dec_jasc_r(CPUCRISState *env, DisasContext *dc) | ||
158 | cris_cc_mask(dc, 0); | ||
159 | /* Store the return address in Pd. */ | ||
160 | tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]); | ||
161 | - c = tcg_const_tl(dc->pc + 4 + 4); | ||
162 | + c = tcg_constant_tl(dc->pc + 4 + 4); | ||
163 | t_gen_mov_preg_TN(dc, dc->op2, c); | ||
164 | cris_prepare_jmp(dc, JMP_INDIRECT); | ||
165 | return 2; | ||
166 | @@ -XXX,XX +XXX,XX @@ static int dec_bas_im(CPUCRISState *env, DisasContext *dc) | ||
167 | |||
168 | LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2); | ||
169 | cris_cc_mask(dc, 0); | ||
170 | - c = tcg_const_tl(dc->pc + 8); | ||
171 | + c = tcg_constant_tl(dc->pc + 8); | ||
172 | /* Store the return address in Pd. */ | ||
173 | t_gen_mov_preg_TN(dc, dc->op2, c); | ||
174 | |||
175 | @@ -XXX,XX +XXX,XX @@ static int dec_basc_im(CPUCRISState *env, DisasContext *dc) | ||
176 | |||
177 | LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2); | ||
178 | cris_cc_mask(dc, 0); | ||
179 | - c = tcg_const_tl(dc->pc + 12); | ||
180 | + c = tcg_constant_tl(dc->pc + 12); | ||
181 | /* Store the return address in Pd. */ | ||
182 | t_gen_mov_preg_TN(dc, dc->op2, c); | ||
183 | |||
184 | @@ -XXX,XX +XXX,XX @@ static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc) | ||
185 | cris_cc_mask(dc, 0); | ||
186 | |||
187 | if (dc->op2 == 15) { | ||
188 | - tcg_gen_st_i32(tcg_const_i32(1), cpu_env, | ||
189 | + tcg_gen_st_i32(tcg_constant_i32(1), cpu_env, | ||
190 | -offsetof(CRISCPU, env) + offsetof(CPUState, halted)); | ||
191 | tcg_gen_movi_tl(env_pc, dc->pc + 2); | ||
192 | t_gen_raise_exception(EXCP_HLT); | ||
193 | diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc | ||
194 | index XXXXXXX..XXXXXXX 100644 | ||
195 | --- a/target/cris/translate_v10.c.inc | ||
196 | +++ b/target/cris/translate_v10.c.inc | ||
197 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_quick_imm(DisasContext *dc) | ||
198 | LOG_DIS("moveq %d, $r%d\n", simm, dc->dst); | ||
199 | |||
200 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
201 | - c = tcg_const_tl(simm); | ||
202 | + c = tcg_constant_tl(simm); | ||
203 | cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], | ||
204 | cpu_R[dc->dst], c, 4); | ||
205 | break; | ||
206 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_quick_imm(DisasContext *dc) | ||
207 | LOG_DIS("cmpq %d, $r%d\n", simm, dc->dst); | ||
208 | |||
209 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
210 | - c = tcg_const_tl(simm); | ||
211 | + c = tcg_constant_tl(simm); | ||
212 | cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst], | ||
213 | cpu_R[dc->dst], c, 4); | ||
214 | break; | ||
215 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_quick_imm(DisasContext *dc) | ||
216 | LOG_DIS("addq %d, $r%d\n", imm, dc->dst); | ||
217 | |||
218 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
219 | - c = tcg_const_tl(imm); | ||
220 | + c = tcg_constant_tl(imm); | ||
221 | cris_alu(dc, CC_OP_ADD, cpu_R[dc->dst], | ||
222 | cpu_R[dc->dst], c, 4); | ||
223 | break; | ||
224 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_quick_imm(DisasContext *dc) | ||
225 | LOG_DIS("andq %d, $r%d\n", simm, dc->dst); | ||
226 | |||
227 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
228 | - c = tcg_const_tl(simm); | ||
229 | + c = tcg_constant_tl(simm); | ||
230 | cris_alu(dc, CC_OP_AND, cpu_R[dc->dst], | ||
231 | cpu_R[dc->dst], c, 4); | ||
232 | break; | ||
233 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_quick_imm(DisasContext *dc) | ||
234 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
235 | op = imm & (1 << 5); | ||
236 | imm &= 0x1f; | ||
237 | - c = tcg_const_tl(imm); | ||
238 | + c = tcg_constant_tl(imm); | ||
239 | if (op) { | ||
240 | cris_alu(dc, CC_OP_ASR, cpu_R[dc->dst], | ||
241 | cpu_R[dc->dst], c, 4); | ||
242 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_quick_imm(DisasContext *dc) | ||
243 | } | ||
244 | imm &= 0x1f; | ||
245 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
246 | - c = tcg_const_tl(imm); | ||
247 | + c = tcg_constant_tl(imm); | ||
248 | cris_alu(dc, op, cpu_R[dc->dst], | ||
249 | cpu_R[dc->dst], c, 4); | ||
250 | break; | ||
251 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_quick_imm(DisasContext *dc) | ||
252 | LOG_DIS("subq %d, $r%d\n", imm, dc->dst); | ||
253 | |||
254 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
255 | - c = tcg_const_tl(imm); | ||
256 | + c = tcg_constant_tl(imm); | ||
257 | cris_alu(dc, CC_OP_SUB, cpu_R[dc->dst], | ||
258 | cpu_R[dc->dst], c, 4); | ||
259 | break; | ||
260 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_quick_imm(DisasContext *dc) | ||
261 | LOG_DIS("andq %d, $r%d\n", simm, dc->dst); | ||
262 | |||
263 | cris_cc_mask(dc, CC_MASK_NZVC); | ||
264 | - c = tcg_const_tl(simm); | ||
265 | + c = tcg_constant_tl(simm); | ||
266 | cris_alu(dc, CC_OP_OR, cpu_R[dc->dst], | ||
267 | cpu_R[dc->dst], c, 4); | ||
268 | break; | ||
269 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) | ||
270 | cris_alu_m_alloc_temps(t); | ||
271 | insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]); | ||
272 | tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3); | ||
273 | - c = tcg_const_tl(0); | ||
274 | + c = tcg_constant_tl(0); | ||
275 | cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst], | ||
276 | t[0], c, size); | ||
277 | break; | ||
278 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) | ||
279 | if (dc->mode == CRISV10_MODE_AUTOINC) | ||
280 | insn_len += size; | ||
281 | |||
282 | - c = tcg_const_tl(dc->pc + insn_len); | ||
283 | + c = tcg_constant_tl(dc->pc + insn_len); | ||
284 | t_gen_mov_preg_TN(dc, dc->dst, c); | ||
285 | dc->jmp_pc = imm; | ||
286 | cris_prepare_jmp(dc, JMP_DIRECT); | ||
287 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) | ||
288 | LOG_DIS("break %d\n", dc->src); | ||
289 | cris_evaluate_flags(dc); | ||
290 | tcg_gen_movi_tl(env_pc, dc->pc + 2); | ||
291 | - c = tcg_const_tl(dc->src + 2); | ||
292 | + c = tcg_constant_tl(dc->src + 2); | ||
293 | t_gen_mov_env_TN(trap_vector, c); | ||
294 | t_gen_raise_exception(EXCP_BREAK); | ||
295 | dc->base.is_jmp = DISAS_NORETURN; | ||
296 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) | ||
297 | LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size, | ||
298 | dc->opcode, dc->src, dc->dst); | ||
299 | t[0] = tcg_temp_new(); | ||
300 | - c = tcg_const_tl(dc->pc + insn_len); | ||
301 | + c = tcg_constant_tl(dc->pc + insn_len); | ||
302 | t_gen_mov_preg_TN(dc, dc->dst, c); | ||
303 | crisv10_prepare_memaddr(dc, t[0], size); | ||
304 | gen_load(dc, env_btarget, t[0], 4, 0); | ||
305 | @@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) | ||
306 | LOG_DIS("jmp pc=%x opcode=%d r%d r%d\n", | ||
307 | dc->pc, dc->opcode, dc->dst, dc->src); | ||
308 | tcg_gen_mov_tl(env_btarget, cpu_R[dc->src]); | ||
309 | - c = tcg_const_tl(dc->pc + insn_len); | ||
310 | + c = tcg_constant_tl(dc->pc + insn_len); | ||
311 | t_gen_mov_preg_TN(dc, dc->dst, c); | ||
312 | cris_prepare_jmp(dc, JMP_INDIRECT); | ||
313 | dc->delayed_branch--; /* v10 has no dslot here. */ | ||
314 | -- | ||
315 | 2.34.1 | ||
316 | |||
New patch | |||
---|---|---|---|
1 | Use a C test instead of a pre-processor test for the id. | ||
2 | Use tcg_constant_i64 instead of tcg_const_i64. | ||
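A generic sketch of the pattern, not taken from the patch: with a C test both
literals stay visible to the compiler in every build configuration, and the
constant condition is still folded away. WIDE_ID and NARROW_ID below are
placeholder names; only TARGET_REGISTER_BITS, save_frd and tcg_constant_i64
come from the code being changed.

    uint64_t id = TARGET_REGISTER_BITS == 64 ? WIDE_ID : NARROW_ID;
    save_frd(0, tcg_constant_i64(id));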
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/hppa/translate.c | 14 +++++++++----- | ||
8 | 1 file changed, 9 insertions(+), 5 deletions(-) | ||
9 | |||
10 | diff --git a/target/hppa/translate.c b/target/hppa/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/hppa/translate.c | ||
13 | +++ b/target/hppa/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) | ||
15 | |||
16 | static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a) | ||
17 | { | ||
18 | + uint64_t ret; | ||
19 | + | ||
20 | + if (TARGET_REGISTER_BITS == 64) { | ||
21 | + ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */ | ||
22 | + } else { | ||
23 | + ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */ | ||
24 | + } | ||
25 | + | ||
26 | nullify_over(ctx); | ||
27 | -#if TARGET_REGISTER_BITS == 64 | ||
28 | - save_frd(0, tcg_const_i64(0x13080000000000ULL)); /* PA8700 (PCX-W2) */ | ||
29 | -#else | ||
30 | - save_frd(0, tcg_const_i64(0x0f080000000000ULL)); /* PA7300LC (PCX-L2) */ | ||
31 | -#endif | ||
32 | + save_frd(0, tcg_constant_i64(ret)); | ||
33 | return nullify_end(ctx); | ||
34 | } | ||
35 | |||
36 | -- | ||
37 | 2.34.1 | ||
38 | |||
New patch | |||
---|---|---|---|
1 | All uses were read-write, so replace with a new | ||
2 | allocation and initialization. | ||
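As background for this conversion (a sketch under the series' premise, with an
illustrative helper name that the patch itself does not add): tcg_const_i64()
returned an ordinary writable temporary preloaded with a value, while
tcg_constant_i64() returns a read-only interned constant, so read-write users
become an explicit allocation plus initialization.

    static TCGv_i64 writable_i64(int64_t val)
    {
        TCGv_i64 t = tcg_temp_new_i64();   /* fresh temp, safe to overwrite */
        tcg_gen_movi_i64(t, val);          /* explicit initialization */
        return t;
    }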
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/hppa/translate.c | 21 +++++++++++---------- | ||
8 | 1 file changed, 11 insertions(+), 10 deletions(-) | ||
9 | |||
10 | diff --git a/target/hppa/translate.c b/target/hppa/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/hppa/translate.c | ||
13 | +++ b/target/hppa/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ | ||
15 | #define tcg_gen_extract_reg tcg_gen_extract_i64 | ||
16 | #define tcg_gen_sextract_reg tcg_gen_sextract_i64 | ||
17 | #define tcg_gen_extract2_reg tcg_gen_extract2_i64 | ||
18 | -#define tcg_const_reg tcg_const_i64 | ||
19 | -#define tcg_const_local_reg tcg_const_local_i64 | ||
20 | #define tcg_constant_reg tcg_constant_i64 | ||
21 | #define tcg_gen_movcond_reg tcg_gen_movcond_i64 | ||
22 | #define tcg_gen_add2_reg tcg_gen_add2_i64 | ||
23 | @@ -XXX,XX +XXX,XX @@ | ||
24 | #define tcg_gen_extract_reg tcg_gen_extract_i32 | ||
25 | #define tcg_gen_sextract_reg tcg_gen_sextract_i32 | ||
26 | #define tcg_gen_extract2_reg tcg_gen_extract2_i32 | ||
27 | -#define tcg_const_reg tcg_const_i32 | ||
28 | -#define tcg_const_local_reg tcg_const_local_i32 | ||
29 | #define tcg_constant_reg tcg_constant_i32 | ||
30 | #define tcg_gen_movcond_reg tcg_gen_movcond_i32 | ||
31 | #define tcg_gen_add2_reg tcg_gen_add2_i32 | ||
32 | @@ -XXX,XX +XXX,XX @@ static TCGv_i32 load_frw_i32(unsigned rt) | ||
33 | static TCGv_i32 load_frw0_i32(unsigned rt) | ||
34 | { | ||
35 | if (rt == 0) { | ||
36 | - return tcg_const_i32(0); | ||
37 | + TCGv_i32 ret = tcg_temp_new_i32(); | ||
38 | + tcg_gen_movi_i32(ret, 0); | ||
39 | + return ret; | ||
40 | } else { | ||
41 | return load_frw_i32(rt); | ||
42 | } | ||
43 | @@ -XXX,XX +XXX,XX @@ static TCGv_i32 load_frw0_i32(unsigned rt) | ||
44 | |||
45 | static TCGv_i64 load_frw0_i64(unsigned rt) | ||
46 | { | ||
47 | + TCGv_i64 ret = tcg_temp_new_i64(); | ||
48 | if (rt == 0) { | ||
49 | - return tcg_const_i64(0); | ||
50 | + tcg_gen_movi_i64(ret, 0); | ||
51 | } else { | ||
52 | - TCGv_i64 ret = tcg_temp_new_i64(); | ||
53 | tcg_gen_ld32u_i64(ret, cpu_env, | ||
54 | offsetof(CPUHPPAState, fr[rt & 31]) | ||
55 | + (rt & 32 ? LO_OFS : HI_OFS)); | ||
56 | - return ret; | ||
57 | } | ||
58 | + return ret; | ||
59 | } | ||
60 | |||
61 | static void save_frw_i32(unsigned rt, TCGv_i32 val) | ||
62 | @@ -XXX,XX +XXX,XX @@ static TCGv_i64 load_frd(unsigned rt) | ||
63 | static TCGv_i64 load_frd0(unsigned rt) | ||
64 | { | ||
65 | if (rt == 0) { | ||
66 | - return tcg_const_i64(0); | ||
67 | + TCGv_i64 ret = tcg_temp_new_i64(); | ||
68 | + tcg_gen_movi_i64(ret, 0); | ||
69 | + return ret; | ||
70 | } else { | ||
71 | return load_frd(rt); | ||
72 | } | ||
73 | @@ -XXX,XX +XXX,XX @@ static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c, | ||
74 | /* Convert big-endian bit numbering in SAR to left-shift. */ | ||
75 | tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1); | ||
76 | |||
77 | - mask = tcg_const_reg(msb + (msb - 1)); | ||
78 | + mask = tcg_temp_new(); | ||
79 | + tcg_gen_movi_reg(mask, msb + (msb - 1)); | ||
80 | tcg_gen_and_reg(tmp, val, mask); | ||
81 | if (rs) { | ||
82 | tcg_gen_shl_reg(mask, mask, shift); | ||
83 | -- | ||
84 | 2.34.1 | ||
85 | |||
New patch | |||
---|---|---|---|
1 | All uses are strictly read-only. Most of them obviously so, | ||
2 | as direct arguments to gen_helper_*. | ||
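For the read-only case the substitution is one-for-one; the interned constant
needs no allocation or free and must never be written. A minimal sketch of the
shape of the change (gen_helper_foo is a made-up helper name, not one used by
this patch):

    /* before: a temporary created just to pass a value */
    gen_helper_foo(cpu_env, tcg_const_i32(opreg));
    /* after: cached, read-only constant */
    gen_helper_foo(cpu_env, tcg_constant_i32(opreg));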
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/i386/tcg/translate.c | 83 +++++++++++++++++++------------------ | ||
8 | 1 file changed, 42 insertions(+), 41 deletions(-) | ||
9 | |||
10 | diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/i386/tcg/translate.c | ||
13 | +++ b/target/i386/tcg/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void gen_compute_eflags(DisasContext *s) | ||
15 | live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; | ||
16 | dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); | ||
17 | if (dead) { | ||
18 | - zero = tcg_const_tl(0); | ||
19 | + zero = tcg_constant_tl(0); | ||
20 | if (dead & USES_CC_DST) { | ||
21 | dst = zero; | ||
22 | } | ||
23 | @@ -XXX,XX +XXX,XX @@ static void gen_helper_fp_arith_ST0_FT0(int op) | ||
24 | /* NOTE the exception in "r" op ordering */ | ||
25 | static void gen_helper_fp_arith_STN_ST0(int op, int opreg) | ||
26 | { | ||
27 | - TCGv_i32 tmp = tcg_const_i32(opreg); | ||
28 | + TCGv_i32 tmp = tcg_constant_i32(opreg); | ||
29 | switch (op) { | ||
30 | case 0: | ||
31 | gen_helper_fadd_STN_ST0(cpu_env, tmp); | ||
32 | @@ -XXX,XX +XXX,XX @@ static void gen_exception(DisasContext *s, int trapno) | ||
33 | { | ||
34 | gen_update_cc_op(s); | ||
35 | gen_update_eip_cur(s); | ||
36 | - gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno)); | ||
37 | + gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno)); | ||
38 | s->base.is_jmp = DISAS_NORETURN; | ||
39 | } | ||
40 | |||
41 | @@ -XXX,XX +XXX,XX @@ static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, | ||
42 | /* Store the results into the CC variables. If we know that the | ||
43 | variable must be dead, store unconditionally. Otherwise we'll | ||
44 | need to not disrupt the current contents. */ | ||
45 | - z_tl = tcg_const_tl(0); | ||
46 | + z_tl = tcg_constant_tl(0); | ||
47 | if (cc_op_live[s->cc_op] & USES_CC_DST) { | ||
48 | tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl, | ||
49 | result, cpu_cc_dst); | ||
50 | @@ -XXX,XX +XXX,XX @@ static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, | ||
51 | } | ||
52 | |||
53 | /* Conditionally store the CC_OP value. */ | ||
54 | - z32 = tcg_const_i32(0); | ||
55 | + z32 = tcg_constant_i32(0); | ||
56 | s32 = tcg_temp_new_i32(); | ||
57 | tcg_gen_trunc_tl_i32(s32, count); | ||
58 | tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop); | ||
59 | @@ -XXX,XX +XXX,XX @@ static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) | ||
60 | is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live. | ||
61 | Otherwise reuse CC_OP_ADCOX which have the C and O flags split out | ||
62 | exactly as we computed above. */ | ||
63 | - t0 = tcg_const_i32(0); | ||
64 | + t0 = tcg_constant_i32(0); | ||
65 | t1 = tcg_temp_new_i32(); | ||
66 | tcg_gen_trunc_tl_i32(t1, s->T1); | ||
67 | tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX); | ||
68 | @@ -XXX,XX +XXX,XX @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, | ||
69 | cc.reg = t0; | ||
70 | } | ||
71 | if (!cc.use_reg2) { | ||
72 | - cc.reg2 = tcg_const_tl(cc.imm); | ||
73 | + cc.reg2 = tcg_constant_tl(cc.imm); | ||
74 | } | ||
75 | |||
76 | tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2, | ||
77 | @@ -XXX,XX +XXX,XX @@ static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg) | ||
78 | { | ||
79 | if (PE(s) && !VM86(s)) { | ||
80 | tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); | ||
81 | - gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32); | ||
82 | + gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32); | ||
83 | /* abort translation because the addseg value may change or | ||
84 | because ss32 may change. For R_SS, translation must always | ||
85 | stop as a special handling must be done to disable hardware | ||
86 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
87 | gen_op_mov_v_reg(s, ot, s->T1, reg); | ||
88 | |||
89 | if (shift) { | ||
90 | - TCGv imm = tcg_const_tl(x86_ldub_code(env, s)); | ||
91 | + TCGv imm = tcg_constant_tl(x86_ldub_code(env, s)); | ||
92 | gen_shiftd_rm_T1(s, ot, opreg, op, imm); | ||
93 | } else { | ||
94 | gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]); | ||
95 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
96 | break; | ||
97 | case 0x0c: /* fldenv mem */ | ||
98 | gen_helper_fldenv(cpu_env, s->A0, | ||
99 | - tcg_const_i32(dflag - 1)); | ||
100 | + tcg_constant_i32(dflag - 1)); | ||
101 | update_fip = update_fdp = false; | ||
102 | break; | ||
103 | case 0x0d: /* fldcw mem */ | ||
104 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
105 | break; | ||
106 | case 0x0e: /* fnstenv mem */ | ||
107 | gen_helper_fstenv(cpu_env, s->A0, | ||
108 | - tcg_const_i32(dflag - 1)); | ||
109 | + tcg_constant_i32(dflag - 1)); | ||
110 | update_fip = update_fdp = false; | ||
111 | break; | ||
112 | case 0x0f: /* fnstcw mem */ | ||
113 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
114 | break; | ||
115 | case 0x2c: /* frstor mem */ | ||
116 | gen_helper_frstor(cpu_env, s->A0, | ||
117 | - tcg_const_i32(dflag - 1)); | ||
118 | + tcg_constant_i32(dflag - 1)); | ||
119 | update_fip = update_fdp = false; | ||
120 | break; | ||
121 | case 0x2e: /* fnsave mem */ | ||
122 | gen_helper_fsave(cpu_env, s->A0, | ||
123 | - tcg_const_i32(dflag - 1)); | ||
124 | + tcg_constant_i32(dflag - 1)); | ||
125 | update_fip = update_fdp = false; | ||
126 | break; | ||
127 | case 0x2f: /* fnstsw mem */ | ||
128 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
129 | case 0x08: /* fld sti */ | ||
130 | gen_helper_fpush(cpu_env); | ||
131 | gen_helper_fmov_ST0_STN(cpu_env, | ||
132 | - tcg_const_i32((opreg + 1) & 7)); | ||
133 | + tcg_constant_i32((opreg + 1) & 7)); | ||
134 | break; | ||
135 | case 0x09: /* fxchg sti */ | ||
136 | case 0x29: /* fxchg4 sti, undocumented op */ | ||
137 | case 0x39: /* fxchg7 sti, undocumented op */ | ||
138 | - gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg)); | ||
139 | + gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg)); | ||
140 | break; | ||
141 | case 0x0a: /* grp d9/2 */ | ||
142 | switch (rm) { | ||
143 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
144 | } | ||
145 | } else { | ||
146 | gen_helper_fmov_FT0_STN(cpu_env, | ||
147 | - tcg_const_i32(opreg)); | ||
148 | + tcg_constant_i32(opreg)); | ||
149 | gen_helper_fp_arith_ST0_FT0(op1); | ||
150 | } | ||
151 | } | ||
152 | break; | ||
153 | case 0x02: /* fcom */ | ||
154 | case 0x22: /* fcom2, undocumented op */ | ||
155 | - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); | ||
156 | + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); | ||
157 | gen_helper_fcom_ST0_FT0(cpu_env); | ||
158 | break; | ||
159 | case 0x03: /* fcomp */ | ||
160 | case 0x23: /* fcomp3, undocumented op */ | ||
161 | case 0x32: /* fcomp5, undocumented op */ | ||
162 | - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); | ||
163 | + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); | ||
164 | gen_helper_fcom_ST0_FT0(cpu_env); | ||
165 | gen_helper_fpop(cpu_env); | ||
166 | break; | ||
167 | case 0x15: /* da/5 */ | ||
168 | switch (rm) { | ||
169 | case 1: /* fucompp */ | ||
170 | - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); | ||
171 | + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1)); | ||
172 | gen_helper_fucom_ST0_FT0(cpu_env); | ||
173 | gen_helper_fpop(cpu_env); | ||
174 | gen_helper_fpop(cpu_env); | ||
175 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
176 | goto illegal_op; | ||
177 | } | ||
178 | gen_update_cc_op(s); | ||
179 | - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); | ||
180 | + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); | ||
181 | gen_helper_fucomi_ST0_FT0(cpu_env); | ||
182 | set_cc_op(s, CC_OP_EFLAGS); | ||
183 | break; | ||
184 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
185 | goto illegal_op; | ||
186 | } | ||
187 | gen_update_cc_op(s); | ||
188 | - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); | ||
189 | + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); | ||
190 | gen_helper_fcomi_ST0_FT0(cpu_env); | ||
191 | set_cc_op(s, CC_OP_EFLAGS); | ||
192 | break; | ||
193 | case 0x28: /* ffree sti */ | ||
194 | - gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); | ||
195 | + gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg)); | ||
196 | break; | ||
197 | case 0x2a: /* fst sti */ | ||
198 | - gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); | ||
199 | + gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg)); | ||
200 | break; | ||
201 | case 0x2b: /* fstp sti */ | ||
202 | case 0x0b: /* fstp1 sti, undocumented op */ | ||
203 | case 0x3a: /* fstp8 sti, undocumented op */ | ||
204 | case 0x3b: /* fstp9 sti, undocumented op */ | ||
205 | - gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); | ||
206 | + gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg)); | ||
207 | gen_helper_fpop(cpu_env); | ||
208 | break; | ||
209 | case 0x2c: /* fucom st(i) */ | ||
210 | - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); | ||
211 | + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); | ||
212 | gen_helper_fucom_ST0_FT0(cpu_env); | ||
213 | break; | ||
214 | case 0x2d: /* fucomp st(i) */ | ||
215 | - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); | ||
216 | + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); | ||
217 | gen_helper_fucom_ST0_FT0(cpu_env); | ||
218 | gen_helper_fpop(cpu_env); | ||
219 | break; | ||
220 | case 0x33: /* de/3 */ | ||
221 | switch (rm) { | ||
222 | case 1: /* fcompp */ | ||
223 | - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); | ||
224 | + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1)); | ||
225 | gen_helper_fcom_ST0_FT0(cpu_env); | ||
226 | gen_helper_fpop(cpu_env); | ||
227 | gen_helper_fpop(cpu_env); | ||
228 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
229 | } | ||
230 | break; | ||
231 | case 0x38: /* ffreep sti, undocumented op */ | ||
232 | - gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); | ||
233 | + gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg)); | ||
234 | gen_helper_fpop(cpu_env); | ||
235 | break; | ||
236 | case 0x3c: /* df/4 */ | ||
237 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
238 | goto illegal_op; | ||
239 | } | ||
240 | gen_update_cc_op(s); | ||
241 | - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); | ||
242 | + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); | ||
243 | gen_helper_fucomi_ST0_FT0(cpu_env); | ||
244 | gen_helper_fpop(cpu_env); | ||
245 | set_cc_op(s, CC_OP_EFLAGS); | ||
246 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
247 | goto illegal_op; | ||
248 | } | ||
249 | gen_update_cc_op(s); | ||
250 | - gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); | ||
251 | + gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); | ||
252 | gen_helper_fcomi_ST0_FT0(cpu_env); | ||
253 | gen_helper_fpop(cpu_env); | ||
254 | set_cc_op(s, CC_OP_EFLAGS); | ||
255 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
256 | op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); | ||
257 | l1 = gen_new_label(); | ||
258 | gen_jcc1_noeob(s, op1, l1); | ||
259 | - gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg)); | ||
260 | + gen_helper_fmov_ST0_STN(cpu_env, | ||
261 | + tcg_constant_i32(opreg)); | ||
262 | gen_set_label(l1); | ||
263 | } | ||
264 | break; | ||
265 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
266 | if (PE(s) && !VM86(s)) { | ||
267 | gen_update_cc_op(s); | ||
268 | gen_update_eip_cur(s); | ||
269 | - gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1), | ||
270 | - tcg_const_i32(val)); | ||
271 | + gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1), | ||
272 | + tcg_constant_i32(val)); | ||
273 | } else { | ||
274 | gen_stack_A0(s); | ||
275 | /* pop offset */ | ||
276 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
277 | if (!check_vm86_iopl(s)) { | ||
278 | break; | ||
279 | } | ||
280 | - gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); | ||
281 | + gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1)); | ||
282 | } else { | ||
283 | gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1), | ||
284 | eip_next_i32(s)); | ||
285 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
286 | if (val == 0) { | ||
287 | gen_exception(s, EXCP00_DIVZ); | ||
288 | } else { | ||
289 | - gen_helper_aam(cpu_env, tcg_const_i32(val)); | ||
290 | + gen_helper_aam(cpu_env, tcg_constant_i32(val)); | ||
291 | set_cc_op(s, CC_OP_LOGICB); | ||
292 | } | ||
293 | break; | ||
294 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
295 | if (CODE64(s)) | ||
296 | goto illegal_op; | ||
297 | val = x86_ldub_code(env, s); | ||
298 | - gen_helper_aad(cpu_env, tcg_const_i32(val)); | ||
299 | + gen_helper_aad(cpu_env, tcg_constant_i32(val)); | ||
300 | set_cc_op(s, CC_OP_LOGICB); | ||
301 | break; | ||
302 | /************************/ | ||
303 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
304 | if (!PE(s)) { | ||
305 | gen_exception_gpf(s); | ||
306 | } else { | ||
307 | - gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1)); | ||
308 | + gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1)); | ||
309 | s->base.is_jmp = DISAS_EOB_ONLY; | ||
310 | } | ||
311 | break; | ||
312 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
313 | if (!PE(s)) { | ||
314 | gen_exception_gpf(s); | ||
315 | } else { | ||
316 | - gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1)); | ||
317 | + gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1)); | ||
318 | /* condition codes are modified only in long mode */ | ||
319 | if (LMA(s)) { | ||
320 | set_cc_op(s, CC_OP_EFLAGS); | ||
321 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
322 | } | ||
323 | gen_update_cc_op(s); | ||
324 | gen_update_eip_cur(s); | ||
325 | - gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1), | ||
326 | + gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1), | ||
327 | cur_insn_len_i32(s)); | ||
328 | tcg_gen_exit_tb(NULL, 0); | ||
329 | s->base.is_jmp = DISAS_NORETURN; | ||
330 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
331 | } | ||
332 | gen_update_cc_op(s); | ||
333 | gen_update_eip_cur(s); | ||
334 | - gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1)); | ||
335 | + gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1)); | ||
336 | break; | ||
337 | |||
338 | case 0xdb: /* VMSAVE */ | ||
339 | @@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu) | ||
340 | } | ||
341 | gen_update_cc_op(s); | ||
342 | gen_update_eip_cur(s); | ||
343 | - gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1)); | ||
344 | + gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1)); | ||
345 | break; | ||
346 | |||
347 | case 0xdc: /* STGI */ | ||
348 | -- | ||
349 | 2.34.1 | ||
350 | |||
New patch | |||
---|---|---|---|
1 | In several instances, a temp is initialized with a value | ||
2 | for use as a constant, and then subsequently used | ||
3 | as an unrelated temp. Split them. | ||
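In generic form, the anti-pattern being split is a temp that first carries a
constant and is then recycled as scratch, which a read-only tcg_constant_*
cannot express. The sketch below mirrors the bcd_add() hunk in this patch:

    /* Previously: t0 = tcg_const_i32(0x066); tcg_gen_add_i32(t0, t0, src);
     * i.e. the "constant" temp was immediately overwritten.  Now the scratch
     * is a real temp and the constant folds into the immediate add. */
    TCGv t0 = tcg_temp_new();
    tcg_gen_addi_i32(t0, src, 0x066);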
1 | 4 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/m68k/translate.c | 29 ++++++++++++++++------------- | ||
9 | 1 file changed, 16 insertions(+), 13 deletions(-) | ||
10 | |||
11 | diff --git a/target/m68k/translate.c b/target/m68k/translate.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/m68k/translate.c | ||
14 | +++ b/target/m68k/translate.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static void bcd_add(TCGv dest, TCGv src) | ||
16 | * = result with some possible exceeding 0x6 | ||
17 | */ | ||
18 | |||
19 | - t0 = tcg_const_i32(0x066); | ||
20 | - tcg_gen_add_i32(t0, t0, src); | ||
21 | + t0 = tcg_temp_new(); | ||
22 | + tcg_gen_addi_i32(t0, src, 0x066); | ||
23 | |||
24 | t1 = tcg_temp_new(); | ||
25 | tcg_gen_add_i32(t1, t0, dest); | ||
26 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(nbcd) | ||
27 | |||
28 | SRC_EA(env, src, OS_BYTE, 0, &addr); | ||
29 | |||
30 | - dest = tcg_const_i32(0); | ||
31 | + dest = tcg_temp_new(); | ||
32 | + tcg_gen_movi_i32(dest, 0); | ||
33 | bcd_sub(dest, src); | ||
34 | |||
35 | DEST_EA(env, insn, OS_BYTE, dest, &addr); | ||
36 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(bitop_reg) | ||
37 | else | ||
38 | tcg_gen_andi_i32(src2, DREG(insn, 9), 31); | ||
39 | |||
40 | - tmp = tcg_const_i32(1); | ||
41 | - tcg_gen_shl_i32(tmp, tmp, src2); | ||
42 | + tmp = tcg_temp_new(); | ||
43 | + tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2); | ||
44 | |||
45 | tcg_gen_and_i32(QREG_CC_Z, src1, tmp); | ||
46 | |||
47 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(suba) | ||
48 | |||
49 | static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize) | ||
50 | { | ||
51 | - TCGv tmp; | ||
52 | + TCGv tmp, zero; | ||
53 | |||
54 | gen_flush_flags(s); /* compute old Z */ | ||
55 | |||
56 | @@ -XXX,XX +XXX,XX @@ static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize) | ||
57 | * (X, N) = dest - (src + X); | ||
58 | */ | ||
59 | |||
60 | - tmp = tcg_const_i32(0); | ||
61 | - tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp); | ||
62 | - tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X); | ||
63 | + zero = tcg_constant_i32(0); | ||
64 | + tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero); | ||
65 | + tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X); | ||
66 | gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); | ||
67 | tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1); | ||
68 | |||
69 | /* Compute signed-overflow for subtract. */ | ||
70 | |||
71 | + tmp = tcg_temp_new(); | ||
72 | tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest); | ||
73 | tcg_gen_xor_i32(tmp, dest, src); | ||
74 | tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp); | ||
75 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(adda) | ||
76 | |||
77 | static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize) | ||
78 | { | ||
79 | - TCGv tmp; | ||
80 | + TCGv tmp, zero; | ||
81 | |||
82 | gen_flush_flags(s); /* compute old Z */ | ||
83 | |||
84 | @@ -XXX,XX +XXX,XX @@ static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize) | ||
85 | * (X, N) = src + dest + X; | ||
86 | */ | ||
87 | |||
88 | - tmp = tcg_const_i32(0); | ||
89 | - tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp); | ||
90 | - tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp); | ||
91 | + zero = tcg_constant_i32(0); | ||
92 | + tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero); | ||
93 | + tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero); | ||
94 | gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); | ||
95 | |||
96 | /* Compute signed-overflow for addition. */ | ||
97 | |||
98 | + tmp = tcg_temp_new(); | ||
99 | tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src); | ||
100 | tcg_gen_xor_i32(tmp, dest, src); | ||
101 | tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp); | ||
102 | -- | ||
103 | 2.34.1 | ||
104 | |||
New patch | |||
---|---|---|---|
1 | Tidy up the whole function, hoisting is_bfffo as a common test | ||
2 | for whether tlen and tofs are needed. Use tcg_constant_i32, and load | ||
3 | a separate temporary for mask. | ||
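The hoisting itself is ordinary C: compute the opcode predicate once instead of
encoding it indirectly as "tofs != NULL", so the immediate-operand case can use
read-only constants. Roughly, as in the hunk below:

    bool is_bfffo = (insn & 0x0f00) == 0x0d00;   /* only bfffo needs tofs/tlen */
    if (is_bfffo) {
        tofs = tcg_constant_i32(ofs);
        tlen = tcg_constant_i32(len);
    }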
1 | 4 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/m68k/translate.c | 42 ++++++++++++++++++++--------------------- | ||
9 | 1 file changed, 20 insertions(+), 22 deletions(-) | ||
10 | |||
11 | diff --git a/target/m68k/translate.c b/target/m68k/translate.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/m68k/translate.c | ||
14 | +++ b/target/m68k/translate.c | ||
15 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(bfop_reg) | ||
16 | TCGv src = DREG(insn, 0); | ||
17 | int len = ((extract32(ext, 0, 5) - 1) & 31) + 1; | ||
18 | int ofs = extract32(ext, 6, 5); /* big bit-endian */ | ||
19 | - TCGv mask, tofs, tlen; | ||
20 | - | ||
21 | - tofs = NULL; | ||
22 | - tlen = NULL; | ||
23 | - if ((insn & 0x0f00) == 0x0d00) { /* bfffo */ | ||
24 | - tofs = tcg_temp_new(); | ||
25 | - tlen = tcg_temp_new(); | ||
26 | - } | ||
27 | + TCGv mask, tofs = NULL, tlen = NULL; | ||
28 | + bool is_bfffo = (insn & 0x0f00) == 0x0d00; | ||
29 | |||
30 | if ((ext & 0x820) == 0) { | ||
31 | /* Immediate width and offset. */ | ||
32 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(bfop_reg) | ||
33 | tcg_gen_rotli_i32(QREG_CC_N, src, ofs); | ||
34 | } | ||
35 | tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski); | ||
36 | - mask = tcg_const_i32(ror32(maski, ofs)); | ||
37 | - if (tofs) { | ||
38 | - tcg_gen_movi_i32(tofs, ofs); | ||
39 | - tcg_gen_movi_i32(tlen, len); | ||
40 | + | ||
41 | + mask = tcg_constant_i32(ror32(maski, ofs)); | ||
42 | + if (is_bfffo) { | ||
43 | + tofs = tcg_constant_i32(ofs); | ||
44 | + tlen = tcg_constant_i32(len); | ||
45 | } | ||
46 | } else { | ||
47 | TCGv tmp = tcg_temp_new(); | ||
48 | + | ||
49 | + mask = tcg_temp_new(); | ||
50 | if (ext & 0x20) { | ||
51 | /* Variable width */ | ||
52 | tcg_gen_subi_i32(tmp, DREG(ext, 0), 1); | ||
53 | tcg_gen_andi_i32(tmp, tmp, 31); | ||
54 | - mask = tcg_const_i32(0x7fffffffu); | ||
55 | - tcg_gen_shr_i32(mask, mask, tmp); | ||
56 | - if (tlen) { | ||
57 | + tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp); | ||
58 | + if (is_bfffo) { | ||
59 | + tlen = tcg_temp_new(); | ||
60 | tcg_gen_addi_i32(tlen, tmp, 1); | ||
61 | } | ||
62 | } else { | ||
63 | /* Immediate width */ | ||
64 | - mask = tcg_const_i32(0x7fffffffu >> (len - 1)); | ||
65 | - if (tlen) { | ||
66 | - tcg_gen_movi_i32(tlen, len); | ||
67 | + tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1)); | ||
68 | + if (is_bfffo) { | ||
69 | + tlen = tcg_constant_i32(len); | ||
70 | } | ||
71 | } | ||
72 | + | ||
73 | if (ext & 0x800) { | ||
74 | /* Variable offset */ | ||
75 | tcg_gen_andi_i32(tmp, DREG(ext, 6), 31); | ||
76 | tcg_gen_rotl_i32(QREG_CC_N, src, tmp); | ||
77 | tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask); | ||
78 | tcg_gen_rotr_i32(mask, mask, tmp); | ||
79 | - if (tofs) { | ||
80 | - tcg_gen_mov_i32(tofs, tmp); | ||
81 | + if (is_bfffo) { | ||
82 | + tofs = tmp; | ||
83 | } | ||
84 | } else { | ||
85 | /* Immediate offset (and variable width) */ | ||
86 | tcg_gen_rotli_i32(QREG_CC_N, src, ofs); | ||
87 | tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask); | ||
88 | tcg_gen_rotri_i32(mask, mask, ofs); | ||
89 | - if (tofs) { | ||
90 | - tcg_gen_movi_i32(tofs, ofs); | ||
91 | + if (is_bfffo) { | ||
92 | + tofs = tcg_constant_i32(ofs); | ||
93 | } | ||
94 | } | ||
95 | } | ||
96 | -- | ||
97 | 2.34.1 | ||
98 | |||
New patch | |||
---|---|---|---|
1 | All remaining uses are strictly read-only. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/m68k/translate.c | 158 ++++++++++++++++++++-------------------- | ||
7 | 1 file changed, 77 insertions(+), 81 deletions(-) | ||
8 | |||
9 | diff --git a/target/m68k/translate.c b/target/m68k/translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/m68k/translate.c | ||
12 | +++ b/target/m68k/translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static void gen_jmp(DisasContext *s, TCGv dest) | ||
14 | |||
15 | static void gen_raise_exception(int nr) | ||
16 | { | ||
17 | - TCGv_i32 tmp; | ||
18 | - | ||
19 | - tmp = tcg_const_i32(nr); | ||
20 | - gen_helper_raise_exception(cpu_env, tmp); | ||
21 | + gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr)); | ||
22 | } | ||
23 | |||
24 | static void gen_raise_exception_format2(DisasContext *s, int nr, | ||
25 | @@ -XXX,XX +XXX,XX @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) | ||
26 | if ((ext & 0x80) == 0) { | ||
27 | /* base not suppressed */ | ||
28 | if (IS_NULL_QREG(base)) { | ||
29 | - base = tcg_const_i32(offset + bd); | ||
30 | + base = tcg_constant_i32(offset + bd); | ||
31 | bd = 0; | ||
32 | } | ||
33 | if (!IS_NULL_QREG(add)) { | ||
34 | @@ -XXX,XX +XXX,XX @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) | ||
35 | add = tmp; | ||
36 | } | ||
37 | } else { | ||
38 | - add = tcg_const_i32(bd); | ||
39 | + add = tcg_constant_i32(bd); | ||
40 | } | ||
41 | if ((ext & 3) != 0) { | ||
42 | /* memory indirect */ | ||
43 | @@ -XXX,XX +XXX,XX @@ static void gen_flush_flags(DisasContext *s) | ||
44 | break; | ||
45 | |||
46 | default: | ||
47 | - t0 = tcg_const_i32(s->cc_op); | ||
48 | - gen_helper_flush_flags(cpu_env, t0); | ||
49 | + gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op)); | ||
50 | s->cc_op_synced = 1; | ||
51 | break; | ||
52 | } | ||
53 | @@ -XXX,XX +XXX,XX @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, | ||
54 | switch (reg0) { | ||
55 | case 0: /* Absolute short. */ | ||
56 | offset = (int16_t)read_im16(env, s); | ||
57 | - return tcg_const_i32(offset); | ||
58 | + return tcg_constant_i32(offset); | ||
59 | case 1: /* Absolute long. */ | ||
60 | offset = read_im32(env, s); | ||
61 | - return tcg_const_i32(offset); | ||
62 | + return tcg_constant_i32(offset); | ||
63 | case 2: /* pc displacement */ | ||
64 | offset = s->pc; | ||
65 | offset += (int16_t)read_im16(env, s); | ||
66 | - return tcg_const_i32(offset); | ||
67 | + return tcg_constant_i32(offset); | ||
68 | case 3: /* pc index+displacement. */ | ||
69 | return gen_lea_indexed(env, s, NULL_QREG); | ||
70 | case 4: /* Immediate. */ | ||
71 | @@ -XXX,XX +XXX,XX @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode, | ||
72 | } | ||
73 | switch (opsize) { | ||
74 | case OS_BYTE: | ||
75 | - tmp = tcg_const_i32((int8_t)read_im8(env, s)); | ||
76 | + tmp = tcg_constant_i32((int8_t)read_im8(env, s)); | ||
77 | gen_helper_exts32(cpu_env, fp, tmp); | ||
78 | break; | ||
79 | case OS_WORD: | ||
80 | - tmp = tcg_const_i32((int16_t)read_im16(env, s)); | ||
81 | + tmp = tcg_constant_i32((int16_t)read_im16(env, s)); | ||
82 | gen_helper_exts32(cpu_env, fp, tmp); | ||
83 | break; | ||
84 | case OS_LONG: | ||
85 | - tmp = tcg_const_i32(read_im32(env, s)); | ||
86 | + tmp = tcg_constant_i32(read_im32(env, s)); | ||
87 | gen_helper_exts32(cpu_env, fp, tmp); | ||
88 | break; | ||
89 | case OS_SINGLE: | ||
90 | - tmp = tcg_const_i32(read_im32(env, s)); | ||
91 | + tmp = tcg_constant_i32(read_im32(env, s)); | ||
92 | gen_helper_extf32(cpu_env, fp, tmp); | ||
93 | break; | ||
94 | case OS_DOUBLE: | ||
95 | - t64 = tcg_const_i64(read_im64(env, s)); | ||
96 | + t64 = tcg_constant_i64(read_im64(env, s)); | ||
97 | gen_helper_extf64(cpu_env, fp, t64); | ||
98 | break; | ||
99 | case OS_EXTENDED: | ||
100 | @@ -XXX,XX +XXX,XX @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode, | ||
101 | gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); | ||
102 | break; | ||
103 | } | ||
104 | - tmp = tcg_const_i32(read_im32(env, s) >> 16); | ||
105 | + tmp = tcg_constant_i32(read_im32(env, s) >> 16); | ||
106 | tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper)); | ||
107 | - t64 = tcg_const_i64(read_im64(env, s)); | ||
108 | + t64 = tcg_constant_i64(read_im64(env, s)); | ||
109 | tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower)); | ||
110 | break; | ||
111 | case OS_PACKED: | ||
112 | @@ -XXX,XX +XXX,XX @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) | ||
113 | goto done; | ||
114 | case 10: /* PL */ | ||
115 | case 11: /* MI */ | ||
116 | - c->v2 = tcg_const_i32(0); | ||
117 | + c->v2 = tcg_constant_i32(0); | ||
118 | c->v1 = tmp = tcg_temp_new(); | ||
119 | tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V); | ||
120 | gen_ext(tmp, tmp, op - CC_OP_CMPB, 1); | ||
121 | @@ -XXX,XX +XXX,XX @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) | ||
122 | } | ||
123 | } | ||
124 | |||
125 | - c->v2 = tcg_const_i32(0); | ||
126 | + c->v2 = tcg_constant_i32(0); | ||
127 | |||
128 | switch (cond) { | ||
129 | case 0: /* T */ | ||
130 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(movem) | ||
131 | |||
132 | addr = tcg_temp_new(); | ||
133 | tcg_gen_mov_i32(addr, tmp); | ||
134 | - incr = tcg_const_i32(opsize_bytes(opsize)); | ||
135 | + incr = tcg_constant_i32(opsize_bytes(opsize)); | ||
136 | |||
137 | if (is_load) { | ||
138 | /* memory to register */ | ||
139 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(arith_im) | ||
140 | opsize = insn_opsize(insn); | ||
141 | switch (opsize) { | ||
142 | case OS_BYTE: | ||
143 | - im = tcg_const_i32((int8_t)read_im8(env, s)); | ||
144 | + im = tcg_constant_i32((int8_t)read_im8(env, s)); | ||
145 | break; | ||
146 | case OS_WORD: | ||
147 | - im = tcg_const_i32((int16_t)read_im16(env, s)); | ||
148 | + im = tcg_constant_i32((int16_t)read_im16(env, s)); | ||
149 | break; | ||
150 | case OS_LONG: | ||
151 | - im = tcg_const_i32(read_im32(env, s)); | ||
152 | + im = tcg_constant_i32(read_im32(env, s)); | ||
153 | break; | ||
154 | default: | ||
155 | g_assert_not_reached(); | ||
156 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(cas2w) | ||
157 | { | ||
158 | uint16_t ext1, ext2; | ||
159 | TCGv addr1, addr2; | ||
160 | - TCGv regs; | ||
161 | |||
162 | /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */ | ||
163 | |||
164 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(cas2w) | ||
165 | * Dc2 = (R2) | ||
166 | */ | ||
167 | |||
168 | - regs = tcg_const_i32(REG(ext2, 6) | | ||
169 | - (REG(ext1, 6) << 3) | | ||
170 | - (REG(ext2, 0) << 6) | | ||
171 | - (REG(ext1, 0) << 9)); | ||
172 | if (tb_cflags(s->base.tb) & CF_PARALLEL) { | ||
173 | gen_helper_exit_atomic(cpu_env); | ||
174 | } else { | ||
175 | + TCGv regs = tcg_constant_i32(REG(ext2, 6) | | ||
176 | + (REG(ext1, 6) << 3) | | ||
177 | + (REG(ext2, 0) << 6) | | ||
178 | + (REG(ext1, 0) << 9)); | ||
179 | gen_helper_cas2w(cpu_env, regs, addr1, addr2); | ||
180 | } | ||
181 | |||
182 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(cas2l) | ||
183 | * Dc2 = (R2) | ||
184 | */ | ||
185 | |||
186 | - regs = tcg_const_i32(REG(ext2, 6) | | ||
187 | - (REG(ext1, 6) << 3) | | ||
188 | - (REG(ext2, 0) << 6) | | ||
189 | - (REG(ext1, 0) << 9)); | ||
190 | + regs = tcg_constant_i32(REG(ext2, 6) | | ||
191 | + (REG(ext1, 6) << 3) | | ||
192 | + (REG(ext2, 0) << 6) | | ||
193 | + (REG(ext1, 0) << 9)); | ||
194 | if (tb_cflags(s->base.tb) & CF_PARALLEL) { | ||
195 | gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2); | ||
196 | } else { | ||
197 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(negx) | ||
198 | * (X, N) = -(src + X); | ||
199 | */ | ||
200 | |||
201 | - z = tcg_const_i32(0); | ||
202 | + z = tcg_constant_i32(0); | ||
203 | tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z); | ||
204 | tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X); | ||
205 | gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); | ||
206 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(clr) | ||
207 | int opsize; | ||
208 | TCGv zero; | ||
209 | |||
210 | - zero = tcg_const_i32(0); | ||
211 | - | ||
212 | + zero = tcg_constant_i32(0); | ||
213 | opsize = insn_opsize(insn); | ||
214 | DEST_EA(env, insn, opsize, zero, NULL); | ||
215 | gen_logic_cc(s, zero, opsize); | ||
216 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(jump) | ||
217 | } | ||
218 | if ((insn & 0x40) == 0) { | ||
219 | /* jsr */ | ||
220 | - gen_push(s, tcg_const_i32(s->pc)); | ||
221 | + gen_push(s, tcg_constant_i32(s->pc)); | ||
222 | } | ||
223 | gen_jmp(s, tmp); | ||
224 | } | ||
225 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(addsubq) | ||
226 | if (imm == 0) { | ||
227 | imm = 8; | ||
228 | } | ||
229 | - val = tcg_const_i32(imm); | ||
230 | + val = tcg_constant_i32(imm); | ||
231 | dest = tcg_temp_new(); | ||
232 | tcg_gen_mov_i32(dest, src); | ||
233 | if ((insn & 0x38) == 0x08) { | ||
234 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(branch) | ||
235 | } | ||
236 | if (op == 1) { | ||
237 | /* bsr */ | ||
238 | - gen_push(s, tcg_const_i32(s->pc)); | ||
239 | + gen_push(s, tcg_constant_i32(s->pc)); | ||
240 | } | ||
241 | if (op > 1) { | ||
242 | /* Bcc */ | ||
243 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(mov3q) | ||
244 | int val; | ||
245 | |||
246 | val = (insn >> 9) & 7; | ||
247 | - if (val == 0) | ||
248 | + if (val == 0) { | ||
249 | val = -1; | ||
250 | - src = tcg_const_i32(val); | ||
251 | + } | ||
252 | + src = tcg_constant_i32(val); | ||
253 | gen_logic_cc(s, src, OS_LONG); | ||
254 | DEST_EA(env, insn, OS_LONG, src, NULL); | ||
255 | } | ||
256 | @@ -XXX,XX +XXX,XX @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize) | ||
257 | tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64); | ||
258 | /* Note that C=0 if shift count is 0, and we get that for free. */ | ||
259 | } else { | ||
260 | - TCGv zero = tcg_const_i32(0); | ||
261 | + TCGv zero = tcg_constant_i32(0); | ||
262 | tcg_gen_extrl_i64_i32(QREG_CC_N, t64); | ||
263 | tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits); | ||
264 | tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C, | ||
265 | @@ -XXX,XX +XXX,XX @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize) | ||
266 | * V = ((s ^ t) & (-1 << (bits - 1))) != 0 | ||
267 | */ | ||
268 | if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) { | ||
269 | - TCGv_i64 tt = tcg_const_i64(32); | ||
270 | + TCGv_i64 tt = tcg_constant_i64(32); | ||
271 | /* if shift is greater than 32, use 32 */ | ||
272 | tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64); | ||
273 | /* Sign extend the input to 64 bits; re-do the shift. */ | ||
274 | @@ -XXX,XX +XXX,XX @@ static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size) | ||
275 | { | ||
276 | TCGv X, shl, shr, shx, sz, zero; | ||
277 | |||
278 | - sz = tcg_const_i32(size); | ||
279 | + sz = tcg_constant_i32(size); | ||
280 | |||
281 | shr = tcg_temp_new(); | ||
282 | shl = tcg_temp_new(); | ||
283 | @@ -XXX,XX +XXX,XX @@ static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size) | ||
284 | tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */ | ||
285 | tcg_gen_subi_i32(shx, shift, 1); /* shx = shift - 1 */ | ||
286 | /* shx = shx < 0 ? size : shx; */ | ||
287 | - zero = tcg_const_i32(0); | ||
288 | + zero = tcg_constant_i32(0); | ||
289 | tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx); | ||
290 | } else { | ||
291 | tcg_gen_mov_i32(shr, shift); /* shr = shift */ | ||
292 | @@ -XXX,XX +XXX,XX @@ static TCGv rotate32_x(TCGv reg, TCGv shift, int left) | ||
293 | |||
294 | /* if shift == 0, register and X are not affected */ | ||
295 | |||
296 | - zero = tcg_const_i32(0); | ||
297 | + zero = tcg_constant_i32(0); | ||
298 | tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X); | ||
299 | tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo); | ||
300 | |||
301 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(rotate_im) | ||
302 | tmp = 8; | ||
303 | } | ||
304 | |||
305 | - shift = tcg_const_i32(tmp); | ||
306 | + shift = tcg_constant_i32(tmp); | ||
307 | if (insn & 8) { | ||
308 | rotate(DREG(insn, 0), shift, left, 32); | ||
309 | } else { | ||
310 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(rotate8_im) | ||
311 | tmp = 8; | ||
312 | } | ||
313 | |||
314 | - shift = tcg_const_i32(tmp); | ||
315 | + shift = tcg_constant_i32(tmp); | ||
316 | if (insn & 8) { | ||
317 | rotate(reg, shift, left, 8); | ||
318 | } else { | ||
319 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(rotate16_im) | ||
320 | tmp = 8; | ||
321 | } | ||
322 | |||
323 | - shift = tcg_const_i32(tmp); | ||
324 | + shift = tcg_constant_i32(tmp); | ||
325 | if (insn & 8) { | ||
326 | rotate(reg, shift, left, 16); | ||
327 | } else { | ||
328 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(rotate_mem) | ||
329 | |||
330 | SRC_EA(env, src, OS_WORD, 0, &addr); | ||
331 | |||
332 | - shift = tcg_const_i32(1); | ||
333 | + shift = tcg_constant_i32(1); | ||
334 | if (insn & 0x0200) { | ||
335 | rotate(src, shift, left, 16); | ||
336 | } else { | ||
337 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(bfext_mem) | ||
338 | if (ext & 0x20) { | ||
339 | len = DREG(ext, 0); | ||
340 | } else { | ||
341 | - len = tcg_const_i32(extract32(ext, 0, 5)); | ||
342 | + len = tcg_constant_i32(extract32(ext, 0, 5)); | ||
343 | } | ||
344 | if (ext & 0x800) { | ||
345 | ofs = DREG(ext, 6); | ||
346 | } else { | ||
347 | - ofs = tcg_const_i32(extract32(ext, 6, 5)); | ||
348 | + ofs = tcg_constant_i32(extract32(ext, 6, 5)); | ||
349 | } | ||
350 | |||
351 | if (is_sign) { | ||
352 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(bfop_mem) | ||
353 | if (ext & 0x20) { | ||
354 | len = DREG(ext, 0); | ||
355 | } else { | ||
356 | - len = tcg_const_i32(extract32(ext, 0, 5)); | ||
357 | + len = tcg_constant_i32(extract32(ext, 0, 5)); | ||
358 | } | ||
359 | if (ext & 0x800) { | ||
360 | ofs = DREG(ext, 6); | ||
361 | } else { | ||
362 | - ofs = tcg_const_i32(extract32(ext, 6, 5)); | ||
363 | + ofs = tcg_constant_i32(extract32(ext, 6, 5)); | ||
364 | } | ||
365 | |||
366 | switch (insn & 0x0f00) { | ||
367 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(bfins_mem) | ||
368 | if (ext & 0x20) { | ||
369 | len = DREG(ext, 0); | ||
370 | } else { | ||
371 | - len = tcg_const_i32(extract32(ext, 0, 5)); | ||
372 | + len = tcg_constant_i32(extract32(ext, 0, 5)); | ||
373 | } | ||
374 | if (ext & 0x800) { | ||
375 | ofs = DREG(ext, 6); | ||
376 | } else { | ||
377 | - ofs = tcg_const_i32(extract32(ext, 6, 5)); | ||
378 | + ofs = tcg_constant_i32(extract32(ext, 6, 5)); | ||
379 | } | ||
380 | |||
381 | gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len); | ||
382 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(move16_mem) | ||
383 | TCGv reg, addr; | ||
384 | |||
385 | reg = AREG(insn, 0); | ||
386 | - addr = tcg_const_i32(read_im32(env, s)); | ||
387 | + addr = tcg_constant_i32(read_im32(env, s)); | ||
388 | |||
389 | if ((insn >> 3) & 1) { | ||
390 | /* MOVE16 (xxx).L, (Ay) */ | ||
391 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(cf_movec) | ||
392 | } else { | ||
393 | reg = DREG(ext, 12); | ||
394 | } | ||
395 | - gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg); | ||
396 | + gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg); | ||
397 | gen_exit_tb(s); | ||
398 | } | ||
399 | |||
400 | DISAS_INSN(m68k_movec) | ||
401 | { | ||
402 | uint16_t ext; | ||
403 | - TCGv reg; | ||
404 | + TCGv reg, creg; | ||
405 | |||
406 | if (IS_USER(s)) { | ||
407 | gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); | ||
408 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(m68k_movec) | ||
409 | } else { | ||
410 | reg = DREG(ext, 12); | ||
411 | } | ||
412 | + creg = tcg_constant_i32(ext & 0xfff); | ||
413 | if (insn & 1) { | ||
414 | - gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg); | ||
415 | + gen_helper_m68k_movec_to(cpu_env, creg, reg); | ||
416 | } else { | ||
417 | - gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff)); | ||
418 | + gen_helper_m68k_movec_from(reg, cpu_env, creg); | ||
419 | } | ||
420 | gen_exit_tb(s); | ||
421 | } | ||
422 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(pflush) | ||
423 | return; | ||
424 | } | ||
425 | |||
426 | - opmode = tcg_const_i32((insn >> 3) & 3); | ||
427 | + opmode = tcg_constant_i32((insn >> 3) & 3); | ||
428 | gen_helper_pflush(cpu_env, AREG(insn, 0), opmode); | ||
429 | } | ||
430 | |||
431 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(ptest) | ||
432 | gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); | ||
433 | return; | ||
434 | } | ||
435 | - is_read = tcg_const_i32((insn >> 5) & 1); | ||
436 | + is_read = tcg_constant_i32((insn >> 5) & 1); | ||
437 | gen_helper_ptest(cpu_env, AREG(insn, 0), is_read); | ||
438 | } | ||
439 | #endif | ||
440 | @@ -XXX,XX +XXX,XX @@ static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s, | ||
441 | gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); | ||
442 | return; | ||
443 | } | ||
444 | - tmp = tcg_const_i32(read_im32(env, s)); | ||
445 | + tmp = tcg_constant_i32(read_im32(env, s)); | ||
446 | gen_store_fcr(s, tmp, mask); | ||
447 | return; | ||
448 | } | ||
449 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(fpu) | ||
450 | case 2: | ||
451 | if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) { | ||
452 | /* fmovecr */ | ||
453 | - TCGv rom_offset = tcg_const_i32(opmode); | ||
454 | + TCGv rom_offset = tcg_constant_i32(opmode); | ||
455 | cpu_dest = gen_fp_ptr(REG(ext, 7)); | ||
456 | gen_helper_fconst(cpu_env, cpu_dest, rom_offset); | ||
457 | return; | ||
458 | @@ -XXX,XX +XXX,XX @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) | ||
459 | { | ||
460 | TCGv fpsr; | ||
461 | |||
462 | - c->v2 = tcg_const_i32(0); | ||
463 | + c->v2 = tcg_constant_i32(0); | ||
464 | /* TODO: Raise BSUN exception. */ | ||
465 | fpsr = tcg_temp_new(); | ||
466 | gen_load_fcr(s, fpsr, M68K_FPSR); | ||
467 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(fsave) | ||
468 | |||
469 | if (m68k_feature(s->env, M68K_FEATURE_M68040)) { | ||
470 | /* always write IDLE */ | ||
471 | - TCGv idle = tcg_const_i32(0x41000000); | ||
472 | + TCGv idle = tcg_constant_i32(0x41000000); | ||
473 | DEST_EA(env, insn, OS_LONG, idle, NULL); | ||
474 | } else { | ||
475 | disas_undef(env, s, insn); | ||
476 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(mac) | ||
477 | /* Skip the accumulate if the value is already saturated. */ | ||
478 | l1 = gen_new_label(); | ||
479 | tmp = tcg_temp_new(); | ||
480 | - gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc)); | ||
481 | + gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc)); | ||
482 | gen_op_jmp_nz32(tmp, l1); | ||
483 | } | ||
484 | #endif | ||
485 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(mac) | ||
486 | tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp); | ||
487 | |||
488 | if (s->env->macsr & MACSR_FI) | ||
489 | - gen_helper_macsatf(cpu_env, tcg_const_i32(acc)); | ||
490 | + gen_helper_macsatf(cpu_env, tcg_constant_i32(acc)); | ||
491 | else if (s->env->macsr & MACSR_SU) | ||
492 | - gen_helper_macsats(cpu_env, tcg_const_i32(acc)); | ||
493 | + gen_helper_macsats(cpu_env, tcg_constant_i32(acc)); | ||
494 | else | ||
495 | - gen_helper_macsatu(cpu_env, tcg_const_i32(acc)); | ||
496 | + gen_helper_macsatu(cpu_env, tcg_constant_i32(acc)); | ||
497 | |||
498 | #if 0 | ||
499 | /* Disabled because conditional branches clobber temporary vars. */ | ||
500 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(mac) | ||
501 | /* Skip the accumulate if the value is already saturated. */ | ||
502 | l1 = gen_new_label(); | ||
503 | tmp = tcg_temp_new(); | ||
504 | - gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc)); | ||
505 | + gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc)); | ||
506 | gen_op_jmp_nz32(tmp, l1); | ||
507 | } | ||
508 | #endif | ||
509 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(mac) | ||
510 | else | ||
511 | tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp); | ||
512 | if (s->env->macsr & MACSR_FI) | ||
513 | - gen_helper_macsatf(cpu_env, tcg_const_i32(acc)); | ||
514 | + gen_helper_macsatf(cpu_env, tcg_constant_i32(acc)); | ||
515 | else if (s->env->macsr & MACSR_SU) | ||
516 | - gen_helper_macsats(cpu_env, tcg_const_i32(acc)); | ||
517 | + gen_helper_macsats(cpu_env, tcg_constant_i32(acc)); | ||
518 | else | ||
519 | - gen_helper_macsatu(cpu_env, tcg_const_i32(acc)); | ||
520 | + gen_helper_macsatu(cpu_env, tcg_constant_i32(acc)); | ||
521 | #if 0 | ||
522 | /* Disabled because conditional branches clobber temporary vars. */ | ||
523 | if (l1 != -1) | ||
524 | gen_set_label(l1); | ||
525 | #endif | ||
526 | } | ||
527 | - gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc)); | ||
528 | + gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc)); | ||
529 | |||
530 | if (insn & 0x30) { | ||
531 | TCGv rw; | ||
532 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(move_mac) | ||
533 | int src; | ||
534 | TCGv dest; | ||
535 | src = insn & 3; | ||
536 | - dest = tcg_const_i32((insn >> 9) & 3); | ||
537 | - gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src)); | ||
538 | + dest = tcg_constant_i32((insn >> 9) & 3); | ||
539 | + gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src)); | ||
540 | gen_mac_clear_flags(); | ||
541 | gen_helper_mac_set_flags(cpu_env, dest); | ||
542 | } | ||
543 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(from_mext) | ||
544 | TCGv reg; | ||
545 | TCGv acc; | ||
546 | reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); | ||
547 | - acc = tcg_const_i32((insn & 0x400) ? 2 : 0); | ||
548 | + acc = tcg_constant_i32((insn & 0x400) ? 2 : 0); | ||
549 | if (s->env->macsr & MACSR_FI) | ||
550 | gen_helper_get_mac_extf(reg, cpu_env, acc); | ||
551 | else | ||
552 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(to_mac) | ||
553 | } | ||
554 | tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum)); | ||
555 | gen_mac_clear_flags(); | ||
556 | - gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum)); | ||
557 | + gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum)); | ||
558 | } | ||
559 | |||
560 | DISAS_INSN(to_macsr) | ||
561 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(to_mext) | ||
562 | TCGv val; | ||
563 | TCGv acc; | ||
564 | SRC_EA(env, val, OS_LONG, 0, NULL); | ||
565 | - acc = tcg_const_i32((insn & 0x400) ? 2 : 0); | ||
566 | + acc = tcg_constant_i32((insn & 0x400) ? 2 : 0); | ||
567 | if (s->env->macsr & MACSR_FI) | ||
568 | gen_helper_set_mac_extf(cpu_env, val, acc); | ||
569 | else if (s->env->macsr & MACSR_SU) | ||
570 | -- | ||
571 | 2.34.1 | ||
572 | |||
573 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | Common subroutine for LDL and LWL. | ||
2 | Use tcg_constant_tl instead of tcg_const_tl, which also removes the need for the t2 temporary. | ||
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/mips/tcg/translate.c | 106 ++++++++++++------------------------ | ||
8 | 1 file changed, 36 insertions(+), 70 deletions(-) | ||
9 | |||
10 | diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/mips/tcg/translate.c | ||
13 | +++ b/target/mips/tcg/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static target_ulong pc_relative_pc(DisasContext *ctx) | ||
15 | return pc; | ||
16 | } | ||
17 | |||
18 | +/* LWL or LDL, depending on MemOp. */ | ||
19 | +static void gen_lxl(DisasContext *ctx, TCGv reg, TCGv addr, | ||
20 | + int mem_idx, MemOp mop) | ||
21 | +{ | ||
22 | + int sizem1 = memop_size(mop) - 1; | ||
23 | + TCGv t0 = tcg_temp_new(); | ||
24 | + TCGv t1 = tcg_temp_new(); | ||
25 | + | ||
26 | + /* | ||
27 | + * Do a byte access to possibly trigger a page | ||
28 | + * fault with the unaligned address. | ||
29 | + */ | ||
30 | + tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB); | ||
31 | + tcg_gen_andi_tl(t1, addr, sizem1); | ||
32 | + if (!cpu_is_bigendian(ctx)) { | ||
33 | + tcg_gen_xori_tl(t1, t1, sizem1); | ||
34 | + } | ||
35 | + tcg_gen_shli_tl(t1, t1, 3); | ||
36 | + tcg_gen_andi_tl(t0, addr, ~sizem1); | ||
37 | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mop); | ||
38 | + tcg_gen_shl_tl(t0, t0, t1); | ||
39 | + tcg_gen_shl_tl(t1, tcg_constant_tl(-1), t1); | ||
40 | + tcg_gen_andc_tl(t1, reg, t1); | ||
41 | + tcg_gen_or_tl(reg, t0, t1); | ||
42 | +} | ||
43 | + | ||
44 | /* Load */ | ||
45 | static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
46 | int rt, int base, int offset) | ||
47 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
48 | break; | ||
49 | case OPC_LDL: | ||
50 | t1 = tcg_temp_new(); | ||
51 | - /* | ||
52 | - * Do a byte access to possibly trigger a page | ||
53 | - * fault with the unaligned address. | ||
54 | - */ | ||
55 | - tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); | ||
56 | - tcg_gen_andi_tl(t1, t0, 7); | ||
57 | - if (!cpu_is_bigendian(ctx)) { | ||
58 | - tcg_gen_xori_tl(t1, t1, 7); | ||
59 | - } | ||
60 | - tcg_gen_shli_tl(t1, t1, 3); | ||
61 | - tcg_gen_andi_tl(t0, t0, ~7); | ||
62 | - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); | ||
63 | - tcg_gen_shl_tl(t0, t0, t1); | ||
64 | - t2 = tcg_const_tl(-1); | ||
65 | - tcg_gen_shl_tl(t2, t2, t1); | ||
66 | gen_load_gpr(t1, rt); | ||
67 | - tcg_gen_andc_tl(t1, t1, t2); | ||
68 | - tcg_gen_or_tl(t0, t0, t1); | ||
69 | - gen_store_gpr(t0, rt); | ||
70 | + gen_lxl(ctx, t1, t0, mem_idx, MO_TEUQ); | ||
71 | + gen_store_gpr(t1, rt); | ||
72 | break; | ||
73 | case OPC_LDR: | ||
74 | t1 = tcg_temp_new(); | ||
75 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
76 | /* fall through */ | ||
77 | case OPC_LWL: | ||
78 | t1 = tcg_temp_new(); | ||
79 | - /* | ||
80 | - * Do a byte access to possibly trigger a page | ||
81 | - * fault with the unaligned address. | ||
82 | - */ | ||
83 | - tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); | ||
84 | - tcg_gen_andi_tl(t1, t0, 3); | ||
85 | - if (!cpu_is_bigendian(ctx)) { | ||
86 | - tcg_gen_xori_tl(t1, t1, 3); | ||
87 | - } | ||
88 | - tcg_gen_shli_tl(t1, t1, 3); | ||
89 | - tcg_gen_andi_tl(t0, t0, ~3); | ||
90 | - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL); | ||
91 | - tcg_gen_shl_tl(t0, t0, t1); | ||
92 | - t2 = tcg_const_tl(-1); | ||
93 | - tcg_gen_shl_tl(t2, t2, t1); | ||
94 | gen_load_gpr(t1, rt); | ||
95 | - tcg_gen_andc_tl(t1, t1, t2); | ||
96 | - tcg_gen_or_tl(t0, t0, t1); | ||
97 | - tcg_gen_ext32s_tl(t0, t0); | ||
98 | - gen_store_gpr(t0, rt); | ||
99 | + gen_lxl(ctx, t1, t0, mem_idx, MO_TEUL); | ||
100 | + tcg_gen_ext32s_tl(t1, t1); | ||
101 | + gen_store_gpr(t1, rt); | ||
102 | break; | ||
103 | case OPC_LWRE: | ||
104 | mem_idx = MIPS_HFLAG_UM; | ||
105 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
106 | case OPC_GSLWLC1: | ||
107 | check_cp1_enabled(ctx); | ||
108 | gen_base_offset_addr(ctx, t0, rs, shf_offset); | ||
109 | - t1 = tcg_temp_new(); | ||
110 | - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); | ||
111 | - tcg_gen_andi_tl(t1, t0, 3); | ||
112 | - if (!cpu_is_bigendian(ctx)) { | ||
113 | - tcg_gen_xori_tl(t1, t1, 3); | ||
114 | - } | ||
115 | - tcg_gen_shli_tl(t1, t1, 3); | ||
116 | - tcg_gen_andi_tl(t0, t0, ~3); | ||
117 | - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL); | ||
118 | - tcg_gen_shl_tl(t0, t0, t1); | ||
119 | - t2 = tcg_const_tl(-1); | ||
120 | - tcg_gen_shl_tl(t2, t2, t1); | ||
121 | fp0 = tcg_temp_new_i32(); | ||
122 | gen_load_fpr32(ctx, fp0, rt); | ||
123 | + t1 = tcg_temp_new(); | ||
124 | tcg_gen_ext_i32_tl(t1, fp0); | ||
125 | - tcg_gen_andc_tl(t1, t1, t2); | ||
126 | - tcg_gen_or_tl(t0, t0, t1); | ||
127 | -#if defined(TARGET_MIPS64) | ||
128 | - tcg_gen_extrl_i64_i32(fp0, t0); | ||
129 | -#else | ||
130 | - tcg_gen_ext32s_tl(fp0, t0); | ||
131 | -#endif | ||
132 | + gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUL); | ||
133 | + tcg_gen_trunc_tl_i32(fp0, t1); | ||
134 | gen_store_fpr32(ctx, fp0, rt); | ||
135 | break; | ||
136 | case OPC_GSLWRC1: | ||
137 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
138 | check_cp1_enabled(ctx); | ||
139 | gen_base_offset_addr(ctx, t0, rs, shf_offset); | ||
140 | t1 = tcg_temp_new(); | ||
141 | - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); | ||
142 | - tcg_gen_andi_tl(t1, t0, 7); | ||
143 | - if (!cpu_is_bigendian(ctx)) { | ||
144 | - tcg_gen_xori_tl(t1, t1, 7); | ||
145 | - } | ||
146 | - tcg_gen_shli_tl(t1, t1, 3); | ||
147 | - tcg_gen_andi_tl(t0, t0, ~7); | ||
148 | - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ); | ||
149 | - tcg_gen_shl_tl(t0, t0, t1); | ||
150 | - t2 = tcg_const_tl(-1); | ||
151 | - tcg_gen_shl_tl(t2, t2, t1); | ||
152 | gen_load_fpr64(ctx, t1, rt); | ||
153 | - tcg_gen_andc_tl(t1, t1, t2); | ||
154 | - tcg_gen_or_tl(t0, t0, t1); | ||
155 | - gen_store_fpr64(ctx, t0, rt); | ||
156 | + gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUQ); | ||
157 | + gen_store_fpr64(ctx, t1, rt); | ||
158 | break; | ||
159 | case OPC_GSLDRC1: | ||
160 | check_cp1_enabled(ctx); | ||
161 | -- | ||
162 | 2.34.1 | ||
163 | |||
164 | diff view generated by jsdifflib |
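As a side note (my own illustration, not part of the series): the merge that the new gen_lxl helper emits is easier to see in plain C. A stand-alone model of the little-endian LWL case, leaving out the byte probe that only exists to raise a possible fault on the unaligned address:

    #include <stdint.h>

    /* Model of the gen_lxl merge for MO_TEUL on a little-endian guest: the
     * loaded bytes replace the high end of the register, the low bytes of
     * the old register value are preserved. */
    static uint32_t lwl_merge_le(uint32_t reg, uint32_t mem_word, uint32_t addr)
    {
        unsigned shift = ((addr & 3) ^ 3) * 8;            /* 0, 8, 16 or 24 */
        uint32_t from_mem = mem_word << shift;            /* bytes loaded    */
        uint32_t kept = reg & ~((uint32_t)-1 << shift);   /* bytes preserved */
        return from_mem | kept;
    }

Here mem_word is the 32-bit word read from addr & ~3, and the (-1 << shift) mask is what the helper builds with tcg_constant_tl(-1) and tcg_gen_andc_tl.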
1 | Now that we have collected all of the page data into | 1 | Common subroutine for LDR and LWR. |
---|---|---|---|
2 | CPUTLBEntryFull, provide an interface to record that | 2 | Use tcg_constant_tl(~1) for the mask instead of tcg_const_tl of the width-specific 0xfffffffe / 0xfffffffffffffffe constants. |
3 | all in one go, instead of using 4 arguments. This interface | ||
4 | allows CPUTLBEntryFull to be extended without having to | ||
5 | change the number of arguments. | ||
6 | 3 | ||
7 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 6 | --- |
12 | include/exec/cpu-defs.h | 14 +++++++++++ | 7 | target/mips/tcg/translate.c | 116 +++++++++++++----------------------- |
13 | include/exec/exec-all.h | 22 ++++++++++++++++++ | 8 | 1 file changed, 40 insertions(+), 76 deletions(-) |
14 | accel/tcg/cputlb.c | 51 ++++++++++++++++++++++++++--------------- | ||
15 | 3 files changed, 69 insertions(+), 18 deletions(-) | ||
16 | 9 | ||
17 | diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h | 10 | diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c |
18 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/include/exec/cpu-defs.h | 12 | --- a/target/mips/tcg/translate.c |
20 | +++ b/include/exec/cpu-defs.h | 13 | +++ b/target/mips/tcg/translate.c |
21 | @@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntryFull { | 14 | @@ -XXX,XX +XXX,XX @@ static void gen_lxl(DisasContext *ctx, TCGv reg, TCGv addr, |
22 | * + the offset within the target MemoryRegion (otherwise) | 15 | tcg_gen_or_tl(reg, t0, t1); |
23 | */ | 16 | } |
24 | hwaddr xlat_section; | 17 | |
18 | +/* LWR or LDR, depending on MemOp. */ | ||
19 | +static void gen_lxr(DisasContext *ctx, TCGv reg, TCGv addr, | ||
20 | + int mem_idx, MemOp mop) | ||
21 | +{ | ||
22 | + int size = memop_size(mop); | ||
23 | + int sizem1 = size - 1; | ||
24 | + TCGv t0 = tcg_temp_new(); | ||
25 | + TCGv t1 = tcg_temp_new(); | ||
25 | + | 26 | + |
26 | + /* | 27 | + /* |
27 | + * @phys_addr contains the physical address in the address space | 28 | + * Do a byte access to possibly trigger a page |
28 | + * given by cpu_asidx_from_attrs(cpu, @attrs). | 29 | + * fault with the unaligned address. |
29 | + */ | 30 | + */ |
30 | + hwaddr phys_addr; | 31 | + tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB); |
31 | + | 32 | + tcg_gen_andi_tl(t1, addr, sizem1); |
32 | + /* @attrs contains the memory transaction attributes for the page. */ | 33 | + if (cpu_is_bigendian(ctx)) { |
33 | MemTxAttrs attrs; | 34 | + tcg_gen_xori_tl(t1, t1, sizem1); |
34 | + | 35 | + } |
35 | + /* @prot contains the complete protections for the page. */ | 36 | + tcg_gen_shli_tl(t1, t1, 3); |
36 | + uint8_t prot; | 37 | + tcg_gen_andi_tl(t0, addr, ~sizem1); |
37 | + | 38 | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mop); |
38 | + /* @lg_page_size contains the log2 of the page size. */ | 39 | + tcg_gen_shr_tl(t0, t0, t1); |
39 | + uint8_t lg_page_size; | 40 | + tcg_gen_xori_tl(t1, t1, size * 8 - 1); |
40 | } CPUTLBEntryFull; | 41 | + tcg_gen_shl_tl(t1, tcg_constant_tl(~1), t1); |
41 | 42 | + tcg_gen_and_tl(t1, reg, t1); | |
42 | /* | 43 | + tcg_gen_or_tl(reg, t0, t1); |
43 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | ||
44 | index XXXXXXX..XXXXXXX 100644 | ||
45 | --- a/include/exec/exec-all.h | ||
46 | +++ b/include/exec/exec-all.h | ||
47 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, | ||
48 | uint16_t idxmap, | ||
49 | unsigned bits); | ||
50 | |||
51 | +/** | ||
52 | + * tlb_set_page_full: | ||
53 | + * @cpu: CPU context | ||
54 | + * @mmu_idx: mmu index of the tlb to modify | ||
55 | + * @vaddr: virtual address of the entry to add | ||
56 | + * @full: the details of the tlb entry | ||
57 | + * | ||
58 | + * Add an entry to @cpu tlb index @mmu_idx. All of the fields of | ||
59 | + * @full must be filled, except for xlat_section, and constitute | ||
60 | + * the complete description of the translated page. | ||
61 | + * | ||
62 | + * This is generally called by the target tlb_fill function after | ||
63 | + * having performed a successful page table walk to find the physical | ||
64 | + * address and attributes for the translation. | ||
65 | + * | ||
66 | + * At most one entry for a given virtual address is permitted. Only a | ||
67 | + * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only | ||
68 | + * used by tlb_flush_page. | ||
69 | + */ | ||
70 | +void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr, | ||
71 | + CPUTLBEntryFull *full); | ||
72 | + | ||
73 | /** | ||
74 | * tlb_set_page_with_attrs: | ||
75 | * @cpu: CPU to add this TLB entry for | ||
76 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
77 | index XXXXXXX..XXXXXXX 100644 | ||
78 | --- a/accel/tcg/cputlb.c | ||
79 | +++ b/accel/tcg/cputlb.c | ||
80 | @@ -XXX,XX +XXX,XX @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx, | ||
81 | env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; | ||
82 | } | ||
83 | |||
84 | -/* Add a new TLB entry. At most one entry for a given virtual address | ||
85 | +/* | ||
86 | + * Add a new TLB entry. At most one entry for a given virtual address | ||
87 | * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the | ||
88 | * supplied size is only used by tlb_flush_page. | ||
89 | * | ||
90 | * Called from TCG-generated code, which is under an RCU read-side | ||
91 | * critical section. | ||
92 | */ | ||
93 | -void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | ||
94 | - hwaddr paddr, MemTxAttrs attrs, int prot, | ||
95 | - int mmu_idx, target_ulong size) | ||
96 | +void tlb_set_page_full(CPUState *cpu, int mmu_idx, | ||
97 | + target_ulong vaddr, CPUTLBEntryFull *full) | ||
98 | { | ||
99 | CPUArchState *env = cpu->env_ptr; | ||
100 | CPUTLB *tlb = env_tlb(env); | ||
101 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | ||
102 | CPUTLBEntry *te, tn; | ||
103 | hwaddr iotlb, xlat, sz, paddr_page; | ||
104 | target_ulong vaddr_page; | ||
105 | - int asidx = cpu_asidx_from_attrs(cpu, attrs); | ||
106 | - int wp_flags; | ||
107 | + int asidx, wp_flags, prot; | ||
108 | bool is_ram, is_romd; | ||
109 | |||
110 | assert_cpu_is_self(cpu); | ||
111 | |||
112 | - if (size <= TARGET_PAGE_SIZE) { | ||
113 | + if (full->lg_page_size <= TARGET_PAGE_BITS) { | ||
114 | sz = TARGET_PAGE_SIZE; | ||
115 | } else { | ||
116 | - tlb_add_large_page(env, mmu_idx, vaddr, size); | ||
117 | - sz = size; | ||
118 | + sz = (hwaddr)1 << full->lg_page_size; | ||
119 | + tlb_add_large_page(env, mmu_idx, vaddr, sz); | ||
120 | } | ||
121 | vaddr_page = vaddr & TARGET_PAGE_MASK; | ||
122 | - paddr_page = paddr & TARGET_PAGE_MASK; | ||
123 | + paddr_page = full->phys_addr & TARGET_PAGE_MASK; | ||
124 | |||
125 | + prot = full->prot; | ||
126 | + asidx = cpu_asidx_from_attrs(cpu, full->attrs); | ||
127 | section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, | ||
128 | - &xlat, &sz, attrs, &prot); | ||
129 | + &xlat, &sz, full->attrs, &prot); | ||
130 | assert(sz >= TARGET_PAGE_SIZE); | ||
131 | |||
132 | tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx | ||
133 | " prot=%x idx=%d\n", | ||
134 | - vaddr, paddr, prot, mmu_idx); | ||
135 | + vaddr, full->phys_addr, prot, mmu_idx); | ||
136 | |||
137 | address = vaddr_page; | ||
138 | - if (size < TARGET_PAGE_SIZE) { | ||
139 | + if (full->lg_page_size < TARGET_PAGE_BITS) { | ||
140 | /* Repeat the MMU check and TLB fill on every access. */ | ||
141 | address |= TLB_INVALID_MASK; | ||
142 | } | ||
143 | - if (attrs.byte_swap) { | ||
144 | + if (full->attrs.byte_swap) { | ||
145 | address |= TLB_BSWAP; | ||
146 | } | ||
147 | |||
148 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | ||
149 | * subtract here is that of the page base, and not the same as the | ||
150 | * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). | ||
151 | */ | ||
152 | + desc->fulltlb[index] = *full; | ||
153 | desc->fulltlb[index].xlat_section = iotlb - vaddr_page; | ||
154 | - desc->fulltlb[index].attrs = attrs; | ||
155 | + desc->fulltlb[index].phys_addr = paddr_page; | ||
156 | + desc->fulltlb[index].prot = prot; | ||
157 | |||
158 | /* Now calculate the new entry */ | ||
159 | tn.addend = addend - vaddr_page; | ||
160 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | ||
161 | qemu_spin_unlock(&tlb->c.lock); | ||
162 | } | ||
163 | |||
164 | -/* Add a new TLB entry, but without specifying the memory | ||
165 | - * transaction attributes to be used. | ||
166 | - */ | ||
167 | +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | ||
168 | + hwaddr paddr, MemTxAttrs attrs, int prot, | ||
169 | + int mmu_idx, target_ulong size) | ||
170 | +{ | ||
171 | + CPUTLBEntryFull full = { | ||
172 | + .phys_addr = paddr, | ||
173 | + .attrs = attrs, | ||
174 | + .prot = prot, | ||
175 | + .lg_page_size = ctz64(size) | ||
176 | + }; | ||
177 | + | ||
178 | + assert(is_power_of_2(size)); | ||
179 | + tlb_set_page_full(cpu, mmu_idx, vaddr, &full); | ||
180 | +} | 44 | +} |
181 | + | 45 | + |
182 | void tlb_set_page(CPUState *cpu, target_ulong vaddr, | 46 | /* Load */ |
183 | hwaddr paddr, int prot, | 47 | static void gen_ld(DisasContext *ctx, uint32_t opc, |
184 | int mmu_idx, target_ulong size) | 48 | int rt, int base, int offset) |
49 | { | ||
50 | - TCGv t0, t1, t2; | ||
51 | + TCGv t0, t1; | ||
52 | int mem_idx = ctx->mem_idx; | ||
53 | |||
54 | if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F | | ||
55 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
56 | break; | ||
57 | case OPC_LDR: | ||
58 | t1 = tcg_temp_new(); | ||
59 | - /* | ||
60 | - * Do a byte access to possibly trigger a page | ||
61 | - * fault with the unaligned address. | ||
62 | - */ | ||
63 | - tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); | ||
64 | - tcg_gen_andi_tl(t1, t0, 7); | ||
65 | - if (cpu_is_bigendian(ctx)) { | ||
66 | - tcg_gen_xori_tl(t1, t1, 7); | ||
67 | - } | ||
68 | - tcg_gen_shli_tl(t1, t1, 3); | ||
69 | - tcg_gen_andi_tl(t0, t0, ~7); | ||
70 | - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); | ||
71 | - tcg_gen_shr_tl(t0, t0, t1); | ||
72 | - tcg_gen_xori_tl(t1, t1, 63); | ||
73 | - t2 = tcg_const_tl(0xfffffffffffffffeull); | ||
74 | - tcg_gen_shl_tl(t2, t2, t1); | ||
75 | gen_load_gpr(t1, rt); | ||
76 | - tcg_gen_and_tl(t1, t1, t2); | ||
77 | - tcg_gen_or_tl(t0, t0, t1); | ||
78 | - gen_store_gpr(t0, rt); | ||
79 | + gen_lxr(ctx, t1, t0, mem_idx, MO_TEUQ); | ||
80 | + gen_store_gpr(t1, rt); | ||
81 | break; | ||
82 | case OPC_LDPC: | ||
83 | t1 = tcg_const_tl(pc_relative_pc(ctx)); | ||
84 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
85 | /* fall through */ | ||
86 | case OPC_LWR: | ||
87 | t1 = tcg_temp_new(); | ||
88 | - /* | ||
89 | - * Do a byte access to possibly trigger a page | ||
90 | - * fault with the unaligned address. | ||
91 | - */ | ||
92 | - tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB); | ||
93 | - tcg_gen_andi_tl(t1, t0, 3); | ||
94 | - if (cpu_is_bigendian(ctx)) { | ||
95 | - tcg_gen_xori_tl(t1, t1, 3); | ||
96 | - } | ||
97 | - tcg_gen_shli_tl(t1, t1, 3); | ||
98 | - tcg_gen_andi_tl(t0, t0, ~3); | ||
99 | - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL); | ||
100 | - tcg_gen_shr_tl(t0, t0, t1); | ||
101 | - tcg_gen_xori_tl(t1, t1, 31); | ||
102 | - t2 = tcg_const_tl(0xfffffffeull); | ||
103 | - tcg_gen_shl_tl(t2, t2, t1); | ||
104 | gen_load_gpr(t1, rt); | ||
105 | - tcg_gen_and_tl(t1, t1, t2); | ||
106 | - tcg_gen_or_tl(t0, t0, t1); | ||
107 | - tcg_gen_ext32s_tl(t0, t0); | ||
108 | - gen_store_gpr(t0, rt); | ||
109 | + gen_lxr(ctx, t1, t0, mem_idx, MO_TEUL); | ||
110 | + tcg_gen_ext32s_tl(t1, t1); | ||
111 | + gen_store_gpr(t1, rt); | ||
112 | break; | ||
113 | case OPC_LLE: | ||
114 | mem_idx = MIPS_HFLAG_UM; | ||
115 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) | ||
116 | static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
117 | int rs, int rd) | ||
118 | { | ||
119 | - TCGv t0, t1, t2; | ||
120 | + TCGv t0, t1; | ||
121 | TCGv_i32 fp0; | ||
122 | #if defined(TARGET_MIPS64) | ||
123 | int lsq_rt1 = ctx->opcode & 0x1f; | ||
124 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
125 | case OPC_GSLWRC1: | ||
126 | check_cp1_enabled(ctx); | ||
127 | gen_base_offset_addr(ctx, t0, rs, shf_offset); | ||
128 | - t1 = tcg_temp_new(); | ||
129 | - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); | ||
130 | - tcg_gen_andi_tl(t1, t0, 3); | ||
131 | - if (cpu_is_bigendian(ctx)) { | ||
132 | - tcg_gen_xori_tl(t1, t1, 3); | ||
133 | - } | ||
134 | - tcg_gen_shli_tl(t1, t1, 3); | ||
135 | - tcg_gen_andi_tl(t0, t0, ~3); | ||
136 | - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL); | ||
137 | - tcg_gen_shr_tl(t0, t0, t1); | ||
138 | - tcg_gen_xori_tl(t1, t1, 31); | ||
139 | - t2 = tcg_const_tl(0xfffffffeull); | ||
140 | - tcg_gen_shl_tl(t2, t2, t1); | ||
141 | fp0 = tcg_temp_new_i32(); | ||
142 | gen_load_fpr32(ctx, fp0, rt); | ||
143 | + t1 = tcg_temp_new(); | ||
144 | tcg_gen_ext_i32_tl(t1, fp0); | ||
145 | - tcg_gen_and_tl(t1, t1, t2); | ||
146 | - tcg_gen_or_tl(t0, t0, t1); | ||
147 | -#if defined(TARGET_MIPS64) | ||
148 | - tcg_gen_extrl_i64_i32(fp0, t0); | ||
149 | -#else | ||
150 | - tcg_gen_ext32s_tl(fp0, t0); | ||
151 | -#endif | ||
152 | + gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUL); | ||
153 | + tcg_gen_trunc_tl_i32(fp0, t1); | ||
154 | gen_store_fpr32(ctx, fp0, rt); | ||
155 | break; | ||
156 | #if defined(TARGET_MIPS64) | ||
157 | @@ -XXX,XX +XXX,XX @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, | ||
158 | check_cp1_enabled(ctx); | ||
159 | gen_base_offset_addr(ctx, t0, rs, shf_offset); | ||
160 | t1 = tcg_temp_new(); | ||
161 | - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB); | ||
162 | - tcg_gen_andi_tl(t1, t0, 7); | ||
163 | - if (cpu_is_bigendian(ctx)) { | ||
164 | - tcg_gen_xori_tl(t1, t1, 7); | ||
165 | - } | ||
166 | - tcg_gen_shli_tl(t1, t1, 3); | ||
167 | - tcg_gen_andi_tl(t0, t0, ~7); | ||
168 | - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ); | ||
169 | - tcg_gen_shr_tl(t0, t0, t1); | ||
170 | - tcg_gen_xori_tl(t1, t1, 63); | ||
171 | - t2 = tcg_const_tl(0xfffffffffffffffeull); | ||
172 | - tcg_gen_shl_tl(t2, t2, t1); | ||
173 | gen_load_fpr64(ctx, t1, rt); | ||
174 | - tcg_gen_and_tl(t1, t1, t2); | ||
175 | - tcg_gen_or_tl(t0, t0, t1); | ||
176 | - gen_store_fpr64(ctx, t0, rt); | ||
177 | + gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUQ); | ||
178 | + gen_store_fpr64(ctx, t1, rt); | ||
179 | break; | ||
180 | #endif | ||
181 | default: | ||
185 | -- | 182 | -- |
186 | 2.34.1 | 183 | 2.34.1 |
187 | 184 | ||
188 | 185 | diff view generated by jsdifflib |
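The LWR/LDR direction is symmetric; again purely as an illustration (mine, not from the series), a little-endian C model of the 32-bit case, showing how a single shifted ~1 stands in for the two width-specific 0xfffffffe / 0xfffffffffffffffe constants:

    #include <stdint.h>

    /* Model of the gen_lxr merge for MO_TEUL on a little-endian guest: the
     * loaded bytes land at the low end of the register, the remaining high
     * bytes of the old value are kept via the (~1 << n) mask. */
    static uint32_t lwr_merge_le(uint32_t reg, uint32_t mem_word, uint32_t addr)
    {
        unsigned shift = (addr & 3) * 8;                  /* 0, 8, 16 or 24 */
        uint32_t from_mem = mem_word >> shift;
        uint32_t kept = reg & ((uint32_t)~1 << (shift ^ 31));
        return from_mem | kept;
    }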
New patch | |||
---|---|---|---|
1 | Allocate a separate temp for the loaded value, since the address argument is now a read-only constant. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/mips/tcg/translate.c | 4 ++-- | ||
7 | 1 file changed, 2 insertions(+), 2 deletions(-) | ||
8 | |||
9 | diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/mips/tcg/translate.c | ||
12 | +++ b/target/mips/tcg/translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg) | ||
14 | static inline void gen_r6_ld(target_long addr, int reg, int memidx, | ||
15 | MemOp memop) | ||
16 | { | ||
17 | - TCGv t0 = tcg_const_tl(addr); | ||
18 | - tcg_gen_qemu_ld_tl(t0, t0, memidx, memop); | ||
19 | + TCGv t0 = tcg_temp_new(); | ||
20 | + tcg_gen_qemu_ld_tl(t0, tcg_constant_tl(addr), memidx, memop); | ||
21 | gen_store_gpr(t0, reg); | ||
22 | } | ||
23 | |||
24 | -- | ||
25 | 2.34.1 | ||
26 | |||
27 | diff view generated by jsdifflib |
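Some background on the one-line rationale above, with the caveat that this is my summary rather than text from the series: tcg_const_* allocated a fresh, writable temporary initialised to the given value, so the old code could load straight into it, whereas tcg_constant_* returns a pooled, read-only value that may only appear as an op input. Anything that will be overwritten therefore needs its own temporary, which is the shape the patch adopts:

    /* Old style: the constant was an ordinary temp, so it doubled as the
     * load destination:
     *     TCGv t0 = tcg_const_tl(addr);
     *     tcg_gen_qemu_ld_tl(t0, t0, memidx, memop);
     * New style: the constant is read-only, so the result gets its own temp. */
    TCGv t0 = tcg_temp_new();
    tcg_gen_qemu_ld_tl(t0, tcg_constant_tl(addr), memidx, memop);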
New patch | |||
---|---|---|---|
1 | All remaining uses are strictly read-only, so these temporaries can become pooled tcg_constant_* values. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/mips/tcg/mxu_translate.c | 4 +- | ||
7 | target/mips/tcg/translate.c | 56 +++++++++++------------ | ||
8 | target/mips/tcg/tx79_translate.c | 4 +- | ||
9 | target/mips/tcg/micromips_translate.c.inc | 4 +- | ||
10 | target/mips/tcg/nanomips_translate.c.inc | 16 ++++--- | ||
11 | 5 files changed, 43 insertions(+), 41 deletions(-) | ||
12 | |||
13 | diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/mips/tcg/mxu_translate.c | ||
16 | +++ b/target/mips/tcg/mxu_translate.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static void gen_mxu_D16MAX_D16MIN(DisasContext *ctx) | ||
18 | uint32_t XRx = XRb ? XRb : XRc; | ||
19 | /* ...and do half-word-wise max/min with one operand 0 */ | ||
20 | TCGv_i32 t0 = tcg_temp_new(); | ||
21 | - TCGv_i32 t1 = tcg_const_i32(0); | ||
22 | + TCGv_i32 t1 = tcg_constant_i32(0); | ||
23 | |||
24 | /* the left half-word first */ | ||
25 | tcg_gen_andi_i32(t0, mxu_gpr[XRx - 1], 0xFFFF0000); | ||
26 | @@ -XXX,XX +XXX,XX @@ static void gen_mxu_Q8MAX_Q8MIN(DisasContext *ctx) | ||
27 | uint32_t XRx = XRb ? XRb : XRc; | ||
28 | /* ...and do byte-wise max/min with one operand 0 */ | ||
29 | TCGv_i32 t0 = tcg_temp_new(); | ||
30 | - TCGv_i32 t1 = tcg_const_i32(0); | ||
31 | + TCGv_i32 t1 = tcg_constant_i32(0); | ||
32 | int32_t i; | ||
33 | |||
34 | /* the leftmost byte (byte 3) first */ | ||
35 | diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c | ||
36 | index XXXXXXX..XXXXXXX 100644 | ||
37 | --- a/target/mips/tcg/translate.c | ||
38 | +++ b/target/mips/tcg/translate.c | ||
39 | @@ -XXX,XX +XXX,XX @@ static void gen_ld(DisasContext *ctx, uint32_t opc, | ||
40 | gen_store_gpr(t1, rt); | ||
41 | break; | ||
42 | case OPC_LDPC: | ||
43 | - t1 = tcg_const_tl(pc_relative_pc(ctx)); | ||
44 | + t1 = tcg_constant_tl(pc_relative_pc(ctx)); | ||
45 | gen_op_addr_add(ctx, t0, t0, t1); | ||
46 | tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); | ||
47 | gen_store_gpr(t0, rt); | ||
48 | break; | ||
49 | #endif | ||
50 | case OPC_LWPC: | ||
51 | - t1 = tcg_const_tl(pc_relative_pc(ctx)); | ||
52 | + t1 = tcg_constant_tl(pc_relative_pc(ctx)); | ||
53 | gen_op_addr_add(ctx, t0, t0, t1); | ||
54 | tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL); | ||
55 | gen_store_gpr(t0, rt); | ||
56 | @@ -XXX,XX +XXX,XX @@ static void gen_cond_move(DisasContext *ctx, uint32_t opc, | ||
57 | |||
58 | t0 = tcg_temp_new(); | ||
59 | gen_load_gpr(t0, rt); | ||
60 | - t1 = tcg_const_tl(0); | ||
61 | + t1 = tcg_constant_tl(0); | ||
62 | t2 = tcg_temp_new(); | ||
63 | gen_load_gpr(t2, rs); | ||
64 | switch (opc) { | ||
65 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
66 | break; | ||
67 | case R6_OPC_DIVU: | ||
68 | { | ||
69 | - TCGv t2 = tcg_const_tl(0); | ||
70 | - TCGv t3 = tcg_const_tl(1); | ||
71 | + TCGv t2 = tcg_constant_tl(0); | ||
72 | + TCGv t3 = tcg_constant_tl(1); | ||
73 | tcg_gen_ext32u_tl(t0, t0); | ||
74 | tcg_gen_ext32u_tl(t1, t1); | ||
75 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
76 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
77 | break; | ||
78 | case R6_OPC_MODU: | ||
79 | { | ||
80 | - TCGv t2 = tcg_const_tl(0); | ||
81 | - TCGv t3 = tcg_const_tl(1); | ||
82 | + TCGv t2 = tcg_constant_tl(0); | ||
83 | + TCGv t3 = tcg_constant_tl(1); | ||
84 | tcg_gen_ext32u_tl(t0, t0); | ||
85 | tcg_gen_ext32u_tl(t1, t1); | ||
86 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
87 | @@ -XXX,XX +XXX,XX @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) | ||
88 | break; | ||
89 | case R6_OPC_DDIVU: | ||
90 | { | ||
91 | - TCGv t2 = tcg_const_tl(0); | ||
92 | - TCGv t3 = tcg_const_tl(1); | ||
93 | + TCGv t2 = tcg_constant_tl(0); | ||
94 | + TCGv t3 = tcg_constant_tl(1); | ||
95 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
96 | tcg_gen_divu_i64(cpu_gpr[rd], t0, t1); | ||
97 | } | ||
98 | break; | ||
99 | case R6_OPC_DMODU: | ||
100 | { | ||
101 | - TCGv t2 = tcg_const_tl(0); | ||
102 | - TCGv t3 = tcg_const_tl(1); | ||
103 | + TCGv t2 = tcg_constant_tl(0); | ||
104 | + TCGv t3 = tcg_constant_tl(1); | ||
105 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
106 | tcg_gen_remu_i64(cpu_gpr[rd], t0, t1); | ||
107 | } | ||
108 | @@ -XXX,XX +XXX,XX @@ static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt) | ||
109 | break; | ||
110 | case MMI_OPC_DIVU1: | ||
111 | { | ||
112 | - TCGv t2 = tcg_const_tl(0); | ||
113 | - TCGv t3 = tcg_const_tl(1); | ||
114 | + TCGv t2 = tcg_constant_tl(0); | ||
115 | + TCGv t3 = tcg_constant_tl(1); | ||
116 | tcg_gen_ext32u_tl(t0, t0); | ||
117 | tcg_gen_ext32u_tl(t1, t1); | ||
118 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
119 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
120 | break; | ||
121 | case OPC_DIVU: | ||
122 | { | ||
123 | - TCGv t2 = tcg_const_tl(0); | ||
124 | - TCGv t3 = tcg_const_tl(1); | ||
125 | + TCGv t2 = tcg_constant_tl(0); | ||
126 | + TCGv t3 = tcg_constant_tl(1); | ||
127 | tcg_gen_ext32u_tl(t0, t0); | ||
128 | tcg_gen_ext32u_tl(t1, t1); | ||
129 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
130 | @@ -XXX,XX +XXX,XX @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, | ||
131 | break; | ||
132 | case OPC_DDIVU: | ||
133 | { | ||
134 | - TCGv t2 = tcg_const_tl(0); | ||
135 | - TCGv t3 = tcg_const_tl(1); | ||
136 | + TCGv t2 = tcg_constant_tl(0); | ||
137 | + TCGv t3 = tcg_constant_tl(1); | ||
138 | tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); | ||
139 | tcg_gen_divu_i64(cpu_LO[acc], t0, t1); | ||
140 | tcg_gen_remu_i64(cpu_HI[acc], t0, t1); | ||
141 | @@ -XXX,XX +XXX,XX @@ static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) | ||
142 | case OPC_WSBH: | ||
143 | { | ||
144 | TCGv t1 = tcg_temp_new(); | ||
145 | - TCGv t2 = tcg_const_tl(0x00FF00FF); | ||
146 | + TCGv t2 = tcg_constant_tl(0x00FF00FF); | ||
147 | |||
148 | tcg_gen_shri_tl(t1, t0, 8); | ||
149 | tcg_gen_and_tl(t1, t1, t2); | ||
150 | @@ -XXX,XX +XXX,XX @@ static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) | ||
151 | case OPC_DSBH: | ||
152 | { | ||
153 | TCGv t1 = tcg_temp_new(); | ||
154 | - TCGv t2 = tcg_const_tl(0x00FF00FF00FF00FFULL); | ||
155 | + TCGv t2 = tcg_constant_tl(0x00FF00FF00FF00FFULL); | ||
156 | |||
157 | tcg_gen_shri_tl(t1, t0, 8); | ||
158 | tcg_gen_and_tl(t1, t1, t2); | ||
159 | @@ -XXX,XX +XXX,XX @@ static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) | ||
160 | case OPC_DSHD: | ||
161 | { | ||
162 | TCGv t1 = tcg_temp_new(); | ||
163 | - TCGv t2 = tcg_const_tl(0x0000FFFF0000FFFFULL); | ||
164 | + TCGv t2 = tcg_constant_tl(0x0000FFFF0000FFFFULL); | ||
165 | |||
166 | tcg_gen_shri_tl(t1, t0, 16); | ||
167 | tcg_gen_and_tl(t1, t1, t2); | ||
168 | @@ -XXX,XX +XXX,XX @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, | ||
169 | case 5: | ||
170 | case 6: | ||
171 | case 7: | ||
172 | - gen_helper_mftc0_configx(t0, cpu_env, tcg_const_tl(sel)); | ||
173 | + gen_helper_mftc0_configx(t0, cpu_env, tcg_constant_tl(sel)); | ||
174 | break; | ||
175 | default: | ||
176 | goto die; | ||
177 | @@ -XXX,XX +XXX,XX @@ static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd, | ||
178 | static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft, | ||
179 | int fs) | ||
180 | { | ||
181 | - TCGv_i32 t1 = tcg_const_i32(0); | ||
182 | + TCGv_i32 t1 = tcg_constant_i32(0); | ||
183 | TCGv_i32 fp0 = tcg_temp_new_i32(); | ||
184 | TCGv_i32 fp1 = tcg_temp_new_i32(); | ||
185 | TCGv_i32 fp2 = tcg_temp_new_i32(); | ||
186 | @@ -XXX,XX +XXX,XX @@ static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft, | ||
187 | static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft, | ||
188 | int fs) | ||
189 | { | ||
190 | - TCGv_i64 t1 = tcg_const_i64(0); | ||
191 | + TCGv_i64 t1 = tcg_constant_i64(0); | ||
192 | TCGv_i64 fp0 = tcg_temp_new_i64(); | ||
193 | TCGv_i64 fp1 = tcg_temp_new_i64(); | ||
194 | TCGv_i64 fp2 = tcg_temp_new_i64(); | ||
195 | @@ -XXX,XX +XXX,XX @@ void gen_addiupc(DisasContext *ctx, int rx, int imm, | ||
196 | static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base, | ||
197 | int16_t offset) | ||
198 | { | ||
199 | - TCGv_i32 t0 = tcg_const_i32(op); | ||
200 | + TCGv_i32 t0 = tcg_constant_i32(op); | ||
201 | TCGv t1 = tcg_temp_new(); | ||
202 | gen_base_offset_addr(ctx, t1, base, offset); | ||
203 | gen_helper_cache(cpu_env, t1, t0); | ||
204 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
205 | case OPC_PRECR_SRA_PH_W: | ||
206 | check_dsp_r2(ctx); | ||
207 | { | ||
208 | - TCGv_i32 sa_t = tcg_const_i32(v2); | ||
209 | + TCGv_i32 sa_t = tcg_constant_i32(v2); | ||
210 | gen_helper_precr_sra_ph_w(cpu_gpr[ret], sa_t, v1_t, | ||
211 | cpu_gpr[ret]); | ||
212 | break; | ||
213 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
214 | case OPC_PRECR_SRA_R_PH_W: | ||
215 | check_dsp_r2(ctx); | ||
216 | { | ||
217 | - TCGv_i32 sa_t = tcg_const_i32(v2); | ||
218 | + TCGv_i32 sa_t = tcg_constant_i32(v2); | ||
219 | gen_helper_precr_sra_r_ph_w(cpu_gpr[ret], sa_t, v1_t, | ||
220 | cpu_gpr[ret]); | ||
221 | break; | ||
222 | @@ -XXX,XX +XXX,XX @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, | ||
223 | case OPC_PRECR_SRA_QH_PW: | ||
224 | check_dsp_r2(ctx); | ||
225 | { | ||
226 | - TCGv_i32 ret_t = tcg_const_i32(ret); | ||
227 | + TCGv_i32 ret_t = tcg_constant_i32(ret); | ||
228 | gen_helper_precr_sra_qh_pw(v2_t, v1_t, v2_t, ret_t); | ||
229 | break; | ||
230 | } | ||
231 | case OPC_PRECR_SRA_R_QH_PW: | ||
232 | check_dsp_r2(ctx); | ||
233 | { | ||
234 | - TCGv_i32 sa_v = tcg_const_i32(ret); | ||
235 | + TCGv_i32 sa_v = tcg_constant_i32(ret); | ||
236 | gen_helper_precr_sra_r_qh_pw(v2_t, v1_t, v2_t, sa_v); | ||
237 | break; | ||
238 | } | ||
239 | diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c | ||
240 | index XXXXXXX..XXXXXXX 100644 | ||
241 | --- a/target/mips/tcg/tx79_translate.c | ||
242 | +++ b/target/mips/tcg/tx79_translate.c | ||
243 | @@ -XXX,XX +XXX,XX @@ static bool trans_parallel_compare(DisasContext *ctx, arg_r *a, | ||
244 | return true; | ||
245 | } | ||
246 | |||
247 | - c0 = tcg_const_tl(0); | ||
248 | - c1 = tcg_const_tl(0xffffffff); | ||
249 | + c0 = tcg_constant_tl(0); | ||
250 | + c1 = tcg_constant_tl(0xffffffff); | ||
251 | ax = tcg_temp_new_i64(); | ||
252 | bx = tcg_temp_new_i64(); | ||
253 | t0 = tcg_temp_new_i64(); | ||
254 | diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc | ||
255 | index XXXXXXX..XXXXXXX 100644 | ||
256 | --- a/target/mips/tcg/micromips_translate.c.inc | ||
257 | +++ b/target/mips/tcg/micromips_translate.c.inc | ||
258 | @@ -XXX,XX +XXX,XX @@ static void gen_ldst_multiple(DisasContext *ctx, uint32_t opc, int reglist, | ||
259 | |||
260 | gen_base_offset_addr(ctx, t0, base, offset); | ||
261 | |||
262 | - t1 = tcg_const_tl(reglist); | ||
263 | - t2 = tcg_const_i32(ctx->mem_idx); | ||
264 | + t1 = tcg_constant_tl(reglist); | ||
265 | + t2 = tcg_constant_i32(ctx->mem_idx); | ||
266 | |||
267 | save_cpu_state(ctx, 1); | ||
268 | switch (opc) { | ||
269 | diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc | ||
270 | index XXXXXXX..XXXXXXX 100644 | ||
271 | --- a/target/mips/tcg/nanomips_translate.c.inc | ||
272 | +++ b/target/mips/tcg/nanomips_translate.c.inc | ||
273 | @@ -XXX,XX +XXX,XX @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, | ||
274 | case 0: | ||
275 | /* PRECR_SRA_PH_W */ | ||
276 | { | ||
277 | - TCGv_i32 sa_t = tcg_const_i32(rd); | ||
278 | + TCGv_i32 sa_t = tcg_constant_i32(rd); | ||
279 | gen_helper_precr_sra_ph_w(v1_t, sa_t, v1_t, | ||
280 | cpu_gpr[rt]); | ||
281 | gen_store_gpr(v1_t, rt); | ||
282 | @@ -XXX,XX +XXX,XX @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, | ||
283 | case 1: | ||
284 | /* PRECR_SRA_R_PH_W */ | ||
285 | { | ||
286 | - TCGv_i32 sa_t = tcg_const_i32(rd); | ||
287 | + TCGv_i32 sa_t = tcg_constant_i32(rd); | ||
288 | gen_helper_precr_sra_r_ph_w(v1_t, sa_t, v1_t, | ||
289 | cpu_gpr[rt]); | ||
290 | gen_store_gpr(v1_t, rt); | ||
291 | @@ -XXX,XX +XXX,XX @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
292 | check_nms(ctx); | ||
293 | if (rt != 0) { | ||
294 | TCGv t0 = tcg_temp_new(); | ||
295 | - TCGv_i32 shift = tcg_const_i32(extract32(ctx->opcode, 0, 5)); | ||
296 | - TCGv_i32 shiftx = tcg_const_i32(extract32(ctx->opcode, 7, 4) | ||
297 | - << 1); | ||
298 | - TCGv_i32 stripe = tcg_const_i32(extract32(ctx->opcode, 6, 1)); | ||
299 | + TCGv_i32 shift = | ||
300 | + tcg_constant_i32(extract32(ctx->opcode, 0, 5)); | ||
301 | + TCGv_i32 shiftx = | ||
302 | + tcg_constant_i32(extract32(ctx->opcode, 7, 4) << 1); | ||
303 | + TCGv_i32 stripe = | ||
304 | + tcg_constant_i32(extract32(ctx->opcode, 6, 1)); | ||
305 | |||
306 | gen_load_gpr(t0, rs); | ||
307 | gen_helper_rotx(cpu_gpr[rt], t0, shift, shiftx, stripe); | ||
308 | @@ -XXX,XX +XXX,XX @@ static int decode_isa_nanomips(CPUMIPSState *env, DisasContext *ctx) | ||
309 | |||
310 | /* make sure instructions are on a halfword boundary */ | ||
311 | if (ctx->base.pc_next & 0x1) { | ||
312 | - TCGv tmp = tcg_const_tl(ctx->base.pc_next); | ||
313 | + TCGv tmp = tcg_constant_tl(ctx->base.pc_next); | ||
314 | tcg_gen_st_tl(tmp, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); | ||
315 | generate_exception_end(ctx, EXCP_AdEL); | ||
316 | return 2; | ||
317 | -- | ||
318 | 2.34.1 | ||
319 | |||
320 | diff view generated by jsdifflib |
1 | From: Leandro Lupori <leandro.lupori@eldorado.org.br> | 1 | Move the body out of this large macro. |
---|---|---|---|
2 | Use tcg_constant_i64. | ||
2 | 3 | ||
3 | PowerPC64 processors handle direct branches better than indirect | 4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | ones, resulting in fewer stalled cycles and branch misses. | ||
5 | |||
6 | However, PPC's tb_target_set_jmp_target() was only using direct | ||
7 | branches for 16-bit jumps, while PowerPC64's unconditional branch | ||
8 | instructions are able to handle displacements of up to 26 bits. | ||
9 | To take advantage of this, now jumps whose displacements fit in | ||
10 | between 17 and 26 bits are also converted to direct branches. | ||
11 | |||
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | Signed-off-by: Leandro Lupori <leandro.lupori@eldorado.org.br> | ||
14 | [rth: Expanded some commentary.] | ||
15 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
16 | --- | 6 | --- |
17 | tcg/ppc/tcg-target.c.inc | 119 +++++++++++++++++++++++++++++---------- | 7 | target/ppc/translate/vmx-impl.c.inc | 95 +++++++++++++++-------------- |
18 | 1 file changed, 88 insertions(+), 31 deletions(-) | 8 | 1 file changed, 49 insertions(+), 46 deletions(-) |
19 | 9 | ||
20 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc | 10 | diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc |
21 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/tcg/ppc/tcg-target.c.inc | 12 | --- a/target/ppc/translate/vmx-impl.c.inc |
23 | +++ b/tcg/ppc/tcg-target.c.inc | 13 | +++ b/target/ppc/translate/vmx-impl.c.inc |
24 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) | 14 | @@ -XXX,XX +XXX,XX @@ static void gen_mtvscr(DisasContext *ctx) |
25 | tcg_out32(s, insn); | 15 | gen_helper_mtvscr(cpu_env, val); |
26 | } | 16 | } |
27 | 17 | ||
28 | +static inline uint64_t make_pair(tcg_insn_unit i1, tcg_insn_unit i2) | 18 | +static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry) |
29 | +{ | 19 | +{ |
30 | + if (HOST_BIG_ENDIAN) { | 20 | + TCGv_i64 t0; |
31 | + return (uint64_t)i1 << 32 | i2; | 21 | + TCGv_i64 t1; |
32 | + } | 22 | + TCGv_i64 t2; |
33 | + return (uint64_t)i2 << 32 | i1; | 23 | + TCGv_i64 avr; |
34 | +} | 24 | + TCGv_i64 ten, z; |
35 | + | 25 | + |
36 | +static inline void ppc64_replace2(uintptr_t rx, uintptr_t rw, | 26 | + if (unlikely(!ctx->altivec_enabled)) { |
37 | + tcg_insn_unit i0, tcg_insn_unit i1) | 27 | + gen_exception(ctx, POWERPC_EXCP_VPU); |
38 | +{ | ||
39 | +#if TCG_TARGET_REG_BITS == 64 | ||
40 | + qatomic_set((uint64_t *)rw, make_pair(i0, i1)); | ||
41 | + flush_idcache_range(rx, rw, 8); | ||
42 | +#else | ||
43 | + qemu_build_not_reached(); | ||
44 | +#endif | ||
45 | +} | ||
46 | + | ||
47 | +static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw, | ||
48 | + tcg_insn_unit i0, tcg_insn_unit i1, | ||
49 | + tcg_insn_unit i2, tcg_insn_unit i3) | ||
50 | +{ | ||
51 | + uint64_t p[2]; | ||
52 | + | ||
53 | + p[!HOST_BIG_ENDIAN] = make_pair(i0, i1); | ||
54 | + p[HOST_BIG_ENDIAN] = make_pair(i2, i3); | ||
55 | + | ||
56 | + /* | ||
57 | + * There's no convenient way to get the compiler to allocate a pair | ||
58 | + * of registers at an even index, so copy into r6/r7 and clobber. | ||
59 | + */ | ||
60 | + asm("mr %%r6, %1\n\t" | ||
61 | + "mr %%r7, %2\n\t" | ||
62 | + "stq %%r6, %0" | ||
63 | + : "=Q"(*(__int128 *)rw) : "r"(p[0]), "r"(p[1]) : "r6", "r7"); | ||
64 | + flush_idcache_range(rx, rw, 16); | ||
65 | +} | ||
66 | + | ||
67 | void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, | ||
68 | uintptr_t jmp_rw, uintptr_t addr) | ||
69 | { | ||
70 | - if (TCG_TARGET_REG_BITS == 64) { | ||
71 | - tcg_insn_unit i1, i2; | ||
72 | - intptr_t tb_diff = addr - tc_ptr; | ||
73 | - intptr_t br_diff = addr - (jmp_rx + 4); | ||
74 | - uint64_t pair; | ||
75 | + tcg_insn_unit i0, i1, i2, i3; | ||
76 | + intptr_t tb_diff = addr - tc_ptr; | ||
77 | + intptr_t br_diff = addr - (jmp_rx + 4); | ||
78 | + intptr_t lo, hi; | ||
79 | |||
80 | - /* This does not exercise the range of the branch, but we do | ||
81 | - still need to be able to load the new value of TCG_REG_TB. | ||
82 | - But this does still happen quite often. */ | ||
83 | - if (tb_diff == (int16_t)tb_diff) { | ||
84 | - i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff); | ||
85 | - i2 = B | (br_diff & 0x3fffffc); | ||
86 | - } else { | ||
87 | - intptr_t lo = (int16_t)tb_diff; | ||
88 | - intptr_t hi = (int32_t)(tb_diff - lo); | ||
89 | - assert(tb_diff == hi + lo); | ||
90 | - i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16); | ||
91 | - i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo); | ||
92 | - } | ||
93 | -#if HOST_BIG_ENDIAN | ||
94 | - pair = (uint64_t)i1 << 32 | i2; | ||
95 | -#else | ||
96 | - pair = (uint64_t)i2 << 32 | i1; | ||
97 | -#endif | ||
98 | - | ||
99 | - /* As per the enclosing if, this is ppc64. Avoid the _Static_assert | ||
100 | - within qatomic_set that would fail to build a ppc32 host. */ | ||
101 | - qatomic_set__nocheck((uint64_t *)jmp_rw, pair); | ||
102 | - flush_idcache_range(jmp_rx, jmp_rw, 8); | ||
103 | - } else { | ||
104 | + if (TCG_TARGET_REG_BITS == 32) { | ||
105 | intptr_t diff = addr - jmp_rx; | ||
106 | tcg_debug_assert(in_range_b(diff)); | ||
107 | qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc)); | ||
108 | flush_idcache_range(jmp_rx, jmp_rw, 4); | ||
109 | + return; | ||
110 | } | ||
111 | + | ||
112 | + /* | ||
113 | + * For 16-bit displacements, we can use a single add + branch. | ||
114 | + * This happens quite often. | ||
115 | + */ | ||
116 | + if (tb_diff == (int16_t)tb_diff) { | ||
117 | + i0 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff); | ||
118 | + i1 = B | (br_diff & 0x3fffffc); | ||
119 | + ppc64_replace2(jmp_rx, jmp_rw, i0, i1); | ||
120 | + return; | 28 | + return; |
121 | + } | 29 | + } |
122 | + | 30 | + |
123 | + lo = (int16_t)tb_diff; | 31 | + t0 = tcg_temp_new_i64(); |
124 | + hi = (int32_t)(tb_diff - lo); | 32 | + t1 = tcg_temp_new_i64(); |
125 | + assert(tb_diff == hi + lo); | 33 | + t2 = tcg_temp_new_i64(); |
126 | + i0 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16); | 34 | + avr = tcg_temp_new_i64(); |
127 | + i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo); | 35 | + ten = tcg_constant_i64(10); |
36 | + z = tcg_constant_i64(0); | ||
128 | + | 37 | + |
129 | + /* | 38 | + if (add_cin) { |
130 | + * Without stq from 2.07, we can only update two insns, | 39 | + get_avr64(avr, rA(ctx->opcode), false); |
131 | + * and those must be the ones that load the target address. | 40 | + tcg_gen_mulu2_i64(t0, t1, avr, ten); |
132 | + */ | 41 | + get_avr64(avr, rB(ctx->opcode), false); |
133 | + if (!have_isa_2_07) { | 42 | + tcg_gen_andi_i64(t2, avr, 0xF); |
134 | + ppc64_replace2(jmp_rx, jmp_rw, i0, i1); | 43 | + tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); |
135 | + return; | 44 | + set_avr64(rD(ctx->opcode), avr, false); |
45 | + } else { | ||
46 | + get_avr64(avr, rA(ctx->opcode), false); | ||
47 | + tcg_gen_mulu2_i64(avr, t2, avr, ten); | ||
48 | + set_avr64(rD(ctx->opcode), avr, false); | ||
136 | + } | 49 | + } |
137 | + | 50 | + |
138 | + /* | 51 | + if (ret_carry) { |
139 | + * For 26-bit displacements, we can use a direct branch. | 52 | + get_avr64(avr, rA(ctx->opcode), true); |
140 | + * Otherwise we still need the indirect branch, which we | 53 | + tcg_gen_mulu2_i64(t0, t1, avr, ten); |
141 | + * must restore after a potential direct branch write. | 54 | + tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); |
142 | + */ | 55 | + set_avr64(rD(ctx->opcode), avr, false); |
143 | + br_diff -= 4; | 56 | + set_avr64(rD(ctx->opcode), z, true); |
144 | + if (in_range_b(br_diff)) { | ||
145 | + i2 = B | (br_diff & 0x3fffffc); | ||
146 | + i3 = NOP; | ||
147 | + } else { | 57 | + } else { |
148 | + i2 = MTSPR | RS(TCG_REG_TB) | CTR; | 58 | + get_avr64(avr, rA(ctx->opcode), true); |
149 | + i3 = BCCTR | BO_ALWAYS; | 59 | + tcg_gen_mul_i64(t0, avr, ten); |
60 | + tcg_gen_add_i64(avr, t0, t2); | ||
61 | + set_avr64(rD(ctx->opcode), avr, true); | ||
150 | + } | 62 | + } |
151 | + ppc64_replace4(jmp_rx, jmp_rw, i0, i1, i2, i3); | 63 | +} |
152 | } | 64 | + |
153 | 65 | #define GEN_VX_VMUL10(name, add_cin, ret_carry) \ | |
154 | static void tcg_out_call_int(TCGContext *s, int lk, | 66 | -static void glue(gen_, name)(DisasContext *ctx) \ |
155 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, | 67 | -{ \ |
156 | if (s->tb_jmp_insn_offset) { | 68 | - TCGv_i64 t0; \ |
157 | /* Direct jump. */ | 69 | - TCGv_i64 t1; \ |
158 | if (TCG_TARGET_REG_BITS == 64) { | 70 | - TCGv_i64 t2; \ |
159 | - /* Ensure the next insns are 8-byte aligned. */ | 71 | - TCGv_i64 avr; \ |
160 | - if ((uintptr_t)s->code_ptr & 7) { | 72 | - TCGv_i64 ten, z; \ |
161 | + /* Ensure the next insns are 8 or 16-byte aligned. */ | 73 | - \ |
162 | + while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) { | 74 | - if (unlikely(!ctx->altivec_enabled)) { \ |
163 | tcg_out32(s, NOP); | 75 | - gen_exception(ctx, POWERPC_EXCP_VPU); \ |
164 | } | 76 | - return; \ |
165 | s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s); | 77 | - } \ |
78 | - \ | ||
79 | - t0 = tcg_temp_new_i64(); \ | ||
80 | - t1 = tcg_temp_new_i64(); \ | ||
81 | - t2 = tcg_temp_new_i64(); \ | ||
82 | - avr = tcg_temp_new_i64(); \ | ||
83 | - ten = tcg_const_i64(10); \ | ||
84 | - z = tcg_const_i64(0); \ | ||
85 | - \ | ||
86 | - if (add_cin) { \ | ||
87 | - get_avr64(avr, rA(ctx->opcode), false); \ | ||
88 | - tcg_gen_mulu2_i64(t0, t1, avr, ten); \ | ||
89 | - get_avr64(avr, rB(ctx->opcode), false); \ | ||
90 | - tcg_gen_andi_i64(t2, avr, 0xF); \ | ||
91 | - tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \ | ||
92 | - set_avr64(rD(ctx->opcode), avr, false); \ | ||
93 | - } else { \ | ||
94 | - get_avr64(avr, rA(ctx->opcode), false); \ | ||
95 | - tcg_gen_mulu2_i64(avr, t2, avr, ten); \ | ||
96 | - set_avr64(rD(ctx->opcode), avr, false); \ | ||
97 | - } \ | ||
98 | - \ | ||
99 | - if (ret_carry) { \ | ||
100 | - get_avr64(avr, rA(ctx->opcode), true); \ | ||
101 | - tcg_gen_mulu2_i64(t0, t1, avr, ten); \ | ||
102 | - tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \ | ||
103 | - set_avr64(rD(ctx->opcode), avr, false); \ | ||
104 | - set_avr64(rD(ctx->opcode), z, true); \ | ||
105 | - } else { \ | ||
106 | - get_avr64(avr, rA(ctx->opcode), true); \ | ||
107 | - tcg_gen_mul_i64(t0, avr, ten); \ | ||
108 | - tcg_gen_add_i64(avr, t0, t2); \ | ||
109 | - set_avr64(rD(ctx->opcode), avr, true); \ | ||
110 | - } \ | ||
111 | -} \ | ||
112 | + static void glue(gen_, name)(DisasContext *ctx) \ | ||
113 | + { gen_vx_vmul10(ctx, add_cin, ret_carry); } | ||
114 | |||
115 | GEN_VX_VMUL10(vmul10uq, 0, 0); | ||
116 | GEN_VX_VMUL10(vmul10euq, 1, 0); | ||
166 | -- | 117 | -- |
167 | 2.34.1 | 118 | 2.34.1 |
119 | |||
120 | diff view generated by jsdifflib |
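For readers unfamiliar with these PowerPC instructions: vmul10uq and its variants multiply a 128-bit quadword by ten (the "e" forms add a decimal carry-in, the "c" forms return the carry-out), which the translator open-codes as two 64-bit halves using tcg_gen_mulu2_i64/tcg_gen_add2_i64. A rough C model of the plain case (my sketch, using the compiler's 128-bit type):

    #include <stdint.h>

    /* 128-bit value held as {hi, lo}, multiplied by ten; only the low
     * 128 bits are kept, matching what the vmul10uq path above computes. */
    static void mul10_u128(uint64_t *hi, uint64_t *lo)
    {
        unsigned __int128 v = ((unsigned __int128)*hi << 64) | *lo;
        v *= 10;
        *lo = (uint64_t)v;
        *hi = (uint64_t)(v >> 64);
    }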
New patch | |||
---|---|---|---|
1 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
3 | --- | ||
4 | target/ppc/translate/vmx-impl.c.inc | 4 +++- | ||
5 | 1 file changed, 3 insertions(+), 1 deletion(-) | ||
1 | 6 | ||
7 | diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc | ||
8 | index XXXXXXX..XXXXXXX 100644 | ||
9 | --- a/target/ppc/translate/vmx-impl.c.inc | ||
10 | +++ b/target/ppc/translate/vmx-impl.c.inc | ||
11 | @@ -XXX,XX +XXX,XX @@ static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right, | ||
12 | hi = tcg_temp_new_i64(); | ||
13 | lo = tcg_temp_new_i64(); | ||
14 | t0 = tcg_temp_new_i64(); | ||
15 | - t1 = tcg_const_i64(0); | ||
16 | |||
17 | get_avr64(lo, a->vra, false); | ||
18 | get_avr64(hi, a->vra, true); | ||
19 | @@ -XXX,XX +XXX,XX @@ static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right, | ||
20 | if (right) { | ||
21 | tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo); | ||
22 | if (alg) { | ||
23 | + t1 = tcg_temp_new_i64(); | ||
24 | tcg_gen_sari_i64(t1, lo, 63); | ||
25 | + } else { | ||
26 | + t1 = zero; | ||
27 | } | ||
28 | tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi); | ||
29 | } else { | ||
30 | -- | ||
31 | 2.34.1 | ||
32 | |||
33 | diff view generated by jsdifflib |
1 | This bitmap is created and discarded immediately. | 1 | Remove the local definition of rx_abs in favor of tcg_gen_abs_i32. |
---|---|---|---|
2 | We gain nothing by its existence. | ||
3 | 2 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Message-Id: <20220822232338.1727934-2-richard.henderson@linaro.org> | ||
7 | --- | 5 | --- |
8 | accel/tcg/translate-all.c | 78 ++------------------------------------- | 6 | target/rx/translate.c | 12 +----------- |
9 | 1 file changed, 4 insertions(+), 74 deletions(-) | 7 | 1 file changed, 1 insertion(+), 11 deletions(-) |
10 | 8 | ||
11 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | 9 | diff --git a/target/rx/translate.c b/target/rx/translate.c |
12 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/accel/tcg/translate-all.c | 11 | --- a/target/rx/translate.c |
14 | +++ b/accel/tcg/translate-all.c | 12 | +++ b/target/rx/translate.c |
15 | @@ -XXX,XX +XXX,XX @@ | 13 | @@ -XXX,XX +XXX,XX @@ static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a) |
16 | #define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) | 14 | return true; |
17 | #endif | ||
18 | |||
19 | -#define SMC_BITMAP_USE_THRESHOLD 10 | ||
20 | - | ||
21 | typedef struct PageDesc { | ||
22 | /* list of TBs intersecting this ram page */ | ||
23 | uintptr_t first_tb; | ||
24 | -#ifdef CONFIG_SOFTMMU | ||
25 | - /* in order to optimize self modifying code, we count the number | ||
26 | - of lookups we do to a given page to use a bitmap */ | ||
27 | - unsigned long *code_bitmap; | ||
28 | - unsigned int code_write_count; | ||
29 | -#else | ||
30 | +#ifdef CONFIG_USER_ONLY | ||
31 | unsigned long flags; | ||
32 | void *target_data; | ||
33 | #endif | ||
34 | -#ifndef CONFIG_USER_ONLY | ||
35 | +#ifdef CONFIG_SOFTMMU | ||
36 | QemuSpin lock; | ||
37 | #endif | ||
38 | } PageDesc; | ||
39 | @@ -XXX,XX +XXX,XX @@ void tb_htable_init(void) | ||
40 | qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); | ||
41 | } | 15 | } |
42 | 16 | ||
43 | -/* call with @p->lock held */ | 17 | -static void rx_abs(TCGv ret, TCGv arg1) |
44 | -static inline void invalidate_page_bitmap(PageDesc *p) | ||
45 | -{ | 18 | -{ |
46 | - assert_page_locked(p); | 19 | - TCGv neg; |
47 | -#ifdef CONFIG_SOFTMMU | 20 | - TCGv zero; |
48 | - g_free(p->code_bitmap); | 21 | - neg = tcg_temp_new(); |
49 | - p->code_bitmap = NULL; | 22 | - zero = tcg_const_i32(0); |
50 | - p->code_write_count = 0; | 23 | - tcg_gen_neg_i32(neg, arg1); |
51 | -#endif | 24 | - tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1); |
52 | -} | 25 | -} |
53 | - | 26 | - |
54 | /* Set to NULL all the 'first_tb' fields in all PageDescs. */ | 27 | /* abs rd */ |
55 | static void page_flush_tb_1(int level, void **lp) | 28 | /* abs rs, rd */ |
29 | static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a) | ||
56 | { | 30 | { |
57 | @@ -XXX,XX +XXX,XX @@ static void page_flush_tb_1(int level, void **lp) | 31 | - rx_gen_op_rr(rx_abs, a->rd, a->rs); |
58 | for (i = 0; i < V_L2_SIZE; ++i) { | 32 | + rx_gen_op_rr(tcg_gen_abs_i32, a->rd, a->rs); |
59 | page_lock(&pd[i]); | 33 | return true; |
60 | pd[i].first_tb = (uintptr_t)NULL; | ||
61 | - invalidate_page_bitmap(pd + i); | ||
62 | page_unlock(&pd[i]); | ||
63 | } | ||
64 | } else { | ||
65 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
66 | if (rm_from_page_list) { | ||
67 | p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); | ||
68 | tb_page_remove(p, tb); | ||
69 | - invalidate_page_bitmap(p); | ||
70 | if (tb->page_addr[1] != -1) { | ||
71 | p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); | ||
72 | tb_page_remove(p, tb); | ||
73 | - invalidate_page_bitmap(p); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | @@ -XXX,XX +XXX,XX @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) | ||
78 | } | ||
79 | } | 34 | } |
80 | 35 | ||
81 | -#ifdef CONFIG_SOFTMMU | ||
82 | -/* call with @p->lock held */ | ||
83 | -static void build_page_bitmap(PageDesc *p) | ||
84 | -{ | ||
85 | - int n, tb_start, tb_end; | ||
86 | - TranslationBlock *tb; | ||
87 | - | ||
88 | - assert_page_locked(p); | ||
89 | - p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); | ||
90 | - | ||
91 | - PAGE_FOR_EACH_TB(p, tb, n) { | ||
92 | - /* NOTE: this is subtle as a TB may span two physical pages */ | ||
93 | - if (n == 0) { | ||
94 | - /* NOTE: tb_end may be after the end of the page, but | ||
95 | - it is not a problem */ | ||
96 | - tb_start = tb->pc & ~TARGET_PAGE_MASK; | ||
97 | - tb_end = tb_start + tb->size; | ||
98 | - if (tb_end > TARGET_PAGE_SIZE) { | ||
99 | - tb_end = TARGET_PAGE_SIZE; | ||
100 | - } | ||
101 | - } else { | ||
102 | - tb_start = 0; | ||
103 | - tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); | ||
104 | - } | ||
105 | - bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); | ||
106 | - } | ||
107 | -} | ||
108 | -#endif | ||
109 | - | ||
110 | /* add the tb in the target page and protect it if necessary | ||
111 | * | ||
112 | * Called with mmap_lock held for user-mode emulation. | ||
113 | @@ -XXX,XX +XXX,XX @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, | ||
114 | page_already_protected = p->first_tb != (uintptr_t)NULL; | ||
115 | #endif | ||
116 | p->first_tb = (uintptr_t)tb | n; | ||
117 | - invalidate_page_bitmap(p); | ||
118 | |||
119 | #if defined(CONFIG_USER_ONLY) | ||
120 | /* translator_loop() must have made all TB pages non-writable */ | ||
121 | @@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | ||
122 | /* remove TB from the page(s) if we couldn't insert it */ | ||
123 | if (unlikely(existing_tb)) { | ||
124 | tb_page_remove(p, tb); | ||
125 | - invalidate_page_bitmap(p); | ||
126 | if (p2) { | ||
127 | tb_page_remove(p2, tb); | ||
128 | - invalidate_page_bitmap(p2); | ||
129 | } | ||
130 | tb = existing_tb; | ||
131 | } | ||
132 | @@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, | ||
133 | #if !defined(CONFIG_USER_ONLY) | ||
134 | /* if no code remaining, no need to continue to use slow writes */ | ||
135 | if (!p->first_tb) { | ||
136 | - invalidate_page_bitmap(p); | ||
137 | tlb_unprotect_code(start); | ||
138 | } | ||
139 | #endif | ||
140 | @@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages, | ||
141 | } | ||
142 | |||
143 | assert_page_locked(p); | ||
144 | - if (!p->code_bitmap && | ||
145 | - ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { | ||
146 | - build_page_bitmap(p); | ||
147 | - } | ||
148 | - if (p->code_bitmap) { | ||
149 | - unsigned int nr; | ||
150 | - unsigned long b; | ||
151 | - | ||
152 | - nr = start & ~TARGET_PAGE_MASK; | ||
153 | - b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); | ||
154 | - if (b & ((1 << len) - 1)) { | ||
155 | - goto do_invalidate; | ||
156 | - } | ||
157 | - } else { | ||
158 | - do_invalidate: | ||
159 | - tb_invalidate_phys_page_range__locked(pages, p, start, start + len, | ||
160 | - retaddr); | ||
161 | - } | ||
162 | + tb_invalidate_phys_page_range__locked(pages, p, start, start + len, | ||
163 | + retaddr); | ||
164 | } | ||
165 | #else | ||
166 | /* Called with mmap_lock held. If pc is not 0 then it indicates the | ||
167 | -- | 36 | -- |
168 | 2.34.1 | 37 | 2.34.1 |
169 | 38 | ||
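To make the rx_abs() removal above easier to follow: the deleted helper open-coded the absolute value as a negate plus movcond, and tcg_gen_abs_i32() computes the same result in a single call. A sketch restating the hunk, using only names that already appear in it:

    /* deleted helper:  neg = -arg1;  ret = (arg1 < 0) ? neg : arg1;  */
    /* replacement:     ret = |arg1|                                  */
    tcg_gen_abs_i32(ret, arg1);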
1 | From: Alex Bennée <alex.bennee@linaro.org> | 1 | Since PSW_Z = PSW_S, we can move that assignment to the end |
---|---|---|---|
2 | and use PSW_Z as a temporary while computing PSW_O. | ||
3 | Use tcg_constant_i32 instead of tcg_const_i32. | ||
2 | 4 | ||
3 | Before: 35.912 s ± 0.168 s | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | After: 35.565 s ± 0.087 s | ||
5 | |||
6 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <20220811151413.3350684-5-alex.bennee@linaro.org> | ||
9 | Signed-off-by: Cédric Le Goater <clg@kaod.org> | ||
10 | Message-Id: <20220923084803.498337-5-clg@kaod.org> | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | --- | 7 | --- |
13 | accel/tcg/cputlb.c | 15 ++++++--------- | 8 | target/rx/translate.c | 28 +++++++++++++--------------- |
14 | 1 file changed, 6 insertions(+), 9 deletions(-) | 9 | 1 file changed, 13 insertions(+), 15 deletions(-) |
15 | 10 | ||
16 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 11 | diff --git a/target/rx/translate.c b/target/rx/translate.c |
17 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/accel/tcg/cputlb.c | 13 | --- a/target/rx/translate.c |
19 | +++ b/accel/tcg/cputlb.c | 14 | +++ b/target/rx/translate.c |
20 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, | 15 | @@ -XXX,XX +XXX,XX @@ static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a) |
21 | static void tlb_fill(CPUState *cpu, target_ulong addr, int size, | 16 | /* ret = arg1 + arg2 + psw_c */ |
22 | MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) | 17 | static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2) |
23 | { | 18 | { |
24 | - CPUClass *cc = CPU_GET_CLASS(cpu); | 19 | - TCGv z; |
25 | bool ok; | 20 | - z = tcg_const_i32(0); |
26 | 21 | + TCGv z = tcg_constant_i32(0); | |
27 | /* | 22 | tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z); |
28 | * This is not a probe, so only valid return is success; failure | 23 | tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z); |
29 | * should result in exception + longjmp to the cpu loop. | 24 | - tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); |
30 | */ | 25 | tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1); |
31 | - ok = cc->tcg_ops->tlb_fill(cpu, addr, size, | 26 | - tcg_gen_xor_i32(z, arg1, arg2); |
32 | - access_type, mmu_idx, false, retaddr); | 27 | - tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z); |
33 | + ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, | 28 | + tcg_gen_xor_i32(cpu_psw_z, arg1, arg2); |
34 | + access_type, mmu_idx, false, retaddr); | 29 | + tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z); |
35 | assert(ok); | 30 | + tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); |
31 | tcg_gen_mov_i32(ret, cpu_psw_s); | ||
36 | } | 32 | } |
37 | 33 | ||
38 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, | 34 | @@ -XXX,XX +XXX,XX @@ static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a) |
39 | MMUAccessType access_type, | 35 | /* ret = arg1 + arg2 */ |
40 | int mmu_idx, uintptr_t retaddr) | 36 | static void rx_add(TCGv ret, TCGv arg1, TCGv arg2) |
41 | { | 37 | { |
42 | - CPUClass *cc = CPU_GET_CLASS(cpu); | 38 | - TCGv z; |
43 | - | 39 | - z = tcg_const_i32(0); |
44 | - cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr); | 40 | + TCGv z = tcg_constant_i32(0); |
45 | + cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, | 41 | tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z); |
46 | + mmu_idx, retaddr); | 42 | - tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); |
43 | tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1); | ||
44 | - tcg_gen_xor_i32(z, arg1, arg2); | ||
45 | - tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z); | ||
46 | + tcg_gen_xor_i32(cpu_psw_z, arg1, arg2); | ||
47 | + tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z); | ||
48 | + tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); | ||
49 | tcg_gen_mov_i32(ret, cpu_psw_s); | ||
47 | } | 50 | } |
48 | 51 | ||
49 | static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, | 52 | @@ -XXX,XX +XXX,XX @@ static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a) |
50 | @@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, | 53 | /* ret = arg1 - arg2 */ |
51 | if (!tlb_hit_page(tlb_addr, page_addr)) { | 54 | static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2) |
52 | if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { | 55 | { |
53 | CPUState *cs = env_cpu(env); | 56 | - TCGv temp; |
54 | - CPUClass *cc = CPU_GET_CLASS(cs); | 57 | tcg_gen_sub_i32(cpu_psw_s, arg1, arg2); |
55 | 58 | - tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); | |
56 | - if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, | 59 | tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2); |
57 | - mmu_idx, nonfault, retaddr)) { | 60 | tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1); |
58 | + if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, | 61 | - temp = tcg_temp_new_i32(); |
59 | + mmu_idx, nonfault, retaddr)) { | 62 | - tcg_gen_xor_i32(temp, arg1, arg2); |
60 | /* Non-faulting page table read failed. */ | 63 | - tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp); |
61 | *phost = NULL; | 64 | + tcg_gen_xor_i32(cpu_psw_z, arg1, arg2); |
62 | return TLB_INVALID_MASK; | 65 | + tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z); |
66 | + tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); | ||
67 | /* CMP not required return */ | ||
68 | if (ret) { | ||
69 | tcg_gen_mov_i32(ret, cpu_psw_s); | ||
70 | } | ||
71 | } | ||
72 | + | ||
73 | static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2) | ||
74 | { | ||
75 | rx_sub(NULL, arg1, arg2); | ||
76 | } | ||
77 | + | ||
78 | /* ret = arg1 - arg2 - !psw_c */ | ||
79 | /* -> ret = arg1 + ~arg2 + psw_c */ | ||
80 | static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2) | ||
63 | -- | 81 | -- |
64 | 2.34.1 | 82 | 2.34.1 |
65 | 83 | ||
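For the rx_adc()/rx_add() hunks above (the right-hand side of this comparison), the reordering is easier to see written out: for a signed addition a + b = s, overflow is the sign of (s ^ a) & ~(a ^ b), and because PSW_Z only receives its copy of PSW_S at the very end, it is free to hold the (a ^ b) term in the meantime, which is what removes the old scratch temp. Restating the new sequence with annotations:

    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);       /* s ^ a                    */
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);            /* a ^ b, PSW_Z as scratch  */
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z); /* O = (s ^ a) & ~(a ^ b)   */
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);             /* Z finally tracks the sum */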
New patch | |||
---|---|---|---|
1 | These three cases use a constant as first input, and | ||
2 | then overwrite the temp in the output. Separate them. | ||
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/rx/translate.c | 12 ++++++------ | ||
8 | 1 file changed, 6 insertions(+), 6 deletions(-) | ||
9 | |||
10 | diff --git a/target/rx/translate.c b/target/rx/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/rx/translate.c | ||
13 | +++ b/target/rx/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a) | ||
15 | done = gen_new_label(); | ||
16 | /* if (cpu_regs[a->rs]) { */ | ||
17 | tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift); | ||
18 | - count = tcg_const_i32(32); | ||
19 | + count = tcg_temp_new(); | ||
20 | tmp = tcg_temp_new(); | ||
21 | tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31); | ||
22 | - tcg_gen_sub_i32(count, count, tmp); | ||
23 | + tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp); | ||
24 | tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count); | ||
25 | tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp); | ||
26 | tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0); | ||
27 | @@ -XXX,XX +XXX,XX @@ static inline void rx_bnotr(TCGv reg, TCGv mask) | ||
28 | cat3(arg_, name, _rr) * a) \ | ||
29 | { \ | ||
30 | TCGv mask, b; \ | ||
31 | - mask = tcg_const_i32(1); \ | ||
32 | + mask = tcg_temp_new(); \ | ||
33 | b = tcg_temp_new(); \ | ||
34 | tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \ | ||
35 | - tcg_gen_shl_i32(mask, mask, b); \ | ||
36 | + tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \ | ||
37 | cat3(rx_, op, r)(cpu_regs[a->rd], mask); \ | ||
38 | return true; \ | ||
39 | } \ | ||
40 | @@ -XXX,XX +XXX,XX @@ static inline void rx_bnotr(TCGv reg, TCGv mask) | ||
41 | cat3(arg_, name, _rm) * a) \ | ||
42 | { \ | ||
43 | TCGv mask, mem, addr, b; \ | ||
44 | - mask = tcg_const_i32(1); \ | ||
45 | + mask = tcg_temp_new(); \ | ||
46 | b = tcg_temp_new(); \ | ||
47 | tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \ | ||
48 | - tcg_gen_shl_i32(mask, mask, b); \ | ||
49 | + tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \ | ||
50 | mem = tcg_temp_new(); \ | ||
51 | addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \ | ||
52 | cat3(rx_, op, m)(addr, mask); \ | ||
53 | -- | ||
54 | 2.34.1 | ||
55 | |||
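The point of this patch, restated briefly: tcg_const_i32() produced an ordinary writable temporary initialised with the value, while tcg_constant_i32() returns a read-only constant that must never be used as a destination. Any site that used to overwrite the old constant temp therefore needs a fresh temp for the output, as in the SHLL case above (a sketch of the same change, nothing new introduced):

    /* old: the constant temp doubled as the destination
     *   count = tcg_const_i32(32);
     *   tcg_gen_sub_i32(count, count, tmp);
     */
    count = tcg_temp_new();                              /* writable output  */
    tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp);   /* read-only input  */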
New patch | |||
---|---|---|---|
1 | All remaining uses are strictly read-only. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/rx/translate.c | 32 ++++++++++++++++---------------- | ||
7 | 1 file changed, 16 insertions(+), 16 deletions(-) | ||
8 | |||
9 | diff --git a/target/rx/translate.c b/target/rx/translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/rx/translate.c | ||
12 | +++ b/target/rx/translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a) | ||
14 | static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a) | ||
15 | { | ||
16 | TCGv imm, mem; | ||
17 | - imm = tcg_const_i32(a->imm); | ||
18 | + imm = tcg_constant_i32(a->imm); | ||
19 | mem = tcg_temp_new(); | ||
20 | tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz); | ||
21 | rx_gen_st(a->sz, imm, mem); | ||
22 | @@ -XXX,XX +XXX,XX @@ static inline void stcond(TCGCond cond, int rd, int imm) | ||
23 | { | ||
24 | TCGv z; | ||
25 | TCGv _imm; | ||
26 | - z = tcg_const_i32(0); | ||
27 | - _imm = tcg_const_i32(imm); | ||
28 | + z = tcg_constant_i32(0); | ||
29 | + _imm = tcg_constant_i32(imm); | ||
30 | tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z, | ||
31 | _imm, cpu_regs[rd]); | ||
32 | } | ||
33 | @@ -XXX,XX +XXX,XX @@ static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2) | ||
34 | |||
35 | static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2) | ||
36 | { | ||
37 | - TCGv imm = tcg_const_i32(src2); | ||
38 | + TCGv imm = tcg_constant_i32(src2); | ||
39 | opr(cpu_regs[dst], cpu_regs[src], imm); | ||
40 | } | ||
41 | |||
42 | @@ -XXX,XX +XXX,XX @@ static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a) | ||
43 | /* emul #imm, rd */ | ||
44 | static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a) | ||
45 | { | ||
46 | - TCGv imm = tcg_const_i32(a->imm); | ||
47 | + TCGv imm = tcg_constant_i32(a->imm); | ||
48 | if (a->rd > 14) { | ||
49 | qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); | ||
50 | } | ||
51 | @@ -XXX,XX +XXX,XX @@ static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a) | ||
52 | /* emulu #imm, rd */ | ||
53 | static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a) | ||
54 | { | ||
55 | - TCGv imm = tcg_const_i32(a->imm); | ||
56 | + TCGv imm = tcg_constant_i32(a->imm); | ||
57 | if (a->rd > 14) { | ||
58 | qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); | ||
59 | } | ||
60 | @@ -XXX,XX +XXX,XX @@ static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a) | ||
61 | |||
62 | static inline void rx_save_pc(DisasContext *ctx) | ||
63 | { | ||
64 | - TCGv pc = tcg_const_i32(ctx->base.pc_next); | ||
65 | + TCGv pc = tcg_constant_i32(ctx->base.pc_next); | ||
66 | push(pc); | ||
67 | } | ||
68 | |||
69 | @@ -XXX,XX +XXX,XX @@ static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a) | ||
70 | |||
71 | #define STRING(op) \ | ||
72 | do { \ | ||
73 | - TCGv size = tcg_const_i32(a->sz); \ | ||
74 | + TCGv size = tcg_constant_i32(a->sz); \ | ||
75 | gen_helper_##op(cpu_env, size); \ | ||
76 | } while (0) | ||
77 | |||
78 | @@ -XXX,XX +XXX,XX @@ static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a) | ||
79 | /* racw #imm */ | ||
80 | static bool trans_RACW(DisasContext *ctx, arg_RACW *a) | ||
81 | { | ||
82 | - TCGv imm = tcg_const_i32(a->imm + 1); | ||
83 | + TCGv imm = tcg_constant_i32(a->imm + 1); | ||
84 | gen_helper_racw(cpu_env, imm); | ||
85 | return true; | ||
86 | } | ||
87 | @@ -XXX,XX +XXX,XX @@ static bool trans_SAT(DisasContext *ctx, arg_SAT *a) | ||
88 | { | ||
89 | TCGv tmp, z; | ||
90 | tmp = tcg_temp_new(); | ||
91 | - z = tcg_const_i32(0); | ||
92 | + z = tcg_constant_i32(0); | ||
93 | /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */ | ||
94 | tcg_gen_sari_i32(tmp, cpu_psw_s, 31); | ||
95 | /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */ | ||
96 | @@ -XXX,XX +XXX,XX @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a) | ||
97 | static bool cat3(trans_, name, _ir)(DisasContext *ctx, \ | ||
98 | cat3(arg_, name, _ir) * a) \ | ||
99 | { \ | ||
100 | - TCGv imm = tcg_const_i32(li(ctx, 0)); \ | ||
101 | + TCGv imm = tcg_constant_i32(li(ctx, 0)); \ | ||
102 | gen_helper_##op(cpu_regs[a->rd], cpu_env, \ | ||
103 | cpu_regs[a->rd], imm); \ | ||
104 | return true; \ | ||
105 | @@ -XXX,XX +XXX,XX @@ FOP(FDIV, fdiv) | ||
106 | /* fcmp #imm, rd */ | ||
107 | static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a) | ||
108 | { | ||
109 | - TCGv imm = tcg_const_i32(li(ctx, 0)); | ||
110 | + TCGv imm = tcg_constant_i32(li(ctx, 0)); | ||
111 | gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm); | ||
112 | return true; | ||
113 | } | ||
114 | @@ -XXX,XX +XXX,XX @@ static inline void rx_bnotr(TCGv reg, TCGv mask) | ||
115 | { \ | ||
116 | TCGv mask, mem, addr; \ | ||
117 | mem = tcg_temp_new(); \ | ||
118 | - mask = tcg_const_i32(1 << a->imm); \ | ||
119 | + mask = tcg_constant_i32(1 << a->imm); \ | ||
120 | addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \ | ||
121 | cat3(rx_, op, m)(addr, mask); \ | ||
122 | return true; \ | ||
123 | @@ -XXX,XX +XXX,XX @@ static inline void rx_bnotr(TCGv reg, TCGv mask) | ||
124 | cat3(arg_, name, _ir) * a) \ | ||
125 | { \ | ||
126 | TCGv mask; \ | ||
127 | - mask = tcg_const_i32(1 << a->imm); \ | ||
128 | + mask = tcg_constant_i32(1 << a->imm); \ | ||
129 | cat3(rx_, op, r)(cpu_regs[a->rd], mask); \ | ||
130 | return true; \ | ||
131 | } \ | ||
132 | @@ -XXX,XX +XXX,XX @@ static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a) | ||
133 | { | ||
134 | TCGv imm; | ||
135 | |||
136 | - imm = tcg_const_i32(a->imm); | ||
137 | + imm = tcg_constant_i32(a->imm); | ||
138 | move_to_cr(ctx, imm, a->cr); | ||
139 | return true; | ||
140 | } | ||
141 | @@ -XXX,XX +XXX,XX @@ static bool trans_INT(DisasContext *ctx, arg_INT *a) | ||
142 | TCGv vec; | ||
143 | |||
144 | tcg_debug_assert(a->imm < 0x100); | ||
145 | - vec = tcg_const_i32(a->imm); | ||
146 | + vec = tcg_constant_i32(a->imm); | ||
147 | tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next); | ||
148 | gen_helper_rxint(cpu_env, vec); | ||
149 | ctx->base.is_jmp = DISAS_NORETURN; | ||
150 | -- | ||
151 | 2.34.1 | ||
152 | |||
New patch | |||
---|---|---|---|
1 | All uses are strictly read-only. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/s390x/tcg/translate.c | 20 ++++++++++---------- | ||
7 | 1 file changed, 10 insertions(+), 10 deletions(-) | ||
8 | |||
9 | diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/s390x/tcg/translate.c | ||
12 | +++ b/target/s390x/tcg/translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_xi(DisasContext *s, DisasOps *o) | ||
14 | |||
15 | static DisasJumpType op_zero(DisasContext *s, DisasOps *o) | ||
16 | { | ||
17 | - o->out = tcg_const_i64(0); | ||
18 | + o->out = tcg_constant_i64(0); | ||
19 | return DISAS_NEXT; | ||
20 | } | ||
21 | |||
22 | static DisasJumpType op_zero2(DisasContext *s, DisasOps *o) | ||
23 | { | ||
24 | - o->out = tcg_const_i64(0); | ||
25 | + o->out = tcg_constant_i64(0); | ||
26 | o->out2 = o->out; | ||
27 | return DISAS_NEXT; | ||
28 | } | ||
29 | @@ -XXX,XX +XXX,XX @@ static void in2_sh(DisasContext *s, DisasOps *o) | ||
30 | int d2 = get_field(s, d2); | ||
31 | |||
32 | if (b2 == 0) { | ||
33 | - o->in2 = tcg_const_i64(d2 & 0x3f); | ||
34 | + o->in2 = tcg_constant_i64(d2 & 0x3f); | ||
35 | } else { | ||
36 | o->in2 = get_address(s, 0, b2, d2); | ||
37 | tcg_gen_andi_i64(o->in2, o->in2, 0x3f); | ||
38 | @@ -XXX,XX +XXX,XX @@ static void in2_mri2_64(DisasContext *s, DisasOps *o) | ||
39 | |||
40 | static void in2_i2(DisasContext *s, DisasOps *o) | ||
41 | { | ||
42 | - o->in2 = tcg_const_i64(get_field(s, i2)); | ||
43 | + o->in2 = tcg_constant_i64(get_field(s, i2)); | ||
44 | } | ||
45 | #define SPEC_in2_i2 0 | ||
46 | |||
47 | static void in2_i2_8u(DisasContext *s, DisasOps *o) | ||
48 | { | ||
49 | - o->in2 = tcg_const_i64((uint8_t)get_field(s, i2)); | ||
50 | + o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2)); | ||
51 | } | ||
52 | #define SPEC_in2_i2_8u 0 | ||
53 | |||
54 | static void in2_i2_16u(DisasContext *s, DisasOps *o) | ||
55 | { | ||
56 | - o->in2 = tcg_const_i64((uint16_t)get_field(s, i2)); | ||
57 | + o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2)); | ||
58 | } | ||
59 | #define SPEC_in2_i2_16u 0 | ||
60 | |||
61 | static void in2_i2_32u(DisasContext *s, DisasOps *o) | ||
62 | { | ||
63 | - o->in2 = tcg_const_i64((uint32_t)get_field(s, i2)); | ||
64 | + o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2)); | ||
65 | } | ||
66 | #define SPEC_in2_i2_32u 0 | ||
67 | |||
68 | static void in2_i2_16u_shl(DisasContext *s, DisasOps *o) | ||
69 | { | ||
70 | uint64_t i2 = (uint16_t)get_field(s, i2); | ||
71 | - o->in2 = tcg_const_i64(i2 << s->insn->data); | ||
72 | + o->in2 = tcg_constant_i64(i2 << s->insn->data); | ||
73 | } | ||
74 | #define SPEC_in2_i2_16u_shl 0 | ||
75 | |||
76 | static void in2_i2_32u_shl(DisasContext *s, DisasOps *o) | ||
77 | { | ||
78 | uint64_t i2 = (uint32_t)get_field(s, i2); | ||
79 | - o->in2 = tcg_const_i64(i2 << s->insn->data); | ||
80 | + o->in2 = tcg_constant_i64(i2 << s->insn->data); | ||
81 | } | ||
82 | #define SPEC_in2_i2_32u_shl 0 | ||
83 | |||
84 | #ifndef CONFIG_USER_ONLY | ||
85 | static void in2_insn(DisasContext *s, DisasOps *o) | ||
86 | { | ||
87 | - o->in2 = tcg_const_i64(s->fields.raw_insn); | ||
88 | + o->in2 = tcg_constant_i64(s->fields.raw_insn); | ||
89 | } | ||
90 | #define SPEC_in2_insn 0 | ||
91 | #endif | ||
92 | -- | ||
93 | 2.34.1 | ||
94 | |||
New patch | |||
---|---|---|---|
1 | Since we're assigning to cpu_sr_t in the end, | ||
2 | use that as the intermediate temp as well. | ||
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/sh4/translate.c | 9 +++------ | ||
8 | 1 file changed, 3 insertions(+), 6 deletions(-) | ||
9 | |||
10 | diff --git a/target/sh4/translate.c b/target/sh4/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/sh4/translate.c | ||
13 | +++ b/target/sh4/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | ||
15 | tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16); | ||
16 | return; | ||
17 | case 0x401b: /* tas.b @Rn */ | ||
18 | - { | ||
19 | - TCGv val = tcg_const_i32(0x80); | ||
20 | - tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val, | ||
21 | - ctx->memidx, MO_UB); | ||
22 | - tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0); | ||
23 | - } | ||
24 | + tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8), | ||
25 | + tcg_constant_i32(0x80), ctx->memidx, MO_UB); | ||
26 | + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0); | ||
27 | return; | ||
28 | case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */ | ||
29 | CHECK_FPU_ENABLED | ||
30 | -- | ||
31 | 2.34.1 | ||
32 | |||
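Spelled out, the new tas.b sequence works because tcg_gen_atomic_fetch_or_i32() leaves the previous memory value in its destination: bit 7 is set in the byte at @Rn atomically, and T is computed from what the byte contained before the store, which is exactly the tas.b definition. This just restates the hunk above:

    /* old = *Rn; *Rn |= 0x80; T = (old == 0);  -- all done atomically */
    tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
                                tcg_constant_i32(0x80), ctx->memidx, MO_UB);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);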
1 | The value previously chosen overlaps GUSA_MASK. | 1 | All remaining uses are strictly read-only. |
---|---|---|---|
2 | 2 | ||
3 | Rename all DELAY_SLOT_* and GUSA_* defines to emphasize | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | that they are included in TB_FLAGs. Add aliases for the | ||
5 | FPSCR and SR bits that are included in TB_FLAGS, so that | ||
6 | we don't accidentally reassign those bits. | ||
7 | |||
8 | Fixes: 4da06fb3062 ("target/sh4: Implement prctl_unalign_sigbus") | ||
9 | Resolves: https://gitlab.com/qemu-project/qemu/-/issues/856 | ||
10 | Reviewed-by: Yoshinori Sato <ysato@users.sourceforge.jp> | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | --- | 5 | --- |
13 | target/sh4/cpu.h | 56 +++++++++++++------------ | 6 | target/sh4/translate.c | 26 +++++++++++++------------- |
14 | linux-user/sh4/signal.c | 6 +-- | 7 | 1 file changed, 13 insertions(+), 13 deletions(-) |
15 | target/sh4/cpu.c | 6 +-- | ||
16 | target/sh4/helper.c | 6 +-- | ||
17 | target/sh4/translate.c | 90 ++++++++++++++++++++++------------------- | ||
18 | 5 files changed, 88 insertions(+), 76 deletions(-) | ||
19 | 8 | ||
20 | diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/target/sh4/cpu.h | ||
23 | +++ b/target/sh4/cpu.h | ||
24 | @@ -XXX,XX +XXX,XX @@ | ||
25 | #define FPSCR_RM_NEAREST (0 << 0) | ||
26 | #define FPSCR_RM_ZERO (1 << 0) | ||
27 | |||
28 | -#define DELAY_SLOT_MASK 0x7 | ||
29 | -#define DELAY_SLOT (1 << 0) | ||
30 | -#define DELAY_SLOT_CONDITIONAL (1 << 1) | ||
31 | -#define DELAY_SLOT_RTE (1 << 2) | ||
32 | +#define TB_FLAG_DELAY_SLOT (1 << 0) | ||
33 | +#define TB_FLAG_DELAY_SLOT_COND (1 << 1) | ||
34 | +#define TB_FLAG_DELAY_SLOT_RTE (1 << 2) | ||
35 | +#define TB_FLAG_PENDING_MOVCA (1 << 3) | ||
36 | +#define TB_FLAG_GUSA_SHIFT 4 /* [11:4] */ | ||
37 | +#define TB_FLAG_GUSA_EXCLUSIVE (1 << 12) | ||
38 | +#define TB_FLAG_UNALIGN (1 << 13) | ||
39 | +#define TB_FLAG_SR_FD (1 << SR_FD) /* 15 */ | ||
40 | +#define TB_FLAG_FPSCR_PR FPSCR_PR /* 19 */ | ||
41 | +#define TB_FLAG_FPSCR_SZ FPSCR_SZ /* 20 */ | ||
42 | +#define TB_FLAG_FPSCR_FR FPSCR_FR /* 21 */ | ||
43 | +#define TB_FLAG_SR_RB (1 << SR_RB) /* 29 */ | ||
44 | +#define TB_FLAG_SR_MD (1 << SR_MD) /* 30 */ | ||
45 | |||
46 | -#define TB_FLAG_PENDING_MOVCA (1 << 3) | ||
47 | -#define TB_FLAG_UNALIGN (1 << 4) | ||
48 | - | ||
49 | -#define GUSA_SHIFT 4 | ||
50 | -#ifdef CONFIG_USER_ONLY | ||
51 | -#define GUSA_EXCLUSIVE (1 << 12) | ||
52 | -#define GUSA_MASK ((0xff << GUSA_SHIFT) | GUSA_EXCLUSIVE) | ||
53 | -#else | ||
54 | -/* Provide dummy versions of the above to allow tests against tbflags | ||
55 | - to be elided while avoiding ifdefs. */ | ||
56 | -#define GUSA_EXCLUSIVE 0 | ||
57 | -#define GUSA_MASK 0 | ||
58 | -#endif | ||
59 | - | ||
60 | -#define TB_FLAG_ENVFLAGS_MASK (DELAY_SLOT_MASK | GUSA_MASK) | ||
61 | +#define TB_FLAG_DELAY_SLOT_MASK (TB_FLAG_DELAY_SLOT | \ | ||
62 | + TB_FLAG_DELAY_SLOT_COND | \ | ||
63 | + TB_FLAG_DELAY_SLOT_RTE) | ||
64 | +#define TB_FLAG_GUSA_MASK ((0xff << TB_FLAG_GUSA_SHIFT) | \ | ||
65 | + TB_FLAG_GUSA_EXCLUSIVE) | ||
66 | +#define TB_FLAG_FPSCR_MASK (TB_FLAG_FPSCR_PR | \ | ||
67 | + TB_FLAG_FPSCR_SZ | \ | ||
68 | + TB_FLAG_FPSCR_FR) | ||
69 | +#define TB_FLAG_SR_MASK (TB_FLAG_SR_FD | \ | ||
70 | + TB_FLAG_SR_RB | \ | ||
71 | + TB_FLAG_SR_MD) | ||
72 | +#define TB_FLAG_ENVFLAGS_MASK (TB_FLAG_DELAY_SLOT_MASK | \ | ||
73 | + TB_FLAG_GUSA_MASK) | ||
74 | |||
75 | typedef struct tlb_t { | ||
76 | uint32_t vpn; /* virtual page number */ | ||
77 | @@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch) | ||
78 | { | ||
79 | /* The instruction in a RTE delay slot is fetched in privileged | ||
80 | mode, but executed in user mode. */ | ||
81 | - if (ifetch && (env->flags & DELAY_SLOT_RTE)) { | ||
82 | + if (ifetch && (env->flags & TB_FLAG_DELAY_SLOT_RTE)) { | ||
83 | return 0; | ||
84 | } else { | ||
85 | return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0; | ||
86 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc, | ||
87 | { | ||
88 | *pc = env->pc; | ||
89 | /* For a gUSA region, notice the end of the region. */ | ||
90 | - *cs_base = env->flags & GUSA_MASK ? env->gregs[0] : 0; | ||
91 | - *flags = env->flags /* TB_FLAG_ENVFLAGS_MASK: bits 0-2, 4-12 */ | ||
92 | - | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */ | ||
93 | - | (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */ | ||
94 | - | (env->sr & (1u << SR_FD)) /* Bit 15 */ | ||
95 | + *cs_base = env->flags & TB_FLAG_GUSA_MASK ? env->gregs[0] : 0; | ||
96 | + *flags = env->flags | ||
97 | + | (env->fpscr & TB_FLAG_FPSCR_MASK) | ||
98 | + | (env->sr & TB_FLAG_SR_MASK) | ||
99 | | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */ | ||
100 | #ifdef CONFIG_USER_ONLY | ||
101 | *flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus; | ||
102 | diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c | ||
103 | index XXXXXXX..XXXXXXX 100644 | ||
104 | --- a/linux-user/sh4/signal.c | ||
105 | +++ b/linux-user/sh4/signal.c | ||
106 | @@ -XXX,XX +XXX,XX @@ static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) | ||
107 | __get_user(regs->fpul, &sc->sc_fpul); | ||
108 | |||
109 | regs->tra = -1; /* disable syscall checks */ | ||
110 | - regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); | ||
111 | + regs->flags = 0; | ||
112 | } | ||
113 | |||
114 | void setup_frame(int sig, struct target_sigaction *ka, | ||
115 | @@ -XXX,XX +XXX,XX @@ void setup_frame(int sig, struct target_sigaction *ka, | ||
116 | regs->gregs[5] = 0; | ||
117 | regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); | ||
118 | regs->pc = (unsigned long) ka->_sa_handler; | ||
119 | - regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); | ||
120 | + regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK); | ||
121 | |||
122 | unlock_user_struct(frame, frame_addr, 1); | ||
123 | return; | ||
124 | @@ -XXX,XX +XXX,XX @@ void setup_rt_frame(int sig, struct target_sigaction *ka, | ||
125 | regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); | ||
126 | regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); | ||
127 | regs->pc = (unsigned long) ka->_sa_handler; | ||
128 | - regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); | ||
129 | + regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK); | ||
130 | |||
131 | unlock_user_struct(frame, frame_addr, 1); | ||
132 | return; | ||
133 | diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c | ||
134 | index XXXXXXX..XXXXXXX 100644 | ||
135 | --- a/target/sh4/cpu.c | ||
136 | +++ b/target/sh4/cpu.c | ||
137 | @@ -XXX,XX +XXX,XX @@ static void superh_cpu_synchronize_from_tb(CPUState *cs, | ||
138 | SuperHCPU *cpu = SUPERH_CPU(cs); | ||
139 | |||
140 | cpu->env.pc = tb_pc(tb); | ||
141 | - cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK; | ||
142 | + cpu->env.flags = tb->flags; | ||
143 | } | ||
144 | |||
145 | #ifndef CONFIG_USER_ONLY | ||
146 | @@ -XXX,XX +XXX,XX @@ static bool superh_io_recompile_replay_branch(CPUState *cs, | ||
147 | SuperHCPU *cpu = SUPERH_CPU(cs); | ||
148 | CPUSH4State *env = &cpu->env; | ||
149 | |||
150 | - if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 | ||
151 | + if ((env->flags & (TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND)) | ||
152 | && env->pc != tb_pc(tb)) { | ||
153 | env->pc -= 2; | ||
154 | - env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); | ||
155 | + env->flags &= ~(TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND); | ||
156 | return true; | ||
157 | } | ||
158 | return false; | ||
159 | diff --git a/target/sh4/helper.c b/target/sh4/helper.c | ||
160 | index XXXXXXX..XXXXXXX 100644 | ||
161 | --- a/target/sh4/helper.c | ||
162 | +++ b/target/sh4/helper.c | ||
163 | @@ -XXX,XX +XXX,XX @@ void superh_cpu_do_interrupt(CPUState *cs) | ||
164 | env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB); | ||
165 | env->lock_addr = -1; | ||
166 | |||
167 | - if (env->flags & DELAY_SLOT_MASK) { | ||
168 | + if (env->flags & TB_FLAG_DELAY_SLOT_MASK) { | ||
169 | /* Branch instruction should be executed again before delay slot. */ | ||
170 | env->spc -= 2; | ||
171 | /* Clear flags for exception/interrupt routine. */ | ||
172 | - env->flags &= ~DELAY_SLOT_MASK; | ||
173 | + env->flags &= ~TB_FLAG_DELAY_SLOT_MASK; | ||
174 | } | ||
175 | |||
176 | if (do_exp) { | ||
177 | @@ -XXX,XX +XXX,XX @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request) | ||
178 | CPUSH4State *env = &cpu->env; | ||
179 | |||
180 | /* Delay slots are indivisible, ignore interrupts */ | ||
181 | - if (env->flags & DELAY_SLOT_MASK) { | ||
182 | + if (env->flags & TB_FLAG_DELAY_SLOT_MASK) { | ||
183 | return false; | ||
184 | } else { | ||
185 | superh_cpu_do_interrupt(cs); | ||
186 | diff --git a/target/sh4/translate.c b/target/sh4/translate.c | 9 | diff --git a/target/sh4/translate.c b/target/sh4/translate.c |
187 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
188 | --- a/target/sh4/translate.c | 11 | --- a/target/sh4/translate.c |
189 | +++ b/target/sh4/translate.c | 12 | +++ b/target/sh4/translate.c |
190 | @@ -XXX,XX +XXX,XX @@ void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags) | ||
191 | i, env->gregs[i], i + 1, env->gregs[i + 1], | ||
192 | i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]); | ||
193 | } | ||
194 | - if (env->flags & DELAY_SLOT) { | ||
195 | + if (env->flags & TB_FLAG_DELAY_SLOT) { | ||
196 | qemu_printf("in delay slot (delayed_pc=0x%08x)\n", | ||
197 | env->delayed_pc); | ||
198 | - } else if (env->flags & DELAY_SLOT_CONDITIONAL) { | ||
199 | + } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) { | ||
200 | qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n", | ||
201 | env->delayed_pc); | ||
202 | - } else if (env->flags & DELAY_SLOT_RTE) { | ||
203 | + } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) { | ||
204 | qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n", | ||
205 | env->delayed_pc); | ||
206 | } | ||
207 | @@ -XXX,XX +XXX,XX @@ static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc) | ||
208 | |||
209 | static inline bool use_exit_tb(DisasContext *ctx) | ||
210 | { | ||
211 | - return (ctx->tbflags & GUSA_EXCLUSIVE) != 0; | ||
212 | + return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0; | ||
213 | } | ||
214 | |||
215 | static bool use_goto_tb(DisasContext *ctx, target_ulong dest) | ||
216 | @@ -XXX,XX +XXX,XX @@ static void gen_conditional_jump(DisasContext *ctx, target_ulong dest, | ||
217 | TCGLabel *l1 = gen_new_label(); | ||
218 | TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE; | ||
219 | |||
220 | - if (ctx->tbflags & GUSA_EXCLUSIVE) { | ||
221 | + if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) { | ||
222 | /* When in an exclusive region, we must continue to the end. | ||
223 | Therefore, exit the region on a taken branch, but otherwise | ||
224 | fall through to the next instruction. */ | ||
225 | tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1); | ||
226 | - tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK); | ||
227 | + tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK); | ||
228 | /* Note that this won't actually use a goto_tb opcode because we | ||
229 | disallow it in use_goto_tb, but it handles exit + singlestep. */ | ||
230 | gen_goto_tb(ctx, 0, dest); | ||
231 | @@ -XXX,XX +XXX,XX @@ static void gen_delayed_conditional_jump(DisasContext * ctx) | ||
232 | tcg_gen_mov_i32(ds, cpu_delayed_cond); | ||
233 | tcg_gen_discard_i32(cpu_delayed_cond); | ||
234 | |||
235 | - if (ctx->tbflags & GUSA_EXCLUSIVE) { | ||
236 | + if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) { | ||
237 | /* When in an exclusive region, we must continue to the end. | ||
238 | Therefore, exit the region on a taken branch, but otherwise | ||
239 | fall through to the next instruction. */ | ||
240 | tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1); | ||
241 | |||
242 | /* Leave the gUSA region. */ | ||
243 | - tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK); | ||
244 | + tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK); | ||
245 | gen_jump(ctx); | ||
246 | |||
247 | gen_set_label(l1); | ||
248 | @@ -XXX,XX +XXX,XX @@ static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) | ||
249 | #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe)) | ||
250 | |||
251 | #define CHECK_NOT_DELAY_SLOT \ | ||
252 | - if (ctx->envflags & DELAY_SLOT_MASK) { \ | ||
253 | - goto do_illegal_slot; \ | ||
254 | + if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { \ | ||
255 | + goto do_illegal_slot; \ | ||
256 | } | ||
257 | |||
258 | #define CHECK_PRIVILEGED \ | ||
259 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | 13 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) |
260 | case 0x000b: /* rts */ | ||
261 | CHECK_NOT_DELAY_SLOT | ||
262 | tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr); | ||
263 | - ctx->envflags |= DELAY_SLOT; | ||
264 | + ctx->envflags |= TB_FLAG_DELAY_SLOT; | ||
265 | ctx->delayed_pc = (uint32_t) - 1; | ||
266 | return; | 14 | return; |
267 | case 0x0028: /* clrmac */ | 15 | case 0x9000: /* mov.w @(disp,PC),Rn */ |
268 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | 16 | { |
269 | CHECK_NOT_DELAY_SLOT | 17 | - TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2); |
270 | gen_write_sr(cpu_ssr); | 18 | + TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2); |
271 | tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc); | 19 | tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW); |
272 | - ctx->envflags |= DELAY_SLOT_RTE; | 20 | } |
273 | + ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE; | 21 | return; |
274 | ctx->delayed_pc = (uint32_t) - 1; | 22 | case 0xd000: /* mov.l @(disp,PC),Rn */ |
275 | ctx->base.is_jmp = DISAS_STOP; | 23 | { |
24 | - TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3); | ||
25 | + TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3); | ||
26 | tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL); | ||
27 | } | ||
276 | return; | 28 | return; |
277 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | 29 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) |
30 | case 0x300e: /* addc Rm,Rn */ | ||
31 | { | ||
32 | TCGv t0, t1; | ||
33 | - t0 = tcg_const_tl(0); | ||
34 | + t0 = tcg_constant_tl(0); | ||
35 | t1 = tcg_temp_new(); | ||
36 | tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0); | ||
37 | tcg_gen_add2_i32(REG(B11_8), cpu_sr_t, | ||
38 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | ||
39 | TCGv t0 = tcg_temp_new(); | ||
40 | TCGv t1 = tcg_temp_new(); | ||
41 | TCGv t2 = tcg_temp_new(); | ||
42 | - TCGv zero = tcg_const_i32(0); | ||
43 | + TCGv zero = tcg_constant_i32(0); | ||
44 | |||
45 | /* shift left arg1, saving the bit being pushed out and inserting | ||
46 | T on the right */ | ||
47 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | ||
278 | return; | 48 | return; |
279 | case 0xe000: /* mov #imm,Rn */ | 49 | case 0x600a: /* negc Rm,Rn */ |
280 | #ifdef CONFIG_USER_ONLY | 50 | { |
281 | - /* Detect the start of a gUSA region. If so, update envflags | 51 | - TCGv t0 = tcg_const_i32(0); |
282 | - and end the TB. This will allow us to see the end of the | 52 | + TCGv t0 = tcg_constant_i32(0); |
283 | - region (stored in R0) in the next TB. */ | 53 | tcg_gen_add2_i32(REG(B11_8), cpu_sr_t, |
284 | + /* | 54 | REG(B7_4), t0, cpu_sr_t, t0); |
285 | + * Detect the start of a gUSA region (mov #-n, r15). | 55 | tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t, |
286 | + * If so, update envflags and end the TB. This will allow us | ||
287 | + * to see the end of the region (stored in R0) in the next TB. | ||
288 | + */ | ||
289 | if (B11_8 == 15 && B7_0s < 0 && | ||
290 | (tb_cflags(ctx->base.tb) & CF_PARALLEL)) { | ||
291 | - ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s); | ||
292 | + ctx->envflags = | ||
293 | + deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s); | ||
294 | ctx->base.is_jmp = DISAS_STOP; | ||
295 | } | ||
296 | #endif | ||
297 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | 56 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) |
298 | case 0xa000: /* bra disp */ | 57 | case 0x300a: /* subc Rm,Rn */ |
299 | CHECK_NOT_DELAY_SLOT | 58 | { |
300 | ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2; | 59 | TCGv t0, t1; |
301 | - ctx->envflags |= DELAY_SLOT; | 60 | - t0 = tcg_const_tl(0); |
302 | + ctx->envflags |= TB_FLAG_DELAY_SLOT; | 61 | + t0 = tcg_constant_tl(0); |
303 | return; | 62 | t1 = tcg_temp_new(); |
304 | case 0xb000: /* bsr disp */ | 63 | tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0); |
305 | CHECK_NOT_DELAY_SLOT | 64 | tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t, |
306 | tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4); | ||
307 | ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2; | ||
308 | - ctx->envflags |= DELAY_SLOT; | ||
309 | + ctx->envflags |= TB_FLAG_DELAY_SLOT; | ||
310 | return; | ||
311 | } | ||
312 | |||
313 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | 65 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) |
314 | CHECK_NOT_DELAY_SLOT | 66 | TCGv imm; |
315 | tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1); | 67 | CHECK_NOT_DELAY_SLOT |
316 | ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2; | 68 | gen_save_cpu_state(ctx, true); |
317 | - ctx->envflags |= DELAY_SLOT_CONDITIONAL; | 69 | - imm = tcg_const_i32(B7_0); |
318 | + ctx->envflags |= TB_FLAG_DELAY_SLOT_COND; | 70 | + imm = tcg_constant_i32(B7_0); |
319 | return; | 71 | gen_helper_trapa(cpu_env, imm); |
320 | case 0x8900: /* bt label */ | 72 | ctx->base.is_jmp = DISAS_NORETURN; |
321 | CHECK_NOT_DELAY_SLOT | 73 | } |
322 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | 74 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) |
323 | CHECK_NOT_DELAY_SLOT | 75 | CHECK_FPU_ENABLED |
324 | tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t); | 76 | CHECK_FPSCR_PR_1 |
325 | ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2; | 77 | { |
326 | - ctx->envflags |= DELAY_SLOT_CONDITIONAL; | 78 | - TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3); |
327 | + ctx->envflags |= TB_FLAG_DELAY_SLOT_COND; | 79 | - TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3); |
328 | return; | 80 | + TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3); |
329 | case 0x8800: /* cmp/eq #imm,R0 */ | 81 | + TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3); |
330 | tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s); | 82 | gen_helper_fipr(cpu_env, m, n); |
331 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | ||
332 | case 0x0023: /* braf Rn */ | ||
333 | CHECK_NOT_DELAY_SLOT | ||
334 | tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4); | ||
335 | - ctx->envflags |= DELAY_SLOT; | ||
336 | + ctx->envflags |= TB_FLAG_DELAY_SLOT; | ||
337 | ctx->delayed_pc = (uint32_t) - 1; | ||
338 | return; | ||
339 | case 0x0003: /* bsrf Rn */ | ||
340 | CHECK_NOT_DELAY_SLOT | ||
341 | tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4); | ||
342 | tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr); | ||
343 | - ctx->envflags |= DELAY_SLOT; | ||
344 | + ctx->envflags |= TB_FLAG_DELAY_SLOT; | ||
345 | ctx->delayed_pc = (uint32_t) - 1; | ||
346 | return; | ||
347 | case 0x4015: /* cmp/pl Rn */ | ||
348 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | ||
349 | case 0x402b: /* jmp @Rn */ | ||
350 | CHECK_NOT_DELAY_SLOT | ||
351 | tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); | ||
352 | - ctx->envflags |= DELAY_SLOT; | ||
353 | + ctx->envflags |= TB_FLAG_DELAY_SLOT; | ||
354 | ctx->delayed_pc = (uint32_t) - 1; | ||
355 | return; | ||
356 | case 0x400b: /* jsr @Rn */ | ||
357 | CHECK_NOT_DELAY_SLOT | ||
358 | tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4); | ||
359 | tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); | ||
360 | - ctx->envflags |= DELAY_SLOT; | ||
361 | + ctx->envflags |= TB_FLAG_DELAY_SLOT; | ||
362 | ctx->delayed_pc = (uint32_t) - 1; | ||
363 | return; | ||
364 | case 0x400e: /* ldc Rm,SR */ | ||
365 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | ||
366 | fflush(stderr); | ||
367 | #endif | ||
368 | do_illegal: | ||
369 | - if (ctx->envflags & DELAY_SLOT_MASK) { | ||
370 | + if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { | ||
371 | do_illegal_slot: | ||
372 | gen_save_cpu_state(ctx, true); | ||
373 | gen_helper_raise_slot_illegal_instruction(cpu_env); | ||
374 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) | ||
375 | |||
376 | do_fpu_disabled: | ||
377 | gen_save_cpu_state(ctx, true); | ||
378 | - if (ctx->envflags & DELAY_SLOT_MASK) { | ||
379 | + if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { | ||
380 | gen_helper_raise_slot_fpu_disable(cpu_env); | ||
381 | } else { | ||
382 | gen_helper_raise_fpu_disable(cpu_env); | ||
383 | @@ -XXX,XX +XXX,XX @@ static void decode_opc(DisasContext * ctx) | ||
384 | |||
385 | _decode_opc(ctx); | ||
386 | |||
387 | - if (old_flags & DELAY_SLOT_MASK) { | ||
388 | + if (old_flags & TB_FLAG_DELAY_SLOT_MASK) { | ||
389 | /* go out of the delay slot */ | ||
390 | - ctx->envflags &= ~DELAY_SLOT_MASK; | ||
391 | + ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK; | ||
392 | |||
393 | /* When in an exclusive region, we must continue to the end | ||
394 | for conditional branches. */ | ||
395 | - if (ctx->tbflags & GUSA_EXCLUSIVE | ||
396 | - && old_flags & DELAY_SLOT_CONDITIONAL) { | ||
397 | + if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE | ||
398 | + && old_flags & TB_FLAG_DELAY_SLOT_COND) { | ||
399 | gen_delayed_conditional_jump(ctx); | ||
400 | return; | 83 | return; |
401 | } | 84 | } |
402 | /* Otherwise this is probably an invalid gUSA region. | 85 | @@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx) |
403 | Drop the GUSA bits so the next TB doesn't see them. */ | 86 | if ((ctx->opcode & 0x0300) != 0x0100) { |
404 | - ctx->envflags &= ~GUSA_MASK; | 87 | goto do_illegal; |
405 | + ctx->envflags &= ~TB_FLAG_GUSA_MASK; | 88 | } |
406 | 89 | - TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3); | |
407 | tcg_gen_movi_i32(cpu_flags, ctx->envflags); | 90 | + TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3); |
408 | - if (old_flags & DELAY_SLOT_CONDITIONAL) { | 91 | gen_helper_ftrv(cpu_env, n); |
409 | + if (old_flags & TB_FLAG_DELAY_SLOT_COND) { | ||
410 | gen_delayed_conditional_jump(ctx); | ||
411 | } else { | ||
412 | gen_jump(ctx); | ||
413 | @@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) | ||
414 | } | ||
415 | |||
416 | /* The entire region has been translated. */ | ||
417 | - ctx->envflags &= ~GUSA_MASK; | ||
418 | + ctx->envflags &= ~TB_FLAG_GUSA_MASK; | ||
419 | ctx->base.pc_next = pc_end; | ||
420 | ctx->base.num_insns += max_insns - 1; | ||
421 | return; | ||
422 | @@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) | ||
423 | |||
424 | /* Restart with the EXCLUSIVE bit set, within a TB run via | ||
425 | cpu_exec_step_atomic holding the exclusive lock. */ | ||
426 | - ctx->envflags |= GUSA_EXCLUSIVE; | ||
427 | + ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE; | ||
428 | gen_save_cpu_state(ctx, false); | ||
429 | gen_helper_exclusive(cpu_env); | ||
430 | ctx->base.is_jmp = DISAS_NORETURN; | ||
431 | @@ -XXX,XX +XXX,XX @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | ||
432 | (tbflags & (1 << SR_RB))) * 0x10; | ||
433 | ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0; | ||
434 | |||
435 | - if (tbflags & GUSA_MASK) { | ||
436 | +#ifdef CONFIG_USER_ONLY | ||
437 | + if (tbflags & TB_FLAG_GUSA_MASK) { | ||
438 | + /* In gUSA exclusive region. */ | ||
439 | uint32_t pc = ctx->base.pc_next; | ||
440 | uint32_t pc_end = ctx->base.tb->cs_base; | ||
441 | - int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8); | ||
442 | + int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8); | ||
443 | int max_insns = (pc_end - pc) / 2; | ||
444 | |||
445 | if (pc != pc_end + backup || max_insns < 2) { | ||
446 | /* This is a malformed gUSA region. Don't do anything special, | ||
447 | since the interpreter is likely to get confused. */ | ||
448 | - ctx->envflags &= ~GUSA_MASK; | ||
449 | - } else if (tbflags & GUSA_EXCLUSIVE) { | ||
450 | + ctx->envflags &= ~TB_FLAG_GUSA_MASK; | ||
451 | + } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) { | ||
452 | /* Regardless of single-stepping or the end of the page, | ||
453 | we must complete execution of the gUSA region while | ||
454 | holding the exclusive lock. */ | ||
455 | @@ -XXX,XX +XXX,XX @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | ||
456 | return; | 92 | return; |
457 | } | 93 | } |
458 | } | 94 | @@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) |
459 | +#endif | 95 | } |
460 | 96 | op_dst = B11_8; | |
461 | /* Since the ISA is fixed-width, we can bound by the number | 97 | op_opc = INDEX_op_xor_i32; |
462 | of instructions remaining on the page. */ | 98 | - op_arg = tcg_const_i32(-1); |
463 | @@ -XXX,XX +XXX,XX @@ static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) | 99 | + op_arg = tcg_constant_i32(-1); |
464 | DisasContext *ctx = container_of(dcbase, DisasContext, base); | 100 | break; |
465 | 101 | ||
466 | #ifdef CONFIG_USER_ONLY | 102 | case 0x7000 ... 0x700f: /* add #imm,Rn */ |
467 | - if (unlikely(ctx->envflags & GUSA_MASK) | 103 | @@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) |
468 | - && !(ctx->envflags & GUSA_EXCLUSIVE)) { | 104 | goto fail; |
469 | + if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK) | 105 | } |
470 | + && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) { | 106 | op_opc = INDEX_op_add_i32; |
471 | /* We're in an gUSA region, and we have not already fallen | 107 | - op_arg = tcg_const_i32(B7_0s); |
472 | back on using an exclusive region. Attempt to parse the | 108 | + op_arg = tcg_constant_i32(B7_0s); |
473 | region into a single supported atomic operation. Failure | 109 | break; |
474 | @@ -XXX,XX +XXX,XX @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) | 110 | |
475 | { | 111 | case 0x3000: /* cmp/eq Rm,Rn */ |
476 | DisasContext *ctx = container_of(dcbase, DisasContext, base); | 112 | @@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) |
477 | 113 | goto fail; | |
478 | - if (ctx->tbflags & GUSA_EXCLUSIVE) { | 114 | } |
479 | + if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) { | 115 | op_opc = INDEX_op_setcond_i32; |
480 | /* Ending the region of exclusivity. Clear the bits. */ | 116 | - op_arg = tcg_const_i32(0); |
481 | - ctx->envflags &= ~GUSA_MASK; | 117 | + op_arg = tcg_constant_i32(0); |
482 | + ctx->envflags &= ~TB_FLAG_GUSA_MASK; | 118 | |
483 | } | 119 | NEXT_INSN; |
484 | 120 | if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */ | |
485 | switch (ctx->base.is_jmp) { | ||
486 | -- | 121 | -- |
487 | 2.34.1 | 122 | 2.34.1 |
123 | |||
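For the sh4 TB_FLAG patch on the left-hand side of the comparison above, the bug and the fix are clearer as a bit map: the old TB_FLAG_UNALIGN sat at bit 4, inside the byte reserved for the gUSA backup value, so the two overlapped. The renamed layout, copied from the cpu.h hunk, keeps every field disjoint:

    /* bits  0-2   TB_FLAG_DELAY_SLOT / _COND / _RTE
     * bit   3     TB_FLAG_PENDING_MOVCA
     * bits  4-11  gUSA backup byte (TB_FLAG_GUSA_SHIFT)
     * bit   12    TB_FLAG_GUSA_EXCLUSIVE
     * bit   13    TB_FLAG_UNALIGN            (moved clear of the gUSA field)
     * bit   15    TB_FLAG_SR_FD
     * bits 19-21  TB_FLAG_FPSCR_PR / _SZ / _FR
     * bits 29-30  TB_FLAG_SR_RB / TB_FLAG_SR_MD
     */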
New patch | |||
---|---|---|---|
1 | Push tcg_constant_tl into the shift argument directly. | ||
2 | Since t1 no longer exists as a temp, replace with lo1, | ||
3 | whose last use was just above. | ||
1 | 4 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/sparc/translate.c | 14 ++++++-------- | ||
9 | 1 file changed, 6 insertions(+), 8 deletions(-) | ||
10 | |||
11 | diff --git a/target/sparc/translate.c b/target/sparc/translate.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/sparc/translate.c | ||
14 | +++ b/target/sparc/translate.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env) | ||
16 | static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, | ||
17 | int width, bool cc, bool left) | ||
18 | { | ||
19 | - TCGv lo1, lo2, t1, t2; | ||
20 | + TCGv lo1, lo2; | ||
21 | uint64_t amask, tabl, tabr; | ||
22 | int shift, imask, omask; | ||
23 | |||
24 | @@ -XXX,XX +XXX,XX @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, | ||
25 | tcg_gen_shli_tl(lo1, lo1, shift); | ||
26 | tcg_gen_shli_tl(lo2, lo2, shift); | ||
27 | |||
28 | - t1 = tcg_const_tl(tabl); | ||
29 | - t2 = tcg_const_tl(tabr); | ||
30 | - tcg_gen_shr_tl(lo1, t1, lo1); | ||
31 | - tcg_gen_shr_tl(lo2, t2, lo2); | ||
32 | + tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1); | ||
33 | + tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2); | ||
34 | tcg_gen_andi_tl(dst, lo1, omask); | ||
35 | tcg_gen_andi_tl(lo2, lo2, omask); | ||
36 | |||
37 | @@ -XXX,XX +XXX,XX @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, | ||
38 | lo2 |= -(s1 == s2) | ||
39 | dst &= lo2 | ||
40 | */ | ||
41 | - tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2); | ||
42 | - tcg_gen_neg_tl(t1, t1); | ||
43 | - tcg_gen_or_tl(lo2, lo2, t1); | ||
44 | + tcg_gen_setcond_tl(TCG_COND_EQ, lo1, s1, s2); | ||
45 | + tcg_gen_neg_tl(lo1, lo1); | ||
46 | + tcg_gen_or_tl(lo2, lo2, lo1); | ||
47 | tcg_gen_and_tl(dst, dst, lo2); | ||
48 | } | ||
49 | |||
50 | -- | ||
51 | 2.34.1 | ||
52 | |||
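One detail worth spelling out in the gen_edge() change above: setcond produces 0 or 1, so negating it yields an all-zeros or all-ones mask, which is how the sequence implements the `lo2 |= -(s1 == s2); dst &= lo2` pseudo-code from the comment while reusing lo1 in place of the removed t1:

    tcg_gen_setcond_tl(TCG_COND_EQ, lo1, s1, s2);   /* lo1 = (s1 == s2) ? 1 : 0 */
    tcg_gen_neg_tl(lo1, lo1);                       /* 0 or an all-ones mask    */
    tcg_gen_or_tl(lo2, lo2, lo1);
    tcg_gen_and_tl(dst, dst, lo2);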
1 | Bool is a more appropriate type for the alloc parameter. | 1 | As required, allocate the temp separately.
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 5 | --- |
7 | accel/tcg/translate-all.c | 14 +++++++------- | 6 | target/tricore/translate.c | 268 +++++++++++++++++++------------------ |
8 | 1 file changed, 7 insertions(+), 7 deletions(-) | 7 | 1 file changed, 140 insertions(+), 128 deletions(-) |
9 | 8 | ||
10 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | 9 | diff --git a/target/tricore/translate.c b/target/tricore/translate.c |
11 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/accel/tcg/translate-all.c | 11 | --- a/target/tricore/translate.c |
13 | +++ b/accel/tcg/translate-all.c | 12 | +++ b/target/tricore/translate.c |
14 | @@ -XXX,XX +XXX,XX @@ void page_init(void) | 13 | @@ -XXX,XX +XXX,XX @@ static inline void |
15 | #endif | 14 | gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, |
15 | TCGv r3, uint32_t n, uint32_t mode) | ||
16 | { | ||
17 | - TCGv temp = tcg_const_i32(n); | ||
18 | + TCGv t_n = tcg_constant_i32(n); | ||
19 | + TCGv temp = tcg_temp_new(); | ||
20 | TCGv temp2 = tcg_temp_new(); | ||
21 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
22 | switch (mode) { | ||
23 | case MODE_LL: | ||
24 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
25 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
26 | break; | ||
27 | case MODE_LU: | ||
28 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
29 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
30 | break; | ||
31 | case MODE_UL: | ||
32 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
33 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
34 | break; | ||
35 | case MODE_UU: | ||
36 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
37 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
38 | break; | ||
39 | } | ||
40 | tcg_gen_extr_i64_i32(temp, temp2, temp64); | ||
41 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
42 | gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
43 | TCGv r3, uint32_t n, uint32_t mode) | ||
44 | { | ||
45 | - TCGv temp = tcg_const_i32(n); | ||
46 | + TCGv t_n = tcg_constant_i32(n); | ||
47 | + TCGv temp = tcg_temp_new(); | ||
48 | TCGv temp2 = tcg_temp_new(); | ||
49 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
50 | switch (mode) { | ||
51 | case MODE_LL: | ||
52 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
53 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
54 | break; | ||
55 | case MODE_LU: | ||
56 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
57 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
58 | break; | ||
59 | case MODE_UL: | ||
60 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
61 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
62 | break; | ||
63 | case MODE_UU: | ||
64 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
65 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
66 | break; | ||
67 | } | ||
68 | tcg_gen_extr_i64_i32(temp, temp2, temp64); | ||
69 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
70 | gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
71 | TCGv r3, uint32_t n, uint32_t mode) | ||
72 | { | ||
73 | - TCGv temp = tcg_const_i32(n); | ||
74 | + TCGv t_n = tcg_constant_i32(n); | ||
75 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
76 | TCGv_i64 temp64_2 = tcg_temp_new_i64(); | ||
77 | TCGv_i64 temp64_3 = tcg_temp_new_i64(); | ||
78 | switch (mode) { | ||
79 | case MODE_LL: | ||
80 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
81 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
82 | break; | ||
83 | case MODE_LU: | ||
84 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
85 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
86 | break; | ||
87 | case MODE_UL: | ||
88 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
89 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
90 | break; | ||
91 | case MODE_UU: | ||
92 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
93 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
94 | break; | ||
95 | } | ||
96 | tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high); | ||
97 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
98 | gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
99 | TCGv r3, uint32_t n, uint32_t mode) | ||
100 | { | ||
101 | - TCGv temp = tcg_const_i32(n); | ||
102 | + TCGv t_n = tcg_constant_i32(n); | ||
103 | + TCGv temp = tcg_temp_new(); | ||
104 | TCGv temp2 = tcg_temp_new(); | ||
105 | TCGv temp3 = tcg_temp_new(); | ||
106 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
107 | |||
108 | switch (mode) { | ||
109 | case MODE_LL: | ||
110 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
111 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
112 | break; | ||
113 | case MODE_LU: | ||
114 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
115 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
116 | break; | ||
117 | case MODE_UL: | ||
118 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
119 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
120 | break; | ||
121 | case MODE_UU: | ||
122 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
123 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
124 | break; | ||
125 | } | ||
126 | tcg_gen_extr_i64_i32(temp, temp2, temp64); | ||
127 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
128 | gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
129 | TCGv r3, uint32_t n, uint32_t mode) | ||
130 | { | ||
131 | - TCGv temp = tcg_const_i32(n); | ||
132 | + TCGv t_n = tcg_constant_i32(n); | ||
133 | + TCGv temp = tcg_temp_new(); | ||
134 | TCGv temp2 = tcg_temp_new(); | ||
135 | TCGv temp3 = tcg_temp_new(); | ||
136 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
137 | |||
138 | switch (mode) { | ||
139 | case MODE_LL: | ||
140 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
141 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
142 | break; | ||
143 | case MODE_LU: | ||
144 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
145 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
146 | break; | ||
147 | case MODE_UL: | ||
148 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
149 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
150 | break; | ||
151 | case MODE_UU: | ||
152 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
153 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
154 | break; | ||
155 | } | ||
156 | tcg_gen_extr_i64_i32(temp, temp2, temp64); | ||
157 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
158 | gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
159 | TCGv r3, uint32_t n, uint32_t mode) | ||
160 | { | ||
161 | - TCGv temp = tcg_const_i32(n); | ||
162 | + TCGv t_n = tcg_constant_i32(n); | ||
163 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
164 | TCGv_i64 temp64_2 = tcg_temp_new_i64(); | ||
165 | |||
166 | switch (mode) { | ||
167 | case MODE_LL: | ||
168 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
169 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
170 | break; | ||
171 | case MODE_LU: | ||
172 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
173 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
174 | break; | ||
175 | case MODE_UL: | ||
176 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
177 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
178 | break; | ||
179 | case MODE_UU: | ||
180 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
181 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
182 | break; | ||
183 | } | ||
184 | tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */ | ||
185 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
186 | gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
187 | TCGv r3, uint32_t n, uint32_t mode) | ||
188 | { | ||
189 | - TCGv temp = tcg_const_i32(n); | ||
190 | + TCGv t_n = tcg_constant_i32(n); | ||
191 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
192 | TCGv_i64 temp64_2 = tcg_temp_new_i64(); | ||
193 | TCGv_i64 temp64_3 = tcg_temp_new_i64(); | ||
194 | switch (mode) { | ||
195 | case MODE_LL: | ||
196 | - GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); | ||
197 | + GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n); | ||
198 | break; | ||
199 | case MODE_LU: | ||
200 | - GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); | ||
201 | + GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n); | ||
202 | break; | ||
203 | case MODE_UL: | ||
204 | - GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); | ||
205 | + GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n); | ||
206 | break; | ||
207 | case MODE_UU: | ||
208 | - GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); | ||
209 | + GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n); | ||
210 | break; | ||
211 | } | ||
212 | tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); | ||
213 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
214 | gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
215 | TCGv r3, uint32_t n, uint32_t mode) | ||
216 | { | ||
217 | - TCGv temp = tcg_const_i32(n); | ||
218 | + TCGv t_n = tcg_constant_i32(n); | ||
219 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
220 | TCGv_i64 temp64_2 = tcg_temp_new_i64(); | ||
221 | switch (mode) { | ||
222 | case MODE_LL: | ||
223 | - GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); | ||
224 | + GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n); | ||
225 | break; | ||
226 | case MODE_LU: | ||
227 | - GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); | ||
228 | + GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n); | ||
229 | break; | ||
230 | case MODE_UL: | ||
231 | - GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); | ||
232 | + GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n); | ||
233 | break; | ||
234 | case MODE_UU: | ||
235 | - GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); | ||
236 | + GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n); | ||
237 | break; | ||
238 | } | ||
239 | tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); | ||
240 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
241 | gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, | ||
242 | uint32_t mode) | ||
243 | { | ||
244 | - TCGv temp = tcg_const_i32(n); | ||
245 | + TCGv t_n = tcg_constant_i32(n); | ||
246 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
247 | switch (mode) { | ||
248 | case MODE_LL: | ||
249 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
250 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
251 | break; | ||
252 | case MODE_LU: | ||
253 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
254 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
255 | break; | ||
256 | case MODE_UL: | ||
257 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
258 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
259 | break; | ||
260 | case MODE_UU: | ||
261 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
262 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
263 | break; | ||
264 | } | ||
265 | gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high); | ||
266 | @@ -XXX,XX +XXX,XX @@ gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) | ||
267 | static inline void | ||
268 | gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) | ||
269 | { | ||
270 | - TCGv temp = tcg_const_i32(n); | ||
271 | + TCGv t_n = tcg_constant_i32(n); | ||
272 | + TCGv temp = tcg_temp_new(); | ||
273 | TCGv temp2 = tcg_temp_new(); | ||
274 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
275 | switch (mode) { | ||
276 | case MODE_LL: | ||
277 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
278 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
279 | break; | ||
280 | case MODE_LU: | ||
281 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
282 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
283 | break; | ||
284 | case MODE_UL: | ||
285 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
286 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
287 | break; | ||
288 | case MODE_UU: | ||
289 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
290 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
291 | break; | ||
292 | } | ||
293 | tcg_gen_andi_tl(temp2, r1, 0xffff0000); | ||
294 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
295 | gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, | ||
296 | uint32_t n, uint32_t mode) | ||
297 | { | ||
298 | - TCGv temp = tcg_const_i32(n); | ||
299 | + TCGv t_n = tcg_constant_i32(n); | ||
300 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
301 | switch (mode) { | ||
302 | case MODE_LL: | ||
303 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
304 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
305 | break; | ||
306 | case MODE_LU: | ||
307 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
308 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
309 | break; | ||
310 | case MODE_UL: | ||
311 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
312 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
313 | break; | ||
314 | case MODE_UU: | ||
315 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
316 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
317 | break; | ||
318 | } | ||
319 | gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high); | ||
320 | @@ -XXX,XX +XXX,XX @@ gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) | ||
321 | static inline void | ||
322 | gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) | ||
323 | { | ||
324 | - TCGv temp = tcg_const_i32(n); | ||
325 | + TCGv t_n = tcg_constant_i32(n); | ||
326 | + TCGv temp = tcg_temp_new(); | ||
327 | TCGv temp2 = tcg_temp_new(); | ||
328 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
329 | switch (mode) { | ||
330 | case MODE_LL: | ||
331 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
332 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
333 | break; | ||
334 | case MODE_LU: | ||
335 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
336 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
337 | break; | ||
338 | case MODE_UL: | ||
339 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
340 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
341 | break; | ||
342 | case MODE_UU: | ||
343 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
344 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
345 | break; | ||
346 | } | ||
347 | tcg_gen_andi_tl(temp2, r1, 0xffff0000); | ||
348 | @@ -XXX,XX +XXX,XX @@ gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) | ||
349 | static inline void | ||
350 | gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) | ||
351 | { | ||
352 | - TCGv temp = tcg_const_i32(n); | ||
353 | - gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp); | ||
354 | + TCGv t_n = tcg_constant_i32(n); | ||
355 | + gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, t_n); | ||
16 | } | 356 | } |
17 | 357 | ||
18 | -static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) | 358 | static inline void |
19 | +static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc) | 359 | gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) |
20 | { | 360 | { |
21 | PageDesc *pd; | 361 | - TCGv temp = tcg_const_i32(n); |
22 | void **lp; | 362 | - gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp); |
23 | @@ -XXX,XX +XXX,XX @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) | 363 | + TCGv t_n = tcg_constant_i32(n); |
24 | 364 | + gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, t_n); | |
25 | static inline PageDesc *page_find(tb_page_addr_t index) | ||
26 | { | ||
27 | - return page_find_alloc(index, 0); | ||
28 | + return page_find_alloc(index, false); | ||
29 | } | 365 | } |
30 | 366 | ||
31 | static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, | 367 | static inline void |
32 | - PageDesc **ret_p2, tb_page_addr_t phys2, int alloc); | 368 | @@ -XXX,XX +XXX,XX @@ gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, |
33 | + PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc); | 369 | TCGv arg3, uint32_t n) |
34 | 370 | { | |
35 | /* In user-mode page locks aren't used; mmap_lock is enough */ | 371 | TCGv_i64 r1 = tcg_temp_new_i64(); |
36 | #ifdef CONFIG_USER_ONLY | 372 | - TCGv temp = tcg_const_i32(n); |
37 | @@ -XXX,XX +XXX,XX @@ static inline void page_unlock(PageDesc *pd) | 373 | + TCGv t_n = tcg_constant_i32(n); |
38 | /* lock the page(s) of a TB in the correct acquisition order */ | 374 | |
39 | static inline void page_lock_tb(const TranslationBlock *tb) | 375 | tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high); |
40 | { | 376 | - gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp); |
41 | - page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0); | 377 | + gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n); |
42 | + page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false); | 378 | tcg_gen_extr_i64_i32(rl, rh, r1); |
43 | } | 379 | } |
44 | 380 | ||
45 | static inline void page_unlock_tb(const TranslationBlock *tb) | 381 | @@ -XXX,XX +XXX,XX @@ static inline void |
46 | @@ -XXX,XX +XXX,XX @@ void page_collection_unlock(struct page_collection *set) | 382 | gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, |
47 | #endif /* !CONFIG_USER_ONLY */ | 383 | TCGv r3, uint32_t n, uint32_t mode) |
48 | 384 | { | |
49 | static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, | 385 | - TCGv temp = tcg_const_i32(n); |
50 | - PageDesc **ret_p2, tb_page_addr_t phys2, int alloc) | 386 | + TCGv t_n = tcg_constant_i32(n); |
51 | + PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc) | 387 | + TCGv temp = tcg_temp_new(); |
52 | { | 388 | TCGv temp2 = tcg_temp_new(); |
53 | PageDesc *p1, *p2; | 389 | TCGv_i64 temp64 = tcg_temp_new_i64(); |
54 | tb_page_addr_t page1; | 390 | switch (mode) { |
55 | @@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | 391 | case MODE_LL: |
56 | * Note that inserting into the hash table first isn't an option, since | 392 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); |
57 | * we can only insert TBs that are fully initialized. | 393 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); |
58 | */ | 394 | break; |
59 | - page_lock_pair(&p, phys_pc, &p2, phys_page2, 1); | 395 | case MODE_LU: |
60 | + page_lock_pair(&p, phys_pc, &p2, phys_page2, true); | 396 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); |
61 | tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK); | 397 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); |
62 | if (p2) { | 398 | break; |
63 | tb_page_add(p2, tb, 1, phys_page2); | 399 | case MODE_UL: |
64 | @@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags) | 400 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); |
65 | for (addr = start, len = end - start; | 401 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); |
66 | len != 0; | 402 | break; |
67 | len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { | 403 | case MODE_UU: |
68 | - PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); | 404 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); |
69 | + PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, true); | 405 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); |
70 | 406 | break; | |
71 | /* If the write protection bit is set, then we invalidate | 407 | } |
72 | the code inside. */ | 408 | tcg_gen_extr_i64_i32(temp, temp2, temp64); |
409 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
410 | gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
411 | TCGv r3, uint32_t n, uint32_t mode) | ||
412 | { | ||
413 | - TCGv temp = tcg_const_i32(n); | ||
414 | + TCGv t_n = tcg_constant_i32(n); | ||
415 | + TCGv temp = tcg_temp_new(); | ||
416 | TCGv temp2 = tcg_temp_new(); | ||
417 | TCGv temp3 = tcg_temp_new(); | ||
418 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
419 | |||
420 | switch (mode) { | ||
421 | case MODE_LL: | ||
422 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
423 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
424 | break; | ||
425 | case MODE_LU: | ||
426 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
427 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
428 | break; | ||
429 | case MODE_UL: | ||
430 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
431 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
432 | break; | ||
433 | case MODE_UU: | ||
434 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
435 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
436 | break; | ||
437 | } | ||
438 | tcg_gen_extr_i64_i32(temp, temp2, temp64); | ||
439 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
440 | gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
441 | TCGv r3, uint32_t n, uint32_t mode) | ||
442 | { | ||
443 | - TCGv temp = tcg_const_i32(n); | ||
444 | + TCGv t_n = tcg_constant_i32(n); | ||
445 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
446 | TCGv_i64 temp64_2 = tcg_temp_new_i64(); | ||
447 | TCGv_i64 temp64_3 = tcg_temp_new_i64(); | ||
448 | switch (mode) { | ||
449 | case MODE_LL: | ||
450 | - GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); | ||
451 | + GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n); | ||
452 | break; | ||
453 | case MODE_LU: | ||
454 | - GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); | ||
455 | + GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n); | ||
456 | break; | ||
457 | case MODE_UL: | ||
458 | - GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); | ||
459 | + GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n); | ||
460 | break; | ||
461 | case MODE_UU: | ||
462 | - GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); | ||
463 | + GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n); | ||
464 | break; | ||
465 | } | ||
466 | tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); | ||
467 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
468 | gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
469 | TCGv r3, uint32_t n, uint32_t mode) | ||
470 | { | ||
471 | - TCGv temp = tcg_const_i32(n); | ||
472 | + TCGv t_n = tcg_constant_i32(n); | ||
473 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
474 | TCGv_i64 temp64_2 = tcg_temp_new_i64(); | ||
475 | switch (mode) { | ||
476 | case MODE_LL: | ||
477 | - GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); | ||
478 | + GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n); | ||
479 | break; | ||
480 | case MODE_LU: | ||
481 | - GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); | ||
482 | + GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n); | ||
483 | break; | ||
484 | case MODE_UL: | ||
485 | - GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); | ||
486 | + GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n); | ||
487 | break; | ||
488 | case MODE_UU: | ||
489 | - GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); | ||
490 | + GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n); | ||
491 | break; | ||
492 | } | ||
493 | tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); | ||
494 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
495 | gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, | ||
496 | uint32_t mode) | ||
497 | { | ||
498 | - TCGv temp = tcg_const_i32(n); | ||
499 | + TCGv t_n = tcg_constant_i32(n); | ||
500 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
501 | switch (mode) { | ||
502 | case MODE_LL: | ||
503 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
504 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
505 | break; | ||
506 | case MODE_LU: | ||
507 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
508 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
509 | break; | ||
510 | case MODE_UL: | ||
511 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
512 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
513 | break; | ||
514 | case MODE_UU: | ||
515 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
516 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
517 | break; | ||
518 | } | ||
519 | gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high); | ||
520 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
521 | gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, | ||
522 | uint32_t n, uint32_t mode) | ||
523 | { | ||
524 | - TCGv temp = tcg_const_i32(n); | ||
525 | + TCGv t_n = tcg_constant_i32(n); | ||
526 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
527 | switch (mode) { | ||
528 | case MODE_LL: | ||
529 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
530 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
531 | break; | ||
532 | case MODE_LU: | ||
533 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
534 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
535 | break; | ||
536 | case MODE_UL: | ||
537 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
538 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
539 | break; | ||
540 | case MODE_UU: | ||
541 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
542 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
543 | break; | ||
544 | } | ||
545 | gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high); | ||
546 | @@ -XXX,XX +XXX,XX @@ gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, | ||
547 | TCGv arg3, uint32_t n) | ||
548 | { | ||
549 | TCGv_i64 r1 = tcg_temp_new_i64(); | ||
550 | - TCGv temp = tcg_const_i32(n); | ||
551 | + TCGv t_n = tcg_constant_i32(n); | ||
552 | |||
553 | tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high); | ||
554 | - gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp); | ||
555 | + gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n); | ||
556 | tcg_gen_extr_i64_i32(rl, rh, r1); | ||
557 | } | ||
558 | |||
559 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
560 | gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
561 | TCGv r3, uint32_t n, uint32_t mode) | ||
562 | { | ||
563 | - TCGv temp = tcg_const_i32(n); | ||
564 | + TCGv t_n = tcg_constant_i32(n); | ||
565 | + TCGv temp = tcg_temp_new(); | ||
566 | TCGv temp2 = tcg_temp_new(); | ||
567 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
568 | switch (mode) { | ||
569 | case MODE_LL: | ||
570 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
571 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
572 | break; | ||
573 | case MODE_LU: | ||
574 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
575 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
576 | break; | ||
577 | case MODE_UL: | ||
578 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
579 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
580 | break; | ||
581 | case MODE_UU: | ||
582 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
583 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
584 | break; | ||
585 | } | ||
586 | tcg_gen_extr_i64_i32(temp, temp2, temp64); | ||
587 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
588 | gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
589 | TCGv r3, uint32_t n, uint32_t mode) | ||
590 | { | ||
591 | - TCGv temp = tcg_const_i32(n); | ||
592 | + TCGv t_n = tcg_constant_i32(n); | ||
593 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
594 | TCGv_i64 temp64_2 = tcg_temp_new_i64(); | ||
595 | TCGv_i64 temp64_3 = tcg_temp_new_i64(); | ||
596 | switch (mode) { | ||
597 | case MODE_LL: | ||
598 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
599 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
600 | break; | ||
601 | case MODE_LU: | ||
602 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
603 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
604 | break; | ||
605 | case MODE_UL: | ||
606 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
607 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
608 | break; | ||
609 | case MODE_UU: | ||
610 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
611 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
612 | break; | ||
613 | } | ||
614 | tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high); | ||
615 | @@ -XXX,XX +XXX,XX @@ gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
616 | static inline void | ||
617 | gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) | ||
618 | { | ||
619 | - TCGv temp = tcg_const_i32(n); | ||
620 | + TCGv t_n = tcg_constant_i32(n); | ||
621 | + TCGv temp = tcg_temp_new(); | ||
622 | TCGv temp2 = tcg_temp_new(); | ||
623 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
624 | switch (mode) { | ||
625 | case MODE_LL: | ||
626 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
627 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
628 | break; | ||
629 | case MODE_LU: | ||
630 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
631 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
632 | break; | ||
633 | case MODE_UL: | ||
634 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
635 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
636 | break; | ||
637 | case MODE_UU: | ||
638 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
639 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
640 | break; | ||
641 | } | ||
642 | tcg_gen_andi_tl(temp2, r1, 0xffff0000); | ||
643 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
644 | gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
645 | TCGv r3, uint32_t n, uint32_t mode) | ||
646 | { | ||
647 | - TCGv temp = tcg_const_i32(n); | ||
648 | + TCGv t_n = tcg_constant_i32(n); | ||
649 | + TCGv temp = tcg_temp_new(); | ||
650 | TCGv temp2 = tcg_temp_new(); | ||
651 | TCGv temp3 = tcg_temp_new(); | ||
652 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
653 | |||
654 | switch (mode) { | ||
655 | case MODE_LL: | ||
656 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
657 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
658 | break; | ||
659 | case MODE_LU: | ||
660 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
661 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
662 | break; | ||
663 | case MODE_UL: | ||
664 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
665 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
666 | break; | ||
667 | case MODE_UU: | ||
668 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
669 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
670 | break; | ||
671 | } | ||
672 | tcg_gen_extr_i64_i32(temp, temp2, temp64); | ||
673 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
674 | gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
675 | TCGv r3, uint32_t n, uint32_t mode) | ||
676 | { | ||
677 | - TCGv temp = tcg_const_i32(n); | ||
678 | + TCGv t_n = tcg_constant_i32(n); | ||
679 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
680 | TCGv_i64 temp64_2 = tcg_temp_new_i64(); | ||
681 | |||
682 | switch (mode) { | ||
683 | case MODE_LL: | ||
684 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
685 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
686 | break; | ||
687 | case MODE_LU: | ||
688 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
689 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
690 | break; | ||
691 | case MODE_UL: | ||
692 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
693 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
694 | break; | ||
695 | case MODE_UU: | ||
696 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
697 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
698 | break; | ||
699 | } | ||
700 | tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */ | ||
701 | @@ -XXX,XX +XXX,XX @@ gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, | ||
702 | static inline void | ||
703 | gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) | ||
704 | { | ||
705 | - TCGv temp = tcg_const_i32(n); | ||
706 | + TCGv t_n = tcg_constant_i32(n); | ||
707 | + TCGv temp = tcg_temp_new(); | ||
708 | TCGv temp2 = tcg_temp_new(); | ||
709 | TCGv_i64 temp64 = tcg_temp_new_i64(); | ||
710 | switch (mode) { | ||
711 | case MODE_LL: | ||
712 | - GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); | ||
713 | + GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n); | ||
714 | break; | ||
715 | case MODE_LU: | ||
716 | - GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); | ||
717 | + GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n); | ||
718 | break; | ||
719 | case MODE_UL: | ||
720 | - GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); | ||
721 | + GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n); | ||
722 | break; | ||
723 | case MODE_UU: | ||
724 | - GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); | ||
725 | + GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); | ||
726 | break; | ||
727 | } | ||
728 | tcg_gen_andi_tl(temp2, r1, 0xffff0000); | ||
73 | -- | 729 | -- |
74 | 2.34.1 | 730 | 2.34.1 |
75 | 731 | ||
New patch | |||
---|---|---|---|
1 | 1 | While temp3 could simply be initialized with tcg_constant_i32, | |
2 | the renaming makes the purpose clearer. | ||
3 | |||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/tricore/translate.c | 56 ++++++++++++++++++-------------------- | ||
8 | 1 file changed, 27 insertions(+), 29 deletions(-) | ||
9 | |||
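As a quick sketch of the naming convention this patch applies (the wrapper function below is made up for illustration; the individual tcg_gen_* and gen_helper_circ_update() calls are the ones visible in the hunks that follow): a value from tcg_constant_i32() is read-only and shared for the whole translation block, so giving it an operand-style name such as t_off10 separates it visually from the writable scratch temps.

    /* Illustrative fragment only -- not part of the patch. */
    static void sketch_circ_store(DisasContext *ctx, int r1, int r2, int32_t off10)
    {
        TCGv t_off10 = tcg_constant_i32(off10);  /* immutable constant, never freed */
        TCGv temp = tcg_temp_new();              /* writable scratch */
        TCGv temp2 = tcg_temp_new();

        tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2 + 1]);   /* temp is clobbered here */
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10);  /* only ever read */
    }
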
10 | diff --git a/target/tricore/translate.c b/target/tricore/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/tricore/translate.c | ||
13 | +++ b/target/tricore/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) | ||
15 | uint32_t op2; | ||
16 | uint32_t off10; | ||
17 | int32_t r1, r2; | ||
18 | - TCGv temp, temp2, temp3; | ||
19 | + TCGv temp, temp2, t_off10; | ||
20 | |||
21 | r1 = MASK_OP_BO_S1D(ctx->opcode); | ||
22 | r2 = MASK_OP_BO_S2(ctx->opcode); | ||
23 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) | ||
24 | |||
25 | temp = tcg_temp_new(); | ||
26 | temp2 = tcg_temp_new(); | ||
27 | - temp3 = tcg_const_i32(off10); | ||
28 | + t_off10 = tcg_constant_i32(off10); | ||
29 | CHECK_REG_PAIR(r2); | ||
30 | tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); | ||
31 | tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); | ||
32 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) | ||
33 | case OPC2_32_BO_CACHEA_WI_CIRC: | ||
34 | case OPC2_32_BO_CACHEA_W_CIRC: | ||
35 | case OPC2_32_BO_CACHEA_I_CIRC: | ||
36 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
37 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
38 | break; | ||
39 | case OPC2_32_BO_ST_A_BR: | ||
40 | tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); | ||
41 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) | ||
42 | break; | ||
43 | case OPC2_32_BO_ST_A_CIRC: | ||
44 | tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); | ||
45 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
46 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
47 | break; | ||
48 | case OPC2_32_BO_ST_B_BR: | ||
49 | tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); | ||
50 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) | ||
51 | break; | ||
52 | case OPC2_32_BO_ST_B_CIRC: | ||
53 | tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); | ||
54 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
55 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
56 | break; | ||
57 | case OPC2_32_BO_ST_D_BR: | ||
58 | CHECK_REG_PAIR(r1); | ||
59 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) | ||
60 | tcg_gen_rem_tl(temp, temp, temp2); | ||
61 | tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); | ||
62 | tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); | ||
63 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
64 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
65 | break; | ||
66 | case OPC2_32_BO_ST_DA_BR: | ||
67 | CHECK_REG_PAIR(r1); | ||
68 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) | ||
69 | tcg_gen_rem_tl(temp, temp, temp2); | ||
70 | tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); | ||
71 | tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); | ||
72 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
73 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
74 | break; | ||
75 | case OPC2_32_BO_ST_H_BR: | ||
76 | tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); | ||
77 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) | ||
78 | break; | ||
79 | case OPC2_32_BO_ST_H_CIRC: | ||
80 | tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); | ||
81 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
82 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
83 | break; | ||
84 | case OPC2_32_BO_ST_Q_BR: | ||
85 | tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); | ||
86 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) | ||
87 | case OPC2_32_BO_ST_Q_CIRC: | ||
88 | tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); | ||
89 | tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW); | ||
90 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
91 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
92 | break; | ||
93 | case OPC2_32_BO_ST_W_BR: | ||
94 | tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); | ||
95 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) | ||
96 | break; | ||
97 | case OPC2_32_BO_ST_W_CIRC: | ||
98 | tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); | ||
99 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
100 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
101 | break; | ||
102 | default: | ||
103 | generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); | ||
104 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
105 | uint32_t op2; | ||
106 | uint32_t off10; | ||
107 | int r1, r2; | ||
108 | - | ||
109 | - TCGv temp, temp2, temp3; | ||
110 | + TCGv temp, temp2, t_off10; | ||
111 | |||
112 | r1 = MASK_OP_BO_S1D(ctx->opcode); | ||
113 | r2 = MASK_OP_BO_S2(ctx->opcode); | ||
114 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
115 | |||
116 | temp = tcg_temp_new(); | ||
117 | temp2 = tcg_temp_new(); | ||
118 | - temp3 = tcg_const_i32(off10); | ||
119 | + t_off10 = tcg_constant_i32(off10); | ||
120 | CHECK_REG_PAIR(r2); | ||
121 | tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); | ||
122 | tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); | ||
123 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
124 | break; | ||
125 | case OPC2_32_BO_LD_A_CIRC: | ||
126 | tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); | ||
127 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
128 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
129 | break; | ||
130 | case OPC2_32_BO_LD_B_BR: | ||
131 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); | ||
132 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
133 | break; | ||
134 | case OPC2_32_BO_LD_B_CIRC: | ||
135 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); | ||
136 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
137 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
138 | break; | ||
139 | case OPC2_32_BO_LD_BU_BR: | ||
140 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); | ||
141 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
142 | break; | ||
143 | case OPC2_32_BO_LD_BU_CIRC: | ||
144 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); | ||
145 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
146 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
147 | break; | ||
148 | case OPC2_32_BO_LD_D_BR: | ||
149 | CHECK_REG_PAIR(r1); | ||
150 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
151 | tcg_gen_rem_tl(temp, temp, temp2); | ||
152 | tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); | ||
153 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); | ||
154 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
155 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
156 | break; | ||
157 | case OPC2_32_BO_LD_DA_BR: | ||
158 | CHECK_REG_PAIR(r1); | ||
159 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
160 | tcg_gen_rem_tl(temp, temp, temp2); | ||
161 | tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); | ||
162 | tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); | ||
163 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
164 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
165 | break; | ||
166 | case OPC2_32_BO_LD_H_BR: | ||
167 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); | ||
168 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
169 | break; | ||
170 | case OPC2_32_BO_LD_H_CIRC: | ||
171 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); | ||
172 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
173 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
174 | break; | ||
175 | case OPC2_32_BO_LD_HU_BR: | ||
176 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); | ||
177 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
178 | break; | ||
179 | case OPC2_32_BO_LD_HU_CIRC: | ||
180 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); | ||
181 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
182 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
183 | break; | ||
184 | case OPC2_32_BO_LD_Q_BR: | ||
185 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); | ||
186 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
187 | case OPC2_32_BO_LD_Q_CIRC: | ||
188 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); | ||
189 | tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); | ||
190 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
191 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
192 | break; | ||
193 | case OPC2_32_BO_LD_W_BR: | ||
194 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); | ||
195 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) | ||
196 | break; | ||
197 | case OPC2_32_BO_LD_W_CIRC: | ||
198 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); | ||
199 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
200 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
201 | break; | ||
202 | default: | ||
203 | generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); | ||
204 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) | ||
205 | uint32_t op2; | ||
206 | uint32_t off10; | ||
207 | int r1, r2; | ||
208 | - | ||
209 | - TCGv temp, temp2, temp3; | ||
210 | + TCGv temp, temp2, t_off10; | ||
211 | |||
212 | r1 = MASK_OP_BO_S1D(ctx->opcode); | ||
213 | r2 = MASK_OP_BO_S2(ctx->opcode); | ||
214 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) | ||
215 | |||
216 | temp = tcg_temp_new(); | ||
217 | temp2 = tcg_temp_new(); | ||
218 | - temp3 = tcg_const_i32(off10); | ||
219 | + t_off10 = tcg_constant_i32(off10); | ||
220 | CHECK_REG_PAIR(r2); | ||
221 | tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); | ||
222 | tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); | ||
223 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) | ||
224 | break; | ||
225 | case OPC2_32_BO_LDMST_CIRC: | ||
226 | gen_ldmst(ctx, r1, temp2); | ||
227 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
228 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
229 | break; | ||
230 | case OPC2_32_BO_SWAP_W_BR: | ||
231 | gen_swap(ctx, r1, temp2); | ||
232 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) | ||
233 | break; | ||
234 | case OPC2_32_BO_SWAP_W_CIRC: | ||
235 | gen_swap(ctx, r1, temp2); | ||
236 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
237 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
238 | break; | ||
239 | case OPC2_32_BO_CMPSWAP_W_BR: | ||
240 | gen_cmpswap(ctx, r1, temp2); | ||
241 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) | ||
242 | break; | ||
243 | case OPC2_32_BO_CMPSWAP_W_CIRC: | ||
244 | gen_cmpswap(ctx, r1, temp2); | ||
245 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
246 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
247 | break; | ||
248 | case OPC2_32_BO_SWAPMSK_W_BR: | ||
249 | gen_swapmsk(ctx, r1, temp2); | ||
250 | @@ -XXX,XX +XXX,XX @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) | ||
251 | break; | ||
252 | case OPC2_32_BO_SWAPMSK_W_CIRC: | ||
253 | gen_swapmsk(ctx, r1, temp2); | ||
254 | - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); | ||
255 | + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); | ||
256 | break; | ||
257 | default: | ||
258 | generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); | ||
259 | -- | ||
260 | 2.34.1 | ||
261 | |||
262 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | This removes the only use of temp. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/tricore/translate.c | 7 ++----- | ||
7 | 1 file changed, 2 insertions(+), 5 deletions(-) | ||
8 | |||
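The simplification boils down to using the immediate form of the comparison, so the constant never needs to exist as a TCG value at all. A before/after sketch, with the operands taken from the hunk below:

    /* before: a dedicated value just to hold the immediate */
    TCGv temp = tcg_const_i32(-0x80000000);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp);

    /* after: the immediate variant folds the constant into the op */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], -0x80000000);
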
9 | diff --git a/target/tricore/translate.c b/target/tricore/translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/tricore/translate.c | ||
12 | +++ b/target/tricore/translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static void decode_sr_accu(DisasContext *ctx) | ||
14 | { | ||
15 | uint32_t op2; | ||
16 | uint32_t r1; | ||
17 | - TCGv temp; | ||
18 | |||
19 | r1 = MASK_OP_SR_S1D(ctx->opcode); | ||
20 | op2 = MASK_OP_SR_OP2(ctx->opcode); | ||
21 | |||
22 | switch (op2) { | ||
23 | case OPC2_16_SR_RSUB: | ||
24 | - /* overflow only if r1 = -0x80000000 */ | ||
25 | - temp = tcg_const_i32(-0x80000000); | ||
26 | - /* calc V bit */ | ||
27 | - tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp); | ||
28 | + /* calc V bit -- overflow only if r1 = -0x80000000 */ | ||
29 | + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], -0x80000000); | ||
30 | tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); | ||
31 | /* calc SV bit */ | ||
32 | tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); | ||
33 | -- | ||
34 | 2.34.1 | ||
35 | |||
36 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | The temp variables here are always set afterward; | ||
2 | the initialization with a constant was discarded. | ||
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/tricore/translate.c | 4 ++-- | ||
8 | 1 file changed, 2 insertions(+), 2 deletions(-) | ||
9 | |||
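tcg_const_i32() was roughly tcg_temp_new_i32() plus a tcg_gen_movi_i32() to load the value, so when every path writes the temp before reading it, that movi was dead code. The minimal before/after (illustrative, names as in the hunk below):

    /* before: allocates a temp and emits a movi whose result is never read */
    temp = tcg_const_i32(n);

    /* after: a plain uninitialized scratch temp; the first access is a write */
    temp = tcg_temp_new();
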
10 | diff --git a/target/tricore/translate.c b/target/tricore/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/tricore/translate.c | ||
13 | +++ b/target/tricore/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void decode_rrr1_maddq_h(DisasContext *ctx) | ||
15 | r4 = MASK_OP_RRR1_D(ctx->opcode); | ||
16 | n = MASK_OP_RRR1_N(ctx->opcode); | ||
17 | |||
18 | - temp = tcg_const_i32(n); | ||
19 | + temp = tcg_temp_new(); | ||
20 | temp2 = tcg_temp_new(); | ||
21 | |||
22 | switch (op2) { | ||
23 | @@ -XXX,XX +XXX,XX @@ static void decode_rrr1_msubq_h(DisasContext *ctx) | ||
24 | r4 = MASK_OP_RRR1_D(ctx->opcode); | ||
25 | n = MASK_OP_RRR1_N(ctx->opcode); | ||
26 | |||
27 | - temp = tcg_const_i32(n); | ||
28 | + temp = tcg_temp_new(); | ||
29 | temp2 = tcg_temp_new(); | ||
30 | |||
31 | switch (op2) { | ||
32 | -- | ||
33 | 2.34.1 | ||
34 | |||
35 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | All remaining uses are strictly read-only. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/tricore/translate.c | 127 +++++++++++++++++++------------------ | ||
7 | 1 file changed, 64 insertions(+), 63 deletions(-) | ||
8 | |||
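The read-only requirement is what makes the wholesale switch legal: a TCGv returned by tcg_constant_i32() is a shared, per-value constant and must never be used as a destination. One converted wrapper from the hunk below, with that constraint spelled out (the comment is added here for illustration):

    static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
    {
        /* read-only: only ever passed as a source operand to gen_add_d() */
        TCGv temp = tcg_constant_i32(r2);
        gen_add_d(ret, r1, temp);
    }
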
9 | diff --git a/target/tricore/translate.c b/target/tricore/translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/tricore/translate.c | ||
12 | +++ b/target/tricore/translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) | ||
14 | /* Makros for generating helpers */ | ||
15 | |||
16 | #define gen_helper_1arg(name, arg) do { \ | ||
17 | - TCGv_i32 helper_tmp = tcg_const_i32(arg); \ | ||
18 | + TCGv_i32 helper_tmp = tcg_constant_i32(arg); \ | ||
19 | gen_helper_##name(cpu_env, helper_tmp); \ | ||
20 | } while (0) | ||
21 | |||
22 | @@ -XXX,XX +XXX,XX @@ static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) | ||
23 | |||
24 | static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con) | ||
25 | { | ||
26 | - TCGv temp = tcg_const_i32(con); | ||
27 | + TCGv temp = tcg_constant_i32(con); | ||
28 | gen_madd32_d(ret, r1, r2, temp); | ||
29 | } | ||
30 | |||
31 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
32 | gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, | ||
33 | int32_t con) | ||
34 | { | ||
35 | - TCGv temp = tcg_const_i32(con); | ||
36 | + TCGv temp = tcg_constant_i32(con); | ||
37 | gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); | ||
38 | } | ||
39 | |||
40 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
41 | gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, | ||
42 | int32_t con) | ||
43 | { | ||
44 | - TCGv temp = tcg_const_i32(con); | ||
45 | + TCGv temp = tcg_constant_i32(con); | ||
46 | gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); | ||
47 | } | ||
48 | |||
49 | @@ -XXX,XX +XXX,XX @@ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) | ||
50 | |||
51 | static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con) | ||
52 | { | ||
53 | - TCGv temp = tcg_const_i32(con); | ||
54 | + TCGv temp = tcg_constant_i32(con); | ||
55 | gen_msub32_d(ret, r1, r2, temp); | ||
56 | } | ||
57 | |||
58 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
59 | gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, | ||
60 | int32_t con) | ||
61 | { | ||
62 | - TCGv temp = tcg_const_i32(con); | ||
63 | + TCGv temp = tcg_constant_i32(con); | ||
64 | gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); | ||
65 | } | ||
66 | |||
67 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
68 | gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, | ||
69 | int32_t con) | ||
70 | { | ||
71 | - TCGv temp = tcg_const_i32(con); | ||
72 | + TCGv temp = tcg_constant_i32(con); | ||
73 | gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); | ||
74 | } | ||
75 | |||
76 | static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2) | ||
77 | { | ||
78 | - TCGv temp = tcg_const_i32(r2); | ||
79 | + TCGv temp = tcg_constant_i32(r2); | ||
80 | gen_add_d(ret, r1, temp); | ||
81 | } | ||
82 | |||
83 | @@ -XXX,XX +XXX,XX @@ static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2) | ||
84 | |||
85 | static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con) | ||
86 | { | ||
87 | - TCGv temp = tcg_const_i32(con); | ||
88 | + TCGv temp = tcg_constant_i32(con); | ||
89 | gen_add_CC(ret, r1, temp); | ||
90 | } | ||
91 | |||
92 | @@ -XXX,XX +XXX,XX @@ static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2) | ||
93 | |||
94 | static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con) | ||
95 | { | ||
96 | - TCGv temp = tcg_const_i32(con); | ||
97 | + TCGv temp = tcg_constant_i32(con); | ||
98 | gen_addc_CC(ret, r1, temp); | ||
99 | } | ||
100 | |||
101 | @@ -XXX,XX +XXX,XX @@ static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, | ||
102 | TCGv temp2 = tcg_temp_new(); | ||
103 | TCGv result = tcg_temp_new(); | ||
104 | TCGv mask = tcg_temp_new(); | ||
105 | - TCGv t0 = tcg_const_i32(0); | ||
106 | + TCGv t0 = tcg_constant_i32(0); | ||
107 | |||
108 | /* create mask for sticky bits */ | ||
109 | tcg_gen_setcond_tl(cond, mask, r4, t0); | ||
110 | @@ -XXX,XX +XXX,XX @@ static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, | ||
111 | static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2, | ||
112 | TCGv r3, TCGv r4) | ||
113 | { | ||
114 | - TCGv temp = tcg_const_i32(r2); | ||
115 | + TCGv temp = tcg_constant_i32(r2); | ||
116 | gen_cond_add(cond, r1, temp, r3, r4); | ||
117 | } | ||
118 | |||
119 | @@ -XXX,XX +XXX,XX @@ static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, | ||
120 | TCGv temp2 = tcg_temp_new(); | ||
121 | TCGv result = tcg_temp_new(); | ||
122 | TCGv mask = tcg_temp_new(); | ||
123 | - TCGv t0 = tcg_const_i32(0); | ||
124 | + TCGv t0 = tcg_constant_i32(0); | ||
125 | |||
126 | /* create mask for sticky bits */ | ||
127 | tcg_gen_setcond_tl(cond, mask, r4, t0); | ||
128 | @@ -XXX,XX +XXX,XX @@ gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) | ||
129 | static inline void | ||
130 | gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) | ||
131 | { | ||
132 | - TCGv temp = tcg_const_i32(n); | ||
133 | + TCGv temp = tcg_constant_i32(n); | ||
134 | gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp); | ||
135 | } | ||
136 | |||
137 | static inline void | ||
138 | gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) | ||
139 | { | ||
140 | - TCGv temp = tcg_const_i32(n); | ||
141 | + TCGv temp = tcg_constant_i32(n); | ||
142 | gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp); | ||
143 | } | ||
144 | |||
145 | @@ -XXX,XX +XXX,XX @@ static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2) | ||
146 | |||
147 | static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con) | ||
148 | { | ||
149 | - TCGv temp = tcg_const_i32(con); | ||
150 | + TCGv temp = tcg_constant_i32(con); | ||
151 | gen_absdif(ret, r1, temp); | ||
152 | } | ||
153 | |||
154 | static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con) | ||
155 | { | ||
156 | - TCGv temp = tcg_const_i32(con); | ||
157 | + TCGv temp = tcg_constant_i32(con); | ||
158 | gen_helper_absdif_ssov(ret, cpu_env, r1, temp); | ||
159 | } | ||
160 | |||
161 | @@ -XXX,XX +XXX,XX @@ static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2) | ||
162 | |||
163 | static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con) | ||
164 | { | ||
165 | - TCGv temp = tcg_const_i32(con); | ||
166 | + TCGv temp = tcg_constant_i32(con); | ||
167 | gen_mul_i32s(ret, r1, temp); | ||
168 | } | ||
169 | |||
170 | @@ -XXX,XX +XXX,XX @@ static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) | ||
171 | static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, | ||
172 | int32_t con) | ||
173 | { | ||
174 | - TCGv temp = tcg_const_i32(con); | ||
175 | + TCGv temp = tcg_constant_i32(con); | ||
176 | gen_mul_i64s(ret_low, ret_high, r1, temp); | ||
177 | } | ||
178 | |||
179 | @@ -XXX,XX +XXX,XX @@ static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) | ||
180 | static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, | ||
181 | int32_t con) | ||
182 | { | ||
183 | - TCGv temp = tcg_const_i32(con); | ||
184 | + TCGv temp = tcg_constant_i32(con); | ||
185 | gen_mul_i64u(ret_low, ret_high, r1, temp); | ||
186 | } | ||
187 | |||
188 | static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con) | ||
189 | { | ||
190 | - TCGv temp = tcg_const_i32(con); | ||
191 | + TCGv temp = tcg_constant_i32(con); | ||
192 | gen_helper_mul_ssov(ret, cpu_env, r1, temp); | ||
193 | } | ||
194 | |||
195 | static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con) | ||
196 | { | ||
197 | - TCGv temp = tcg_const_i32(con); | ||
198 | + TCGv temp = tcg_constant_i32(con); | ||
199 | gen_helper_mul_suov(ret, cpu_env, r1, temp); | ||
200 | } | ||
201 | + | ||
202 | /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */ | ||
203 | static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) | ||
204 | { | ||
205 | - TCGv temp = tcg_const_i32(con); | ||
206 | + TCGv temp = tcg_constant_i32(con); | ||
207 | gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp); | ||
208 | } | ||
209 | |||
210 | static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) | ||
211 | { | ||
212 | - TCGv temp = tcg_const_i32(con); | ||
213 | + TCGv temp = tcg_constant_i32(con); | ||
214 | gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp); | ||
215 | } | ||
216 | |||
217 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
218 | gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, | ||
219 | int32_t con) | ||
220 | { | ||
221 | - TCGv temp = tcg_const_i32(con); | ||
222 | + TCGv temp = tcg_constant_i32(con); | ||
223 | gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp); | ||
224 | } | ||
225 | |||
226 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
227 | gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, | ||
228 | int32_t con) | ||
229 | { | ||
230 | - TCGv temp = tcg_const_i32(con); | ||
231 | + TCGv temp = tcg_constant_i32(con); | ||
232 | gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp); | ||
233 | } | ||
234 | |||
235 | static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) | ||
236 | { | ||
237 | - TCGv temp = tcg_const_i32(con); | ||
238 | + TCGv temp = tcg_constant_i32(con); | ||
239 | gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp); | ||
240 | } | ||
241 | |||
242 | static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) | ||
243 | { | ||
244 | - TCGv temp = tcg_const_i32(con); | ||
245 | + TCGv temp = tcg_constant_i32(con); | ||
246 | gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp); | ||
247 | } | ||
248 | |||
249 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
250 | gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, | ||
251 | int32_t con) | ||
252 | { | ||
253 | - TCGv temp = tcg_const_i32(con); | ||
254 | + TCGv temp = tcg_constant_i32(con); | ||
255 | gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp); | ||
256 | } | ||
257 | |||
258 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
259 | gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, | ||
260 | int32_t con) | ||
261 | { | ||
262 | - TCGv temp = tcg_const_i32(con); | ||
263 | + TCGv temp = tcg_constant_i32(con); | ||
264 | gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp); | ||
265 | } | ||
266 | |||
267 | @@ -XXX,XX +XXX,XX @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) | ||
268 | /* clear PSW.V */ | ||
269 | tcg_gen_movi_tl(cpu_PSW_V, 0); | ||
270 | } else if (shift_count > 0) { | ||
271 | - TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count); | ||
272 | - TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count); | ||
273 | + TCGv t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count); | ||
274 | + TCGv t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count); | ||
275 | |||
276 | /* calc carry */ | ||
277 | msk_start = 32 - shift_count; | ||
278 | @@ -XXX,XX +XXX,XX @@ static void gen_shas(TCGv ret, TCGv r1, TCGv r2) | ||
279 | |||
280 | static void gen_shasi(TCGv ret, TCGv r1, int32_t con) | ||
281 | { | ||
282 | - TCGv temp = tcg_const_i32(con); | ||
283 | + TCGv temp = tcg_constant_i32(con); | ||
284 | gen_shas(ret, r1, temp); | ||
285 | } | ||
286 | |||
287 | @@ -XXX,XX +XXX,XX @@ static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2) | ||
288 | |||
289 | static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con) | ||
290 | { | ||
291 | - TCGv temp = tcg_const_i32(con); | ||
292 | + TCGv temp = tcg_constant_i32(con); | ||
293 | gen_sh_cond(cond, ret, r1, temp); | ||
294 | } | ||
295 | |||
296 | @@ -XXX,XX +XXX,XX @@ static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2) | ||
297 | |||
298 | static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con) | ||
299 | { | ||
300 | - TCGv temp = tcg_const_i32(con); | ||
301 | + TCGv temp = tcg_constant_i32(con); | ||
302 | gen_helper_add_ssov(ret, cpu_env, r1, temp); | ||
303 | } | ||
304 | |||
305 | static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con) | ||
306 | { | ||
307 | - TCGv temp = tcg_const_i32(con); | ||
308 | + TCGv temp = tcg_constant_i32(con); | ||
309 | gen_helper_add_suov(ret, cpu_env, r1, temp); | ||
310 | } | ||
311 | |||
312 | @@ -XXX,XX +XXX,XX @@ static inline void | ||
313 | gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con, | ||
314 | void(*op)(TCGv, TCGv, TCGv)) | ||
315 | { | ||
316 | - TCGv temp = tcg_const_i32(con); | ||
317 | + TCGv temp = tcg_constant_i32(con); | ||
318 | gen_accumulating_cond(cond, ret, r1, temp, op); | ||
319 | } | ||
320 | |||
321 | @@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) | ||
322 | |||
323 | static void generate_trap(DisasContext *ctx, int class, int tin) | ||
324 | { | ||
325 | - TCGv_i32 classtemp = tcg_const_i32(class); | ||
326 | - TCGv_i32 tintemp = tcg_const_i32(tin); | ||
327 | + TCGv_i32 classtemp = tcg_constant_i32(class); | ||
328 | + TCGv_i32 tintemp = tcg_constant_i32(tin); | ||
329 | |||
330 | gen_save_pc(ctx->base.pc_next); | ||
331 | gen_helper_raise_exception_sync(cpu_env, classtemp, tintemp); | ||
332 | @@ -XXX,XX +XXX,XX @@ static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1, | ||
333 | static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1, | ||
334 | int r2, int16_t address) | ||
335 | { | ||
336 | - TCGv temp = tcg_const_i32(r2); | ||
337 | + TCGv temp = tcg_constant_i32(r2); | ||
338 | gen_branch_cond(ctx, cond, r1, temp, address); | ||
339 | } | ||
340 | |||
341 | @@ -XXX,XX +XXX,XX @@ static void decode_src_opc(DisasContext *ctx, int op1) | ||
342 | cpu_gpr_d[15]); | ||
343 | break; | ||
344 | case OPC1_16_SRC_CMOV: | ||
345 | - temp = tcg_const_tl(0); | ||
346 | - temp2 = tcg_const_tl(const4); | ||
347 | + temp = tcg_constant_tl(0); | ||
348 | + temp2 = tcg_constant_tl(const4); | ||
349 | tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, | ||
350 | temp2, cpu_gpr_d[r1]); | ||
351 | break; | ||
352 | case OPC1_16_SRC_CMOVN: | ||
353 | - temp = tcg_const_tl(0); | ||
354 | - temp2 = tcg_const_tl(const4); | ||
355 | + temp = tcg_constant_tl(0); | ||
356 | + temp2 = tcg_constant_tl(const4); | ||
357 | tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, | ||
358 | temp2, cpu_gpr_d[r1]); | ||
359 | break; | ||
360 | @@ -XXX,XX +XXX,XX @@ static void decode_srr_opc(DisasContext *ctx, int op1) | ||
361 | tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); | ||
362 | break; | ||
363 | case OPC1_16_SRR_CMOV: | ||
364 | - temp = tcg_const_tl(0); | ||
365 | + temp = tcg_constant_tl(0); | ||
366 | tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, | ||
367 | cpu_gpr_d[r2], cpu_gpr_d[r1]); | ||
368 | break; | ||
369 | case OPC1_16_SRR_CMOVN: | ||
370 | - temp = tcg_const_tl(0); | ||
371 | + temp = tcg_constant_tl(0); | ||
372 | tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, | ||
373 | cpu_gpr_d[r2], cpu_gpr_d[r1]); | ||
374 | break; | ||
375 | @@ -XXX,XX +XXX,XX @@ static void decode_abs_ldw(DisasContext *ctx) | ||
376 | address = MASK_OP_ABS_OFF18(ctx->opcode); | ||
377 | op2 = MASK_OP_ABS_OP2(ctx->opcode); | ||
378 | |||
379 | - temp = tcg_const_i32(EA_ABS_FORMAT(address)); | ||
380 | + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); | ||
381 | |||
382 | switch (op2) { | ||
383 | case OPC2_32_ABS_LD_A: | ||
384 | @@ -XXX,XX +XXX,XX @@ static void decode_abs_ldb(DisasContext *ctx) | ||
385 | address = MASK_OP_ABS_OFF18(ctx->opcode); | ||
386 | op2 = MASK_OP_ABS_OP2(ctx->opcode); | ||
387 | |||
388 | - temp = tcg_const_i32(EA_ABS_FORMAT(address)); | ||
389 | + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); | ||
390 | |||
391 | switch (op2) { | ||
392 | case OPC2_32_ABS_LD_B: | ||
393 | @@ -XXX,XX +XXX,XX @@ static void decode_abs_ldst_swap(DisasContext *ctx) | ||
394 | address = MASK_OP_ABS_OFF18(ctx->opcode); | ||
395 | op2 = MASK_OP_ABS_OP2(ctx->opcode); | ||
396 | |||
397 | - temp = tcg_const_i32(EA_ABS_FORMAT(address)); | ||
398 | + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); | ||
399 | |||
400 | switch (op2) { | ||
401 | case OPC2_32_ABS_LDMST: | ||
402 | @@ -XXX,XX +XXX,XX @@ static void decode_abs_store(DisasContext *ctx) | ||
403 | address = MASK_OP_ABS_OFF18(ctx->opcode); | ||
404 | op2 = MASK_OP_ABS_OP2(ctx->opcode); | ||
405 | |||
406 | - temp = tcg_const_i32(EA_ABS_FORMAT(address)); | ||
407 | + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); | ||
408 | |||
409 | switch (op2) { | ||
410 | case OPC2_32_ABS_ST_A: | ||
411 | @@ -XXX,XX +XXX,XX @@ static void decode_abs_storeb_h(DisasContext *ctx) | ||
412 | address = MASK_OP_ABS_OFF18(ctx->opcode); | ||
413 | op2 = MASK_OP_ABS_OP2(ctx->opcode); | ||
414 | |||
415 | - temp = tcg_const_i32(EA_ABS_FORMAT(address)); | ||
416 | + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); | ||
417 | |||
418 | switch (op2) { | ||
419 | case OPC2_32_ABS_ST_B: | ||
420 | @@ -XXX,XX +XXX,XX @@ static void decode_rcpw_insert(DisasContext *ctx) | ||
421 | case OPC2_32_RCPW_INSERT: | ||
422 | /* if pos + width > 32 undefined result */ | ||
423 | if (pos + width <= 32) { | ||
424 | - temp = tcg_const_i32(const4); | ||
425 | + temp = tcg_constant_i32(const4); | ||
426 | tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width); | ||
427 | } | ||
428 | break; | ||
429 | @@ -XXX,XX +XXX,XX @@ static void decode_rcr_cond_select(DisasContext *ctx) | ||
430 | cpu_gpr_d[r3]); | ||
431 | break; | ||
432 | case OPC2_32_RCR_SEL: | ||
433 | - temp = tcg_const_i32(0); | ||
434 | - temp2 = tcg_const_i32(const9); | ||
435 | + temp = tcg_constant_i32(0); | ||
436 | + temp2 = tcg_constant_i32(const9); | ||
437 | tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, | ||
438 | cpu_gpr_d[r1], temp2); | ||
439 | break; | ||
440 | case OPC2_32_RCR_SELN: | ||
441 | - temp = tcg_const_i32(0); | ||
442 | - temp2 = tcg_const_i32(const9); | ||
443 | + temp = tcg_constant_i32(0); | ||
444 | + temp2 = tcg_constant_i32(const9); | ||
445 | tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, | ||
446 | cpu_gpr_d[r1], temp2); | ||
447 | break; | ||
448 | @@ -XXX,XX +XXX,XX @@ static void decode_rr1_mul(DisasContext *ctx) | ||
449 | r1 = MASK_OP_RR1_S1(ctx->opcode); | ||
450 | r2 = MASK_OP_RR1_S2(ctx->opcode); | ||
451 | r3 = MASK_OP_RR1_D(ctx->opcode); | ||
452 | - n = tcg_const_i32(MASK_OP_RR1_N(ctx->opcode)); | ||
453 | + n = tcg_constant_i32(MASK_OP_RR1_N(ctx->opcode)); | ||
454 | op2 = MASK_OP_RR1_OP2(ctx->opcode); | ||
455 | |||
456 | switch (op2) { | ||
457 | @@ -XXX,XX +XXX,XX @@ static void decode_rrr_cond_select(DisasContext *ctx) | ||
458 | cpu_gpr_d[r3]); | ||
459 | break; | ||
460 | case OPC2_32_RRR_SEL: | ||
461 | - temp = tcg_const_i32(0); | ||
462 | + temp = tcg_constant_i32(0); | ||
463 | tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, | ||
464 | cpu_gpr_d[r1], cpu_gpr_d[r2]); | ||
465 | break; | ||
466 | case OPC2_32_RRR_SELN: | ||
467 | - temp = tcg_const_i32(0); | ||
468 | + temp = tcg_constant_i32(0); | ||
469 | tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, | ||
470 | cpu_gpr_d[r1], cpu_gpr_d[r2]); | ||
471 | break; | ||
472 | @@ -XXX,XX +XXX,XX @@ static void decode_32Bit_opc(DisasContext *ctx) | ||
473 | case OPC1_32_ABS_STOREQ: | ||
474 | address = MASK_OP_ABS_OFF18(ctx->opcode); | ||
475 | r1 = MASK_OP_ABS_S1D(ctx->opcode); | ||
476 | - temp = tcg_const_i32(EA_ABS_FORMAT(address)); | ||
477 | + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); | ||
478 | temp2 = tcg_temp_new(); | ||
479 | |||
480 | tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16); | ||
481 | @@ -XXX,XX +XXX,XX @@ static void decode_32Bit_opc(DisasContext *ctx) | ||
482 | case OPC1_32_ABS_LD_Q: | ||
483 | address = MASK_OP_ABS_OFF18(ctx->opcode); | ||
484 | r1 = MASK_OP_ABS_S1D(ctx->opcode); | ||
485 | - temp = tcg_const_i32(EA_ABS_FORMAT(address)); | ||
486 | + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); | ||
487 | |||
488 | tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); | ||
489 | tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); | ||
490 | @@ -XXX,XX +XXX,XX @@ static void decode_32Bit_opc(DisasContext *ctx) | ||
491 | b = MASK_OP_ABSB_B(ctx->opcode); | ||
492 | bpos = MASK_OP_ABSB_BPOS(ctx->opcode); | ||
493 | |||
494 | - temp = tcg_const_i32(EA_ABS_FORMAT(address)); | ||
495 | + temp = tcg_constant_i32(EA_ABS_FORMAT(address)); | ||
496 | temp2 = tcg_temp_new(); | ||
497 | |||
498 | tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB); | ||
499 | @@ -XXX,XX +XXX,XX @@ static void decode_32Bit_opc(DisasContext *ctx) | ||
500 | r2 = MASK_OP_RCRR_S3(ctx->opcode); | ||
501 | r3 = MASK_OP_RCRR_D(ctx->opcode); | ||
502 | const16 = MASK_OP_RCRR_CONST4(ctx->opcode); | ||
503 | - temp = tcg_const_i32(const16); | ||
504 | + temp = tcg_constant_i32(const16); | ||
505 | temp2 = tcg_temp_new(); /* width*/ | ||
506 | temp3 = tcg_temp_new(); /* pos */ | ||
507 | |||
508 | -- | ||
509 | 2.34.1 | ||
510 | |||
1 | This function has two users, which use it incompatibly. | 1 | These three instances got missed in the previous conversion. |
---|---|---|---|
2 | In tlb_flush_page_by_mmuidx_async_0, when flushing a | ||
3 | single page, we need to flush exactly two pages. | ||
4 | In tlb_flush_range_by_mmuidx_async_0, when flushing a | ||
5 | range of pages, we need to flush N+1 pages. | ||
6 | 2 | ||
7 | This avoids double-flushing of jmp cache pages in a range. | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
8 | |||
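(Worked example for the counts above, assuming 4 KiB target pages: a TB that starts near the end of page addr - 0x1000 can spill into page addr, so flushing the single page at addr must also clear the jump-cache entries for addr - 0x1000, i.e. exactly two pages. For a range of N pages the one-page overhang applies only once, giving N + 1 jump-cache pages; the old per-page helper cleared two pages per iteration and so touched most of them twice.)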
9 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 5 | --- |
12 | accel/tcg/cputlb.c | 25 ++++++++++++++----------- | 6 | tcg/tcg-op.c | 12 +++--------- |
13 | 1 file changed, 14 insertions(+), 11 deletions(-) | 7 | 1 file changed, 3 insertions(+), 9 deletions(-) |
14 | 8 | ||
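For context on the tcg_const -> tcg_constant conversions in this series, here is a minimal sketch of the two idioms; the helper name gen_addi_example is made up for illustration. tcg_const_i32() allocates a fresh temporary and emits a mov into it, so the caller owns it and must free it, while tcg_constant_i32() returns a cached, read-only constant that must never be written or freed.

    static void gen_addi_example(TCGv_i32 dst, TCGv_i32 src, int32_t imm)
    {
        /* Old idiom: temporary plus an explicit free. */
        TCGv_i32 t = tcg_const_i32(imm);
        tcg_gen_add_i32(dst, src, t);
        tcg_temp_free_i32(t);

        /* New idiom: nothing to allocate, nothing to free. */
        tcg_gen_add_i32(dst, src, tcg_constant_i32(imm));
    }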
15 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 9 | diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c |
16 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/accel/tcg/cputlb.c | 11 | --- a/tcg/tcg-op.c |
18 | +++ b/accel/tcg/cputlb.c | 12 | +++ b/tcg/tcg-op.c |
19 | @@ -XXX,XX +XXX,XX @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) | 13 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) |
14 | } else if (is_power_of_2(arg2)) { | ||
15 | tcg_gen_shli_i64(ret, arg1, ctz64(arg2)); | ||
16 | } else { | ||
17 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
18 | - tcg_gen_mul_i64(ret, arg1, t0); | ||
19 | - tcg_temp_free_i64(t0); | ||
20 | + tcg_gen_mul_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
20 | } | 21 | } |
21 | } | 22 | } |
22 | 23 | ||
23 | -static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) | 24 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) |
24 | -{ | 25 | tcg_gen_movi_i32(TCGV_HIGH(ret), 0); |
25 | - /* Discard jump cache entries for any tb which might potentially | 26 | tcg_temp_free_i32(t); |
26 | - overlap the flushed page. */ | 27 | } else { |
27 | - tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); | 28 | - TCGv_i64 t0 = tcg_const_i64(arg2); |
28 | - tb_jmp_cache_clear_page(cpu, addr); | 29 | - tcg_gen_clz_i64(ret, arg1, t0); |
29 | -} | 30 | - tcg_temp_free_i64(t0); |
30 | - | 31 | + tcg_gen_clz_i64(ret, arg1, tcg_constant_i64(arg2)); |
31 | /** | ||
32 | * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary | ||
33 | * @desc: The CPUTLBDesc portion of the TLB | ||
34 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, | ||
35 | } | ||
36 | qemu_spin_unlock(&env_tlb(env)->c.lock); | ||
37 | |||
38 | - tb_flush_jmp_cache(cpu, addr); | ||
39 | + /* | ||
40 | + * Discard jump cache entries for any tb which might potentially | ||
41 | + * overlap the flushed page, which includes the previous. | ||
42 | + */ | ||
43 | + tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); | ||
44 | + tb_jmp_cache_clear_page(cpu, addr); | ||
45 | } | ||
46 | |||
47 | /** | ||
48 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, | ||
49 | return; | ||
50 | } | ||
51 | |||
52 | - for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) { | ||
53 | - tb_flush_jmp_cache(cpu, d.addr + i); | ||
54 | + /* | ||
55 | + * Discard jump cache entries for any tb which might potentially | ||
56 | + * overlap the flushed pages, which includes the previous. | ||
57 | + */ | ||
58 | + d.addr -= TARGET_PAGE_SIZE; | ||
59 | + for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) { | ||
60 | + tb_jmp_cache_clear_page(cpu, d.addr); | ||
61 | + d.addr += TARGET_PAGE_SIZE; | ||
62 | } | 32 | } |
63 | } | 33 | } |
34 | |||
35 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) | ||
36 | tcg_gen_ctpop_i64(ret, t); | ||
37 | tcg_temp_free_i64(t); | ||
38 | } else { | ||
39 | - TCGv_i64 t0 = tcg_const_i64(arg2); | ||
40 | - tcg_gen_ctz_i64(ret, arg1, t0); | ||
41 | - tcg_temp_free_i64(t0); | ||
42 | + tcg_gen_ctz_i64(ret, arg1, tcg_constant_i64(arg2)); | ||
43 | } | ||
44 | } | ||
64 | 45 | ||
65 | -- | 46 | -- |
66 | 2.34.1 | 47 | 2.34.1 |
67 | 48 | ||
1 | Add an interface to return the CPUTLBEntryFull struct | 1 | Initialize rmode to -1 instead of keeping two variables. |
---|---|---|---|
2 | that goes with the lookup. The result is not intended | 2 | This is already used elsewhere in translate-a64.c. |
3 | to be valid across multiple lookups, so the user must | ||
4 | use the results immediately. | ||
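A minimal usage sketch for the new probe_access_full() interface described above; the wrapper function, its arguments, and the attrs copy are illustrative rather than taken from the tree:

    /* Hypothetical caller: consume or copy the CPUTLBEntryFull data
     * immediately, before any further access to the same TLB. */
    static bool example_probe(CPUArchState *env, target_ulong addr,
                              int mmu_idx, uintptr_t retaddr, MemTxAttrs *attrs)
    {
        CPUTLBEntryFull *full;
        void *host;
        int flags = probe_access_full(env, addr, MMU_DATA_LOAD, mmu_idx,
                                      true /* nonfault */, &host, &full, retaddr);

        if (flags & TLB_INVALID_MASK) {
            return false;                /* non-faulting lookup failed */
        }
        *attrs = full->attrs;            /* copy what is needed right away */
        return true;
    }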
5 | 3 | ||
6 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 6 | --- |
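For the "Initialize rmode to -1" change in translate-a64.c, a small sketch of the sentinel pattern it adopts (the decode condition shown is hypothetical): every valid FPROUNDING_* value is non-negative, so -1 can stand for "no rounding-mode change requested" and the separate need_rmode flag becomes redundant.

    int rmode = -1;                 /* no change requested by default */
    if (insn_is_fcvtzs) {           /* hypothetical decode condition */
        rmode = FPROUNDING_ZERO;
    }
    if (rmode >= 0) {
        /* set the FP rounding mode, emit the operation, then restore it */
    }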
11 | include/exec/exec-all.h | 15 +++++++++++++ | 7 | target/arm/tcg/translate-a64.c | 34 ++++++---------------------------- |
12 | include/qemu/typedefs.h | 1 + | 8 | 1 file changed, 6 insertions(+), 28 deletions(-) |
13 | accel/tcg/cputlb.c | 47 +++++++++++++++++++++++++---------------- | ||
14 | 3 files changed, 45 insertions(+), 18 deletions(-) | ||
15 | 9 | ||
16 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | 10 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
17 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/include/exec/exec-all.h | 12 | --- a/target/arm/tcg/translate-a64.c |
19 | +++ b/include/exec/exec-all.h | 13 | +++ b/target/arm/tcg/translate-a64.c |
20 | @@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr, | 14 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) |
21 | MMUAccessType access_type, int mmu_idx, | 15 | int rn = extract32(insn, 5, 5); |
22 | bool nonfault, void **phost, uintptr_t retaddr); | 16 | int rd = extract32(insn, 0, 5); |
23 | 17 | bool need_fpstatus = false; | |
24 | +#ifndef CONFIG_USER_ONLY | 18 | - bool need_rmode = false; |
25 | +/** | 19 | int rmode = -1; |
26 | + * probe_access_full: | 20 | TCGv_i32 tcg_rmode; |
27 | + * Like probe_access_flags, except also return into @pfull. | 21 | TCGv_ptr tcg_fpstatus; |
28 | + * | 22 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) |
29 | + * The CPUTLBEntryFull structure returned via @pfull is transient | 23 | case 0x7a: /* FCVTPU */ |
30 | + * and must be consumed or copied immediately, before any further | 24 | case 0x7b: /* FCVTZU */ |
31 | + * access or changes to TLB @mmu_idx. | 25 | need_fpstatus = true; |
32 | + */ | 26 | - need_rmode = true; |
33 | +int probe_access_full(CPUArchState *env, target_ulong addr, | 27 | rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); |
34 | + MMUAccessType access_type, int mmu_idx, | 28 | if (size == 3 && !is_q) { |
35 | + bool nonfault, void **phost, | 29 | unallocated_encoding(s); |
36 | + CPUTLBEntryFull **pfull, uintptr_t retaddr); | 30 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) |
37 | +#endif | 31 | case 0x5c: /* FCVTAU */ |
38 | + | 32 | case 0x1c: /* FCVTAS */ |
39 | #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ | 33 | need_fpstatus = true; |
40 | 34 | - need_rmode = true; | |
41 | /* Estimated block size for TB allocation. */ | 35 | rmode = FPROUNDING_TIEAWAY; |
42 | diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h | 36 | if (size == 3 && !is_q) { |
43 | index XXXXXXX..XXXXXXX 100644 | 37 | unallocated_encoding(s); |
44 | --- a/include/qemu/typedefs.h | 38 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) |
45 | +++ b/include/qemu/typedefs.h | 39 | case 0x19: /* FRINTM */ |
46 | @@ -XXX,XX +XXX,XX @@ typedef struct ConfidentialGuestSupport ConfidentialGuestSupport; | 40 | case 0x38: /* FRINTP */ |
47 | typedef struct CPUAddressSpace CPUAddressSpace; | 41 | case 0x39: /* FRINTZ */ |
48 | typedef struct CPUArchState CPUArchState; | 42 | - need_rmode = true; |
49 | typedef struct CPUState CPUState; | 43 | rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); |
50 | +typedef struct CPUTLBEntryFull CPUTLBEntryFull; | 44 | /* fall through */ |
51 | typedef struct DeviceListener DeviceListener; | 45 | case 0x59: /* FRINTX */ |
52 | typedef struct DeviceState DeviceState; | 46 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) |
53 | typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot; | ||
54 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
55 | index XXXXXXX..XXXXXXX 100644 | ||
56 | --- a/accel/tcg/cputlb.c | ||
57 | +++ b/accel/tcg/cputlb.c | ||
58 | @@ -XXX,XX +XXX,XX @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, | ||
59 | static int probe_access_internal(CPUArchState *env, target_ulong addr, | ||
60 | int fault_size, MMUAccessType access_type, | ||
61 | int mmu_idx, bool nonfault, | ||
62 | - void **phost, uintptr_t retaddr) | ||
63 | + void **phost, CPUTLBEntryFull **pfull, | ||
64 | + uintptr_t retaddr) | ||
65 | { | ||
66 | uintptr_t index = tlb_index(env, mmu_idx, addr); | ||
67 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | ||
68 | @@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, | ||
69 | mmu_idx, nonfault, retaddr)) { | ||
70 | /* Non-faulting page table read failed. */ | ||
71 | *phost = NULL; | ||
72 | + *pfull = NULL; | ||
73 | return TLB_INVALID_MASK; | ||
74 | } | 47 | } |
75 | 48 | break; | |
76 | /* TLB resize via tlb_fill may have moved the entry. */ | 49 | case 0x58: /* FRINTA */ |
77 | + index = tlb_index(env, mmu_idx, addr); | 50 | - need_rmode = true; |
78 | entry = tlb_entry(env, mmu_idx, addr); | 51 | rmode = FPROUNDING_TIEAWAY; |
79 | 52 | need_fpstatus = true; | |
80 | /* | 53 | if (size == 3 && !is_q) { |
81 | @@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, | 54 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) |
55 | break; | ||
56 | case 0x1e: /* FRINT32Z */ | ||
57 | case 0x1f: /* FRINT64Z */ | ||
58 | - need_rmode = true; | ||
59 | rmode = FPROUNDING_ZERO; | ||
60 | /* fall through */ | ||
61 | case 0x5e: /* FRINT32X */ | ||
62 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) | ||
63 | return; | ||
82 | } | 64 | } |
83 | flags &= tlb_addr; | 65 | |
84 | 66 | - if (need_fpstatus || need_rmode) { | |
85 | + *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index]; | 67 | + if (need_fpstatus || rmode >= 0) { |
86 | + | 68 | tcg_fpstatus = fpstatus_ptr(FPST_FPCR); |
87 | /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ | 69 | } else { |
88 | if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { | 70 | tcg_fpstatus = NULL; |
89 | *phost = NULL; | 71 | } |
90 | @@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, | 72 | - if (need_rmode) { |
91 | return flags; | 73 | + if (rmode >= 0) { |
74 | tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); | ||
75 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
76 | } else { | ||
77 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) | ||
78 | } | ||
79 | clear_vec_high(s, is_q, rd); | ||
80 | |||
81 | - if (need_rmode) { | ||
82 | + if (tcg_rmode) { | ||
83 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
84 | } | ||
92 | } | 85 | } |
93 | 86 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) | |
94 | -int probe_access_flags(CPUArchState *env, target_ulong addr, | 87 | int pass; |
95 | - MMUAccessType access_type, int mmu_idx, | 88 | TCGv_i32 tcg_rmode = NULL; |
96 | - bool nonfault, void **phost, uintptr_t retaddr) | 89 | TCGv_ptr tcg_fpstatus = NULL; |
97 | +int probe_access_full(CPUArchState *env, target_ulong addr, | 90 | - bool need_rmode = false; |
98 | + MMUAccessType access_type, int mmu_idx, | 91 | bool need_fpst = true; |
99 | + bool nonfault, void **phost, CPUTLBEntryFull **pfull, | 92 | - int rmode; |
100 | + uintptr_t retaddr) | 93 | + int rmode = -1; |
101 | { | 94 | |
102 | - int flags; | 95 | if (!dc_isar_feature(aa64_fp16, s)) { |
103 | - | 96 | unallocated_encoding(s); |
104 | - flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, | 97 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) |
105 | - nonfault, phost, retaddr); | 98 | case 0x3f: /* FRECPX */ |
106 | + int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, | 99 | break; |
107 | + nonfault, phost, pfull, retaddr); | 100 | case 0x18: /* FRINTN */ |
108 | 101 | - need_rmode = true; | |
109 | /* Handle clean RAM pages. */ | 102 | only_in_vector = true; |
110 | if (unlikely(flags & TLB_NOTDIRTY)) { | 103 | rmode = FPROUNDING_TIEEVEN; |
111 | - uintptr_t index = tlb_index(env, mmu_idx, addr); | 104 | break; |
112 | - CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; | 105 | case 0x19: /* FRINTM */ |
113 | - | 106 | - need_rmode = true; |
114 | - notdirty_write(env_cpu(env), addr, 1, full, retaddr); | 107 | only_in_vector = true; |
115 | + notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr); | 108 | rmode = FPROUNDING_NEGINF; |
116 | flags &= ~TLB_NOTDIRTY; | 109 | break; |
110 | case 0x38: /* FRINTP */ | ||
111 | - need_rmode = true; | ||
112 | only_in_vector = true; | ||
113 | rmode = FPROUNDING_POSINF; | ||
114 | break; | ||
115 | case 0x39: /* FRINTZ */ | ||
116 | - need_rmode = true; | ||
117 | only_in_vector = true; | ||
118 | rmode = FPROUNDING_ZERO; | ||
119 | break; | ||
120 | case 0x58: /* FRINTA */ | ||
121 | - need_rmode = true; | ||
122 | only_in_vector = true; | ||
123 | rmode = FPROUNDING_TIEAWAY; | ||
124 | break; | ||
125 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) | ||
126 | /* current rounding mode */ | ||
127 | break; | ||
128 | case 0x1a: /* FCVTNS */ | ||
129 | - need_rmode = true; | ||
130 | rmode = FPROUNDING_TIEEVEN; | ||
131 | break; | ||
132 | case 0x1b: /* FCVTMS */ | ||
133 | - need_rmode = true; | ||
134 | rmode = FPROUNDING_NEGINF; | ||
135 | break; | ||
136 | case 0x1c: /* FCVTAS */ | ||
137 | - need_rmode = true; | ||
138 | rmode = FPROUNDING_TIEAWAY; | ||
139 | break; | ||
140 | case 0x3a: /* FCVTPS */ | ||
141 | - need_rmode = true; | ||
142 | rmode = FPROUNDING_POSINF; | ||
143 | break; | ||
144 | case 0x3b: /* FCVTZS */ | ||
145 | - need_rmode = true; | ||
146 | rmode = FPROUNDING_ZERO; | ||
147 | break; | ||
148 | case 0x5a: /* FCVTNU */ | ||
149 | - need_rmode = true; | ||
150 | rmode = FPROUNDING_TIEEVEN; | ||
151 | break; | ||
152 | case 0x5b: /* FCVTMU */ | ||
153 | - need_rmode = true; | ||
154 | rmode = FPROUNDING_NEGINF; | ||
155 | break; | ||
156 | case 0x5c: /* FCVTAU */ | ||
157 | - need_rmode = true; | ||
158 | rmode = FPROUNDING_TIEAWAY; | ||
159 | break; | ||
160 | case 0x7a: /* FCVTPU */ | ||
161 | - need_rmode = true; | ||
162 | rmode = FPROUNDING_POSINF; | ||
163 | break; | ||
164 | case 0x7b: /* FCVTZU */ | ||
165 | - need_rmode = true; | ||
166 | rmode = FPROUNDING_ZERO; | ||
167 | break; | ||
168 | case 0x2f: /* FABS */ | ||
169 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) | ||
170 | return; | ||
117 | } | 171 | } |
118 | 172 | ||
119 | return flags; | 173 | - if (need_rmode || need_fpst) { |
120 | } | 174 | + if (rmode >= 0 || need_fpst) { |
121 | 175 | tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16); | |
122 | +int probe_access_flags(CPUArchState *env, target_ulong addr, | ||
123 | + MMUAccessType access_type, int mmu_idx, | ||
124 | + bool nonfault, void **phost, uintptr_t retaddr) | ||
125 | +{ | ||
126 | + CPUTLBEntryFull *full; | ||
127 | + | ||
128 | + return probe_access_full(env, addr, access_type, mmu_idx, | ||
129 | + nonfault, phost, &full, retaddr); | ||
130 | +} | ||
131 | + | ||
132 | void *probe_access(CPUArchState *env, target_ulong addr, int size, | ||
133 | MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) | ||
134 | { | ||
135 | + CPUTLBEntryFull *full; | ||
136 | void *host; | ||
137 | int flags; | ||
138 | |||
139 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); | ||
140 | |||
141 | flags = probe_access_internal(env, addr, size, access_type, mmu_idx, | ||
142 | - false, &host, retaddr); | ||
143 | + false, &host, &full, retaddr); | ||
144 | |||
145 | /* Per the interface, size == 0 merely faults the access. */ | ||
146 | if (size == 0) { | ||
147 | @@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size, | ||
148 | } | 176 | } |
149 | 177 | ||
150 | if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { | 178 | - if (need_rmode) { |
151 | - uintptr_t index = tlb_index(env, mmu_idx, addr); | 179 | + if (rmode >= 0) { |
152 | - CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; | 180 | tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); |
153 | - | 181 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); |
154 | /* Handle watchpoints. */ | ||
155 | if (flags & TLB_WATCHPOINT) { | ||
156 | int wp_access = (access_type == MMU_DATA_STORE | ||
157 | @@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size, | ||
158 | void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, | ||
159 | MMUAccessType access_type, int mmu_idx) | ||
160 | { | ||
161 | + CPUTLBEntryFull *full; | ||
162 | void *host; | ||
163 | int flags; | ||
164 | |||
165 | flags = probe_access_internal(env, addr, 0, access_type, | ||
166 | - mmu_idx, true, &host, 0); | ||
167 | + mmu_idx, true, &host, &full, 0); | ||
168 | |||
169 | /* No combination of flags are expected by the caller. */ | ||
170 | return flags ? NULL : host; | ||
171 | @@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, | ||
172 | tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, | ||
173 | void **hostp) | ||
174 | { | ||
175 | + CPUTLBEntryFull *full; | ||
176 | void *p; | ||
177 | |||
178 | (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH, | ||
179 | - cpu_mmu_index(env, true), false, &p, 0); | ||
180 | + cpu_mmu_index(env, true), false, &p, &full, 0); | ||
181 | if (p == NULL) { | ||
182 | return -1; | ||
183 | } | 182 | } |
184 | -- | 183 | -- |
185 | 2.34.1 | 184 | 2.34.1 |
186 | 185 | ||
New patch | |||
---|---|---|---|
1 | While this enumerator has been present since the first commit, | ||
2 | it isn't ever used. The first actual use of round-to-odd came | ||
3 | with SVE, which currently uses float_round_to_odd instead of | ||
4 | the arm-specific enumerator. | ||
1 | 5 | ||
6 | Amusingly, the comment about unhandled TIEAWAY has been | ||
7 | out of date since the initial commit of translate-a64.c. | ||
8 | |||
9 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | target/arm/vfp_helper.c | 6 ++---- | ||
13 | 1 file changed, 2 insertions(+), 4 deletions(-) | ||
14 | |||
15 | diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/vfp_helper.c | ||
18 | +++ b/target/arm/vfp_helper.c | ||
19 | @@ -XXX,XX +XXX,XX @@ int arm_rmode_to_sf(int rmode) | ||
20 | rmode = float_round_ties_away; | ||
21 | break; | ||
22 | case FPROUNDING_ODD: | ||
23 | - /* FIXME: add support for TIEAWAY and ODD */ | ||
24 | - qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n", | ||
25 | - rmode); | ||
26 | - /* fall through for now */ | ||
27 | + rmode = float_round_to_odd; | ||
28 | + break; | ||
29 | case FPROUNDING_TIEEVEN: | ||
30 | default: | ||
31 | rmode = float_round_nearest_even; | ||
32 | -- | ||
33 | 2.34.1 | ||
34 | |||
New patch | |||
---|---|---|---|
1 | Use proper enumeration types for input and output. | ||
2 | Use a const array to perform the mapping, with an | ||
3 | assert that the input is valid. | ||
1 | 4 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
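A usage sketch of the resulting mapping; the values in the comments follow from the table added below, and the snippet itself is illustrative rather than part of the patch:

    FloatRoundMode down = arm_rmode_to_sf(FPROUNDING_NEGINF);  /* float_round_down */
    FloatRoundMode odd  = arm_rmode_to_sf(FPROUNDING_ODD);     /* float_round_to_odd */
    /* An out-of-range argument now trips the assert instead of silently
     * falling back to nearest-even the way the old switch's default case did. */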
8 | target/arm/internals.h | 12 +++++++++--- | ||
9 | target/arm/tcg/translate-mve.c | 2 +- | ||
10 | target/arm/vfp_helper.c | 33 ++++++++------------------------- | ||
11 | 3 files changed, 18 insertions(+), 29 deletions(-) | ||
12 | |||
13 | diff --git a/target/arm/internals.h b/target/arm/internals.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/arm/internals.h | ||
16 | +++ b/target/arm/internals.h | ||
17 | @@ -XXX,XX +XXX,XX @@ void arm_restore_state_to_opc(CPUState *cs, | ||
18 | void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); | ||
19 | #endif /* CONFIG_TCG */ | ||
20 | |||
21 | -enum arm_fprounding { | ||
22 | +typedef enum ARMFPRounding { | ||
23 | FPROUNDING_TIEEVEN, | ||
24 | FPROUNDING_POSINF, | ||
25 | FPROUNDING_NEGINF, | ||
26 | FPROUNDING_ZERO, | ||
27 | FPROUNDING_TIEAWAY, | ||
28 | FPROUNDING_ODD | ||
29 | -}; | ||
30 | +} ARMFPRounding; | ||
31 | |||
32 | -int arm_rmode_to_sf(int rmode); | ||
33 | +extern const FloatRoundMode arm_rmode_to_sf_map[6]; | ||
34 | + | ||
35 | +static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode) | ||
36 | +{ | ||
37 | + assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map)); | ||
38 | + return arm_rmode_to_sf_map[rmode]; | ||
39 | +} | ||
40 | |||
41 | static inline void aarch64_save_sp(CPUARMState *env, int el) | ||
42 | { | ||
43 | diff --git a/target/arm/tcg/translate-mve.c b/target/arm/tcg/translate-mve.c | ||
44 | index XXXXXXX..XXXXXXX 100644 | ||
45 | --- a/target/arm/tcg/translate-mve.c | ||
46 | +++ b/target/arm/tcg/translate-mve.c | ||
47 | @@ -XXX,XX +XXX,XX @@ DO_VCVT(VCVT_FS, vcvt_hs, vcvt_fs) | ||
48 | DO_VCVT(VCVT_FU, vcvt_hu, vcvt_fu) | ||
49 | |||
50 | static bool do_vcvt_rmode(DisasContext *s, arg_1op *a, | ||
51 | - enum arm_fprounding rmode, bool u) | ||
52 | + ARMFPRounding rmode, bool u) | ||
53 | { | ||
54 | /* | ||
55 | * Handle VCVT fp to int with specified rounding mode. | ||
56 | diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c | ||
57 | index XXXXXXX..XXXXXXX 100644 | ||
58 | --- a/target/arm/vfp_helper.c | ||
59 | +++ b/target/arm/vfp_helper.c | ||
60 | @@ -XXX,XX +XXX,XX @@ float64 HELPER(rintd)(float64 x, void *fp_status) | ||
61 | } | ||
62 | |||
63 | /* Convert ARM rounding mode to softfloat */ | ||
64 | -int arm_rmode_to_sf(int rmode) | ||
65 | -{ | ||
66 | - switch (rmode) { | ||
67 | - case FPROUNDING_TIEAWAY: | ||
68 | - rmode = float_round_ties_away; | ||
69 | - break; | ||
70 | - case FPROUNDING_ODD: | ||
71 | - rmode = float_round_to_odd; | ||
72 | - break; | ||
73 | - case FPROUNDING_TIEEVEN: | ||
74 | - default: | ||
75 | - rmode = float_round_nearest_even; | ||
76 | - break; | ||
77 | - case FPROUNDING_POSINF: | ||
78 | - rmode = float_round_up; | ||
79 | - break; | ||
80 | - case FPROUNDING_NEGINF: | ||
81 | - rmode = float_round_down; | ||
82 | - break; | ||
83 | - case FPROUNDING_ZERO: | ||
84 | - rmode = float_round_to_zero; | ||
85 | - break; | ||
86 | - } | ||
87 | - return rmode; | ||
88 | -} | ||
89 | +const FloatRoundMode arm_rmode_to_sf_map[] = { | ||
90 | + [FPROUNDING_TIEEVEN] = float_round_nearest_even, | ||
91 | + [FPROUNDING_POSINF] = float_round_up, | ||
92 | + [FPROUNDING_NEGINF] = float_round_down, | ||
93 | + [FPROUNDING_ZERO] = float_round_to_zero, | ||
94 | + [FPROUNDING_TIEAWAY] = float_round_ties_away, | ||
95 | + [FPROUNDING_ODD] = float_round_to_odd, | ||
96 | +}; | ||
97 | |||
98 | /* | ||
99 | * Implement float64 to int32_t conversion without saturation; | ||
100 | -- | ||
101 | 2.34.1 | ||
102 | |||
New patch | |||
---|---|---|---|
1 | In preparation for extracting new helpers, ensure that | ||
2 | the rounding mode is represented as ARMFPRounding and | ||
3 | not FloatRoundMode. | ||
1 | 4 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/arm/tcg/translate-a64.c | 16 ++++++++-------- | ||
9 | target/arm/tcg/translate-sve.c | 18 +++++++++--------- | ||
10 | target/arm/tcg/translate-vfp.c | 6 +++--- | ||
11 | 3 files changed, 20 insertions(+), 20 deletions(-) | ||
12 | |||
13 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/arm/tcg/translate-a64.c | ||
16 | +++ b/target/arm/tcg/translate-a64.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) | ||
18 | case 0xa: /* FRINTM */ | ||
19 | case 0xb: /* FRINTZ */ | ||
20 | case 0xc: /* FRINTA */ | ||
21 | - rmode = arm_rmode_to_sf(opcode & 7); | ||
22 | + rmode = opcode & 7; | ||
23 | gen_fpst = gen_helper_rints; | ||
24 | break; | ||
25 | case 0xe: /* FRINTX */ | ||
26 | @@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) | ||
27 | gen_fpst = gen_helper_rints; | ||
28 | break; | ||
29 | case 0x10: /* FRINT32Z */ | ||
30 | - rmode = float_round_to_zero; | ||
31 | + rmode = FPROUNDING_ZERO; | ||
32 | gen_fpst = gen_helper_frint32_s; | ||
33 | break; | ||
34 | case 0x11: /* FRINT32X */ | ||
35 | gen_fpst = gen_helper_frint32_s; | ||
36 | break; | ||
37 | case 0x12: /* FRINT64Z */ | ||
38 | - rmode = float_round_to_zero; | ||
39 | + rmode = FPROUNDING_ZERO; | ||
40 | gen_fpst = gen_helper_frint64_s; | ||
41 | break; | ||
42 | case 0x13: /* FRINT64X */ | ||
43 | @@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) | ||
44 | |||
45 | fpst = fpstatus_ptr(FPST_FPCR); | ||
46 | if (rmode >= 0) { | ||
47 | - TCGv_i32 tcg_rmode = tcg_const_i32(rmode); | ||
48 | + TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); | ||
49 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
50 | gen_fpst(tcg_res, tcg_op, fpst); | ||
51 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
52 | @@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) | ||
53 | case 0xa: /* FRINTM */ | ||
54 | case 0xb: /* FRINTZ */ | ||
55 | case 0xc: /* FRINTA */ | ||
56 | - rmode = arm_rmode_to_sf(opcode & 7); | ||
57 | + rmode = opcode & 7; | ||
58 | gen_fpst = gen_helper_rintd; | ||
59 | break; | ||
60 | case 0xe: /* FRINTX */ | ||
61 | @@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) | ||
62 | gen_fpst = gen_helper_rintd; | ||
63 | break; | ||
64 | case 0x10: /* FRINT32Z */ | ||
65 | - rmode = float_round_to_zero; | ||
66 | + rmode = FPROUNDING_ZERO; | ||
67 | gen_fpst = gen_helper_frint32_d; | ||
68 | break; | ||
69 | case 0x11: /* FRINT32X */ | ||
70 | gen_fpst = gen_helper_frint32_d; | ||
71 | break; | ||
72 | case 0x12: /* FRINT64Z */ | ||
73 | - rmode = float_round_to_zero; | ||
74 | + rmode = FPROUNDING_ZERO; | ||
75 | gen_fpst = gen_helper_frint64_d; | ||
76 | break; | ||
77 | case 0x13: /* FRINT64X */ | ||
78 | @@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) | ||
79 | |||
80 | fpst = fpstatus_ptr(FPST_FPCR); | ||
81 | if (rmode >= 0) { | ||
82 | - TCGv_i32 tcg_rmode = tcg_const_i32(rmode); | ||
83 | + TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); | ||
84 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
85 | gen_fpst(tcg_res, tcg_op, fpst); | ||
86 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
87 | diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c | ||
88 | index XXXXXXX..XXXXXXX 100644 | ||
89 | --- a/target/arm/tcg/translate-sve.c | ||
90 | +++ b/target/arm/tcg/translate-sve.c | ||
91 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FRINTX, aa64_sve, gen_gvec_fpst_arg_zpz, frintx_fns[a->esz], | ||
92 | a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); | ||
93 | |||
94 | static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, | ||
95 | - int mode, gen_helper_gvec_3_ptr *fn) | ||
96 | + ARMFPRounding mode, gen_helper_gvec_3_ptr *fn) | ||
97 | { | ||
98 | unsigned vsz; | ||
99 | TCGv_i32 tmode; | ||
100 | @@ -XXX,XX +XXX,XX @@ static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, | ||
101 | } | ||
102 | |||
103 | vsz = vec_full_reg_size(s); | ||
104 | - tmode = tcg_const_i32(mode); | ||
105 | + tmode = tcg_const_i32(arm_rmode_to_sf(mode)); | ||
106 | status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); | ||
107 | |||
108 | gen_helper_set_rmode(tmode, tmode, status); | ||
109 | @@ -XXX,XX +XXX,XX @@ static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, | ||
110 | } | ||
111 | |||
112 | TRANS_FEAT(FRINTN, aa64_sve, do_frint_mode, a, | ||
113 | - float_round_nearest_even, frint_fns[a->esz]) | ||
114 | + FPROUNDING_TIEEVEN, frint_fns[a->esz]) | ||
115 | TRANS_FEAT(FRINTP, aa64_sve, do_frint_mode, a, | ||
116 | - float_round_up, frint_fns[a->esz]) | ||
117 | + FPROUNDING_POSINF, frint_fns[a->esz]) | ||
118 | TRANS_FEAT(FRINTM, aa64_sve, do_frint_mode, a, | ||
119 | - float_round_down, frint_fns[a->esz]) | ||
120 | + FPROUNDING_NEGINF, frint_fns[a->esz]) | ||
121 | TRANS_FEAT(FRINTZ, aa64_sve, do_frint_mode, a, | ||
122 | - float_round_to_zero, frint_fns[a->esz]) | ||
123 | + FPROUNDING_ZERO, frint_fns[a->esz]) | ||
124 | TRANS_FEAT(FRINTA, aa64_sve, do_frint_mode, a, | ||
125 | - float_round_ties_away, frint_fns[a->esz]) | ||
126 | + FPROUNDING_TIEAWAY, frint_fns[a->esz]) | ||
127 | |||
128 | static gen_helper_gvec_3_ptr * const frecpx_fns[] = { | ||
129 | NULL, gen_helper_sve_frecpx_h, | ||
130 | @@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FCVTLT_sd, aa64_sve2, gen_gvec_fpst_arg_zpz, | ||
131 | gen_helper_sve2_fcvtlt_sd, a, 0, FPST_FPCR) | ||
132 | |||
133 | TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a, | ||
134 | - float_round_to_odd, gen_helper_sve_fcvt_ds) | ||
135 | + FPROUNDING_ODD, gen_helper_sve_fcvt_ds) | ||
136 | TRANS_FEAT(FCVTXNT_ds, aa64_sve2, do_frint_mode, a, | ||
137 | - float_round_to_odd, gen_helper_sve2_fcvtnt_ds) | ||
138 | + FPROUNDING_ODD, gen_helper_sve2_fcvtnt_ds) | ||
139 | |||
140 | static gen_helper_gvec_3_ptr * const flogb_fns[] = { | ||
141 | NULL, gen_helper_flogb_h, | ||
142 | diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c | ||
143 | index XXXXXXX..XXXXXXX 100644 | ||
144 | --- a/target/arm/tcg/translate-vfp.c | ||
145 | +++ b/target/arm/tcg/translate-vfp.c | ||
146 | @@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a) | ||
147 | tmp = tcg_temp_new_i32(); | ||
148 | vfp_load_reg32(tmp, a->vm); | ||
149 | fpst = fpstatus_ptr(FPST_FPCR_F16); | ||
150 | - tcg_rmode = tcg_const_i32(float_round_to_zero); | ||
151 | + tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO)); | ||
152 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
153 | gen_helper_rinth(tmp, tmp, fpst); | ||
154 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
155 | @@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a) | ||
156 | tmp = tcg_temp_new_i32(); | ||
157 | vfp_load_reg32(tmp, a->vm); | ||
158 | fpst = fpstatus_ptr(FPST_FPCR); | ||
159 | - tcg_rmode = tcg_const_i32(float_round_to_zero); | ||
160 | + tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO)); | ||
161 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
162 | gen_helper_rints(tmp, tmp, fpst); | ||
163 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
164 | @@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a) | ||
165 | tmp = tcg_temp_new_i64(); | ||
166 | vfp_load_reg64(tmp, a->vm); | ||
167 | fpst = fpstatus_ptr(FPST_FPCR); | ||
168 | - tcg_rmode = tcg_const_i32(float_round_to_zero); | ||
169 | + tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO)); | ||
170 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
171 | gen_helper_rintd(tmp, tmp, fpst); | ||
172 | gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
173 | -- | ||
174 | 2.34.1 | ||
175 | |||
1 | From: Alex Bennée <alex.bennee@linaro.org> | 1 | Split out common subroutines for handling rounding mode |
---|---|---|---|
2 | changes during translation. Use tcg_constant_i32 and | ||
3 | tcg_temp_new_i32 instead of tcg_const_i32. | ||
2 | 4 | ||
3 | The class cast checkers are quite expensive and always on (unlike the | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | dynamic case whose checks are gated by CONFIG_QOM_CAST_DEBUG). To | ||
5 | avoid the overhead of repeatedly checking something which should never | ||
6 | change, we cache the CPUClass reference for use in the hot code paths. | ||
7 | |||
8 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> | ||
9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Message-Id: <20220811151413.3350684-3-alex.bennee@linaro.org> | ||
11 | Signed-off-by: Cédric Le Goater <clg@kaod.org> | ||
12 | Message-Id: <20220923084803.498337-3-clg@kaod.org> | ||
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
14 | --- | 7 | --- |
15 | include/hw/core/cpu.h | 9 +++++++++ | 8 | target/arm/tcg/translate.h | 17 ++++++++++++ |
16 | cpu.c | 9 ++++----- | 9 | target/arm/tcg/translate-a64.c | 47 ++++++++++++++-------------------- |
17 | 2 files changed, 13 insertions(+), 5 deletions(-) | 10 | target/arm/tcg/translate-sve.c | 6 ++--- |
11 | target/arm/tcg/translate-vfp.c | 26 ++++++++----------- | ||
12 | 4 files changed, 48 insertions(+), 48 deletions(-) | ||
18 | 13 | ||
19 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | 14 | diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h |
20 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/include/hw/core/cpu.h | 16 | --- a/target/arm/tcg/translate.h |
22 | +++ b/include/hw/core/cpu.h | 17 | +++ b/target/arm/tcg/translate.h |
23 | @@ -XXX,XX +XXX,XX @@ typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size, | 18 | @@ -XXX,XX +XXX,XX @@ static inline TCGv_ptr gen_lookup_cp_reg(uint32_t key) |
24 | */ | 19 | return ret; |
25 | #define CPU(obj) ((CPUState *)(obj)) | 20 | } |
26 | 21 | ||
27 | +/* | 22 | +/* |
28 | + * The class checkers bring in CPU_GET_CLASS() which is potentially | 23 | + * Set and reset rounding mode around another operation. |
29 | + * expensive given the eventual call to | ||
30 | + * object_class_dynamic_cast_assert(). Because of this the CPUState | ||
31 | + * has a cached value for the class in cs->cc which is set up in | ||
32 | + * cpu_exec_realizefn() for use in hot code paths. | ||
33 | + */ | 24 | + */ |
34 | typedef struct CPUClass CPUClass; | 25 | +static inline TCGv_i32 gen_set_rmode(ARMFPRounding rmode, TCGv_ptr fpst) |
35 | DECLARE_CLASS_CHECKERS(CPUClass, CPU, | 26 | +{ |
36 | TYPE_CPU) | 27 | + TCGv_i32 new = tcg_constant_i32(arm_rmode_to_sf(rmode)); |
37 | @@ -XXX,XX +XXX,XX @@ struct qemu_work_item; | 28 | + TCGv_i32 old = tcg_temp_new_i32(); |
38 | struct CPUState { | 29 | + |
39 | /*< private >*/ | 30 | + gen_helper_set_rmode(old, new, fpst); |
40 | DeviceState parent_obj; | 31 | + return old; |
41 | + /* cache to avoid expensive CPU_GET_CLASS */ | 32 | +} |
42 | + CPUClass *cc; | 33 | + |
43 | /*< public >*/ | 34 | +static inline void gen_restore_rmode(TCGv_i32 old, TCGv_ptr fpst) |
44 | 35 | +{ | |
45 | int nr_cores; | 36 | + gen_helper_set_rmode(old, old, fpst); |
46 | diff --git a/cpu.c b/cpu.c | 37 | +} |
38 | + | ||
39 | /* | ||
40 | * Helpers for implementing sets of trans_* functions. | ||
41 | * Defer the implementation of NAME to FUNC, with optional extra arguments. | ||
42 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
47 | index XXXXXXX..XXXXXXX 100644 | 43 | index XXXXXXX..XXXXXXX 100644 |
48 | --- a/cpu.c | 44 | --- a/target/arm/tcg/translate-a64.c |
49 | +++ b/cpu.c | 45 | +++ b/target/arm/tcg/translate-a64.c |
50 | @@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_cpu_common = { | 46 | @@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn) |
51 | 47 | case 0xb: /* FRINTZ */ | |
52 | void cpu_exec_realizefn(CPUState *cpu, Error **errp) | 48 | case 0xc: /* FRINTA */ |
53 | { | 49 | { |
54 | -#ifndef CONFIG_USER_ONLY | 50 | - TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7)); |
55 | - CPUClass *cc = CPU_GET_CLASS(cpu); | 51 | + TCGv_i32 tcg_rmode; |
56 | -#endif | 52 | + |
57 | + /* cache the cpu class for the hotpath */ | 53 | fpst = fpstatus_ptr(FPST_FPCR_F16); |
58 | + cpu->cc = CPU_GET_CLASS(cpu); | 54 | - |
59 | 55 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | |
60 | cpu_list_add(cpu); | 56 | + tcg_rmode = gen_set_rmode(opcode & 7, fpst); |
61 | if (!accel_cpu_realizefn(cpu, errp)) { | 57 | gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst); |
62 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp) | 58 | - |
63 | if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { | 59 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); |
64 | vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu); | 60 | + gen_restore_rmode(tcg_rmode, fpst); |
65 | } | 61 | break; |
66 | - if (cc->sysemu_ops->legacy_vmsd != NULL) { | 62 | } |
67 | - vmstate_register(NULL, cpu->cpu_index, cc->sysemu_ops->legacy_vmsd, cpu); | 63 | case 0xe: /* FRINTX */ |
68 | + if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) { | 64 | @@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) |
69 | + vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu); | 65 | |
70 | } | 66 | fpst = fpstatus_ptr(FPST_FPCR); |
71 | #endif /* CONFIG_USER_ONLY */ | 67 | if (rmode >= 0) { |
68 | - TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); | ||
69 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
70 | + TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst); | ||
71 | gen_fpst(tcg_res, tcg_op, fpst); | ||
72 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
73 | + gen_restore_rmode(tcg_rmode, fpst); | ||
74 | } else { | ||
75 | gen_fpst(tcg_res, tcg_op, fpst); | ||
76 | } | ||
77 | @@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) | ||
78 | |||
79 | fpst = fpstatus_ptr(FPST_FPCR); | ||
80 | if (rmode >= 0) { | ||
81 | - TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); | ||
82 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
83 | + TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst); | ||
84 | gen_fpst(tcg_res, tcg_op, fpst); | ||
85 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
86 | + gen_restore_rmode(tcg_rmode, fpst); | ||
87 | } else { | ||
88 | gen_fpst(tcg_res, tcg_op, fpst); | ||
89 | } | ||
90 | @@ -XXX,XX +XXX,XX @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, | ||
91 | rmode = FPROUNDING_TIEAWAY; | ||
92 | } | ||
93 | |||
94 | - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); | ||
95 | - | ||
96 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
97 | + tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); | ||
98 | |||
99 | switch (type) { | ||
100 | case 1: /* float64 */ | ||
101 | @@ -XXX,XX +XXX,XX @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, | ||
102 | g_assert_not_reached(); | ||
103 | } | ||
104 | |||
105 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
106 | + gen_restore_rmode(tcg_rmode, tcg_fpstatus); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | @@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, | ||
111 | |||
112 | assert(!(is_scalar && is_q)); | ||
113 | |||
114 | - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO)); | ||
115 | tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); | ||
116 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
117 | + tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus); | ||
118 | fracbits = (16 << size) - immhb; | ||
119 | tcg_shift = tcg_constant_i32(fracbits); | ||
120 | |||
121 | @@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, | ||
122 | } | ||
123 | } | ||
124 | |||
125 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
126 | + gen_restore_rmode(tcg_rmode, tcg_fpstatus); | ||
127 | } | ||
128 | |||
129 | /* AdvSIMD scalar shift by immediate | ||
130 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) | ||
131 | } | ||
132 | |||
133 | if (is_fcvt) { | ||
134 | - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); | ||
135 | tcg_fpstatus = fpstatus_ptr(FPST_FPCR); | ||
136 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
137 | + tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); | ||
138 | } else { | ||
139 | - tcg_rmode = NULL; | ||
140 | tcg_fpstatus = NULL; | ||
141 | + tcg_rmode = NULL; | ||
142 | } | ||
143 | |||
144 | if (size == 3) { | ||
145 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) | ||
146 | } | ||
147 | |||
148 | if (is_fcvt) { | ||
149 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
150 | + gen_restore_rmode(tcg_rmode, tcg_fpstatus); | ||
151 | } | ||
152 | } | ||
153 | |||
154 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) | ||
155 | tcg_fpstatus = NULL; | ||
156 | } | ||
157 | if (rmode >= 0) { | ||
158 | - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); | ||
159 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
160 | + tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); | ||
161 | } else { | ||
162 | tcg_rmode = NULL; | ||
163 | } | ||
164 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) | ||
165 | clear_vec_high(s, is_q, rd); | ||
166 | |||
167 | if (tcg_rmode) { | ||
168 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
169 | + gen_restore_rmode(tcg_rmode, tcg_fpstatus); | ||
170 | } | ||
171 | } | ||
172 | |||
173 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) | ||
174 | } | ||
175 | |||
176 | if (rmode >= 0) { | ||
177 | - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); | ||
178 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
179 | + tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); | ||
180 | } | ||
181 | |||
182 | if (is_scalar) { | ||
183 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) | ||
184 | } | ||
185 | |||
186 | if (tcg_rmode) { | ||
187 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); | ||
188 | + gen_restore_rmode(tcg_rmode, tcg_fpstatus); | ||
189 | } | ||
190 | } | ||
191 | |||
192 | diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c | ||
193 | index XXXXXXX..XXXXXXX 100644 | ||
194 | --- a/target/arm/tcg/translate-sve.c | ||
195 | +++ b/target/arm/tcg/translate-sve.c | ||
196 | @@ -XXX,XX +XXX,XX @@ static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, | ||
197 | } | ||
198 | |||
199 | vsz = vec_full_reg_size(s); | ||
200 | - tmode = tcg_const_i32(arm_rmode_to_sf(mode)); | ||
201 | status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); | ||
202 | - | ||
203 | - gen_helper_set_rmode(tmode, tmode, status); | ||
204 | + tmode = gen_set_rmode(mode, status); | ||
205 | |||
206 | tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), | ||
207 | vec_full_reg_offset(s, a->rn), | ||
208 | pred_full_reg_offset(s, a->pg), | ||
209 | status, vsz, vsz, 0, fn); | ||
210 | |||
211 | - gen_helper_set_rmode(tmode, tmode, status); | ||
212 | + gen_restore_rmode(tmode, status); | ||
213 | return true; | ||
214 | } | ||
215 | |||
216 | diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c | ||
217 | index XXXXXXX..XXXXXXX 100644 | ||
218 | --- a/target/arm/tcg/translate-vfp.c | ||
219 | +++ b/target/arm/tcg/translate-vfp.c | ||
220 | @@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) | ||
221 | fpst = fpstatus_ptr(FPST_FPCR); | ||
222 | } | ||
223 | |||
224 | - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding)); | ||
225 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
226 | + tcg_rmode = gen_set_rmode(rounding, fpst); | ||
227 | |||
228 | if (sz == 3) { | ||
229 | TCGv_i64 tcg_op; | ||
230 | @@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) | ||
231 | vfp_store_reg32(tcg_res, rd); | ||
232 | } | ||
233 | |||
234 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
235 | + gen_restore_rmode(tcg_rmode, fpst); | ||
236 | return true; | ||
237 | } | ||
238 | |||
239 | @@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) | ||
240 | } | ||
241 | |||
242 | tcg_shift = tcg_constant_i32(0); | ||
243 | - | ||
244 | - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding)); | ||
245 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
246 | + tcg_rmode = gen_set_rmode(rounding, fpst); | ||
247 | |||
248 | if (sz == 3) { | ||
249 | TCGv_i64 tcg_double, tcg_res; | ||
250 | @@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) | ||
251 | vfp_store_reg32(tcg_res, rd); | ||
252 | } | ||
253 | |||
254 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
255 | + gen_restore_rmode(tcg_rmode, fpst); | ||
256 | return true; | ||
257 | } | ||
258 | |||
259 | @@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a) | ||
260 | tmp = tcg_temp_new_i32(); | ||
261 | vfp_load_reg32(tmp, a->vm); | ||
262 | fpst = fpstatus_ptr(FPST_FPCR_F16); | ||
263 | - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO)); | ||
264 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
265 | + tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst); | ||
266 | gen_helper_rinth(tmp, tmp, fpst); | ||
267 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
268 | + gen_restore_rmode(tcg_rmode, fpst); | ||
269 | vfp_store_reg32(tmp, a->vd); | ||
270 | return true; | ||
271 | } | ||
272 | @@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a) | ||
273 | tmp = tcg_temp_new_i32(); | ||
274 | vfp_load_reg32(tmp, a->vm); | ||
275 | fpst = fpstatus_ptr(FPST_FPCR); | ||
276 | - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO)); | ||
277 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
278 | + tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst); | ||
279 | gen_helper_rints(tmp, tmp, fpst); | ||
280 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
281 | + gen_restore_rmode(tcg_rmode, fpst); | ||
282 | vfp_store_reg32(tmp, a->vd); | ||
283 | return true; | ||
284 | } | ||
285 | @@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a) | ||
286 | tmp = tcg_temp_new_i64(); | ||
287 | vfp_load_reg64(tmp, a->vm); | ||
288 | fpst = fpstatus_ptr(FPST_FPCR); | ||
289 | - tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO)); | ||
290 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
291 | + tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst); | ||
292 | gen_helper_rintd(tmp, tmp, fpst); | ||
293 | - gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); | ||
294 | + gen_restore_rmode(tcg_rmode, fpst); | ||
295 | vfp_store_reg64(tmp, a->vd); | ||
296 | return true; | ||
72 | } | 297 | } |
73 | -- | 298 | -- |
74 | 2.34.1 | 299 | 2.34.1 |
75 | 300 | ||
76 | 301 | ||
New patch | |||
---|---|---|---|
1 | Reorg temporary usage so that we can use tcg_constant_i32. | ||
2 | tcg_gen_deposit_i32 already has a width == 32 special case, | ||
3 | so remove the check here. | ||
1 | 4 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/arm/tcg/translate.c | 13 +++++-------- | ||
9 | 1 file changed, 5 insertions(+), 8 deletions(-) | ||
10 | |||
11 | diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/arm/tcg/translate.c | ||
14 | +++ b/target/arm/tcg/translate.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static bool trans_UBFX(DisasContext *s, arg_UBFX *a) | ||
16 | |||
17 | static bool trans_BFCI(DisasContext *s, arg_BFCI *a) | ||
18 | { | ||
19 | - TCGv_i32 tmp; | ||
20 | int msb = a->msb, lsb = a->lsb; | ||
21 | + TCGv_i32 t_in, t_rd; | ||
22 | int width; | ||
23 | |||
24 | if (!ENABLE_ARCH_6T2) { | ||
25 | @@ -XXX,XX +XXX,XX @@ static bool trans_BFCI(DisasContext *s, arg_BFCI *a) | ||
26 | width = msb + 1 - lsb; | ||
27 | if (a->rn == 15) { | ||
28 | /* BFC */ | ||
29 | - tmp = tcg_const_i32(0); | ||
30 | + t_in = tcg_constant_i32(0); | ||
31 | } else { | ||
32 | /* BFI */ | ||
33 | - tmp = load_reg(s, a->rn); | ||
34 | + t_in = load_reg(s, a->rn); | ||
35 | } | ||
36 | - if (width != 32) { | ||
37 | - TCGv_i32 tmp2 = load_reg(s, a->rd); | ||
38 | - tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width); | ||
39 | - } | ||
40 | - store_reg(s, a->rd, tmp); | ||
41 | + t_rd = load_reg(s, a->rd); | ||
42 | + tcg_gen_deposit_i32(t_rd, t_rd, t_in, lsb, width); | ||
43 | return true; | ||
44 | } | ||
45 | |||
46 | -- | ||
47 | 2.34.1 | ||
48 | |||
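As a rough standalone sketch of why the width == 32 check above is redundant: generic 32-bit bitfield-deposit semantics in plain C (illustration only, not the TCG implementation; the function name deposit32 is just for this example).

    #include <stdint.h>

    /* Replace bits rd[pos + len - 1 : pos] with the low 'len' bits of rn. */
    static uint32_t deposit32(uint32_t rd, uint32_t rn, int pos, int len)
    {
        uint32_t mask = (len == 32 ? ~0u : ((1u << len) - 1)) << pos;
        return (rd & ~mask) | ((rn << pos) & mask);
    }

With pos == 0 and len == 32 the mask covers every bit, so the result is simply rn; that is the full-width special case the commit message refers to, which lets BFI/BFC always go through the deposit path.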
New patch | |||
---|---|---|---|
1 | The use of tcg_const_ptr here hides the implicit initialization of a variable. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/arm/tcg/translate-sve.c | 6 ++++-- | ||
7 | 1 file changed, 4 insertions(+), 2 deletions(-) | ||
8 | |||
9 | diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/arm/tcg/translate-sve.c | ||
12 | +++ b/target/arm/tcg/translate-sve.c | ||
13 | @@ -XXX,XX +XXX,XX @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, | ||
14 | } | ||
15 | } else { | ||
16 | TCGLabel *loop = gen_new_label(); | ||
17 | - TCGv_ptr tp, i = tcg_const_ptr(0); | ||
18 | + TCGv_ptr tp, i = tcg_temp_new_ptr(); | ||
19 | |||
20 | + tcg_gen_movi_ptr(i, 0); | ||
21 | gen_set_label(loop); | ||
22 | |||
23 | t0 = tcg_temp_new_i64(); | ||
24 | @@ -XXX,XX +XXX,XX @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, | ||
25 | } | ||
26 | } else { | ||
27 | TCGLabel *loop = gen_new_label(); | ||
28 | - TCGv_ptr tp, i = tcg_const_ptr(0); | ||
29 | + TCGv_ptr tp, i = tcg_temp_new_ptr(); | ||
30 | |||
31 | + tcg_gen_movi_ptr(i, 0); | ||
32 | gen_set_label(loop); | ||
33 | |||
34 | t0 = tcg_temp_new_i64(); | ||
35 | -- | ||
36 | 2.34.1 | ||
37 | |||
New patch | |||
---|---|---|---|
1 | All uses are in the context of an accumulator conditionally | ||
2 | having a zero input. Split the rda variable into rda_{i,o}, | ||
3 | and set rda_i to tcg_constant_foo(0) when required. | ||
1 | 4 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/arm/tcg/translate-mve.c | 54 ++++++++++++++++++---------------- | ||
9 | 1 file changed, 29 insertions(+), 25 deletions(-) | ||
10 | |||
11 | diff --git a/target/arm/tcg/translate-mve.c b/target/arm/tcg/translate-mve.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/arm/tcg/translate-mve.c | ||
14 | +++ b/target/arm/tcg/translate-mve.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a, | ||
16 | MVEGenLongDualAccOpFn *fn) | ||
17 | { | ||
18 | TCGv_ptr qn, qm; | ||
19 | - TCGv_i64 rda; | ||
20 | + TCGv_i64 rda_i, rda_o; | ||
21 | TCGv_i32 rdalo, rdahi; | ||
22 | |||
23 | if (!dc_isar_feature(aa32_mve, s) || | ||
24 | @@ -XXX,XX +XXX,XX @@ static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a, | ||
25 | * of an A=0 (no-accumulate) insn which does not execute the first | ||
26 | * beat must start with the current rda value, not 0. | ||
27 | */ | ||
28 | + rda_o = tcg_temp_new_i64(); | ||
29 | if (a->a || mve_skip_first_beat(s)) { | ||
30 | - rda = tcg_temp_new_i64(); | ||
31 | + rda_i = rda_o; | ||
32 | rdalo = load_reg(s, a->rdalo); | ||
33 | rdahi = load_reg(s, a->rdahi); | ||
34 | - tcg_gen_concat_i32_i64(rda, rdalo, rdahi); | ||
35 | + tcg_gen_concat_i32_i64(rda_i, rdalo, rdahi); | ||
36 | } else { | ||
37 | - rda = tcg_const_i64(0); | ||
38 | + rda_i = tcg_constant_i64(0); | ||
39 | } | ||
40 | |||
41 | - fn(rda, cpu_env, qn, qm, rda); | ||
42 | + fn(rda_o, cpu_env, qn, qm, rda_i); | ||
43 | |||
44 | rdalo = tcg_temp_new_i32(); | ||
45 | rdahi = tcg_temp_new_i32(); | ||
46 | - tcg_gen_extrl_i64_i32(rdalo, rda); | ||
47 | - tcg_gen_extrh_i64_i32(rdahi, rda); | ||
48 | + tcg_gen_extrl_i64_i32(rdalo, rda_o); | ||
49 | + tcg_gen_extrh_i64_i32(rdahi, rda_o); | ||
50 | store_reg(s, a->rdalo, rdalo); | ||
51 | store_reg(s, a->rdahi, rdahi); | ||
52 | mve_update_eci(s); | ||
53 | @@ -XXX,XX +XXX,XX @@ static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a) | ||
54 | static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn) | ||
55 | { | ||
56 | TCGv_ptr qn, qm; | ||
57 | - TCGv_i32 rda; | ||
58 | + TCGv_i32 rda_i, rda_o; | ||
59 | |||
60 | if (!dc_isar_feature(aa32_mve, s) || | ||
61 | !mve_check_qreg_bank(s, a->qn) || | ||
62 | @@ -XXX,XX +XXX,XX @@ static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn) | ||
63 | * beat must start with the current rda value, not 0. | ||
64 | */ | ||
65 | if (a->a || mve_skip_first_beat(s)) { | ||
66 | - rda = load_reg(s, a->rda); | ||
67 | + rda_o = rda_i = load_reg(s, a->rda); | ||
68 | } else { | ||
69 | - rda = tcg_const_i32(0); | ||
70 | + rda_i = tcg_constant_i32(0); | ||
71 | + rda_o = tcg_temp_new_i32(); | ||
72 | } | ||
73 | |||
74 | - fn(rda, cpu_env, qn, qm, rda); | ||
75 | - store_reg(s, a->rda, rda); | ||
76 | + fn(rda_o, cpu_env, qn, qm, rda_i); | ||
77 | + store_reg(s, a->rda, rda_o); | ||
78 | |||
79 | mve_update_eci(s); | ||
80 | return true; | ||
81 | @@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a) | ||
82 | { NULL, NULL } | ||
83 | }; | ||
84 | TCGv_ptr qm; | ||
85 | - TCGv_i32 rda; | ||
86 | + TCGv_i32 rda_i, rda_o; | ||
87 | |||
88 | if (!dc_isar_feature(aa32_mve, s) || | ||
89 | a->size == 3) { | ||
90 | @@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a) | ||
91 | */ | ||
92 | if (a->a || mve_skip_first_beat(s)) { | ||
93 | /* Accumulate input from Rda */ | ||
94 | - rda = load_reg(s, a->rda); | ||
95 | + rda_o = rda_i = load_reg(s, a->rda); | ||
96 | } else { | ||
97 | /* Accumulate starting at zero */ | ||
98 | - rda = tcg_const_i32(0); | ||
99 | + rda_i = tcg_constant_i32(0); | ||
100 | + rda_o = tcg_temp_new_i32(); | ||
101 | } | ||
102 | |||
103 | qm = mve_qreg_ptr(a->qm); | ||
104 | - fns[a->size][a->u](rda, cpu_env, qm, rda); | ||
105 | - store_reg(s, a->rda, rda); | ||
106 | + fns[a->size][a->u](rda_o, cpu_env, qm, rda_i); | ||
107 | + store_reg(s, a->rda, rda_o); | ||
108 | |||
109 | mve_update_eci(s); | ||
110 | return true; | ||
111 | @@ -XXX,XX +XXX,XX @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a) | ||
112 | * No need to check Qm's bank: it is only 3 bits in decode. | ||
113 | */ | ||
114 | TCGv_ptr qm; | ||
115 | - TCGv_i64 rda; | ||
116 | + TCGv_i64 rda_i, rda_o; | ||
117 | TCGv_i32 rdalo, rdahi; | ||
118 | |||
119 | if (!dc_isar_feature(aa32_mve, s)) { | ||
120 | @@ -XXX,XX +XXX,XX @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a) | ||
121 | * of an A=0 (no-accumulate) insn which does not execute the first | ||
122 | * beat must start with the current value of RdaHi:RdaLo, not zero. | ||
123 | */ | ||
124 | + rda_o = tcg_temp_new_i64(); | ||
125 | if (a->a || mve_skip_first_beat(s)) { | ||
126 | /* Accumulate input from RdaHi:RdaLo */ | ||
127 | - rda = tcg_temp_new_i64(); | ||
128 | + rda_i = rda_o; | ||
129 | rdalo = load_reg(s, a->rdalo); | ||
130 | rdahi = load_reg(s, a->rdahi); | ||
131 | - tcg_gen_concat_i32_i64(rda, rdalo, rdahi); | ||
132 | + tcg_gen_concat_i32_i64(rda_i, rdalo, rdahi); | ||
133 | } else { | ||
134 | /* Accumulate starting at zero */ | ||
135 | - rda = tcg_const_i64(0); | ||
136 | + rda_i = tcg_constant_i64(0); | ||
137 | } | ||
138 | |||
139 | qm = mve_qreg_ptr(a->qm); | ||
140 | if (a->u) { | ||
141 | - gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda); | ||
142 | + gen_helper_mve_vaddlv_u(rda_o, cpu_env, qm, rda_i); | ||
143 | } else { | ||
144 | - gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda); | ||
145 | + gen_helper_mve_vaddlv_s(rda_o, cpu_env, qm, rda_i); | ||
146 | } | ||
147 | |||
148 | rdalo = tcg_temp_new_i32(); | ||
149 | rdahi = tcg_temp_new_i32(); | ||
150 | - tcg_gen_extrl_i64_i32(rdalo, rda); | ||
151 | - tcg_gen_extrh_i64_i32(rdahi, rda); | ||
152 | + tcg_gen_extrl_i64_i32(rdalo, rda_o); | ||
153 | + tcg_gen_extrh_i64_i32(rdahi, rda_o); | ||
154 | store_reg(s, a->rdalo, rdalo); | ||
155 | store_reg(s, a->rdahi, rdahi); | ||
156 | mve_update_eci(s); | ||
157 | -- | ||
158 | 2.34.1 | ||
159 | |||
1 | Wrap the bare TranslationBlock pointer into a structure. | 1 | It is easy enough to use mov instead of using or-with-zero and relying |
---|---|---|---|
2 | on the optimizer to fold away the or. Use an array for the output, | ||
3 | rather than separate tcg_res{l,h} variables. | ||
2 | 4 | ||
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 7 | --- |
7 | accel/tcg/tb-hash.h | 1 + | 8 | target/arm/tcg/translate-a64.c | 41 +++++++++++++++++----------------- |
8 | accel/tcg/tb-jmp-cache.h | 24 ++++++++++++++++++++++++ | 9 | 1 file changed, 21 insertions(+), 20 deletions(-) |
9 | include/exec/cpu-common.h | 1 + | ||
10 | include/hw/core/cpu.h | 15 +-------------- | ||
11 | include/qemu/typedefs.h | 1 + | ||
12 | accel/stubs/tcg-stub.c | 4 ++++ | ||
13 | accel/tcg/cpu-exec.c | 10 +++++++--- | ||
14 | accel/tcg/cputlb.c | 9 +++++---- | ||
15 | accel/tcg/translate-all.c | 28 +++++++++++++++++++++++++--- | ||
16 | hw/core/cpu-common.c | 3 +-- | ||
17 | plugins/core.c | 2 +- | ||
18 | trace/control-target.c | 2 +- | ||
19 | 12 files changed, 72 insertions(+), 28 deletions(-) | ||
20 | create mode 100644 accel/tcg/tb-jmp-cache.h | ||
21 | 10 | ||
22 | diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h | 11 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c |
23 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/accel/tcg/tb-hash.h | 13 | --- a/target/arm/tcg/translate-a64.c |
25 | +++ b/accel/tcg/tb-hash.h | 14 | +++ b/target/arm/tcg/translate-a64.c |
26 | @@ -XXX,XX +XXX,XX @@ | 15 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) |
27 | #include "exec/cpu-defs.h" | 16 | bool part = extract32(insn, 14, 1); |
28 | #include "exec/exec-all.h" | 17 | bool is_q = extract32(insn, 30, 1); |
29 | #include "qemu/xxhash.h" | 18 | int esize = 8 << size; |
30 | +#include "tb-jmp-cache.h" | 19 | - int i, ofs; |
31 | 20 | + int i; | |
32 | #ifdef CONFIG_SOFTMMU | 21 | int datasize = is_q ? 128 : 64; |
33 | 22 | int elements = datasize / esize; | |
34 | diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h | 23 | - TCGv_i64 tcg_res, tcg_resl, tcg_resh; |
35 | new file mode 100644 | 24 | + TCGv_i64 tcg_res[2], tcg_ele; |
36 | index XXXXXXX..XXXXXXX | 25 | |
37 | --- /dev/null | 26 | if (opcode == 0 || (size == 3 && !is_q)) { |
38 | +++ b/accel/tcg/tb-jmp-cache.h | 27 | unallocated_encoding(s); |
39 | @@ -XXX,XX +XXX,XX @@ | 28 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) |
40 | +/* | ||
41 | + * The per-CPU TranslationBlock jump cache. | ||
42 | + * | ||
43 | + * Copyright (c) 2003 Fabrice Bellard | ||
44 | + * | ||
45 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
46 | + */ | ||
47 | + | ||
48 | +#ifndef ACCEL_TCG_TB_JMP_CACHE_H | ||
49 | +#define ACCEL_TCG_TB_JMP_CACHE_H | ||
50 | + | ||
51 | +#define TB_JMP_CACHE_BITS 12 | ||
52 | +#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) | ||
53 | + | ||
54 | +/* | ||
55 | + * Accessed in parallel; all accesses to 'tb' must be atomic. | ||
56 | + */ | ||
57 | +struct CPUJumpCache { | ||
58 | + struct { | ||
59 | + TranslationBlock *tb; | ||
60 | + } array[TB_JMP_CACHE_SIZE]; | ||
61 | +}; | ||
62 | + | ||
63 | +#endif /* ACCEL_TCG_TB_JMP_CACHE_H */ | ||
64 | diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h | ||
65 | index XXXXXXX..XXXXXXX 100644 | ||
66 | --- a/include/exec/cpu-common.h | ||
67 | +++ b/include/exec/cpu-common.h | ||
68 | @@ -XXX,XX +XXX,XX @@ void cpu_list_unlock(void); | ||
69 | unsigned int cpu_list_generation_id_get(void); | ||
70 | |||
71 | void tcg_flush_softmmu_tlb(CPUState *cs); | ||
72 | +void tcg_flush_jmp_cache(CPUState *cs); | ||
73 | |||
74 | void tcg_iommu_init_notifier_list(CPUState *cpu); | ||
75 | void tcg_iommu_free_notifier_list(CPUState *cpu); | ||
76 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | ||
77 | index XXXXXXX..XXXXXXX 100644 | ||
78 | --- a/include/hw/core/cpu.h | ||
79 | +++ b/include/hw/core/cpu.h | ||
80 | @@ -XXX,XX +XXX,XX @@ struct kvm_run; | ||
81 | struct hax_vcpu_state; | ||
82 | struct hvf_vcpu_state; | ||
83 | |||
84 | -#define TB_JMP_CACHE_BITS 12 | ||
85 | -#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) | ||
86 | - | ||
87 | /* work queue */ | ||
88 | |||
89 | /* The union type allows passing of 64 bit target pointers on 32 bit | ||
90 | @@ -XXX,XX +XXX,XX @@ struct CPUState { | ||
91 | CPUArchState *env_ptr; | ||
92 | IcountDecr *icount_decr_ptr; | ||
93 | |||
94 | - /* Accessed in parallel; all accesses must be atomic */ | ||
95 | - TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; | ||
96 | + CPUJumpCache *tb_jmp_cache; | ||
97 | |||
98 | struct GDBRegisterState *gdb_regs; | ||
99 | int gdb_num_regs; | ||
100 | @@ -XXX,XX +XXX,XX @@ extern CPUTailQ cpus; | ||
101 | |||
102 | extern __thread CPUState *current_cpu; | ||
103 | |||
104 | -static inline void cpu_tb_jmp_cache_clear(CPUState *cpu) | ||
105 | -{ | ||
106 | - unsigned int i; | ||
107 | - | ||
108 | - for (i = 0; i < TB_JMP_CACHE_SIZE; i++) { | ||
109 | - qatomic_set(&cpu->tb_jmp_cache[i], NULL); | ||
110 | - } | ||
111 | -} | ||
112 | - | ||
113 | /** | ||
114 | * qemu_tcg_mttcg_enabled: | ||
115 | * Check whether we are running MultiThread TCG or not. | ||
116 | diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h | ||
117 | index XXXXXXX..XXXXXXX 100644 | ||
118 | --- a/include/qemu/typedefs.h | ||
119 | +++ b/include/qemu/typedefs.h | ||
120 | @@ -XXX,XX +XXX,XX @@ typedef struct CoMutex CoMutex; | ||
121 | typedef struct ConfidentialGuestSupport ConfidentialGuestSupport; | ||
122 | typedef struct CPUAddressSpace CPUAddressSpace; | ||
123 | typedef struct CPUArchState CPUArchState; | ||
124 | +typedef struct CPUJumpCache CPUJumpCache; | ||
125 | typedef struct CPUState CPUState; | ||
126 | typedef struct CPUTLBEntryFull CPUTLBEntryFull; | ||
127 | typedef struct DeviceListener DeviceListener; | ||
128 | diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c | ||
129 | index XXXXXXX..XXXXXXX 100644 | ||
130 | --- a/accel/stubs/tcg-stub.c | ||
131 | +++ b/accel/stubs/tcg-stub.c | ||
132 | @@ -XXX,XX +XXX,XX @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) | ||
133 | { | ||
134 | } | ||
135 | |||
136 | +void tcg_flush_jmp_cache(CPUState *cpu) | ||
137 | +{ | ||
138 | +} | ||
139 | + | ||
140 | int probe_access_flags(CPUArchState *env, target_ulong addr, | ||
141 | MMUAccessType access_type, int mmu_idx, | ||
142 | bool nonfault, void **phost, uintptr_t retaddr) | ||
143 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
144 | index XXXXXXX..XXXXXXX 100644 | ||
145 | --- a/accel/tcg/cpu-exec.c | ||
146 | +++ b/accel/tcg/cpu-exec.c | ||
147 | @@ -XXX,XX +XXX,XX @@ | ||
148 | #include "sysemu/replay.h" | ||
149 | #include "sysemu/tcg.h" | ||
150 | #include "exec/helper-proto.h" | ||
151 | +#include "tb-jmp-cache.h" | ||
152 | #include "tb-hash.h" | ||
153 | #include "tb-context.h" | ||
154 | #include "internal.h" | ||
155 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | ||
156 | tcg_debug_assert(!(cflags & CF_INVALID)); | ||
157 | |||
158 | hash = tb_jmp_cache_hash_func(pc); | ||
159 | - tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); | ||
160 | + tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb); | ||
161 | |||
162 | if (likely(tb && | ||
163 | tb->pc == pc && | ||
164 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | ||
165 | if (tb == NULL) { | ||
166 | return NULL; | ||
167 | } | ||
168 | - qatomic_set(&cpu->tb_jmp_cache[hash], tb); | ||
169 | + qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb); | ||
170 | return tb; | ||
171 | } | ||
172 | |||
173 | @@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu) | ||
174 | |||
175 | tb = tb_lookup(cpu, pc, cs_base, flags, cflags); | ||
176 | if (tb == NULL) { | ||
177 | + uint32_t h; | ||
178 | + | ||
179 | mmap_lock(); | ||
180 | tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); | ||
181 | mmap_unlock(); | ||
182 | @@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu) | ||
183 | * We add the TB in the virtual pc hash table | ||
184 | * for the fast lookup | ||
185 | */ | ||
186 | - qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); | ||
187 | + h = tb_jmp_cache_hash_func(pc); | ||
188 | + qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb); | ||
189 | } | ||
190 | |||
191 | #ifndef CONFIG_USER_ONLY | ||
192 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
193 | index XXXXXXX..XXXXXXX 100644 | ||
194 | --- a/accel/tcg/cputlb.c | ||
195 | +++ b/accel/tcg/cputlb.c | ||
196 | @@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, | ||
197 | |||
198 | static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) | ||
199 | { | ||
200 | - unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr); | ||
201 | + int i, i0 = tb_jmp_cache_hash_page(page_addr); | ||
202 | + CPUJumpCache *jc = cpu->tb_jmp_cache; | ||
203 | |||
204 | for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { | ||
205 | - qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); | ||
206 | + qatomic_set(&jc->array[i0 + i].tb, NULL); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) | ||
211 | |||
212 | qemu_spin_unlock(&env_tlb(env)->c.lock); | ||
213 | |||
214 | - cpu_tb_jmp_cache_clear(cpu); | ||
215 | + tcg_flush_jmp_cache(cpu); | ||
216 | |||
217 | if (to_clean == ALL_MMUIDX_BITS) { | ||
218 | qatomic_set(&env_tlb(env)->c.full_flush_count, | ||
219 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, | ||
220 | * longer to clear each entry individually than it will to clear it all. | ||
221 | */ | ||
222 | if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) { | ||
223 | - cpu_tb_jmp_cache_clear(cpu); | ||
224 | + tcg_flush_jmp_cache(cpu); | ||
225 | return; | 29 | return; |
226 | } | 30 | } |
227 | 31 | ||
228 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | 32 | - tcg_resl = tcg_const_i64(0); |
229 | index XXXXXXX..XXXXXXX 100644 | 33 | - tcg_resh = is_q ? tcg_const_i64(0) : NULL; |
230 | --- a/accel/tcg/translate-all.c | 34 | - tcg_res = tcg_temp_new_i64(); |
231 | +++ b/accel/tcg/translate-all.c | 35 | + tcg_res[0] = tcg_temp_new_i64(); |
232 | @@ -XXX,XX +XXX,XX @@ | 36 | + tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL; |
233 | #include "sysemu/tcg.h" | 37 | + tcg_ele = tcg_temp_new_i64(); |
234 | #include "qapi/error.h" | 38 | |
235 | #include "hw/core/tcg-cpu-ops.h" | 39 | for (i = 0; i < elements; i++) { |
236 | +#include "tb-jmp-cache.h" | 40 | + int o, w; |
237 | #include "tb-hash.h" | 41 | + |
238 | #include "tb-context.h" | 42 | switch (opcode) { |
239 | #include "internal.h" | 43 | case 1: /* UZP1/2 */ |
240 | @@ -XXX,XX +XXX,XX @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) | 44 | { |
241 | } | 45 | int midpoint = elements / 2; |
242 | 46 | if (i < midpoint) { | |
243 | CPU_FOREACH(cpu) { | 47 | - read_vec_element(s, tcg_res, rn, 2 * i + part, size); |
244 | - cpu_tb_jmp_cache_clear(cpu); | 48 | + read_vec_element(s, tcg_ele, rn, 2 * i + part, size); |
245 | + tcg_flush_jmp_cache(cpu); | 49 | } else { |
246 | } | 50 | - read_vec_element(s, tcg_res, rm, |
247 | 51 | + read_vec_element(s, tcg_ele, rm, | |
248 | qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); | 52 | 2 * (i - midpoint) + part, size); |
249 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | 53 | } |
250 | /* remove the TB from the hash list */ | 54 | break; |
251 | h = tb_jmp_cache_hash_func(tb->pc); | 55 | } |
252 | CPU_FOREACH(cpu) { | 56 | case 2: /* TRN1/2 */ |
253 | - if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) { | 57 | if (i & 1) { |
254 | - qatomic_set(&cpu->tb_jmp_cache[h], NULL); | 58 | - read_vec_element(s, tcg_res, rm, (i & ~1) + part, size); |
255 | + CPUJumpCache *jc = cpu->tb_jmp_cache; | 59 | + read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size); |
256 | + if (qatomic_read(&jc->array[h].tb) == tb) { | 60 | } else { |
257 | + qatomic_set(&jc->array[h].tb, NULL); | 61 | - read_vec_element(s, tcg_res, rn, (i & ~1) + part, size); |
62 | + read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size); | ||
63 | } | ||
64 | break; | ||
65 | case 3: /* ZIP1/2 */ | ||
66 | { | ||
67 | int base = part * elements / 2; | ||
68 | if (i & 1) { | ||
69 | - read_vec_element(s, tcg_res, rm, base + (i >> 1), size); | ||
70 | + read_vec_element(s, tcg_ele, rm, base + (i >> 1), size); | ||
71 | } else { | ||
72 | - read_vec_element(s, tcg_res, rn, base + (i >> 1), size); | ||
73 | + read_vec_element(s, tcg_ele, rn, base + (i >> 1), size); | ||
74 | } | ||
75 | break; | ||
76 | } | ||
77 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) | ||
78 | g_assert_not_reached(); | ||
79 | } | ||
80 | |||
81 | - ofs = i * esize; | ||
82 | - if (ofs < 64) { | ||
83 | - tcg_gen_shli_i64(tcg_res, tcg_res, ofs); | ||
84 | - tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res); | ||
85 | + w = (i * esize) / 64; | ||
86 | + o = (i * esize) % 64; | ||
87 | + if (o == 0) { | ||
88 | + tcg_gen_mov_i64(tcg_res[w], tcg_ele); | ||
89 | } else { | ||
90 | - tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64); | ||
91 | - tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res); | ||
92 | + tcg_gen_shli_i64(tcg_ele, tcg_ele, o); | ||
93 | + tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele); | ||
258 | } | 94 | } |
259 | } | 95 | } |
260 | 96 | ||
261 | @@ -XXX,XX +XXX,XX @@ int page_unprotect(target_ulong address, uintptr_t pc) | 97 | - write_vec_element(s, tcg_resl, rd, 0, MO_64); |
98 | - if (is_q) { | ||
99 | - write_vec_element(s, tcg_resh, rd, 1, MO_64); | ||
100 | + for (i = 0; i <= is_q; ++i) { | ||
101 | + write_vec_element(s, tcg_res[i], rd, i, MO_64); | ||
102 | } | ||
103 | clear_vec_high(s, is_q, rd); | ||
262 | } | 104 | } |
263 | #endif /* CONFIG_USER_ONLY */ | ||
264 | |||
265 | +/* | ||
266 | + * Called by generic code at e.g. cpu reset after cpu creation, | ||
267 | + * therefore we must be prepared to allocate the jump cache. | ||
268 | + */ | ||
269 | +void tcg_flush_jmp_cache(CPUState *cpu) | ||
270 | +{ | ||
271 | + CPUJumpCache *jc = cpu->tb_jmp_cache; | ||
272 | + | ||
273 | + if (likely(jc)) { | ||
274 | + for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) { | ||
275 | + qatomic_set(&jc->array[i].tb, NULL); | ||
276 | + } | ||
277 | + } else { | ||
278 | + /* This should happen once during realize, and thus never race. */ | ||
279 | + jc = g_new0(CPUJumpCache, 1); | ||
280 | + jc = qatomic_xchg(&cpu->tb_jmp_cache, jc); | ||
281 | + assert(jc == NULL); | ||
282 | + } | ||
283 | +} | ||
284 | + | ||
285 | /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */ | ||
286 | void tcg_flush_softmmu_tlb(CPUState *cs) | ||
287 | { | ||
288 | diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c | ||
289 | index XXXXXXX..XXXXXXX 100644 | ||
290 | --- a/hw/core/cpu-common.c | ||
291 | +++ b/hw/core/cpu-common.c | ||
292 | @@ -XXX,XX +XXX,XX @@ static void cpu_common_reset(DeviceState *dev) | ||
293 | cpu->cflags_next_tb = -1; | ||
294 | |||
295 | if (tcg_enabled()) { | ||
296 | - cpu_tb_jmp_cache_clear(cpu); | ||
297 | - | ||
298 | + tcg_flush_jmp_cache(cpu); | ||
299 | tcg_flush_softmmu_tlb(cpu); | ||
300 | } | ||
301 | } | ||
302 | diff --git a/plugins/core.c b/plugins/core.c | ||
303 | index XXXXXXX..XXXXXXX 100644 | ||
304 | --- a/plugins/core.c | ||
305 | +++ b/plugins/core.c | ||
306 | @@ -XXX,XX +XXX,XX @@ struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id) | ||
307 | static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data) | ||
308 | { | ||
309 | bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX); | ||
310 | - cpu_tb_jmp_cache_clear(cpu); | ||
311 | + tcg_flush_jmp_cache(cpu); | ||
312 | } | ||
313 | |||
314 | static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata) | ||
315 | diff --git a/trace/control-target.c b/trace/control-target.c | ||
316 | index XXXXXXX..XXXXXXX 100644 | ||
317 | --- a/trace/control-target.c | ||
318 | +++ b/trace/control-target.c | ||
319 | @@ -XXX,XX +XXX,XX @@ static void trace_event_synchronize_vcpu_state_dynamic( | ||
320 | { | ||
321 | bitmap_copy(vcpu->trace_dstate, vcpu->trace_dstate_delayed, | ||
322 | CPU_TRACE_DSTATE_MAX_EVENTS); | ||
323 | - cpu_tb_jmp_cache_clear(vcpu); | ||
324 | + tcg_flush_jmp_cache(vcpu); | ||
325 | } | ||
326 | |||
327 | void trace_event_set_vcpu_state_dynamic(CPUState *vcpu, | ||
328 | -- | 105 | -- |
329 | 2.34.1 | 106 | 2.34.1 |
330 | 107 | ||
331 | 108 | ||
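A standalone plain-C sketch of the word/offset packing pattern introduced in disas_simd_zip_trn above (names are illustrative, not QEMU code): the first element placed in each 64-bit output word is a plain move, later elements are shifted and or'ed in.

    #include <stdint.h>

    /* Pack n elements of esize bits (esize divides 64; each elem[i] is
     * assumed already truncated to esize bits) into two 64-bit words. */
    static void pack_elements(uint64_t out[2], const uint64_t *elem,
                              int n, int esize)
    {
        for (int i = 0; i < n; i++) {
            int w = (i * esize) / 64;    /* which output word */
            int o = (i * esize) % 64;    /* bit offset within that word */
            if (o == 0) {
                out[w] = elem[i];        /* mov: starts a new word */
            } else {
                out[w] |= elem[i] << o;  /* or in the next element */
            }
        }
    }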
New patch | |||
---|---|---|---|
1 | It is easy enough to use mov instead of using or-with-zero | ||
2 | and relying on the optimizer to fold away the or. | ||
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/arm/tcg/translate-a64.c | 8 ++++++-- | ||
8 | 1 file changed, 6 insertions(+), 2 deletions(-) | ||
9 | |||
10 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/arm/tcg/translate-a64.c | ||
13 | +++ b/target/arm/tcg/translate-a64.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, | ||
15 | tcg_rn = tcg_temp_new_i64(); | ||
16 | tcg_rd = tcg_temp_new_i64(); | ||
17 | tcg_rd_narrowed = tcg_temp_new_i32(); | ||
18 | - tcg_final = tcg_const_i64(0); | ||
19 | + tcg_final = tcg_temp_new_i64(); | ||
20 | |||
21 | if (round) { | ||
22 | tcg_round = tcg_constant_i64(1ULL << (shift - 1)); | ||
23 | @@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, | ||
24 | false, is_u_shift, size+1, shift); | ||
25 | narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd); | ||
26 | tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed); | ||
27 | - tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize); | ||
28 | + if (i == 0) { | ||
29 | + tcg_gen_mov_i64(tcg_final, tcg_rd); | ||
30 | + } else { | ||
31 | + tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize); | ||
32 | + } | ||
33 | } | ||
34 | |||
35 | if (!is_q) { | ||
36 | -- | ||
37 | 2.34.1 | ||
38 | |||
New patch | |||
---|---|---|---|
1 | Here it is not trivial to notice the first initialization, so explicitly | ||
2 | zero the temps. Use an array for the output, rather than separate | ||
3 | tcg_rd/tcg_rd_hi variables. | ||
1 | 4 | ||
5 | Fixes a bug by adding a missing clear_vec_high. | ||
6 | |||
7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | target/arm/tcg/translate-a64.c | 26 +++++++++++++++----------- | ||
11 | 1 file changed, 15 insertions(+), 11 deletions(-) | ||
12 | |||
13 | diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/arm/tcg/translate-a64.c | ||
16 | +++ b/target/arm/tcg/translate-a64.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static void handle_rev(DisasContext *s, int opcode, bool u, | ||
18 | int esize = 8 << size; | ||
19 | int elements = dsize / esize; | ||
20 | TCGv_i64 tcg_rn = tcg_temp_new_i64(); | ||
21 | - TCGv_i64 tcg_rd = tcg_const_i64(0); | ||
22 | - TCGv_i64 tcg_rd_hi = tcg_const_i64(0); | ||
23 | + TCGv_i64 tcg_rd[2]; | ||
24 | + | ||
25 | + for (i = 0; i < 2; i++) { | ||
26 | + tcg_rd[i] = tcg_temp_new_i64(); | ||
27 | + tcg_gen_movi_i64(tcg_rd[i], 0); | ||
28 | + } | ||
29 | |||
30 | for (i = 0; i < elements; i++) { | ||
31 | int e_rev = (i & 0xf) ^ revmask; | ||
32 | - int off = e_rev * esize; | ||
33 | + int w = (e_rev * esize) / 64; | ||
34 | + int o = (e_rev * esize) % 64; | ||
35 | + | ||
36 | read_vec_element(s, tcg_rn, rn, i, size); | ||
37 | - if (off >= 64) { | ||
38 | - tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi, | ||
39 | - tcg_rn, off - 64, esize); | ||
40 | - } else { | ||
41 | - tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize); | ||
42 | - } | ||
43 | + tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize); | ||
44 | } | ||
45 | - write_vec_element(s, tcg_rd, rd, 0, MO_64); | ||
46 | - write_vec_element(s, tcg_rd_hi, rd, 1, MO_64); | ||
47 | + | ||
48 | + for (i = 0; i < 2; i++) { | ||
49 | + write_vec_element(s, tcg_rd[i], rd, i, MO_64); | ||
50 | + } | ||
51 | + clear_vec_high(s, true, rd); | ||
52 | } | ||
53 | } | ||
54 | |||
55 | -- | ||
56 | 2.34.1 | ||
57 | |||
New patch | |||
---|---|---|---|
1 | Return a constant for an immediate input. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/m68k/translate.c | 2 +- | ||
7 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
8 | |||
9 | diff --git a/target/m68k/translate.c b/target/m68k/translate.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/m68k/translate.c | ||
12 | +++ b/target/m68k/translate.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0, | ||
14 | default: | ||
15 | g_assert_not_reached(); | ||
16 | } | ||
17 | - return tcg_const_i32(offset); | ||
18 | + return tcg_constant_i32(offset); | ||
19 | default: | ||
20 | return NULL_QREG; | ||
21 | } | ||
22 | -- | ||
23 | 2.34.1 | ||
24 | |||
New patch | |||
---|---|---|---|
1 | Compute both partial results separately and accumulate | ||
2 | at the end, instead of accumulating in the middle. | ||
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/ppc/translate/vmx-impl.c.inc | 21 +++++++++++---------- | ||
9 | 1 file changed, 11 insertions(+), 10 deletions(-) | ||
10 | |||
11 | diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/ppc/translate/vmx-impl.c.inc | ||
14 | +++ b/target/ppc/translate/vmx-impl.c.inc | ||
15 | @@ -XXX,XX +XXX,XX @@ static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a) | ||
16 | |||
17 | static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece) | ||
18 | { | ||
19 | - TCGv_i64 rt, vrb, mask; | ||
20 | - rt = tcg_const_i64(0); | ||
21 | - vrb = tcg_temp_new_i64(); | ||
22 | + TCGv_i64 r[2], mask; | ||
23 | + | ||
24 | + r[0] = tcg_temp_new_i64(); | ||
25 | + r[1] = tcg_temp_new_i64(); | ||
26 | mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1))); | ||
27 | |||
28 | for (int i = 0; i < 2; i++) { | ||
29 | - get_avr64(vrb, a->vrb, i); | ||
30 | + get_avr64(r[i], a->vrb, i); | ||
31 | if (a->mp) { | ||
32 | - tcg_gen_and_i64(vrb, mask, vrb); | ||
33 | + tcg_gen_and_i64(r[i], mask, r[i]); | ||
34 | } else { | ||
35 | - tcg_gen_andc_i64(vrb, mask, vrb); | ||
36 | + tcg_gen_andc_i64(r[i], mask, r[i]); | ||
37 | } | ||
38 | - tcg_gen_ctpop_i64(vrb, vrb); | ||
39 | - tcg_gen_add_i64(rt, rt, vrb); | ||
40 | + tcg_gen_ctpop_i64(r[i], r[i]); | ||
41 | } | ||
42 | |||
43 | - tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece); | ||
44 | - tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt); | ||
45 | + tcg_gen_add_i64(r[0], r[0], r[1]); | ||
46 | + tcg_gen_shli_i64(r[0], r[0], TARGET_LONG_BITS - 8 + vece); | ||
47 | + tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], r[0]); | ||
48 | return true; | ||
49 | } | ||
50 | |||
51 | -- | ||
52 | 2.34.1 | ||
53 | |||
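A plain-C sketch of what the reworked do_vcntmb computes for the a->mp case, before the final shift into the result register (not QEMU code; the function name is illustrative and the popcount uses a GCC/Clang builtin).

    #include <stdint.h>

    /* Count elements of width (8 << vece) bits, across a 128-bit vector held
     * as two 64-bit halves, whose most significant bit is set. */
    static int count_element_msbs(const uint64_t half[2], int vece)
    {
        int ebits = 8 << vece;        /* 8, 16, 32 or 64 */
        uint64_t lsb_per_elem =
            ebits == 64 ? 1 : (uint64_t)-1 / ((1ull << ebits) - 1);
        uint64_t msb_mask = lsb_per_elem << (ebits - 1);  /* dup_const of the MSB */
        int count = 0;

        for (int i = 0; i < 2; i++) {
            count += __builtin_popcountll(half[i] & msb_mask);
        }
        return count;
    }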
New patch | |||
---|---|---|---|
1 | All remaining uses are strictly read-only. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/ppc/translate/vmx-impl.c.inc | 10 +++++----- | ||
8 | 1 file changed, 5 insertions(+), 5 deletions(-) | ||
9 | |||
10 | diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/ppc/translate/vmx-impl.c.inc | ||
13 | +++ b/target/ppc/translate/vmx-impl.c.inc | ||
14 | @@ -XXX,XX +XXX,XX @@ static void glue(gen_, name)(DisasContext *ctx) \ | ||
15 | gen_exception(ctx, POWERPC_EXCP_VPU); \ | ||
16 | return; \ | ||
17 | } \ | ||
18 | - uimm = tcg_const_i32(UIMM5(ctx->opcode)); \ | ||
19 | + uimm = tcg_constant_i32(UIMM5(ctx->opcode)); \ | ||
20 | rb = gen_avr_ptr(rB(ctx->opcode)); \ | ||
21 | rd = gen_avr_ptr(rD(ctx->opcode)); \ | ||
22 | gen_helper_##name(cpu_env, rd, rb, uimm); \ | ||
23 | @@ -XXX,XX +XXX,XX @@ static void gen_vsldoi(DisasContext *ctx) | ||
24 | ra = gen_avr_ptr(rA(ctx->opcode)); | ||
25 | rb = gen_avr_ptr(rB(ctx->opcode)); | ||
26 | rd = gen_avr_ptr(rD(ctx->opcode)); | ||
27 | - sh = tcg_const_i32(VSH(ctx->opcode)); | ||
28 | + sh = tcg_constant_i32(VSH(ctx->opcode)); | ||
29 | gen_helper_vsldoi(rd, ra, rb, sh); | ||
30 | } | ||
31 | |||
32 | @@ -XXX,XX +XXX,XX @@ static void gen_##op(DisasContext *ctx) \ | ||
33 | rb = gen_avr_ptr(rB(ctx->opcode)); \ | ||
34 | rd = gen_avr_ptr(rD(ctx->opcode)); \ | ||
35 | \ | ||
36 | - ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \ | ||
37 | + ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \ | ||
38 | \ | ||
39 | gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \ | ||
40 | } | ||
41 | @@ -XXX,XX +XXX,XX @@ static void gen_##op(DisasContext *ctx) \ | ||
42 | rb = gen_avr_ptr(rB(ctx->opcode)); \ | ||
43 | rd = gen_avr_ptr(rD(ctx->opcode)); \ | ||
44 | \ | ||
45 | - ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \ | ||
46 | + ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \ | ||
47 | \ | ||
48 | gen_helper_##op(cpu_crf[6], rd, rb, ps); \ | ||
49 | } | ||
50 | @@ -XXX,XX +XXX,XX @@ static void gen_##op(DisasContext *ctx) \ | ||
51 | } \ | ||
52 | ra = gen_avr_ptr(rA(ctx->opcode)); \ | ||
53 | rd = gen_avr_ptr(rD(ctx->opcode)); \ | ||
54 | - st_six = tcg_const_i32(rB(ctx->opcode)); \ | ||
55 | + st_six = tcg_constant_i32(rB(ctx->opcode)); \ | ||
56 | gen_helper_##op(rd, ra, st_six); \ | ||
57 | } | ||
58 | |||
59 | -- | ||
60 | 2.34.1 | ||
61 | |||
New patch | |||
---|---|---|---|
1 | Initialize a new temp instead of tcg_const_*. | ||
2 | Fix a pasto in a comment. | ||
1 | 3 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
5 | Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | target/ppc/translate/vsx-impl.c.inc | 8 +++++--- | ||
9 | 1 file changed, 5 insertions(+), 3 deletions(-) | ||
10 | |||
11 | diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/target/ppc/translate/vsx-impl.c.inc | ||
14 | +++ b/target/ppc/translate/vsx-impl.c.inc | ||
15 | @@ -XXX,XX +XXX,XX @@ static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c, | ||
16 | TCGv_i64 conj, disj; | ||
17 | |||
18 | conj = tcg_temp_new_i64(); | ||
19 | - disj = tcg_const_i64(0); | ||
20 | + disj = tcg_temp_new_i64(); | ||
21 | + tcg_gen_movi_i64(disj, 0); | ||
22 | |||
23 | /* Iterate over set bits from the least to the most significant bit */ | ||
24 | while (imm) { | ||
25 | @@ -XXX,XX +XXX,XX @@ static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, | ||
26 | int bit; | ||
27 | TCGv_vec disj, conj; | ||
28 | |||
29 | - disj = tcg_const_zeros_vec_matching(t); | ||
30 | conj = tcg_temp_new_vec_matching(t); | ||
31 | + disj = tcg_temp_new_vec_matching(t); | ||
32 | + tcg_gen_dupi_vec(vece, disj, 0); | ||
33 | |||
34 | /* Iterate over set bits from the least to the most significant bit */ | ||
35 | while (imm) { | ||
36 | @@ -XXX,XX +XXX,XX @@ static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a) | ||
37 | |||
38 | /* Equivalent functions that can be implemented with a single gen_gvec */ | ||
39 | switch (a->imm) { | ||
40 | - case 0b00000000: /* true */ | ||
41 | + case 0b00000000: /* false */ | ||
42 | set_cpu_vsr(a->xt, tcg_constant_i64(0), true); | ||
43 | set_cpu_vsr(a->xt, tcg_constant_i64(0), false); | ||
44 | break; | ||
45 | -- | ||
46 | 2.34.1 | ||
47 | |||
New patch | |||
---|---|---|---|
1 | All remaining uses are strictly read-only. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/ppc/translate/vsx-impl.c.inc | 28 ++++++++++++++-------------- | ||
8 | 1 file changed, 14 insertions(+), 14 deletions(-) | ||
9 | |||
10 | diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/ppc/translate/vsx-impl.c.inc | ||
13 | +++ b/target/ppc/translate/vsx-impl.c.inc | ||
14 | @@ -XXX,XX +XXX,XX @@ static void gen_lxvdsx(DisasContext *ctx) | ||
15 | static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl, | ||
16 | TCGv_i64 inh, TCGv_i64 inl) | ||
17 | { | ||
18 | - TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF); | ||
19 | + TCGv_i64 mask = tcg_constant_i64(0x00FF00FF00FF00FF); | ||
20 | TCGv_i64 t0 = tcg_temp_new_i64(); | ||
21 | TCGv_i64 t1 = tcg_temp_new_i64(); | ||
22 | |||
23 | @@ -XXX,XX +XXX,XX @@ static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a) | ||
24 | REQUIRE_INSNS_FLAGS2(ctx, ISA300); | ||
25 | REQUIRE_VSX(ctx); | ||
26 | |||
27 | - ro = tcg_const_i32(a->rc); | ||
28 | + ro = tcg_constant_i32(a->rc); | ||
29 | |||
30 | xt = gen_avr_ptr(a->rt); | ||
31 | xb = gen_avr_ptr(a->rb); | ||
32 | @@ -XXX,XX +XXX,XX @@ static void gen_##name(DisasContext *ctx) \ | ||
33 | gen_exception(ctx, POWERPC_EXCP_VSXU); \ | ||
34 | return; \ | ||
35 | } \ | ||
36 | - opc = tcg_const_i32(ctx->opcode); \ | ||
37 | + opc = tcg_constant_i32(ctx->opcode); \ | ||
38 | gen_helper_##name(cpu_env, opc); \ | ||
39 | } | ||
40 | |||
41 | @@ -XXX,XX +XXX,XX @@ static void gen_##name(DisasContext *ctx) \ | ||
42 | gen_exception(ctx, POWERPC_EXCP_VSXU); \ | ||
43 | return; \ | ||
44 | } \ | ||
45 | - opc = tcg_const_i32(ctx->opcode); \ | ||
46 | + opc = tcg_constant_i32(ctx->opcode); \ | ||
47 | xa = gen_vsr_ptr(xA(ctx->opcode)); \ | ||
48 | xb = gen_vsr_ptr(xB(ctx->opcode)); \ | ||
49 | gen_helper_##name(cpu_env, opc, xa, xb); \ | ||
50 | @@ -XXX,XX +XXX,XX @@ static void gen_##name(DisasContext *ctx) \ | ||
51 | gen_exception(ctx, POWERPC_EXCP_VSXU); \ | ||
52 | return; \ | ||
53 | } \ | ||
54 | - opc = tcg_const_i32(ctx->opcode); \ | ||
55 | + opc = tcg_constant_i32(ctx->opcode); \ | ||
56 | xb = gen_vsr_ptr(xB(ctx->opcode)); \ | ||
57 | gen_helper_##name(cpu_env, opc, xb); \ | ||
58 | } | ||
59 | @@ -XXX,XX +XXX,XX @@ static void gen_##name(DisasContext *ctx) \ | ||
60 | gen_exception(ctx, POWERPC_EXCP_VSXU); \ | ||
61 | return; \ | ||
62 | } \ | ||
63 | - opc = tcg_const_i32(ctx->opcode); \ | ||
64 | + opc = tcg_constant_i32(ctx->opcode); \ | ||
65 | xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \ | ||
66 | xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \ | ||
67 | xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ | ||
68 | @@ -XXX,XX +XXX,XX @@ static void gen_##name(DisasContext *ctx) \ | ||
69 | gen_exception(ctx, POWERPC_EXCP_VSXU); \ | ||
70 | return; \ | ||
71 | } \ | ||
72 | - opc = tcg_const_i32(ctx->opcode); \ | ||
73 | + opc = tcg_constant_i32(ctx->opcode); \ | ||
74 | xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \ | ||
75 | xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ | ||
76 | gen_helper_##name(cpu_env, opc, xt, xb); \ | ||
77 | @@ -XXX,XX +XXX,XX @@ static void gen_##name(DisasContext *ctx) \ | ||
78 | gen_exception(ctx, POWERPC_EXCP_VSXU); \ | ||
79 | return; \ | ||
80 | } \ | ||
81 | - opc = tcg_const_i32(ctx->opcode); \ | ||
82 | + opc = tcg_constant_i32(ctx->opcode); \ | ||
83 | xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \ | ||
84 | xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ | ||
85 | gen_helper_##name(cpu_env, opc, xa, xb); \ | ||
86 | @@ -XXX,XX +XXX,XX @@ static void gen_xsxsigdp(DisasContext *ctx) | ||
87 | exp = tcg_temp_new_i64(); | ||
88 | t0 = tcg_temp_new_i64(); | ||
89 | t1 = tcg_temp_new_i64(); | ||
90 | - zr = tcg_const_i64(0); | ||
91 | - nan = tcg_const_i64(2047); | ||
92 | + zr = tcg_constant_i64(0); | ||
93 | + nan = tcg_constant_i64(2047); | ||
94 | |||
95 | get_cpu_vsr(t1, xB(ctx->opcode), true); | ||
96 | tcg_gen_extract_i64(exp, t1, 52, 11); | ||
97 | @@ -XXX,XX +XXX,XX @@ static void gen_xsxsigqp(DisasContext *ctx) | ||
98 | get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false); | ||
99 | exp = tcg_temp_new_i64(); | ||
100 | t0 = tcg_temp_new_i64(); | ||
101 | - zr = tcg_const_i64(0); | ||
102 | - nan = tcg_const_i64(32767); | ||
103 | + zr = tcg_constant_i64(0); | ||
104 | + nan = tcg_constant_i64(32767); | ||
105 | |||
106 | tcg_gen_extract_i64(exp, xbh, 48, 15); | ||
107 | tcg_gen_movi_i64(t0, 0x0001000000000000); | ||
108 | @@ -XXX,XX +XXX,XX @@ static void gen_xvxsigdp(DisasContext *ctx) | ||
109 | get_cpu_vsr(xbl, xB(ctx->opcode), false); | ||
110 | exp = tcg_temp_new_i64(); | ||
111 | t0 = tcg_temp_new_i64(); | ||
112 | - zr = tcg_const_i64(0); | ||
113 | - nan = tcg_const_i64(2047); | ||
114 | + zr = tcg_constant_i64(0); | ||
115 | + nan = tcg_constant_i64(2047); | ||
116 | |||
117 | tcg_gen_extract_i64(exp, xbh, 52, 11); | ||
118 | tcg_gen_movi_i64(t0, 0x0010000000000000); | ||
119 | -- | ||
120 | 2.34.1 | ||
121 | |||
New patch | |||
---|---|---|---|
1 | All uses are strictly read-only. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
4 | Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/ppc/translate/fp-impl.c.inc | 26 ++++++++++++-------------- | ||
8 | 1 file changed, 12 insertions(+), 14 deletions(-) | ||
9 | |||
10 | diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/ppc/translate/fp-impl.c.inc | ||
13 | +++ b/target/ppc/translate/fp-impl.c.inc | ||
14 | @@ -XXX,XX +XXX,XX @@ static void gen_fcmpo(DisasContext *ctx) | ||
15 | t0 = tcg_temp_new_i64(); | ||
16 | t1 = tcg_temp_new_i64(); | ||
17 | gen_reset_fpstatus(); | ||
18 | - crf = tcg_const_i32(crfD(ctx->opcode)); | ||
19 | + crf = tcg_constant_i32(crfD(ctx->opcode)); | ||
20 | get_fpr(t0, rA(ctx->opcode)); | ||
21 | get_fpr(t1, rB(ctx->opcode)); | ||
22 | gen_helper_fcmpo(cpu_env, t0, t1, crf); | ||
23 | @@ -XXX,XX +XXX,XX @@ static void gen_fcmpu(DisasContext *ctx) | ||
24 | t0 = tcg_temp_new_i64(); | ||
25 | t1 = tcg_temp_new_i64(); | ||
26 | gen_reset_fpstatus(); | ||
27 | - crf = tcg_const_i32(crfD(ctx->opcode)); | ||
28 | + crf = tcg_constant_i32(crfD(ctx->opcode)); | ||
29 | get_fpr(t0, rA(ctx->opcode)); | ||
30 | get_fpr(t1, rB(ctx->opcode)); | ||
31 | gen_helper_fcmpu(cpu_env, t0, t1, crf); | ||
32 | @@ -XXX,XX +XXX,XX @@ static void gen_mcrfs(DisasContext *ctx) | ||
33 | tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, | ||
34 | ~((0xF << shift) & FP_EX_CLEAR_BITS)); | ||
35 | /* FEX and VX need to be updated, so don't set fpscr directly */ | ||
36 | - tmask = tcg_const_i32(1 << nibble); | ||
37 | + tmask = tcg_constant_i32(1 << nibble); | ||
38 | gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask); | ||
39 | } | ||
40 | |||
41 | @@ -XXX,XX +XXX,XX @@ static void gen_mtfsb0(DisasContext *ctx) | ||
42 | crb = 31 - crbD(ctx->opcode); | ||
43 | gen_reset_fpstatus(); | ||
44 | if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) { | ||
45 | - TCGv_i32 t0; | ||
46 | - t0 = tcg_const_i32(crb); | ||
47 | - gen_helper_fpscr_clrbit(cpu_env, t0); | ||
48 | + gen_helper_fpscr_clrbit(cpu_env, tcg_constant_i32(crb)); | ||
49 | } | ||
50 | if (unlikely(Rc(ctx->opcode) != 0)) { | ||
51 | tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); | ||
52 | @@ -XXX,XX +XXX,XX @@ static void gen_mtfsb1(DisasContext *ctx) | ||
53 | crb = 31 - crbD(ctx->opcode); | ||
54 | /* XXX: we pretend we can only do IEEE floating-point computations */ | ||
55 | if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) { | ||
56 | - TCGv_i32 t0; | ||
57 | - t0 = tcg_const_i32(crb); | ||
58 | - gen_helper_fpscr_setbit(cpu_env, t0); | ||
59 | + gen_helper_fpscr_setbit(cpu_env, tcg_constant_i32(crb)); | ||
60 | } | ||
61 | if (unlikely(Rc(ctx->opcode) != 0)) { | ||
62 | tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); | ||
63 | @@ -XXX,XX +XXX,XX @@ static void gen_mtfsf(DisasContext *ctx) | ||
64 | gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); | ||
65 | return; | ||
66 | } | ||
67 | - if (l) { | ||
68 | - t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff); | ||
69 | + if (!l) { | ||
70 | + t0 = tcg_constant_i32(flm << (w * 8)); | ||
71 | + } else if (ctx->insns_flags2 & PPC2_ISA205) { | ||
72 | + t0 = tcg_constant_i32(0xffff); | ||
73 | } else { | ||
74 | - t0 = tcg_const_i32(flm << (w * 8)); | ||
75 | + t0 = tcg_constant_i32(0xff); | ||
76 | } | ||
77 | t1 = tcg_temp_new_i64(); | ||
78 | get_fpr(t1, rB(ctx->opcode)); | ||
79 | @@ -XXX,XX +XXX,XX @@ static void gen_mtfsfi(DisasContext *ctx) | ||
80 | return; | ||
81 | } | ||
82 | sh = (8 * w) + 7 - bf; | ||
83 | - t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh)); | ||
84 | - t1 = tcg_const_i32(1 << sh); | ||
85 | + t0 = tcg_constant_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh)); | ||
86 | + t1 = tcg_constant_i32(1 << sh); | ||
87 | gen_helper_store_fpscr(cpu_env, t0, t1); | ||
88 | if (unlikely(Rc(ctx->opcode) != 0)) { | ||
89 | tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); | ||
90 | -- | ||
91 | 2.34.1 | ||
92 | |||
1 | When PAGE_WRITE_INV is set in the call to tlb_set_page, | 1 | All uses are strictly read-only. |
---|---|---|---|
2 | we immediately set TLB_INVALID_MASK in order to force | ||
3 | tlb_fill to be called on the next lookup. Here in | ||
4 | probe_access_internal, we have just called tlb_fill | ||
5 | and eliminated true misses, thus the lookup must be valid. | ||
6 | 2 | ||
7 | This allows us to remove a warning comment from s390x. | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
8 | There doesn't seem to be a reason to change the code though. | 4 | Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com> |
9 | |||
10 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
11 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
12 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
14 | --- | 6 | --- |
15 | accel/tcg/cputlb.c | 10 +++++++++- | 7 | target/ppc/power8-pmu-regs.c.inc | 4 ++-- |
16 | target/s390x/tcg/mem_helper.c | 4 ---- | 8 | 1 file changed, 2 insertions(+), 2 deletions(-) |
17 | 2 files changed, 9 insertions(+), 5 deletions(-) | ||
18 | 9 | ||
19 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 10 | diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc |
20 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/accel/tcg/cputlb.c | 12 | --- a/target/ppc/power8-pmu-regs.c.inc |
22 | +++ b/accel/tcg/cputlb.c | 13 | +++ b/target/ppc/power8-pmu-regs.c.inc |
23 | @@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, | 14 | @@ -XXX,XX +XXX,XX @@ void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn) |
24 | } | 15 | |
25 | tlb_addr = tlb_read_ofs(entry, elt_ofs); | 16 | void spr_read_PMC(DisasContext *ctx, int gprn, int sprn) |
26 | 17 | { | |
27 | + flags = TLB_FLAGS_MASK; | 18 | - TCGv_i32 t_sprn = tcg_const_i32(sprn); |
28 | page_addr = addr & TARGET_PAGE_MASK; | 19 | + TCGv_i32 t_sprn = tcg_constant_i32(sprn); |
29 | if (!tlb_hit_page(tlb_addr, page_addr)) { | 20 | |
30 | if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { | 21 | gen_icount_io_start(ctx); |
31 | @@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr, | 22 | gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn); |
32 | 23 | @@ -XXX,XX +XXX,XX @@ void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn) | |
33 | /* TLB resize via tlb_fill may have moved the entry. */ | 24 | |
34 | entry = tlb_entry(env, mmu_idx, addr); | 25 | void spr_write_PMC(DisasContext *ctx, int sprn, int gprn) |
35 | + | 26 | { |
36 | + /* | 27 | - TCGv_i32 t_sprn = tcg_const_i32(sprn); |
37 | + * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, | 28 | + TCGv_i32 t_sprn = tcg_constant_i32(sprn); |
38 | + * to force the next access through tlb_fill. We've just | 29 | |
39 | + * called tlb_fill, so we know that this entry *is* valid. | 30 | gen_icount_io_start(ctx); |
40 | + */ | 31 | gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]); |
41 | + flags &= ~TLB_INVALID_MASK; | ||
42 | } | ||
43 | tlb_addr = tlb_read_ofs(entry, elt_ofs); | ||
44 | } | ||
45 | - flags = tlb_addr & TLB_FLAGS_MASK; | ||
46 | + flags &= tlb_addr; | ||
47 | |||
48 | /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ | ||
49 | if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { | ||
50 | diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c | ||
51 | index XXXXXXX..XXXXXXX 100644 | ||
52 | --- a/target/s390x/tcg/mem_helper.c | ||
53 | +++ b/target/s390x/tcg/mem_helper.c | ||
54 | @@ -XXX,XX +XXX,XX @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size, | ||
55 | #else | ||
56 | int flags; | ||
57 | |||
58 | - /* | ||
59 | - * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL | ||
60 | - * to detect if there was an exception during tlb_fill(). | ||
61 | - */ | ||
62 | env->tlb_fill_exc = 0; | ||
63 | flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost, | ||
64 | ra); | ||
65 | -- | 32 | -- |
66 | 2.34.1 | 33 | 2.34.1 |
67 | 34 | ||
New patch | |||
---|---|---|---|
1 | Compute all carry bits in parallel instead of a loop. | ||
1 | 2 | ||
3 | Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | target/ppc/translate/fixedpoint-impl.c.inc | 44 +++++++++++----------- | ||
7 | 1 file changed, 23 insertions(+), 21 deletions(-) | ||
8 | |||
9 | diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/target/ppc/translate/fixedpoint-impl.c.inc | ||
12 | +++ b/target/ppc/translate/fixedpoint-impl.c.inc | ||
13 | @@ -XXX,XX +XXX,XX @@ static bool trans_PEXTD(DisasContext *ctx, arg_X *a) | ||
14 | |||
15 | static bool trans_ADDG6S(DisasContext *ctx, arg_X *a) | ||
16 | { | ||
17 | - const uint64_t carry_bits = 0x1111111111111111ULL; | ||
18 | - TCGv t0, t1, carry, zero = tcg_constant_tl(0); | ||
19 | + const target_ulong carry_bits = (target_ulong)-1 / 0xf; | ||
20 | + TCGv in1, in2, carryl, carryh, tmp; | ||
21 | + TCGv zero = tcg_constant_tl(0); | ||
22 | |||
23 | REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206); | ||
24 | |||
25 | - t0 = tcg_temp_new(); | ||
26 | - t1 = tcg_const_tl(0); | ||
27 | - carry = tcg_const_tl(0); | ||
28 | + in1 = cpu_gpr[a->ra]; | ||
29 | + in2 = cpu_gpr[a->rb]; | ||
30 | + tmp = tcg_temp_new(); | ||
31 | + carryl = tcg_temp_new(); | ||
32 | + carryh = tcg_temp_new(); | ||
33 | |||
34 | - for (int i = 0; i < 16; i++) { | ||
35 | - tcg_gen_shri_tl(t0, cpu_gpr[a->ra], i * 4); | ||
36 | - tcg_gen_andi_tl(t0, t0, 0xf); | ||
37 | - tcg_gen_add_tl(t1, t1, t0); | ||
38 | + /* Addition with carry. */ | ||
39 | + tcg_gen_add2_tl(carryl, carryh, in1, zero, in2, zero); | ||
40 | + /* Addition without carry. */ | ||
41 | + tcg_gen_xor_tl(tmp, in1, in2); | ||
42 | + /* Difference between the two is carry in to each bit. */ | ||
43 | + tcg_gen_xor_tl(carryl, carryl, tmp); | ||
44 | |||
45 | - tcg_gen_shri_tl(t0, cpu_gpr[a->rb], i * 4); | ||
46 | - tcg_gen_andi_tl(t0, t0, 0xf); | ||
47 | - tcg_gen_add_tl(t1, t1, t0); | ||
48 | + /* | ||
49 | + * The carry-out that we're looking for is the carry-in to | ||
50 | + * the next nibble. Shift the double-word down one nibble, | ||
51 | + * which puts all of the bits back into one word. | ||
52 | + */ | ||
53 | + tcg_gen_extract2_tl(carryl, carryl, carryh, 4); | ||
54 | |||
55 | - tcg_gen_andi_tl(t1, t1, 0x10); | ||
56 | - tcg_gen_setcond_tl(TCG_COND_NE, t1, t1, zero); | ||
57 | - | ||
58 | - tcg_gen_shli_tl(t0, t1, i * 4); | ||
59 | - tcg_gen_or_tl(carry, carry, t0); | ||
60 | - } | ||
61 | - | ||
62 | - tcg_gen_xori_tl(carry, carry, (target_long)carry_bits); | ||
63 | - tcg_gen_muli_tl(cpu_gpr[a->rt], carry, 6); | ||
64 | + /* Invert, isolate the carry bits, and produce 6's. */ | ||
65 | + tcg_gen_andc_tl(carryl, tcg_constant_tl(carry_bits), carryl); | ||
66 | + tcg_gen_muli_tl(cpu_gpr[a->rt], carryl, 6); | ||
67 | return true; | ||
68 | } | ||
69 | |||
70 | -- | ||
71 | 2.34.1 | ||
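A standalone plain-C sketch of the carry-extraction trick used above, for the 64-bit case only (the function name addg6s64 and the test values are illustrative, not QEMU code).

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t addg6s64(uint64_t a, uint64_t b)
    {
        const uint64_t carry_bits = (uint64_t)-1 / 0xf;  /* 0x1111111111111111 */
        uint64_t sum = a + b;                 /* addition with carry */
        uint64_t carry_hi = sum < a;          /* carry out of bit 63 */
        uint64_t nocarry = a ^ b;             /* addition without carry */
        uint64_t carry_in = sum ^ nocarry;    /* carry in to each bit */
        /* Carry out of each nibble is the carry in to the next nibble. */
        uint64_t nib_carry = (carry_in >> 4) | (carry_hi << 60);
        /* Produce 6 in every nibble that generated no carry. */
        return (~nib_carry & carry_bits) * 6;
    }

    int main(void)
    {
        /* Only the low nibble (9 + 8) carries: expect 0x6666666666666660. */
        printf("%016" PRIx64 "\n", addg6s64(0x19, 0x08));
        return 0;
    }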
New patch | |||
---|---|---|---|
1 | Fix incorrect read from rD. | ||
2 | Avoid adding 0 when rA == 0. | ||
1 | 3 | ||
4 | Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | target/ppc/translate.c | 6 ++---- | ||
8 | 1 file changed, 2 insertions(+), 4 deletions(-) | ||
9 | |||
10 | diff --git a/target/ppc/translate.c b/target/ppc/translate.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/target/ppc/translate.c | ||
13 | +++ b/target/ppc/translate.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void gen_tlbsx_booke206(DisasContext *ctx) | ||
15 | CHK_SV(ctx); | ||
16 | if (rA(ctx->opcode)) { | ||
17 | t0 = tcg_temp_new(); | ||
18 | - tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]); | ||
19 | + tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); | ||
20 | } else { | ||
21 | - t0 = tcg_const_tl(0); | ||
22 | + t0 = cpu_gpr[rB(ctx->opcode)]; | ||
23 | } | ||
24 | - | ||
25 | - tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]); | ||
26 | gen_helper_booke206_tlbsx(cpu_env, t0); | ||
27 | #endif /* defined(CONFIG_USER_ONLY) */ | ||
28 | } | ||
29 | -- | ||
30 | 2.34.1 |
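For context on the fix: in this encoding rA == 0 selects a literal zero rather than GPR[0], so the effective address is (rA ? GPR[rA] : 0) + GPR[rB]; the old code read rD instead of rA and always performed the add. A plain C sketch of the intended computation (hypothetical helper name, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Effective address used by tlbsx: (rA|0) + (rB). */
    static uint64_t tlbsx_ea(const uint64_t *gpr, int ra, int rb)
    {
        /* ra == 0 means "use zero", not "use gpr[0]". */
        return (ra ? gpr[ra] : 0) + gpr[rb];
    }

    int main(void)
    {
        uint64_t gpr[32] = { [0] = 0xdead, [3] = 0x1000, [4] = 0x20 };
        /* With ra == 0 the 0xdead in gpr[0] must not be added. */
        printf("%llx %llx\n",
               (unsigned long long)tlbsx_ea(gpr, 0, 4),
               (unsigned long long)tlbsx_ea(gpr, 3, 4));
        return 0;
    }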
1 | This field is only written, not read; remove it. | 1 | All remaining uses are strictly read-only. |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 4 | Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com> |
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 6 | --- |
8 | include/hw/core/cpu.h | 1 - | 7 | target/ppc/translate.c | 142 +++++++++++++++++++++-------------------- |
9 | accel/tcg/cputlb.c | 7 +++---- | 8 | 1 file changed, 72 insertions(+), 70 deletions(-) |
10 | 2 files changed, 3 insertions(+), 5 deletions(-) | ||
11 | 9 | ||
12 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | 10 | diff --git a/target/ppc/translate.c b/target/ppc/translate.c |
13 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/include/hw/core/cpu.h | 12 | --- a/target/ppc/translate.c |
15 | +++ b/include/hw/core/cpu.h | 13 | +++ b/target/ppc/translate.c |
16 | @@ -XXX,XX +XXX,XX @@ struct CPUWatchpoint { | 14 | @@ -XXX,XX +XXX,XX @@ static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error) |
17 | * the memory regions get moved around by io_writex. | 15 | * faulting instruction |
18 | */ | 16 | */ |
19 | typedef struct SavedIOTLB { | 17 | gen_update_nip(ctx, ctx->cia); |
20 | - hwaddr addr; | 18 | - t0 = tcg_const_i32(excp); |
21 | MemoryRegionSection *section; | 19 | - t1 = tcg_const_i32(error); |
22 | hwaddr mr_offset; | 20 | + t0 = tcg_constant_i32(excp); |
23 | } SavedIOTLB; | 21 | + t1 = tcg_constant_i32(error); |
24 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 22 | gen_helper_raise_exception_err(cpu_env, t0, t1); |
25 | index XXXXXXX..XXXXXXX 100644 | 23 | ctx->base.is_jmp = DISAS_NORETURN; |
26 | --- a/accel/tcg/cputlb.c | 24 | } |
27 | +++ b/accel/tcg/cputlb.c | 25 | @@ -XXX,XX +XXX,XX @@ static void gen_exception(DisasContext *ctx, uint32_t excp) |
28 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full, | 26 | * faulting instruction |
29 | * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match | 27 | */ |
30 | * because of the side effect of io_writex changing memory layout. | 28 | gen_update_nip(ctx, ctx->cia); |
31 | */ | 29 | - t0 = tcg_const_i32(excp); |
32 | -static void save_iotlb_data(CPUState *cs, hwaddr addr, | 30 | + t0 = tcg_constant_i32(excp); |
33 | - MemoryRegionSection *section, hwaddr mr_offset) | 31 | gen_helper_raise_exception(cpu_env, t0); |
34 | +static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section, | 32 | ctx->base.is_jmp = DISAS_NORETURN; |
35 | + hwaddr mr_offset) | 33 | } |
36 | { | 34 | @@ -XXX,XX +XXX,XX @@ static void gen_exception_nip(DisasContext *ctx, uint32_t excp, |
37 | #ifdef CONFIG_PLUGIN | 35 | TCGv_i32 t0; |
38 | SavedIOTLB *saved = &cs->saved_iotlb; | 36 | |
39 | - saved->addr = addr; | 37 | gen_update_nip(ctx, nip); |
40 | saved->section = section; | 38 | - t0 = tcg_const_i32(excp); |
41 | saved->mr_offset = mr_offset; | 39 | + t0 = tcg_constant_i32(excp); |
40 | gen_helper_raise_exception(cpu_env, t0); | ||
41 | ctx->base.is_jmp = DISAS_NORETURN; | ||
42 | } | ||
43 | @@ -XXX,XX +XXX,XX @@ void spr_noaccess(DisasContext *ctx, int gprn, int sprn) | ||
44 | static void spr_load_dump_spr(int sprn) | ||
45 | { | ||
46 | #ifdef PPC_DUMP_SPR_ACCESSES | ||
47 | - TCGv_i32 t0 = tcg_const_i32(sprn); | ||
48 | + TCGv_i32 t0 = tcg_constant_i32(sprn); | ||
49 | gen_helper_load_dump_spr(cpu_env, t0); | ||
42 | #endif | 50 | #endif |
43 | @@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full, | 51 | } |
44 | * The memory_region_dispatch may trigger a flush/resize | 52 | @@ -XXX,XX +XXX,XX @@ void spr_read_generic(DisasContext *ctx, int gprn, int sprn) |
45 | * so for plugins we save the iotlb_data just in case. | 53 | static void spr_store_dump_spr(int sprn) |
54 | { | ||
55 | #ifdef PPC_DUMP_SPR_ACCESSES | ||
56 | - TCGv_i32 t0 = tcg_const_i32(sprn); | ||
57 | + TCGv_i32 t0 = tcg_constant_i32(sprn); | ||
58 | gen_helper_store_dump_spr(cpu_env, t0); | ||
59 | #endif | ||
60 | } | ||
61 | @@ -XXX,XX +XXX,XX @@ void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn) | ||
62 | |||
63 | void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn) | ||
64 | { | ||
65 | - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); | ||
66 | + TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2); | ||
67 | gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); | ||
68 | } | ||
69 | |||
70 | void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn) | ||
71 | { | ||
72 | - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4); | ||
73 | + TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4); | ||
74 | gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); | ||
75 | } | ||
76 | |||
77 | void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn) | ||
78 | { | ||
79 | - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2); | ||
80 | + TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2); | ||
81 | gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); | ||
82 | } | ||
83 | |||
84 | void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn) | ||
85 | { | ||
86 | - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4); | ||
87 | + TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4); | ||
88 | gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); | ||
89 | } | ||
90 | |||
91 | @@ -XXX,XX +XXX,XX @@ void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn) | ||
92 | |||
93 | void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn) | ||
94 | { | ||
95 | - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2); | ||
96 | + TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2); | ||
97 | gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); | ||
98 | } | ||
99 | |||
100 | void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn) | ||
101 | { | ||
102 | - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4); | ||
103 | + TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4); | ||
104 | gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); | ||
105 | } | ||
106 | |||
107 | void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn) | ||
108 | { | ||
109 | - TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2); | ||
110 | + TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2); | ||
111 | gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); | ||
112 | } | ||
113 | |||
114 | void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn) | ||
115 | { | ||
116 | - TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4); | ||
117 | + TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4); | ||
118 | gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); | ||
119 | } | ||
120 | |||
121 | @@ -XXX,XX +XXX,XX @@ void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn) | ||
122 | |||
123 | void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn) | ||
124 | { | ||
125 | - TCGv_i32 t0 = tcg_const_i32(sprn); | ||
126 | + TCGv_i32 t0 = tcg_constant_i32(sprn); | ||
127 | gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]); | ||
128 | } | ||
129 | + | ||
130 | void spr_write_eplc(DisasContext *ctx, int sprn, int gprn) | ||
131 | { | ||
132 | gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]); | ||
133 | } | ||
134 | + | ||
135 | void spr_write_epsc(DisasContext *ctx, int sprn, int gprn) | ||
136 | { | ||
137 | gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]); | ||
138 | @@ -XXX,XX +XXX,XX @@ void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) | ||
139 | static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn, | ||
140 | int bit, int sprn, int cause) | ||
141 | { | ||
142 | - TCGv_i32 t1 = tcg_const_i32(bit); | ||
143 | - TCGv_i32 t2 = tcg_const_i32(sprn); | ||
144 | - TCGv_i32 t3 = tcg_const_i32(cause); | ||
145 | + TCGv_i32 t1 = tcg_constant_i32(bit); | ||
146 | + TCGv_i32 t2 = tcg_constant_i32(sprn); | ||
147 | + TCGv_i32 t3 = tcg_constant_i32(cause); | ||
148 | |||
149 | gen_helper_fscr_facility_check(cpu_env, t1, t2, t3); | ||
150 | } | ||
151 | @@ -XXX,XX +XXX,XX @@ static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn, | ||
152 | static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn, | ||
153 | int bit, int sprn, int cause) | ||
154 | { | ||
155 | - TCGv_i32 t1 = tcg_const_i32(bit); | ||
156 | - TCGv_i32 t2 = tcg_const_i32(sprn); | ||
157 | - TCGv_i32 t3 = tcg_const_i32(cause); | ||
158 | + TCGv_i32 t1 = tcg_constant_i32(bit); | ||
159 | + TCGv_i32 t2 = tcg_constant_i32(sprn); | ||
160 | + TCGv_i32 t3 = tcg_constant_i32(cause); | ||
161 | |||
162 | gen_helper_msr_facility_check(cpu_env, t1, t2, t3); | ||
163 | } | ||
164 | @@ -XXX,XX +XXX,XX @@ static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf) | ||
165 | |||
166 | static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf) | ||
167 | { | ||
168 | - TCGv t0 = tcg_const_tl(arg1); | ||
169 | + TCGv t0 = tcg_constant_tl(arg1); | ||
170 | gen_op_cmp(arg0, t0, s, crf); | ||
171 | } | ||
172 | |||
173 | @@ -XXX,XX +XXX,XX @@ static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf) | ||
174 | |||
175 | static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf) | ||
176 | { | ||
177 | - TCGv t0 = tcg_const_tl(arg1); | ||
178 | + TCGv t0 = tcg_constant_tl(arg1); | ||
179 | gen_op_cmp32(arg0, t0, s, crf); | ||
180 | } | ||
181 | |||
182 | @@ -XXX,XX +XXX,XX @@ static void gen_isel(DisasContext *ctx) | ||
183 | tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]); | ||
184 | tcg_gen_andi_tl(t0, t0, mask); | ||
185 | |||
186 | - zr = tcg_const_tl(0); | ||
187 | + zr = tcg_constant_tl(0); | ||
188 | tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr, | ||
189 | rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr, | ||
190 | cpu_gpr[rB(ctx->opcode)]); | ||
191 | @@ -XXX,XX +XXX,XX @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, | ||
192 | tcg_gen_mov_tl(ca32, ca); | ||
193 | } | ||
194 | } else { | ||
195 | - TCGv zero = tcg_const_tl(0); | ||
196 | + TCGv zero = tcg_constant_tl(0); | ||
197 | if (add_ca) { | ||
198 | tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero); | ||
199 | tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero); | ||
200 | @@ -XXX,XX +XXX,XX @@ static void glue(gen_, name)(DisasContext *ctx) \ | ||
201 | add_ca, compute_ca, compute_ov) \ | ||
202 | static void glue(gen_, name)(DisasContext *ctx) \ | ||
203 | { \ | ||
204 | - TCGv t0 = tcg_const_tl(const_val); \ | ||
205 | + TCGv t0 = tcg_constant_tl(const_val); \ | ||
206 | gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ | ||
207 | cpu_gpr[rA(ctx->opcode)], t0, \ | ||
208 | ca, glue(ca, 32), \ | ||
209 | @@ -XXX,XX +XXX,XX @@ GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1) | ||
210 | /* addic addic.*/ | ||
211 | static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0) | ||
212 | { | ||
213 | - TCGv c = tcg_const_tl(SIMM(ctx->opcode)); | ||
214 | + TCGv c = tcg_constant_tl(SIMM(ctx->opcode)); | ||
215 | gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], | ||
216 | c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0); | ||
217 | } | ||
218 | @@ -XXX,XX +XXX,XX @@ GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1); | ||
219 | #define GEN_DIVE(name, hlpr, compute_ov) \ | ||
220 | static void gen_##name(DisasContext *ctx) \ | ||
221 | { \ | ||
222 | - TCGv_i32 t0 = tcg_const_i32(compute_ov); \ | ||
223 | + TCGv_i32 t0 = tcg_constant_i32(compute_ov); \ | ||
224 | gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \ | ||
225 | cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \ | ||
226 | if (unlikely(Rc(ctx->opcode) != 0)) { \ | ||
227 | @@ -XXX,XX +XXX,XX @@ static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1, | ||
228 | tcg_gen_rem_i32(t3, t0, t1); | ||
229 | tcg_gen_ext_i32_tl(ret, t3); | ||
230 | } else { | ||
231 | - TCGv_i32 t2 = tcg_const_i32(1); | ||
232 | - TCGv_i32 t3 = tcg_const_i32(0); | ||
233 | + TCGv_i32 t2 = tcg_constant_i32(1); | ||
234 | + TCGv_i32 t3 = tcg_constant_i32(0); | ||
235 | tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1); | ||
236 | tcg_gen_remu_i32(t3, t0, t1); | ||
237 | tcg_gen_extu_i32_tl(ret, t3); | ||
238 | @@ -XXX,XX +XXX,XX @@ static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1, | ||
239 | tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1); | ||
240 | tcg_gen_rem_i64(ret, t0, t1); | ||
241 | } else { | ||
242 | - TCGv_i64 t2 = tcg_const_i64(1); | ||
243 | - TCGv_i64 t3 = tcg_const_i64(0); | ||
244 | + TCGv_i64 t2 = tcg_constant_i64(1); | ||
245 | + TCGv_i64 t3 = tcg_constant_i64(0); | ||
246 | tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1); | ||
247 | tcg_gen_remu_i64(ret, t0, t1); | ||
248 | } | ||
249 | @@ -XXX,XX +XXX,XX @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, | ||
250 | } else if (add_ca) { | ||
251 | TCGv zero, inv1 = tcg_temp_new(); | ||
252 | tcg_gen_not_tl(inv1, arg1); | ||
253 | - zero = tcg_const_tl(0); | ||
254 | + zero = tcg_constant_tl(0); | ||
255 | tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero); | ||
256 | tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero); | ||
257 | gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0); | ||
258 | @@ -XXX,XX +XXX,XX @@ static void glue(gen_, name)(DisasContext *ctx) \ | ||
259 | add_ca, compute_ca, compute_ov) \ | ||
260 | static void glue(gen_, name)(DisasContext *ctx) \ | ||
261 | { \ | ||
262 | - TCGv t0 = tcg_const_tl(const_val); \ | ||
263 | + TCGv t0 = tcg_constant_tl(const_val); \ | ||
264 | gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ | ||
265 | cpu_gpr[rA(ctx->opcode)], t0, \ | ||
266 | add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ | ||
267 | @@ -XXX,XX +XXX,XX @@ GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) | ||
268 | /* subfic */ | ||
269 | static void gen_subfic(DisasContext *ctx) | ||
270 | { | ||
271 | - TCGv c = tcg_const_tl(SIMM(ctx->opcode)); | ||
272 | + TCGv c = tcg_constant_tl(SIMM(ctx->opcode)); | ||
273 | gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], | ||
274 | c, 0, 1, 0, 0); | ||
275 | } | ||
276 | @@ -XXX,XX +XXX,XX @@ static void gen_subfic(DisasContext *ctx) | ||
277 | /* neg neg. nego nego. */ | ||
278 | static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov) | ||
279 | { | ||
280 | - TCGv zero = tcg_const_tl(0); | ||
281 | + TCGv zero = tcg_constant_tl(0); | ||
282 | gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], | ||
283 | zero, 0, 0, compute_ov, Rc(ctx->opcode)); | ||
284 | } | ||
285 | @@ -XXX,XX +XXX,XX @@ GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER); | ||
286 | #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) | ||
287 | static void gen_pause(DisasContext *ctx) | ||
288 | { | ||
289 | - TCGv_i32 t0 = tcg_const_i32(0); | ||
290 | + TCGv_i32 t0 = tcg_constant_i32(0); | ||
291 | tcg_gen_st_i32(t0, cpu_env, | ||
292 | -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); | ||
293 | |||
294 | @@ -XXX,XX +XXX,XX @@ static void gen_lmw(DisasContext *ctx) | ||
295 | } | ||
296 | gen_set_access_type(ctx, ACCESS_INT); | ||
297 | t0 = tcg_temp_new(); | ||
298 | - t1 = tcg_const_i32(rD(ctx->opcode)); | ||
299 | + t1 = tcg_constant_i32(rD(ctx->opcode)); | ||
300 | gen_addr_imm_index(ctx, t0, 0); | ||
301 | gen_helper_lmw(cpu_env, t0, t1); | ||
302 | } | ||
303 | @@ -XXX,XX +XXX,XX @@ static void gen_stmw(DisasContext *ctx) | ||
304 | } | ||
305 | gen_set_access_type(ctx, ACCESS_INT); | ||
306 | t0 = tcg_temp_new(); | ||
307 | - t1 = tcg_const_i32(rS(ctx->opcode)); | ||
308 | + t1 = tcg_constant_i32(rS(ctx->opcode)); | ||
309 | gen_addr_imm_index(ctx, t0, 0); | ||
310 | gen_helper_stmw(cpu_env, t0, t1); | ||
311 | } | ||
312 | @@ -XXX,XX +XXX,XX @@ static void gen_lswi(DisasContext *ctx) | ||
313 | gen_set_access_type(ctx, ACCESS_INT); | ||
314 | t0 = tcg_temp_new(); | ||
315 | gen_addr_register(ctx, t0); | ||
316 | - t1 = tcg_const_i32(nb); | ||
317 | - t2 = tcg_const_i32(start); | ||
318 | + t1 = tcg_constant_i32(nb); | ||
319 | + t2 = tcg_constant_i32(start); | ||
320 | gen_helper_lsw(cpu_env, t0, t1, t2); | ||
321 | } | ||
322 | |||
323 | @@ -XXX,XX +XXX,XX @@ static void gen_lswx(DisasContext *ctx) | ||
324 | gen_set_access_type(ctx, ACCESS_INT); | ||
325 | t0 = tcg_temp_new(); | ||
326 | gen_addr_reg_index(ctx, t0); | ||
327 | - t1 = tcg_const_i32(rD(ctx->opcode)); | ||
328 | - t2 = tcg_const_i32(rA(ctx->opcode)); | ||
329 | - t3 = tcg_const_i32(rB(ctx->opcode)); | ||
330 | + t1 = tcg_constant_i32(rD(ctx->opcode)); | ||
331 | + t2 = tcg_constant_i32(rA(ctx->opcode)); | ||
332 | + t3 = tcg_constant_i32(rB(ctx->opcode)); | ||
333 | gen_helper_lswx(cpu_env, t0, t1, t2, t3); | ||
334 | } | ||
335 | |||
336 | @@ -XXX,XX +XXX,XX @@ static void gen_stswi(DisasContext *ctx) | ||
337 | if (nb == 0) { | ||
338 | nb = 32; | ||
339 | } | ||
340 | - t1 = tcg_const_i32(nb); | ||
341 | - t2 = tcg_const_i32(rS(ctx->opcode)); | ||
342 | + t1 = tcg_constant_i32(nb); | ||
343 | + t2 = tcg_constant_i32(rS(ctx->opcode)); | ||
344 | gen_helper_stsw(cpu_env, t0, t1, t2); | ||
345 | } | ||
346 | |||
347 | @@ -XXX,XX +XXX,XX @@ static void gen_stswx(DisasContext *ctx) | ||
348 | t1 = tcg_temp_new_i32(); | ||
349 | tcg_gen_trunc_tl_i32(t1, cpu_xer); | ||
350 | tcg_gen_andi_i32(t1, t1, 0x7F); | ||
351 | - t2 = tcg_const_i32(rS(ctx->opcode)); | ||
352 | + t2 = tcg_constant_i32(rS(ctx->opcode)); | ||
353 | gen_helper_stsw(cpu_env, t0, t1, t2); | ||
354 | } | ||
355 | |||
356 | @@ -XXX,XX +XXX,XX @@ static void gen_wait(DisasContext *ctx) | ||
357 | * to occur. | ||
46 | */ | 358 | */ |
47 | - save_iotlb_data(cpu, full->xlat_section, section, mr_offset); | 359 | if (wc == 0) { |
48 | + save_iotlb_data(cpu, section, mr_offset); | 360 | - TCGv_i32 t0 = tcg_const_i32(1); |
49 | 361 | + TCGv_i32 t0 = tcg_constant_i32(1); | |
50 | if (!qemu_mutex_iothread_locked()) { | 362 | tcg_gen_st_i32(t0, cpu_env, |
51 | qemu_mutex_lock_iothread(); | 363 | -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); |
364 | /* Stop translation, as the CPU is supposed to sleep from now */ | ||
365 | @@ -XXX,XX +XXX,XX @@ static void gen_doze(DisasContext *ctx) | ||
366 | TCGv_i32 t; | ||
367 | |||
368 | CHK_HV(ctx); | ||
369 | - t = tcg_const_i32(PPC_PM_DOZE); | ||
370 | + t = tcg_constant_i32(PPC_PM_DOZE); | ||
371 | gen_helper_pminsn(cpu_env, t); | ||
372 | /* Stop translation, as the CPU is supposed to sleep from now */ | ||
373 | gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); | ||
374 | @@ -XXX,XX +XXX,XX @@ static void gen_nap(DisasContext *ctx) | ||
375 | TCGv_i32 t; | ||
376 | |||
377 | CHK_HV(ctx); | ||
378 | - t = tcg_const_i32(PPC_PM_NAP); | ||
379 | + t = tcg_constant_i32(PPC_PM_NAP); | ||
380 | gen_helper_pminsn(cpu_env, t); | ||
381 | /* Stop translation, as the CPU is supposed to sleep from now */ | ||
382 | gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); | ||
383 | @@ -XXX,XX +XXX,XX @@ static void gen_stop(DisasContext *ctx) | ||
384 | TCGv_i32 t; | ||
385 | |||
386 | CHK_HV(ctx); | ||
387 | - t = tcg_const_i32(PPC_PM_STOP); | ||
388 | + t = tcg_constant_i32(PPC_PM_STOP); | ||
389 | gen_helper_pminsn(cpu_env, t); | ||
390 | /* Stop translation, as the CPU is supposed to sleep from now */ | ||
391 | gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); | ||
392 | @@ -XXX,XX +XXX,XX @@ static void gen_sleep(DisasContext *ctx) | ||
393 | TCGv_i32 t; | ||
394 | |||
395 | CHK_HV(ctx); | ||
396 | - t = tcg_const_i32(PPC_PM_SLEEP); | ||
397 | + t = tcg_constant_i32(PPC_PM_SLEEP); | ||
398 | gen_helper_pminsn(cpu_env, t); | ||
399 | /* Stop translation, as the CPU is supposed to sleep from now */ | ||
400 | gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); | ||
401 | @@ -XXX,XX +XXX,XX @@ static void gen_rvwinkle(DisasContext *ctx) | ||
402 | TCGv_i32 t; | ||
403 | |||
404 | CHK_HV(ctx); | ||
405 | - t = tcg_const_i32(PPC_PM_RVWINKLE); | ||
406 | + t = tcg_constant_i32(PPC_PM_RVWINKLE); | ||
407 | gen_helper_pminsn(cpu_env, t); | ||
408 | /* Stop translation, as the CPU is supposed to sleep from now */ | ||
409 | gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); | ||
410 | @@ -XXX,XX +XXX,XX @@ static void gen_tw(DisasContext *ctx) | ||
411 | if (check_unconditional_trap(ctx)) { | ||
412 | return; | ||
413 | } | ||
414 | - t0 = tcg_const_i32(TO(ctx->opcode)); | ||
415 | + t0 = tcg_constant_i32(TO(ctx->opcode)); | ||
416 | gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], | ||
417 | t0); | ||
418 | } | ||
419 | @@ -XXX,XX +XXX,XX @@ static void gen_twi(DisasContext *ctx) | ||
420 | if (check_unconditional_trap(ctx)) { | ||
421 | return; | ||
422 | } | ||
423 | - t0 = tcg_const_tl(SIMM(ctx->opcode)); | ||
424 | - t1 = tcg_const_i32(TO(ctx->opcode)); | ||
425 | + t0 = tcg_constant_tl(SIMM(ctx->opcode)); | ||
426 | + t1 = tcg_constant_i32(TO(ctx->opcode)); | ||
427 | gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); | ||
428 | } | ||
429 | |||
430 | @@ -XXX,XX +XXX,XX @@ static void gen_td(DisasContext *ctx) | ||
431 | if (check_unconditional_trap(ctx)) { | ||
432 | return; | ||
433 | } | ||
434 | - t0 = tcg_const_i32(TO(ctx->opcode)); | ||
435 | + t0 = tcg_constant_i32(TO(ctx->opcode)); | ||
436 | gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], | ||
437 | t0); | ||
438 | } | ||
439 | @@ -XXX,XX +XXX,XX @@ static void gen_tdi(DisasContext *ctx) | ||
440 | if (check_unconditional_trap(ctx)) { | ||
441 | return; | ||
442 | } | ||
443 | - t0 = tcg_const_tl(SIMM(ctx->opcode)); | ||
444 | - t1 = tcg_const_i32(TO(ctx->opcode)); | ||
445 | + t0 = tcg_constant_tl(SIMM(ctx->opcode)); | ||
446 | + t1 = tcg_constant_i32(TO(ctx->opcode)); | ||
447 | gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); | ||
448 | } | ||
449 | #endif | ||
450 | @@ -XXX,XX +XXX,XX @@ static void gen_dcbz(DisasContext *ctx) | ||
451 | |||
452 | gen_set_access_type(ctx, ACCESS_CACHE); | ||
453 | tcgv_addr = tcg_temp_new(); | ||
454 | - tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000); | ||
455 | + tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000); | ||
456 | gen_addr_reg_index(ctx, tcgv_addr); | ||
457 | gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op); | ||
458 | } | ||
459 | @@ -XXX,XX +XXX,XX @@ static void gen_dcbzep(DisasContext *ctx) | ||
460 | |||
461 | gen_set_access_type(ctx, ACCESS_CACHE); | ||
462 | tcgv_addr = tcg_temp_new(); | ||
463 | - tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000); | ||
464 | + tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000); | ||
465 | gen_addr_reg_index(ctx, tcgv_addr); | ||
466 | gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op); | ||
467 | } | ||
468 | @@ -XXX,XX +XXX,XX @@ static void gen_mfsr(DisasContext *ctx) | ||
469 | TCGv t0; | ||
470 | |||
471 | CHK_SV(ctx); | ||
472 | - t0 = tcg_const_tl(SR(ctx->opcode)); | ||
473 | + t0 = tcg_constant_tl(SR(ctx->opcode)); | ||
474 | gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); | ||
475 | #endif /* defined(CONFIG_USER_ONLY) */ | ||
476 | } | ||
477 | @@ -XXX,XX +XXX,XX @@ static void gen_mtsr(DisasContext *ctx) | ||
478 | TCGv t0; | ||
479 | |||
480 | CHK_SV(ctx); | ||
481 | - t0 = tcg_const_tl(SR(ctx->opcode)); | ||
482 | + t0 = tcg_constant_tl(SR(ctx->opcode)); | ||
483 | gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); | ||
484 | #endif /* defined(CONFIG_USER_ONLY) */ | ||
485 | } | ||
486 | @@ -XXX,XX +XXX,XX @@ static void gen_mfsr_64b(DisasContext *ctx) | ||
487 | TCGv t0; | ||
488 | |||
489 | CHK_SV(ctx); | ||
490 | - t0 = tcg_const_tl(SR(ctx->opcode)); | ||
491 | + t0 = tcg_constant_tl(SR(ctx->opcode)); | ||
492 | gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); | ||
493 | #endif /* defined(CONFIG_USER_ONLY) */ | ||
494 | } | ||
495 | @@ -XXX,XX +XXX,XX @@ static void gen_mtsr_64b(DisasContext *ctx) | ||
496 | TCGv t0; | ||
497 | |||
498 | CHK_SV(ctx); | ||
499 | - t0 = tcg_const_tl(SR(ctx->opcode)); | ||
500 | + t0 = tcg_constant_tl(SR(ctx->opcode)); | ||
501 | gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); | ||
502 | #endif /* defined(CONFIG_USER_ONLY) */ | ||
503 | } | ||
504 | @@ -XXX,XX +XXX,XX @@ static void gen_mfdcr(DisasContext *ctx) | ||
505 | TCGv dcrn; | ||
506 | |||
507 | CHK_SV(ctx); | ||
508 | - dcrn = tcg_const_tl(SPR(ctx->opcode)); | ||
509 | + dcrn = tcg_constant_tl(SPR(ctx->opcode)); | ||
510 | gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn); | ||
511 | #endif /* defined(CONFIG_USER_ONLY) */ | ||
512 | } | ||
513 | @@ -XXX,XX +XXX,XX @@ static void gen_mtdcr(DisasContext *ctx) | ||
514 | TCGv dcrn; | ||
515 | |||
516 | CHK_SV(ctx); | ||
517 | - dcrn = tcg_const_tl(SPR(ctx->opcode)); | ||
518 | + dcrn = tcg_constant_tl(SPR(ctx->opcode)); | ||
519 | gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]); | ||
520 | #endif /* defined(CONFIG_USER_ONLY) */ | ||
521 | } | ||
522 | @@ -XXX,XX +XXX,XX @@ static void gen_tlbre_440(DisasContext *ctx) | ||
523 | case 1: | ||
524 | case 2: | ||
525 | { | ||
526 | - TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode)); | ||
527 | + TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode)); | ||
528 | gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env, | ||
529 | t0, cpu_gpr[rA(ctx->opcode)]); | ||
530 | } | ||
531 | @@ -XXX,XX +XXX,XX @@ static void gen_tlbwe_440(DisasContext *ctx) | ||
532 | case 1: | ||
533 | case 2: | ||
534 | { | ||
535 | - TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode)); | ||
536 | + TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode)); | ||
537 | gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)], | ||
538 | cpu_gpr[rS(ctx->opcode)]); | ||
539 | } | ||
540 | @@ -XXX,XX +XXX,XX @@ static void gen_wrteei(DisasContext *ctx) | ||
541 | /* dlmzb */ | ||
542 | static void gen_dlmzb(DisasContext *ctx) | ||
543 | { | ||
544 | - TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode)); | ||
545 | + TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode)); | ||
546 | gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env, | ||
547 | cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); | ||
548 | } | ||
52 | -- | 549 | -- |
53 | 2.34.1 | 550 | 2.34.1 |
54 | 551 | ||
55 | 552 |
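On the right-hand series: the reason these conversions are safe whenever a value is "strictly read-only" is that tcg_constant_* hands back a shared, interned constant rather than a fresh writable temp, so there is nothing to free and nothing a later write could corrupt. A minimal sketch of that interning idea in plain C (not QEMU's actual implementation; the real pool in tcg.c is also keyed on the TCG type):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Sketch of constant interning: a read-only constant is looked up
     * in a pool and shared, so callers neither write to it nor free it.
     */
    typedef struct ConstTemp {
        int64_t value;
        struct ConstTemp *next;
    } ConstTemp;

    static ConstTemp *const_pool;

    static ConstTemp *intern_const(int64_t value)
    {
        ConstTemp *t;

        for (t = const_pool; t; t = t->next) {
            if (t->value == value) {
                return t;               /* reuse the shared constant */
            }
        }
        t = malloc(sizeof(*t));
        t->value = value;
        t->next = const_pool;
        const_pool = t;
        return t;
    }

    int main(void)
    {
        /* Same value, same object: nothing to free per use. */
        printf("%d\n", intern_const(42) == intern_const(42));
        return 0;
    }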
1 | Let tb->page_addr[0] contain the address of the first byte of the | 1 | Use tcg_constant_i32 for the bounds. |
---|---|---|---|
2 | translated block, rather than the address of the page containing the | ||
3 | start of the translated block. We need to recover this value anyway | ||
4 | at various points, and it is easier to discard a page offset when it | ||
5 | is not needed, which happens naturally via the existing find_page shift. | ||
6 | 2 | ||
7 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | --- | 5 | --- |
10 | accel/tcg/cpu-exec.c | 16 ++++++++-------- | 6 | target/tricore/translate.c | 14 +++----------- |
11 | accel/tcg/cputlb.c | 3 ++- | 7 | 1 file changed, 3 insertions(+), 11 deletions(-) |
12 | accel/tcg/translate-all.c | 9 +++++---- | ||
13 | 3 files changed, 15 insertions(+), 13 deletions(-) | ||
14 | 8 | ||
15 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | 9 | diff --git a/target/tricore/translate.c b/target/tricore/translate.c |
16 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/accel/tcg/cpu-exec.c | 11 | --- a/target/tricore/translate.c |
18 | +++ b/accel/tcg/cpu-exec.c | 12 | +++ b/target/tricore/translate.c |
19 | @@ -XXX,XX +XXX,XX @@ struct tb_desc { | 13 | @@ -XXX,XX +XXX,XX @@ gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, |
20 | target_ulong pc; | 14 | |
21 | target_ulong cs_base; | 15 | static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low) |
22 | CPUArchState *env; | 16 | { |
23 | - tb_page_addr_t phys_page1; | 17 | - TCGv sat_neg = tcg_const_i32(low); |
24 | + tb_page_addr_t page_addr0; | 18 | - TCGv temp = tcg_const_i32(up); |
25 | uint32_t flags; | 19 | - |
26 | uint32_t cflags; | 20 | - /* sat_neg = (arg < low ) ? low : arg; */ |
27 | uint32_t trace_vcpu_dstate; | 21 | - tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg); |
28 | @@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d) | 22 | - |
29 | const struct tb_desc *desc = d; | 23 | - /* ret = (sat_neg > up ) ? up : sat_neg; */ |
30 | 24 | - tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg); | |
31 | if (tb->pc == desc->pc && | 25 | + tcg_gen_smax_tl(ret, arg, tcg_constant_i32(low)); |
32 | - tb->page_addr[0] == desc->phys_page1 && | 26 | + tcg_gen_smin_tl(ret, ret, tcg_constant_i32(up)); |
33 | + tb->page_addr[0] == desc->page_addr0 && | ||
34 | tb->cs_base == desc->cs_base && | ||
35 | tb->flags == desc->flags && | ||
36 | tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && | ||
37 | @@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||
38 | if (tb->page_addr[1] == -1) { | ||
39 | return true; | ||
40 | } else { | ||
41 | - tb_page_addr_t phys_page2; | ||
42 | - target_ulong virt_page2; | ||
43 | + tb_page_addr_t phys_page1; | ||
44 | + target_ulong virt_page1; | ||
45 | |||
46 | /* | ||
47 | * We know that the first page matched, and an otherwise valid TB | ||
48 | @@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||
49 | * is different for the new TB. Therefore any exception raised | ||
50 | * here by the faulting lookup is not premature. | ||
51 | */ | ||
52 | - virt_page2 = TARGET_PAGE_ALIGN(desc->pc); | ||
53 | - phys_page2 = get_page_addr_code(desc->env, virt_page2); | ||
54 | - if (tb->page_addr[1] == phys_page2) { | ||
55 | + virt_page1 = TARGET_PAGE_ALIGN(desc->pc); | ||
56 | + phys_page1 = get_page_addr_code(desc->env, virt_page1); | ||
57 | + if (tb->page_addr[1] == phys_page1) { | ||
58 | return true; | ||
59 | } | ||
60 | } | ||
61 | @@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||
62 | if (phys_pc == -1) { | ||
63 | return NULL; | ||
64 | } | ||
65 | - desc.phys_page1 = phys_pc & TARGET_PAGE_MASK; | ||
66 | + desc.page_addr0 = phys_pc; | ||
67 | h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate); | ||
68 | return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); | ||
69 | } | 27 | } |
70 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 28 | |
71 | index XXXXXXX..XXXXXXX 100644 | 29 | static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up) |
72 | --- a/accel/tcg/cputlb.c | ||
73 | +++ b/accel/tcg/cputlb.c | ||
74 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, | ||
75 | can be detected */ | ||
76 | void tlb_protect_code(ram_addr_t ram_addr) | ||
77 | { | 30 | { |
78 | - cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, | 31 | - TCGv temp = tcg_const_i32(up); |
79 | + cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK, | 32 | - /* sat_neg = (arg > up ) ? up : arg; */ |
80 | + TARGET_PAGE_SIZE, | 33 | - tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg); |
81 | DIRTY_MEMORY_CODE); | 34 | + tcg_gen_umin_tl(ret, arg, tcg_constant_i32(up)); |
82 | } | 35 | } |
83 | 36 | ||
84 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | 37 | static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count) |
85 | index XXXXXXX..XXXXXXX 100644 | ||
86 | --- a/accel/tcg/translate-all.c | ||
87 | +++ b/accel/tcg/translate-all.c | ||
88 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
89 | qemu_spin_unlock(&tb->jmp_lock); | ||
90 | |||
91 | /* remove the TB from the hash list */ | ||
92 | - phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); | ||
93 | + phys_pc = tb->page_addr[0]; | ||
94 | h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags, | ||
95 | tb->trace_vcpu_dstate); | ||
96 | if (!qht_remove(&tb_ctx.htable, tb, h)) { | ||
97 | @@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | ||
98 | * we can only insert TBs that are fully initialized. | ||
99 | */ | ||
100 | page_lock_pair(&p, phys_pc, &p2, phys_page2, true); | ||
101 | - tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK); | ||
102 | + tb_page_add(p, tb, 0, phys_pc); | ||
103 | if (p2) { | ||
104 | tb_page_add(p2, tb, 1, phys_page2); | ||
105 | } else { | ||
106 | @@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, | ||
107 | if (n == 0) { | ||
108 | /* NOTE: tb_end may be after the end of the page, but | ||
109 | it is not a problem */ | ||
110 | - tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); | ||
111 | + tb_start = tb->page_addr[0]; | ||
112 | tb_end = tb_start + tb->size; | ||
113 | } else { | ||
114 | tb_start = tb->page_addr[1]; | ||
115 | - tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); | ||
116 | + tb_end = tb_start + ((tb->page_addr[0] + tb->size) | ||
117 | + & ~TARGET_PAGE_MASK); | ||
118 | } | ||
119 | if (!(tb_end <= start || tb_start >= end)) { | ||
120 | #ifdef TARGET_HAS_PRECISE_SMC | ||
121 | -- | 38 | -- |
122 | 2.34.1 | 39 | 2.34.1 |
123 | 40 | ||
124 | 41 |
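The tricore change on the right replaces two movcond-plus-constant-temp sequences with a straight clamp; in plain C the same saturation is just (names are mine, not the target's):

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp v into [low, up]: smax followed by smin. */
    static int32_t saturate(int32_t v, int32_t low, int32_t up)
    {
        v = v < low ? low : v;   /* tcg_gen_smax_tl(ret, arg, low) */
        v = v > up ? up : v;     /* tcg_gen_smin_tl(ret, ret, up)  */
        return v;
    }

    int main(void)
    {
        printf("%d %d %d\n", saturate(-200, -128, 127),
               saturate(42, -128, 127), saturate(300, -128, 127));
        return 0;
    }

The unsigned variant (gen_saturate_u) only needs the upper bound, hence the single umin in the patch.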
1 | From: Alex Bennée <alex.bennee@linaro.org> | 1 | Replace with tcg_constant_vec*. |
---|---|---|---|
2 | 2 | ||
3 | This is a heavily used function so let's avoid the cost of | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4 | CPU_GET_CLASS. On the romulus-bmc run it has a modest effect: | ||
5 | |||
6 | Before: 36.812 s ± 0.506 s | ||
7 | After: 35.912 s ± 0.168 s | ||
8 | |||
9 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> | ||
10 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | Message-Id: <20220811151413.3350684-4-alex.bennee@linaro.org> | ||
12 | Signed-off-by: Cédric Le Goater <clg@kaod.org> | ||
13 | Message-Id: <20220923084803.498337-4-clg@kaod.org> | ||
14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
15 | --- | 5 | --- |
16 | hw/core/cpu-sysemu.c | 5 ++--- | 6 | include/tcg/tcg.h | 4 ---- |
17 | 1 file changed, 2 insertions(+), 3 deletions(-) | 7 | tcg/tcg-op-vec.c | 34 ++-------------------------------- |
8 | tcg/i386/tcg-target.c.inc | 9 ++++----- | ||
9 | 3 files changed, 6 insertions(+), 41 deletions(-) | ||
18 | 10 | ||
19 | diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c | 11 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h |
20 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/hw/core/cpu-sysemu.c | 13 | --- a/include/tcg/tcg.h |
22 | +++ b/hw/core/cpu-sysemu.c | 14 | +++ b/include/tcg/tcg.h |
23 | @@ -XXX,XX +XXX,XX @@ hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr) | 15 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s); |
24 | 16 | /* Allocate a new temporary and initialize it with a constant. */ | |
25 | int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs) | 17 | TCGv_i32 tcg_const_i32(int32_t val); |
18 | TCGv_i64 tcg_const_i64(int64_t val); | ||
19 | -TCGv_vec tcg_const_zeros_vec(TCGType); | ||
20 | -TCGv_vec tcg_const_ones_vec(TCGType); | ||
21 | -TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec); | ||
22 | -TCGv_vec tcg_const_ones_vec_matching(TCGv_vec); | ||
23 | |||
24 | /* | ||
25 | * Locate or create a read-only temporary that is a constant. | ||
26 | diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/tcg/tcg-op-vec.c | ||
29 | +++ b/tcg/tcg-op-vec.c | ||
30 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_mov_vec(TCGv_vec r, TCGv_vec a) | ||
31 | } | ||
32 | } | ||
33 | |||
34 | -TCGv_vec tcg_const_zeros_vec(TCGType type) | ||
35 | -{ | ||
36 | - TCGv_vec ret = tcg_temp_new_vec(type); | ||
37 | - tcg_gen_dupi_vec(MO_64, ret, 0); | ||
38 | - return ret; | ||
39 | -} | ||
40 | - | ||
41 | -TCGv_vec tcg_const_ones_vec(TCGType type) | ||
42 | -{ | ||
43 | - TCGv_vec ret = tcg_temp_new_vec(type); | ||
44 | - tcg_gen_dupi_vec(MO_64, ret, -1); | ||
45 | - return ret; | ||
46 | -} | ||
47 | - | ||
48 | -TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec m) | ||
49 | -{ | ||
50 | - TCGTemp *t = tcgv_vec_temp(m); | ||
51 | - return tcg_const_zeros_vec(t->base_type); | ||
52 | -} | ||
53 | - | ||
54 | -TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m) | ||
55 | -{ | ||
56 | - TCGTemp *t = tcgv_vec_temp(m); | ||
57 | - return tcg_const_ones_vec(t->base_type); | ||
58 | -} | ||
59 | - | ||
60 | void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a) | ||
26 | { | 61 | { |
27 | - CPUClass *cc = CPU_GET_CLASS(cpu); | 62 | TCGTemp *rt = tcgv_vec_temp(r); |
28 | int ret = 0; | 63 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a) |
29 | 64 | const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); | |
30 | - if (cc->sysemu_ops->asidx_from_attrs) { | 65 | |
31 | - ret = cc->sysemu_ops->asidx_from_attrs(cpu, attrs); | 66 | if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) { |
32 | + if (cpu->cc->sysemu_ops->asidx_from_attrs) { | 67 | - TCGv_vec t = tcg_const_ones_vec_matching(r); |
33 | + ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs); | 68 | - tcg_gen_xor_vec(0, r, a, t); |
34 | assert(ret < cpu->num_ases && ret >= 0); | 69 | - tcg_temp_free_vec(t); |
70 | + tcg_gen_xor_vec(0, r, a, tcg_constant_vec_matching(r, 0, -1)); | ||
35 | } | 71 | } |
36 | return ret; | 72 | tcg_swap_vecop_list(hold_list); |
73 | } | ||
74 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a) | ||
75 | hold_list = tcg_swap_vecop_list(NULL); | ||
76 | |||
77 | if (!TCG_TARGET_HAS_neg_vec || !do_op2(vece, r, a, INDEX_op_neg_vec)) { | ||
78 | - TCGv_vec t = tcg_const_zeros_vec_matching(r); | ||
79 | - tcg_gen_sub_vec(vece, r, t, a); | ||
80 | - tcg_temp_free_vec(t); | ||
81 | + tcg_gen_sub_vec(vece, r, tcg_constant_vec_matching(r, vece, 0), a); | ||
82 | } | ||
83 | tcg_swap_vecop_list(hold_list); | ||
84 | } | ||
85 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | ||
86 | index XXXXXXX..XXXXXXX 100644 | ||
87 | --- a/tcg/i386/tcg-target.c.inc | ||
88 | +++ b/tcg/i386/tcg-target.c.inc | ||
89 | @@ -XXX,XX +XXX,XX @@ static void expand_vec_sari(TCGType type, unsigned vece, | ||
90 | break; | ||
91 | |||
92 | case MO_64: | ||
93 | + t1 = tcg_temp_new_vec(type); | ||
94 | if (imm <= 32) { | ||
95 | /* | ||
96 | * We can emulate a small sign extend by performing an arithmetic | ||
97 | @@ -XXX,XX +XXX,XX @@ static void expand_vec_sari(TCGType type, unsigned vece, | ||
98 | * does not, so we have to bound the smaller shift -- we get the | ||
99 | * same result in the high half either way. | ||
100 | */ | ||
101 | - t1 = tcg_temp_new_vec(type); | ||
102 | tcg_gen_sari_vec(MO_32, t1, v1, MIN(imm, 31)); | ||
103 | tcg_gen_shri_vec(MO_64, v0, v1, imm); | ||
104 | vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32, | ||
105 | tcgv_vec_arg(v0), tcgv_vec_arg(v0), | ||
106 | tcgv_vec_arg(t1), 0xaa); | ||
107 | - tcg_temp_free_vec(t1); | ||
108 | } else { | ||
109 | /* Otherwise we will need to use a compare vs 0 to produce | ||
110 | * the sign-extend, shift and merge. | ||
111 | */ | ||
112 | - t1 = tcg_const_zeros_vec(type); | ||
113 | - tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1, t1, v1); | ||
114 | + tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1, | ||
115 | + tcg_constant_vec(type, MO_64, 0), v1); | ||
116 | tcg_gen_shri_vec(MO_64, v0, v1, imm); | ||
117 | tcg_gen_shli_vec(MO_64, t1, t1, 64 - imm); | ||
118 | tcg_gen_or_vec(MO_64, v0, v0, t1); | ||
119 | - tcg_temp_free_vec(t1); | ||
120 | } | ||
121 | + tcg_temp_free_vec(t1); | ||
122 | break; | ||
123 | |||
124 | default: | ||
37 | -- | 125 | -- |
38 | 2.34.1 | 126 | 2.34.1 |
39 | 127 | ||
40 | 128 |
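The left-hand series is about the cost of CPU_GET_CLASS on hot paths: the class pointer is resolved once and stashed in the CPUState (cpu->cc), so per-call lookups disappear. A generic sketch of that caching pattern with made-up types standing in for CPUState/CPUClass (not QEMU's code), just to show the shape:

    #include <stdio.h>

    /* Hypothetical object/class pair standing in for CPUState/CPUClass. */
    typedef struct Class {
        int (*asidx_from_attrs)(void *obj, int attrs);
    } Class;

    typedef struct Object {
        const Class *cc;                /* cached once, like cpu->cc */
    } Object;

    static int default_asidx(void *obj, int attrs)
    {
        (void)obj;
        return attrs & 1;
    }

    static const Class some_class = { .asidx_from_attrs = default_asidx };

    static void object_realize(Object *obj)
    {
        obj->cc = &some_class;          /* the one-time class lookup */
    }

    static int asidx_from_attrs(Object *obj, int attrs)
    {
        /* Hot path: use the cached pointer, no per-call class lookup. */
        return obj->cc->asidx_from_attrs(obj, attrs);
    }

    int main(void)
    {
        Object cpu;
        object_realize(&cpu);
        printf("%d\n", asidx_from_attrs(&cpu, 3));
        return 0;
    }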
1 | The availability of tb->pc will shortly be conditional. | 1 | These functions are no longer used. |
---|---|---|---|
2 | Introduce accessor functions to minimize ifdefs. | ||
3 | 2 | ||
4 | Pass around a known pc to places like tcg_gen_code, | 3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
5 | where the caller must already have the value. | ||
6 | |||
7 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | --- | 5 | --- |
10 | accel/tcg/internal.h | 6 ++++ | 6 | include/tcg/tcg-op.h | 4 ---- |
11 | include/exec/exec-all.h | 6 ++++ | 7 | include/tcg/tcg.h | 6 ------ |
12 | include/tcg/tcg.h | 2 +- | 8 | tcg/tcg.c | 16 ---------------- |
13 | accel/tcg/cpu-exec.c | 46 ++++++++++++++----------- | 9 | 3 files changed, 26 deletions(-) |
14 | accel/tcg/translate-all.c | 37 +++++++++++--------- | ||
15 | target/arm/cpu.c | 4 +-- | ||
16 | target/avr/cpu.c | 2 +- | ||
17 | target/hexagon/cpu.c | 2 +- | ||
18 | target/hppa/cpu.c | 4 +-- | ||
19 | target/i386/tcg/tcg-cpu.c | 2 +- | ||
20 | target/loongarch/cpu.c | 2 +- | ||
21 | target/microblaze/cpu.c | 2 +- | ||
22 | target/mips/tcg/exception.c | 2 +- | ||
23 | target/mips/tcg/sysemu/special_helper.c | 2 +- | ||
24 | target/openrisc/cpu.c | 2 +- | ||
25 | target/riscv/cpu.c | 4 +-- | ||
26 | target/rx/cpu.c | 2 +- | ||
27 | target/sh4/cpu.c | 4 +-- | ||
28 | target/sparc/cpu.c | 2 +- | ||
29 | target/tricore/cpu.c | 2 +- | ||
30 | tcg/tcg.c | 8 ++--- | ||
31 | 21 files changed, 82 insertions(+), 61 deletions(-) | ||
32 | 10 | ||
33 | diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h | 11 | diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h |
34 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
35 | --- a/accel/tcg/internal.h | 13 | --- a/include/tcg/tcg-op.h |
36 | +++ b/accel/tcg/internal.h | 14 | +++ b/include/tcg/tcg-op.h |
37 | @@ -XXX,XX +XXX,XX @@ G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); | 15 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t); |
38 | void page_init(void); | 16 | #define tcg_gen_extract_tl tcg_gen_extract_i64 |
39 | void tb_htable_init(void); | 17 | #define tcg_gen_sextract_tl tcg_gen_sextract_i64 |
40 | 18 | #define tcg_gen_extract2_tl tcg_gen_extract2_i64 | |
41 | +/* Return the current PC from CPU, which may be cached in TB. */ | 19 | -#define tcg_const_tl tcg_const_i64 |
42 | +static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb) | 20 | #define tcg_constant_tl tcg_constant_i64 |
43 | +{ | 21 | -#define tcg_const_local_tl tcg_const_local_i64 |
44 | + return tb_pc(tb); | 22 | #define tcg_gen_movcond_tl tcg_gen_movcond_i64 |
45 | +} | 23 | #define tcg_gen_add2_tl tcg_gen_add2_i64 |
46 | + | 24 | #define tcg_gen_sub2_tl tcg_gen_sub2_i64 |
47 | #endif /* ACCEL_TCG_INTERNAL_H */ | 25 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t); |
48 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | 26 | #define tcg_gen_extract_tl tcg_gen_extract_i32 |
49 | index XXXXXXX..XXXXXXX 100644 | 27 | #define tcg_gen_sextract_tl tcg_gen_sextract_i32 |
50 | --- a/include/exec/exec-all.h | 28 | #define tcg_gen_extract2_tl tcg_gen_extract2_i32 |
51 | +++ b/include/exec/exec-all.h | 29 | -#define tcg_const_tl tcg_const_i32 |
52 | @@ -XXX,XX +XXX,XX @@ struct TranslationBlock { | 30 | #define tcg_constant_tl tcg_constant_i32 |
53 | uintptr_t jmp_dest[2]; | 31 | -#define tcg_const_local_tl tcg_const_local_i32 |
54 | }; | 32 | #define tcg_gen_movcond_tl tcg_gen_movcond_i32 |
55 | 33 | #define tcg_gen_add2_tl tcg_gen_add2_i32 | |
56 | +/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */ | 34 | #define tcg_gen_sub2_tl tcg_gen_sub2_i32 |
57 | +static inline target_ulong tb_pc(const TranslationBlock *tb) | ||
58 | +{ | ||
59 | + return tb->pc; | ||
60 | +} | ||
61 | + | ||
62 | /* Hide the qatomic_read to make code a little easier on the eyes */ | ||
63 | static inline uint32_t tb_cflags(const TranslationBlock *tb) | ||
64 | { | ||
65 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h | 35 | diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h |
66 | index XXXXXXX..XXXXXXX 100644 | 36 | index XXXXXXX..XXXXXXX 100644 |
67 | --- a/include/tcg/tcg.h | 37 | --- a/include/tcg/tcg.h |
68 | +++ b/include/tcg/tcg.h | 38 | +++ b/include/tcg/tcg.h |
69 | @@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void); | 39 | @@ -XXX,XX +XXX,XX @@ void tcg_remove_ops_after(TCGOp *op); |
70 | void tcg_prologue_init(TCGContext *s); | 40 | |
71 | void tcg_func_start(TCGContext *s); | 41 | void tcg_optimize(TCGContext *s); |
72 | 42 | ||
73 | -int tcg_gen_code(TCGContext *s, TranslationBlock *tb); | 43 | -/* Allocate a new temporary and initialize it with a constant. */ |
74 | +int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start); | 44 | -TCGv_i32 tcg_const_i32(int32_t val); |
75 | 45 | -TCGv_i64 tcg_const_i64(int64_t val); | |
76 | void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size); | ||
77 | |||
78 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
79 | index XXXXXXX..XXXXXXX 100644 | ||
80 | --- a/accel/tcg/cpu-exec.c | ||
81 | +++ b/accel/tcg/cpu-exec.c | ||
82 | @@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||
83 | const TranslationBlock *tb = p; | ||
84 | const struct tb_desc *desc = d; | ||
85 | |||
86 | - if (tb->pc == desc->pc && | ||
87 | + if (tb_pc(tb) == desc->pc && | ||
88 | tb->page_addr[0] == desc->page_addr0 && | ||
89 | tb->cs_base == desc->cs_base && | ||
90 | tb->flags == desc->flags && | ||
91 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | ||
92 | return tb; | ||
93 | } | ||
94 | |||
95 | -static inline void log_cpu_exec(target_ulong pc, CPUState *cpu, | ||
96 | - const TranslationBlock *tb) | ||
97 | +static void log_cpu_exec(target_ulong pc, CPUState *cpu, | ||
98 | + const TranslationBlock *tb) | ||
99 | { | ||
100 | - if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) | ||
101 | - && qemu_log_in_addr_range(pc)) { | ||
102 | - | 46 | - |
103 | + if (qemu_log_in_addr_range(pc)) { | 47 | /* |
104 | qemu_log_mask(CPU_LOG_EXEC, | 48 | * Locate or create a read-only temporary that is a constant. |
105 | "Trace %d: %p [" TARGET_FMT_lx | 49 | * This kind of temporary need not be freed, but for convenience |
106 | "/" TARGET_FMT_lx "/%08x/%08x] %s\n", | 50 | @@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val); |
107 | @@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) | 51 | TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val); |
108 | return tcg_code_gen_epilogue; | 52 | |
109 | } | 53 | #if UINTPTR_MAX == UINT32_MAX |
110 | 54 | -# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x))) | |
111 | - log_cpu_exec(pc, cpu, tb); | 55 | # define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i32((intptr_t)(x))) |
112 | + if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) { | 56 | #else |
113 | + log_cpu_exec(pc, cpu, tb); | 57 | -# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i64((intptr_t)(x))) |
114 | + } | 58 | # define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i64((intptr_t)(x))) |
115 | |||
116 | return tb->tc.ptr; | ||
117 | } | ||
118 | @@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) | ||
119 | TranslationBlock *last_tb; | ||
120 | const void *tb_ptr = itb->tc.ptr; | ||
121 | |||
122 | - log_cpu_exec(itb->pc, cpu, itb); | ||
123 | + if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) { | ||
124 | + log_cpu_exec(log_pc(cpu, itb), cpu, itb); | ||
125 | + } | ||
126 | |||
127 | qemu_thread_jit_execute(); | ||
128 | ret = tcg_qemu_tb_exec(env, tb_ptr); | ||
129 | @@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) | ||
130 | * of the start of the TB. | ||
131 | */ | ||
132 | CPUClass *cc = CPU_GET_CLASS(cpu); | ||
133 | - qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc, | ||
134 | - "Stopped execution of TB chain before %p [" | ||
135 | - TARGET_FMT_lx "] %s\n", | ||
136 | - last_tb->tc.ptr, last_tb->pc, | ||
137 | - lookup_symbol(last_tb->pc)); | ||
138 | + | ||
139 | if (cc->tcg_ops->synchronize_from_tb) { | ||
140 | cc->tcg_ops->synchronize_from_tb(cpu, last_tb); | ||
141 | } else { | ||
142 | assert(cc->set_pc); | ||
143 | - cc->set_pc(cpu, last_tb->pc); | ||
144 | + cc->set_pc(cpu, tb_pc(last_tb)); | ||
145 | + } | ||
146 | + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { | ||
147 | + target_ulong pc = log_pc(cpu, last_tb); | ||
148 | + if (qemu_log_in_addr_range(pc)) { | ||
149 | + qemu_log("Stopped execution of TB chain before %p [" | ||
150 | + TARGET_FMT_lx "] %s\n", | ||
151 | + last_tb->tc.ptr, pc, lookup_symbol(pc)); | ||
152 | + } | ||
153 | } | ||
154 | } | ||
155 | |||
156 | @@ -XXX,XX +XXX,XX @@ static inline void tb_add_jump(TranslationBlock *tb, int n, | ||
157 | |||
158 | qemu_spin_unlock(&tb_next->jmp_lock); | ||
159 | |||
160 | - qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, | ||
161 | - "Linking TBs %p [" TARGET_FMT_lx | ||
162 | - "] index %d -> %p [" TARGET_FMT_lx "]\n", | ||
163 | - tb->tc.ptr, tb->pc, n, | ||
164 | - tb_next->tc.ptr, tb_next->pc); | ||
165 | + qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n", | ||
166 | + tb->tc.ptr, n, tb_next->tc.ptr); | ||
167 | return; | ||
168 | |||
169 | out_unlock_next: | ||
170 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu, | ||
171 | } | ||
172 | |||
173 | static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, | ||
174 | + target_ulong pc, | ||
175 | TranslationBlock **last_tb, int *tb_exit) | ||
176 | { | ||
177 | int32_t insns_left; | ||
178 | |||
179 | - trace_exec_tb(tb, tb->pc); | ||
180 | + trace_exec_tb(tb, pc); | ||
181 | tb = cpu_tb_exec(cpu, tb, tb_exit); | ||
182 | if (*tb_exit != TB_EXIT_REQUESTED) { | ||
183 | *last_tb = tb; | ||
184 | @@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu) | ||
185 | tb_add_jump(last_tb, tb_exit, tb); | ||
186 | } | ||
187 | |||
188 | - cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit); | ||
189 | + cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit); | ||
190 | |||
191 | /* Try to align the host and virtual clocks | ||
192 | if the guest is in advance */ | ||
193 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | ||
194 | index XXXXXXX..XXXXXXX 100644 | ||
195 | --- a/accel/tcg/translate-all.c | ||
196 | +++ b/accel/tcg/translate-all.c | ||
197 | @@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block) | ||
198 | |||
199 | for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { | ||
200 | if (i == 0) { | ||
201 | - prev = (j == 0 ? tb->pc : 0); | ||
202 | + prev = (j == 0 ? tb_pc(tb) : 0); | ||
203 | } else { | ||
204 | prev = tcg_ctx->gen_insn_data[i - 1][j]; | ||
205 | } | ||
206 | @@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block) | ||
207 | static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, | ||
208 | uintptr_t searched_pc, bool reset_icount) | ||
209 | { | ||
210 | - target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; | ||
211 | + target_ulong data[TARGET_INSN_START_WORDS] = { tb_pc(tb) }; | ||
212 | uintptr_t host_pc = (uintptr_t)tb->tc.ptr; | ||
213 | CPUArchState *env = cpu->env_ptr; | ||
214 | const uint8_t *p = tb->tc.ptr + tb->tc.size; | ||
215 | @@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp) | ||
216 | const TranslationBlock *a = ap; | ||
217 | const TranslationBlock *b = bp; | ||
218 | |||
219 | - return a->pc == b->pc && | ||
220 | + return tb_pc(a) == tb_pc(b) && | ||
221 | a->cs_base == b->cs_base && | ||
222 | a->flags == b->flags && | ||
223 | (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && | ||
224 | @@ -XXX,XX +XXX,XX @@ static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp) | ||
225 | TranslationBlock *tb = p; | ||
226 | target_ulong addr = *(target_ulong *)userp; | ||
227 | |||
228 | - if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) { | ||
229 | + if (!(addr + TARGET_PAGE_SIZE <= tb_pc(tb) || | ||
230 | + addr >= tb_pc(tb) + tb->size)) { | ||
231 | printf("ERROR invalidate: address=" TARGET_FMT_lx | ||
232 | - " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size); | ||
233 | + " PC=%08lx size=%04x\n", addr, (long)tb_pc(tb), tb->size); | ||
234 | } | ||
235 | } | ||
236 | |||
237 | @@ -XXX,XX +XXX,XX @@ static void do_tb_page_check(void *p, uint32_t hash, void *userp) | ||
238 | TranslationBlock *tb = p; | ||
239 | int flags1, flags2; | ||
240 | |||
241 | - flags1 = page_get_flags(tb->pc); | ||
242 | - flags2 = page_get_flags(tb->pc + tb->size - 1); | ||
243 | + flags1 = page_get_flags(tb_pc(tb)); | ||
244 | + flags2 = page_get_flags(tb_pc(tb) + tb->size - 1); | ||
245 | if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { | ||
246 | printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", | ||
247 | - (long)tb->pc, tb->size, flags1, flags2); | ||
248 | + (long)tb_pc(tb), tb->size, flags1, flags2); | ||
249 | } | ||
250 | } | ||
251 | |||
252 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
253 | |||
254 | /* remove the TB from the hash list */ | ||
255 | phys_pc = tb->page_addr[0]; | ||
256 | - h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags, | ||
257 | + h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, orig_cflags, | ||
258 | tb->trace_vcpu_dstate); | ||
259 | if (!qht_remove(&tb_ctx.htable, tb, h)) { | ||
260 | return; | ||
261 | @@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | ||
262 | } | ||
263 | |||
264 | /* add in the hash table */ | ||
265 | - h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags, | ||
266 | + h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, tb->cflags, | ||
267 | tb->trace_vcpu_dstate); | ||
268 | qht_insert(&tb_ctx.htable, tb, h, &existing_tb); | ||
269 | |||
270 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
271 | tcg_ctx->cpu = NULL; | ||
272 | max_insns = tb->icount; | ||
273 | |||
274 | - trace_translate_block(tb, tb->pc, tb->tc.ptr); | ||
275 | + trace_translate_block(tb, pc, tb->tc.ptr); | ||
276 | |||
277 | /* generate machine code */ | ||
278 | tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; | ||
279 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
280 | ti = profile_getclock(); | ||
281 | #endif | 59 | #endif |
282 | 60 | ||
283 | - gen_code_size = tcg_gen_code(tcg_ctx, tb); | ||
284 | + gen_code_size = tcg_gen_code(tcg_ctx, tb, pc); | ||
285 | if (unlikely(gen_code_size < 0)) { | ||
286 | error_return: | ||
287 | switch (gen_code_size) { | ||
288 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
289 | |||
290 | #ifdef DEBUG_DISAS | ||
291 | if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && | ||
292 | - qemu_log_in_addr_range(tb->pc)) { | ||
293 | + qemu_log_in_addr_range(pc)) { | ||
294 | FILE *logfile = qemu_log_trylock(); | ||
295 | if (logfile) { | ||
296 | int code_size, data_size; | ||
297 | @@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) | ||
298 | */ | ||
299 | cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n; | ||
300 | |||
301 | - qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, | ||
302 | - "cpu_io_recompile: rewound execution of TB to " | ||
303 | - TARGET_FMT_lx "\n", tb->pc); | ||
304 | + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { | ||
305 | + target_ulong pc = log_pc(cpu, tb); | ||
306 | + if (qemu_log_in_addr_range(pc)) { | ||
307 | + qemu_log("cpu_io_recompile: rewound execution of TB to " | ||
308 | + TARGET_FMT_lx "\n", pc); | ||
309 | + } | ||
310 | + } | ||
311 | |||
312 | cpu_loop_exit_noexc(cpu); | ||
313 | } | ||
314 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
315 | index XXXXXXX..XXXXXXX 100644 | ||
316 | --- a/target/arm/cpu.c | ||
317 | +++ b/target/arm/cpu.c | ||
318 | @@ -XXX,XX +XXX,XX @@ void arm_cpu_synchronize_from_tb(CPUState *cs, | ||
319 | * never possible for an AArch64 TB to chain to an AArch32 TB. | ||
320 | */ | ||
321 | if (is_a64(env)) { | ||
322 | - env->pc = tb->pc; | ||
323 | + env->pc = tb_pc(tb); | ||
324 | } else { | ||
325 | - env->regs[15] = tb->pc; | ||
326 | + env->regs[15] = tb_pc(tb); | ||
327 | } | ||
328 | } | ||
329 | #endif /* CONFIG_TCG */ | ||
330 | diff --git a/target/avr/cpu.c b/target/avr/cpu.c | ||
331 | index XXXXXXX..XXXXXXX 100644 | ||
332 | --- a/target/avr/cpu.c | ||
333 | +++ b/target/avr/cpu.c | ||
334 | @@ -XXX,XX +XXX,XX @@ static void avr_cpu_synchronize_from_tb(CPUState *cs, | ||
335 | AVRCPU *cpu = AVR_CPU(cs); | ||
336 | CPUAVRState *env = &cpu->env; | ||
337 | |||
338 | - env->pc_w = tb->pc / 2; /* internally PC points to words */ | ||
339 | + env->pc_w = tb_pc(tb) / 2; /* internally PC points to words */ | ||
340 | } | ||
341 | |||
342 | static void avr_cpu_reset(DeviceState *ds) | ||
343 | diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c | ||
344 | index XXXXXXX..XXXXXXX 100644 | ||
345 | --- a/target/hexagon/cpu.c | ||
346 | +++ b/target/hexagon/cpu.c | ||
347 | @@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_synchronize_from_tb(CPUState *cs, | ||
348 | { | ||
349 | HexagonCPU *cpu = HEXAGON_CPU(cs); | ||
350 | CPUHexagonState *env = &cpu->env; | ||
351 | - env->gpr[HEX_REG_PC] = tb->pc; | ||
352 | + env->gpr[HEX_REG_PC] = tb_pc(tb); | ||
353 | } | ||
354 | |||
355 | static bool hexagon_cpu_has_work(CPUState *cs) | ||
356 | diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c | ||
357 | index XXXXXXX..XXXXXXX 100644 | ||
358 | --- a/target/hppa/cpu.c | ||
359 | +++ b/target/hppa/cpu.c | ||
360 | @@ -XXX,XX +XXX,XX @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs, | ||
361 | HPPACPU *cpu = HPPA_CPU(cs); | ||
362 | |||
363 | #ifdef CONFIG_USER_ONLY | ||
364 | - cpu->env.iaoq_f = tb->pc; | ||
365 | + cpu->env.iaoq_f = tb_pc(tb); | ||
366 | cpu->env.iaoq_b = tb->cs_base; | ||
367 | #else | ||
368 | /* Recover the IAOQ values from the GVA + PRIV. */ | ||
369 | @@ -XXX,XX +XXX,XX @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs, | ||
370 | int32_t diff = cs_base; | ||
371 | |||
372 | cpu->env.iasq_f = iasq_f; | ||
373 | - cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv; | ||
374 | + cpu->env.iaoq_f = (tb_pc(tb) & ~iasq_f) + priv; | ||
375 | if (diff) { | ||
376 | cpu->env.iaoq_b = cpu->env.iaoq_f + diff; | ||
377 | } | ||
378 | diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c | ||
379 | index XXXXXXX..XXXXXXX 100644 | ||
380 | --- a/target/i386/tcg/tcg-cpu.c | ||
381 | +++ b/target/i386/tcg/tcg-cpu.c | ||
382 | @@ -XXX,XX +XXX,XX @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, | ||
383 | { | ||
384 | X86CPU *cpu = X86_CPU(cs); | ||
385 | |||
386 | - cpu->env.eip = tb->pc - tb->cs_base; | ||
387 | + cpu->env.eip = tb_pc(tb) - tb->cs_base; | ||
388 | } | ||
389 | |||
390 | #ifndef CONFIG_USER_ONLY | ||
391 | diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c | ||
392 | index XXXXXXX..XXXXXXX 100644 | ||
393 | --- a/target/loongarch/cpu.c | ||
394 | +++ b/target/loongarch/cpu.c | ||
395 | @@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_synchronize_from_tb(CPUState *cs, | ||
396 | LoongArchCPU *cpu = LOONGARCH_CPU(cs); | ||
397 | CPULoongArchState *env = &cpu->env; | ||
398 | |||
399 | - env->pc = tb->pc; | ||
400 | + env->pc = tb_pc(tb); | ||
401 | } | ||
402 | #endif /* CONFIG_TCG */ | ||
403 | |||
404 | diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c | ||
405 | index XXXXXXX..XXXXXXX 100644 | ||
406 | --- a/target/microblaze/cpu.c | ||
407 | +++ b/target/microblaze/cpu.c | ||
408 | @@ -XXX,XX +XXX,XX @@ static void mb_cpu_synchronize_from_tb(CPUState *cs, | ||
409 | { | ||
410 | MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); | ||
411 | |||
412 | - cpu->env.pc = tb->pc; | ||
413 | + cpu->env.pc = tb_pc(tb); | ||
414 | cpu->env.iflags = tb->flags & IFLAGS_TB_MASK; | ||
415 | } | ||
416 | |||
417 | diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c | ||
418 | index XXXXXXX..XXXXXXX 100644 | ||
419 | --- a/target/mips/tcg/exception.c | ||
420 | +++ b/target/mips/tcg/exception.c | ||
421 | @@ -XXX,XX +XXX,XX @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) | ||
422 | MIPSCPU *cpu = MIPS_CPU(cs); | ||
423 | CPUMIPSState *env = &cpu->env; | ||
424 | |||
425 | - env->active_tc.PC = tb->pc; | ||
426 | + env->active_tc.PC = tb_pc(tb); | ||
427 | env->hflags &= ~MIPS_HFLAG_BMASK; | ||
428 | env->hflags |= tb->flags & MIPS_HFLAG_BMASK; | ||
429 | } | ||
430 | diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c | ||
431 | index XXXXXXX..XXXXXXX 100644 | ||
432 | --- a/target/mips/tcg/sysemu/special_helper.c | ||
433 | +++ b/target/mips/tcg/sysemu/special_helper.c | ||
434 | @@ -XXX,XX +XXX,XX @@ bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb) | ||
435 | CPUMIPSState *env = &cpu->env; | ||
436 | |||
437 | if ((env->hflags & MIPS_HFLAG_BMASK) != 0 | ||
438 | - && env->active_tc.PC != tb->pc) { | ||
439 | + && env->active_tc.PC != tb_pc(tb)) { | ||
440 | env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); | ||
441 | env->hflags &= ~MIPS_HFLAG_BMASK; | ||
442 | return true; | ||
443 | diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c | ||
444 | index XXXXXXX..XXXXXXX 100644 | ||
445 | --- a/target/openrisc/cpu.c | ||
446 | +++ b/target/openrisc/cpu.c | ||
447 | @@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_synchronize_from_tb(CPUState *cs, | ||
448 | { | ||
449 | OpenRISCCPU *cpu = OPENRISC_CPU(cs); | ||
450 | |||
451 | - cpu->env.pc = tb->pc; | ||
452 | + cpu->env.pc = tb_pc(tb); | ||
453 | } | ||
454 | |||
455 | |||
456 | diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c | ||
457 | index XXXXXXX..XXXXXXX 100644 | ||
458 | --- a/target/riscv/cpu.c | ||
459 | +++ b/target/riscv/cpu.c | ||
460 | @@ -XXX,XX +XXX,XX @@ static void riscv_cpu_synchronize_from_tb(CPUState *cs, | ||
461 | RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); | ||
462 | |||
463 | if (xl == MXL_RV32) { | ||
464 | - env->pc = (int32_t)tb->pc; | ||
465 | + env->pc = (int32_t)tb_pc(tb); | ||
466 | } else { | ||
467 | - env->pc = tb->pc; | ||
468 | + env->pc = tb_pc(tb); | ||
469 | } | ||
470 | } | ||
471 | |||
472 | diff --git a/target/rx/cpu.c b/target/rx/cpu.c | ||
473 | index XXXXXXX..XXXXXXX 100644 | ||
474 | --- a/target/rx/cpu.c | ||
475 | +++ b/target/rx/cpu.c | ||
476 | @@ -XXX,XX +XXX,XX @@ static void rx_cpu_synchronize_from_tb(CPUState *cs, | ||
477 | { | ||
478 | RXCPU *cpu = RX_CPU(cs); | ||
479 | |||
480 | - cpu->env.pc = tb->pc; | ||
481 | + cpu->env.pc = tb_pc(tb); | ||
482 | } | ||
483 | |||
484 | static bool rx_cpu_has_work(CPUState *cs) | ||
485 | diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c | ||
486 | index XXXXXXX..XXXXXXX 100644 | ||
487 | --- a/target/sh4/cpu.c | ||
488 | +++ b/target/sh4/cpu.c | ||
489 | @@ -XXX,XX +XXX,XX @@ static void superh_cpu_synchronize_from_tb(CPUState *cs, | ||
490 | { | ||
491 | SuperHCPU *cpu = SUPERH_CPU(cs); | ||
492 | |||
493 | - cpu->env.pc = tb->pc; | ||
494 | + cpu->env.pc = tb_pc(tb); | ||
495 | cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK; | ||
496 | } | ||
497 | |||
498 | @@ -XXX,XX +XXX,XX @@ static bool superh_io_recompile_replay_branch(CPUState *cs, | ||
499 | CPUSH4State *env = &cpu->env; | ||
500 | |||
501 | if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 | ||
502 | - && env->pc != tb->pc) { | ||
503 | + && env->pc != tb_pc(tb)) { | ||
504 | env->pc -= 2; | ||
505 | env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); | ||
506 | return true; | ||
507 | diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c | ||
508 | index XXXXXXX..XXXXXXX 100644 | ||
509 | --- a/target/sparc/cpu.c | ||
510 | +++ b/target/sparc/cpu.c | ||
511 | @@ -XXX,XX +XXX,XX @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs, | ||
512 | { | ||
513 | SPARCCPU *cpu = SPARC_CPU(cs); | ||
514 | |||
515 | - cpu->env.pc = tb->pc; | ||
516 | + cpu->env.pc = tb_pc(tb); | ||
517 | cpu->env.npc = tb->cs_base; | ||
518 | } | ||
519 | |||
520 | diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c | ||
521 | index XXXXXXX..XXXXXXX 100644 | ||
522 | --- a/target/tricore/cpu.c | ||
523 | +++ b/target/tricore/cpu.c | ||
524 | @@ -XXX,XX +XXX,XX @@ static void tricore_cpu_synchronize_from_tb(CPUState *cs, | ||
525 | TriCoreCPU *cpu = TRICORE_CPU(cs); | ||
526 | CPUTriCoreState *env = &cpu->env; | ||
527 | |||
528 | - env->PC = tb->pc; | ||
529 | + env->PC = tb_pc(tb); | ||
530 | } | ||
531 | |||
532 | static void tricore_cpu_reset(DeviceState *dev) | ||
533 | diff --git a/tcg/tcg.c b/tcg/tcg.c | 61 | diff --git a/tcg/tcg.c b/tcg/tcg.c |
534 | index XXXXXXX..XXXXXXX 100644 | 62 | index XXXXXXX..XXXXXXX 100644 |
535 | --- a/tcg/tcg.c | 63 | --- a/tcg/tcg.c |
536 | +++ b/tcg/tcg.c | 64 | +++ b/tcg/tcg.c |
537 | @@ -XXX,XX +XXX,XX @@ int64_t tcg_cpu_exec_time(void) | 65 | @@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val) |
538 | #endif | 66 | return tcg_constant_vec(t->base_type, vece, val); |
539 | 67 | } | |
540 | 68 | ||
541 | -int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | 69 | -TCGv_i32 tcg_const_i32(int32_t val) |
542 | +int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) | 70 | -{ |
543 | { | 71 | - TCGv_i32 t0; |
544 | #ifdef CONFIG_PROFILER | 72 | - t0 = tcg_temp_new_i32(); |
545 | TCGProfile *prof = &s->prof; | 73 | - tcg_gen_movi_i32(t0, val); |
546 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | 74 | - return t0; |
547 | 75 | -} | |
548 | #ifdef DEBUG_DISAS | 76 | - |
549 | if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP) | 77 | -TCGv_i64 tcg_const_i64(int64_t val) |
550 | - && qemu_log_in_addr_range(tb->pc))) { | 78 | -{ |
551 | + && qemu_log_in_addr_range(pc_start))) { | 79 | - TCGv_i64 t0; |
552 | FILE *logfile = qemu_log_trylock(); | 80 | - t0 = tcg_temp_new_i64(); |
553 | if (logfile) { | 81 | - tcg_gen_movi_i64(t0, val); |
554 | fprintf(logfile, "OP:\n"); | 82 | - return t0; |
555 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | 83 | -} |
556 | if (s->nb_indirects > 0) { | 84 | - |
557 | #ifdef DEBUG_DISAS | 85 | /* Return true if OP may appear in the opcode stream. |
558 | if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND) | 86 | Test the runtime variable that controls each opcode. */ |
559 | - && qemu_log_in_addr_range(tb->pc))) { | 87 | bool tcg_op_supported(TCGOpcode op) |
560 | + && qemu_log_in_addr_range(pc_start))) { | ||
561 | FILE *logfile = qemu_log_trylock(); | ||
562 | if (logfile) { | ||
563 | fprintf(logfile, "OP before indirect lowering:\n"); | ||
564 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | ||
565 | |||
566 | #ifdef DEBUG_DISAS | ||
567 | if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT) | ||
568 | - && qemu_log_in_addr_range(tb->pc))) { | ||
569 | + && qemu_log_in_addr_range(pc_start))) { | ||
570 | FILE *logfile = qemu_log_trylock(); | ||
571 | if (logfile) { | ||
572 | fprintf(logfile, "OP after optimization and liveness analysis:\n"); | ||
573 | -- | 88 | -- |
574 | 2.34.1 | 89 | 2.34.1 |
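The conversions in this patch all funnel reads of the TB program counter through the tb_pc() accessor introduced earlier in the series, and tcg_gen_code()/the logging paths take the pc explicitly instead of reading it from the TB. As a rough sketch only (assuming the helper sits next to TranslationBlock in include/exec/exec-all.h; the exact in-tree definition may differ), the accessor is a thin wrapper over the field, so a later TARGET_TB_PCREL build can change how the value is stored without touching every call site:

    /* Sketch, not the exact in-tree code: hide the tb->pc read behind a
     * helper so a TARGET_TB_PCREL build can later drop or repurpose the
     * absolute pc in TranslationBlock without editing each caller.
     */
    static inline target_ulong tb_pc(const TranslationBlock *tb)
    {
        return tb->pc;
    }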
575 | 90 | ||