Mini PR, aimed at fixing the mips and ovmf regressions.

r~

The following changes since commit 36e9aab3c569d4c9ad780473596e18479838d1aa:

  migration: Move return path cleanup to main migration thread (2023-09-27 13:58:02 -0400)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230928

for you to fetch changes up to 18a536f1f8d6222e562f59179e837fdfd8b92718:

  accel/tcg: Always require can_do_io (2023-09-28 10:08:13 -0700)

----------------------------------------------------------------
accel/tcg: Always require can_do_io, for #1866

----------------------------------------------------------------
Richard Henderson (6):
      accel/tcg: Avoid load of icount_decr if unused
      accel/tcg: Hoist CF_MEMI_ONLY check outside translation loop
      accel/tcg: Track current value of can_do_io in the TB
      accel/tcg: Improve setting of can_do_io at start of TB
      accel/tcg: Always set CF_LAST_IO with CF_NOIRQ
      accel/tcg: Always require can_do_io

 include/exec/translator.h   |  2 ++
 accel/tcg/cpu-exec.c        |  2 +-
 accel/tcg/tb-maint.c        |  6 ++--
 accel/tcg/translator.c      | 72 +++++++++++++++++++++-----------------------
 target/mips/tcg/translate.c |  1 -
 5 files changed, 41 insertions(+), 42 deletions(-)

accel/tcg: Avoid load of icount_decr if unused

With CF_NOIRQ and without CF_USE_ICOUNT, the load isn't used.
Avoid emitting it.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/translator.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -XXX,XX +XXX,XX @@ bool translator_io_start(DisasContextBase *db)
 
 static TCGOp *gen_tb_start(uint32_t cflags)
 {
-    TCGv_i32 count = tcg_temp_new_i32();
+    TCGv_i32 count = NULL;
     TCGOp *icount_start_insn = NULL;
 
-    tcg_gen_ld_i32(count, cpu_env,
-                   offsetof(ArchCPU, neg.icount_decr.u32) -
-                   offsetof(ArchCPU, env));
+    if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
+        count = tcg_temp_new_i32();
+        tcg_gen_ld_i32(count, cpu_env,
+                       offsetof(ArchCPU, neg.icount_decr.u32) -
+                       offsetof(ArchCPU, env));
+    }
 
     if (cflags & CF_USE_ICOUNT) {
         /*
-- 
2.34.1
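
The new condition can also be read as a simple predicate. As a standalone
restatement for readers following along (the CF_* values below are
illustrative stand-ins, not the tree's definitions):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-ins for two of QEMU's cflags bits. */
    #define CF_USE_ICOUNT 0x00020000u
    #define CF_NOIRQ      0x00100000u

    /*
     * The loaded icount_decr word has two consumers: the low 16 bits
     * when instruction counting is enabled, and the full 32 bits when
     * the TB may poll for interrupts.  With CF_NOIRQ set and
     * CF_USE_ICOUNT clear, neither consumer exists, so the load can
     * be skipped entirely.
     */
    static bool need_icount_decr_load(uint32_t cflags)
    {
        return (cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ);
    }
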

accel/tcg: Hoist CF_MEMI_ONLY check outside translation loop

The condition checked is loop invariant; check it only once.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/translator.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
     ops->tb_start(db, cpu);
     tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
 
-    plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
+    if (cflags & CF_MEMI_ONLY) {
+        /* We should only see CF_MEMI_ONLY for io_recompile. */
+        assert(cflags & CF_LAST_IO);
+        plugin_enabled = plugin_gen_tb_start(cpu, db, true);
+    } else {
+        plugin_enabled = plugin_gen_tb_start(cpu, db, false);
+    }
 
     while (true) {
         *max_insns = ++db->num_insns;
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
         if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
             /* Accept I/O on the last instruction.  */
             gen_io_start();
-            ops->translate_insn(db, cpu);
-        } else {
-            /* we should only see CF_MEMI_ONLY for io_recompile */
-            tcg_debug_assert(!(cflags & CF_MEMI_ONLY));
-            ops->translate_insn(db, cpu);
         }
+        ops->translate_insn(db, cpu);
 
         /*
          * We can't instrument after instructions that change control
-- 
2.34.1
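
The shape of the second hunk is the classic pairing of loop-invariant
hoisting with tail merging. A compilable toy sketch of that pattern, with
stubbed callees and hypothetical names (not QEMU code):

    #include <stdbool.h>

    static void gen_io_start_stub(void)   { /* would emit the i/o marker */ }
    static void translate_insn_stub(void) { /* would translate one insn */ }

    /*
     * Before, both arms of the conditional ended in the same call, and
     * the else arm re-tested a flag that cannot change inside the loop.
     * With the invariant test hoisted out of the loop, the common call
     * is merged below the remaining if.
     */
    static void translate_one(bool last_insn, bool wants_io)
    {
        if (last_insn && wants_io) {
            gen_io_start_stub();    /* accept I/O on the last insn */
        }
        translate_insn_stub();
    }
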

accel/tcg: Track current value of can_do_io in the TB

Simplify translator_io_start by recording the current
known value of can_do_io within DisasContextBase.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/translator.h |  2 ++
 accel/tcg/translator.c    | 31 ++++++++++++++-----------------
 2 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/include/exec/translator.h b/include/exec/translator.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -XXX,XX +XXX,XX @@ typedef enum DisasJumpType {
  * @num_insns: Number of translated instructions (including current).
  * @max_insns: Maximum number of instructions to be translated in this TB.
  * @singlestep_enabled: "Hardware" single stepping enabled.
+ * @saved_can_do_io: Known value of cpu->neg.can_do_io, or -1 for unknown.
  *
  * Architecture-agnostic disassembly context.
  */
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContextBase {
     int num_insns;
     int max_insns;
     bool singlestep_enabled;
+    int8_t saved_can_do_io;
     void *host_addr[2];
 } DisasContextBase;
 
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -XXX,XX +XXX,XX @@
 #include "tcg/tcg-op-common.h"
 #include "internal.h"
 
-static void gen_io_start(void)
+static void set_can_do_io(DisasContextBase *db, bool val)
 {
-    tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
-                   offsetof(ArchCPU, parent_obj.can_do_io) -
-                   offsetof(ArchCPU, env));
+    if (db->saved_can_do_io != val) {
+        db->saved_can_do_io = val;
+        tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
+                       offsetof(ArchCPU, parent_obj.can_do_io) -
+                       offsetof(ArchCPU, env));
+    }
 }
 
 bool translator_io_start(DisasContextBase *db)
@@ -XXX,XX +XXX,XX @@ bool translator_io_start(DisasContextBase *db)
     if (!(cflags & CF_USE_ICOUNT)) {
         return false;
     }
-    if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
-        /* Already started in translator_loop. */
-        return true;
-    }
 
-    gen_io_start();
+    set_can_do_io(db, true);
 
     /*
      * Ensure that this instruction will be the last in the TB.
@@ -XXX,XX +XXX,XX @@ bool translator_io_start(DisasContextBase *db)
     return true;
 }
 
-static TCGOp *gen_tb_start(uint32_t cflags)
+static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
 {
     TCGv_i32 count = NULL;
     TCGOp *icount_start_insn = NULL;
@@ -XXX,XX +XXX,XX @@ static TCGOp *gen_tb_start(uint32_t cflags)
         * cpu->can_do_io is cleared automatically here at the beginning of
         * each translation block.  The cost is minimal and only paid for
         * -icount, plus it would be very easy to forget doing it in the
-        * translator.  Doing it here means we don't need a gen_io_end() to
-        * go with gen_io_start().
+        * translator.
         */
-        tcg_gen_st_i32(tcg_constant_i32(0), cpu_env,
-                       offsetof(ArchCPU, parent_obj.can_do_io) -
-                       offsetof(ArchCPU, env));
+        set_can_do_io(db, false);
     }
 
     return icount_start_insn;
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
     db->num_insns = 0;
     db->max_insns = *max_insns;
     db->singlestep_enabled = cflags & CF_SINGLE_STEP;
+    db->saved_can_do_io = -1;
     db->host_addr[0] = host_pc;
     db->host_addr[1] = NULL;
 
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
     tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
 
     /* Start translating.  */
-    icount_start_insn = gen_tb_start(cflags);
+    icount_start_insn = gen_tb_start(db, cflags);
     ops->tb_start(db, cpu);
     tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
 
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
            the next instruction. */
         if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
             /* Accept I/O on the last instruction.  */
-            gen_io_start();
+            set_can_do_io(db, true);
         }
         ops->translate_insn(db, cpu);
 
-- 
2.34.1
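
The caching scheme is small enough to restate standalone. A compilable
toy model (names deliberately mirror the patch, but this is a sketch,
not QEMU code; emit_store stands in for the tcg_gen_st_i32 call):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* saved is -1 while the runtime value is unknown (e.g. at TB
     * entry), else the last value for which a store was emitted. */
    struct ctx {
        int8_t saved;
    };

    static void emit_store(bool val)
    {
        printf("store can_do_io = %d\n", val);
    }

    static void set_flag(struct ctx *c, bool val)
    {
        if (c->saved != val) {   /* elide stores that change nothing */
            c->saved = val;
            emit_store(val);
        }
    }
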

accel/tcg: Improve setting of can_do_io at start of TB

Initialize can_do_io to true if the TB has CF_LAST_IO
and will consist of a single instruction.  This avoids a
set to 0 followed immediately by a set to 1.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/translator.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -XXX,XX +XXX,XX @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
                          offsetof(ArchCPU, neg.icount_decr.u16.low) -
                          offsetof(ArchCPU, env));
         /*
-         * cpu->can_do_io is cleared automatically here at the beginning of
+         * cpu->can_do_io is set automatically here at the beginning of
          * each translation block.  The cost is minimal and only paid for
          * -icount, plus it would be very easy to forget doing it in the
          * translator.
          */
-        set_can_do_io(db, false);
+        set_can_do_io(db, db->max_insns == 1 && (cflags & CF_LAST_IO));
     }
 
     return icount_start_insn;
-- 
2.34.1
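
Concretely, for a one-instruction TB carrying CF_LAST_IO, this patch plus
the previous one collapse two stores into one. Reusing the toy struct ctx
and set_flag() sketch from the note above (illustrative only):

    /* old: gen_tb_start stored 0, then the last-insn path stored 1 */
    /* new: */
    struct ctx c = { .saved = -1 };
    set_flag(&c, true);  /* gen_tb_start: max_insns == 1 && CF_LAST_IO */
    set_flag(&c, true);  /* last-insn path: no store, saved is already 1 */
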

accel/tcg: Always set CF_LAST_IO with CF_NOIRQ

Without this we can see loops through cpu_io_recompile,
in which the cpu makes no progress.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c | 2 +-
 accel/tcg/tb-maint.c | 6 ++++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
     if (replay_has_exception()
         && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
         /* Execute just one insn to trigger exception pending in the log */
         cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
-                              | CF_NOIRQ | 1;
+                              | CF_LAST_IO | CF_NOIRQ | 1;
     }
 #endif
     return false;
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
     if (current_tb_modified) {
         /* Force execution of one insn next time.  */
         CPUState *cpu = current_cpu;
-        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+        cpu->cflags_next_tb =
+            1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu);
         return true;
     }
     return false;
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
     if (current_tb_modified) {
         page_collection_unlock(pages);
         /* Force execution of one insn next time.  */
-        current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+        current_cpu->cflags_next_tb =
+            1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu);
         mmap_unlock();
         cpu_loop_exit_noexc(current_cpu);
     }
-- 
2.34.1
154 | 51 | diff view generated by jsdifflib |
1 | For some vector operations, "1D" is not a valid type, and there | 1 | Require i/o as the last insn of a TranslationBlock always, |
---|---|---|---|
2 | are separate instructions for the 64-bit scalar operation. | 2 | not only with icount. This is required for i/o that alters |
3 | the address space, such as a pci config space write. | ||
3 | 4 | ||
4 | Tested-by: Stefan Weil <sw@weilnetz.de> | 5 | Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1866 |
5 | Buglink: https://bugs.launchpad.net/qemu/+bug/1916112 | 6 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
6 | Fixes: 14e4c1e2355 ("tcg/aarch64: Add vector operations") | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 8 | --- |
9 | tcg/aarch64/tcg-target.c.inc | 211 ++++++++++++++++++++++++++++++----- | 9 | accel/tcg/translator.c | 20 +++++++------------- |
10 | 1 file changed, 181 insertions(+), 30 deletions(-) | 10 | target/mips/tcg/translate.c | 1 - |
11 | 2 files changed, 7 insertions(+), 14 deletions(-) | ||
11 | 12 | ||
12 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | 13 | diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c |
13 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tcg/aarch64/tcg-target.c.inc | 15 | --- a/accel/tcg/translator.c |
15 | +++ b/tcg/aarch64/tcg-target.c.inc | 16 | +++ b/accel/tcg/translator.c |
16 | @@ -XXX,XX +XXX,XX @@ typedef enum { | 17 | @@ -XXX,XX +XXX,XX @@ static void set_can_do_io(DisasContextBase *db, bool val) |
17 | I3606_BIC = 0x2f001400, | 18 | |
18 | I3606_ORR = 0x0f001400, | 19 | bool translator_io_start(DisasContextBase *db) |
19 | 20 | { | |
20 | + /* AdvSIMD scalar shift by immediate */ | 21 | - uint32_t cflags = tb_cflags(db->tb); |
21 | + I3609_SSHR = 0x5f000400, | 22 | - |
22 | + I3609_SSRA = 0x5f001400, | 23 | - if (!(cflags & CF_USE_ICOUNT)) { |
23 | + I3609_SHL = 0x5f005400, | 24 | - return false; |
24 | + I3609_USHR = 0x7f000400, | 25 | - } |
25 | + I3609_USRA = 0x7f001400, | 26 | - |
26 | + I3609_SLI = 0x7f005400, | 27 | set_can_do_io(db, true); |
28 | |||
29 | /* | ||
30 | @@ -XXX,XX +XXX,XX @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags) | ||
31 | tcg_gen_st16_i32(count, cpu_env, | ||
32 | offsetof(ArchCPU, neg.icount_decr.u16.low) - | ||
33 | offsetof(ArchCPU, env)); | ||
34 | - /* | ||
35 | - * cpu->can_do_io is set automatically here at the beginning of | ||
36 | - * each translation block. The cost is minimal and only paid for | ||
37 | - * -icount, plus it would be very easy to forget doing it in the | ||
38 | - * translator. | ||
39 | - */ | ||
40 | - set_can_do_io(db, db->max_insns == 1 && (cflags & CF_LAST_IO)); | ||
41 | } | ||
42 | |||
43 | + /* | ||
44 | + * cpu->can_do_io is set automatically here at the beginning of | ||
45 | + * each translation block. The cost is minimal, plus it would be | ||
46 | + * very easy to forget doing it in the translator. | ||
47 | + */ | ||
48 | + set_can_do_io(db, db->max_insns == 1 && (cflags & CF_LAST_IO)); | ||
27 | + | 49 | + |
28 | + /* AdvSIMD scalar three same */ | 50 | return icount_start_insn; |
29 | + I3611_SQADD = 0x5e200c00, | ||
30 | + I3611_SQSUB = 0x5e202c00, | ||
31 | + I3611_CMGT = 0x5e203400, | ||
32 | + I3611_CMGE = 0x5e203c00, | ||
33 | + I3611_SSHL = 0x5e204400, | ||
34 | + I3611_ADD = 0x5e208400, | ||
35 | + I3611_CMTST = 0x5e208c00, | ||
36 | + I3611_UQADD = 0x7e200c00, | ||
37 | + I3611_UQSUB = 0x7e202c00, | ||
38 | + I3611_CMHI = 0x7e203400, | ||
39 | + I3611_CMHS = 0x7e203c00, | ||
40 | + I3611_USHL = 0x7e204400, | ||
41 | + I3611_SUB = 0x7e208400, | ||
42 | + I3611_CMEQ = 0x7e208c00, | ||
43 | + | ||
44 | + /* AdvSIMD scalar two-reg misc */ | ||
45 | + I3612_CMGT0 = 0x5e208800, | ||
46 | + I3612_CMEQ0 = 0x5e209800, | ||
47 | + I3612_CMLT0 = 0x5e20a800, | ||
48 | + I3612_ABS = 0x5e20b800, | ||
49 | + I3612_CMGE0 = 0x7e208800, | ||
50 | + I3612_CMLE0 = 0x7e209800, | ||
51 | + I3612_NEG = 0x7e20b800, | ||
52 | + | ||
53 | /* AdvSIMD shift by immediate */ | ||
54 | I3614_SSHR = 0x0f000400, | ||
55 | I3614_SSRA = 0x0f001400, | ||
56 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q, | ||
57 | | (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5); | ||
58 | } | 51 | } |
59 | 52 | ||
60 | +static void tcg_out_insn_3609(TCGContext *s, AArch64Insn insn, | 53 | diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c |
61 | + TCGReg rd, TCGReg rn, unsigned immhb) | 54 | index XXXXXXX..XXXXXXX 100644 |
62 | +{ | 55 | --- a/target/mips/tcg/translate.c |
63 | + tcg_out32(s, insn | immhb << 16 | (rn & 0x1f) << 5 | (rd & 0x1f)); | 56 | +++ b/target/mips/tcg/translate.c |
64 | +} | 57 | @@ -XXX,XX +XXX,XX @@ static void gen_branch(DisasContext *ctx, int insn_bytes) |
65 | + | 58 | /* Branches completion */ |
66 | +static void tcg_out_insn_3611(TCGContext *s, AArch64Insn insn, | 59 | clear_branch_hflags(ctx); |
67 | + unsigned size, TCGReg rd, TCGReg rn, TCGReg rm) | 60 | ctx->base.is_jmp = DISAS_NORETURN; |
68 | +{ | 61 | - /* FIXME: Need to clear can_do_io. */ |
69 | + tcg_out32(s, insn | (size << 22) | (rm & 0x1f) << 16 | 62 | switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) { |
70 | + | (rn & 0x1f) << 5 | (rd & 0x1f)); | 63 | case MIPS_HFLAG_FBNSLOT: |
71 | +} | 64 | gen_goto_tb(ctx, 0, ctx->base.pc_next + insn_bytes); |
72 | + | ||
73 | +static void tcg_out_insn_3612(TCGContext *s, AArch64Insn insn, | ||
74 | + unsigned size, TCGReg rd, TCGReg rn) | ||
75 | +{ | ||
76 | + tcg_out32(s, insn | (size << 22) | (rn & 0x1f) << 5 | (rd & 0x1f)); | ||
77 | +} | ||
78 | + | ||
79 | static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q, | ||
80 | TCGReg rd, TCGReg rn, unsigned immhb) | ||
81 | { | ||
82 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
83 | unsigned vecl, unsigned vece, | ||
84 | const TCGArg *args, const int *const_args) | ||
85 | { | ||
86 | - static const AArch64Insn cmp_insn[16] = { | ||
87 | + static const AArch64Insn cmp_vec_insn[16] = { | ||
88 | [TCG_COND_EQ] = I3616_CMEQ, | ||
89 | [TCG_COND_GT] = I3616_CMGT, | ||
90 | [TCG_COND_GE] = I3616_CMGE, | ||
91 | [TCG_COND_GTU] = I3616_CMHI, | ||
92 | [TCG_COND_GEU] = I3616_CMHS, | ||
93 | }; | ||
94 | - static const AArch64Insn cmp0_insn[16] = { | ||
95 | + static const AArch64Insn cmp_scalar_insn[16] = { | ||
96 | + [TCG_COND_EQ] = I3611_CMEQ, | ||
97 | + [TCG_COND_GT] = I3611_CMGT, | ||
98 | + [TCG_COND_GE] = I3611_CMGE, | ||
99 | + [TCG_COND_GTU] = I3611_CMHI, | ||
100 | + [TCG_COND_GEU] = I3611_CMHS, | ||
101 | + }; | ||
102 | + static const AArch64Insn cmp0_vec_insn[16] = { | ||
103 | [TCG_COND_EQ] = I3617_CMEQ0, | ||
104 | [TCG_COND_GT] = I3617_CMGT0, | ||
105 | [TCG_COND_GE] = I3617_CMGE0, | ||
106 | [TCG_COND_LT] = I3617_CMLT0, | ||
107 | [TCG_COND_LE] = I3617_CMLE0, | ||
108 | }; | ||
109 | + static const AArch64Insn cmp0_scalar_insn[16] = { | ||
110 | + [TCG_COND_EQ] = I3612_CMEQ0, | ||
111 | + [TCG_COND_GT] = I3612_CMGT0, | ||
112 | + [TCG_COND_GE] = I3612_CMGE0, | ||
113 | + [TCG_COND_LT] = I3612_CMLT0, | ||
114 | + [TCG_COND_LE] = I3612_CMLE0, | ||
115 | + }; | ||
116 | |||
117 | TCGType type = vecl + TCG_TYPE_V64; | ||
118 | unsigned is_q = vecl; | ||
119 | + bool is_scalar = !is_q && vece == MO_64; | ||
120 | TCGArg a0, a1, a2, a3; | ||
121 | int cmode, imm8; | ||
122 | |||
123 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
124 | tcg_out_dupm_vec(s, type, vece, a0, a1, a2); | ||
125 | break; | ||
126 | case INDEX_op_add_vec: | ||
127 | - tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2); | ||
128 | + if (is_scalar) { | ||
129 | + tcg_out_insn(s, 3611, ADD, vece, a0, a1, a2); | ||
130 | + } else { | ||
131 | + tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2); | ||
132 | + } | ||
133 | break; | ||
134 | case INDEX_op_sub_vec: | ||
135 | - tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2); | ||
136 | + if (is_scalar) { | ||
137 | + tcg_out_insn(s, 3611, SUB, vece, a0, a1, a2); | ||
138 | + } else { | ||
139 | + tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2); | ||
140 | + } | ||
141 | break; | ||
142 | case INDEX_op_mul_vec: | ||
143 | tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2); | ||
144 | break; | ||
145 | case INDEX_op_neg_vec: | ||
146 | - tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1); | ||
147 | + if (is_scalar) { | ||
148 | + tcg_out_insn(s, 3612, NEG, vece, a0, a1); | ||
149 | + } else { | ||
150 | + tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1); | ||
151 | + } | ||
152 | break; | ||
153 | case INDEX_op_abs_vec: | ||
154 | - tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1); | ||
155 | + if (is_scalar) { | ||
156 | + tcg_out_insn(s, 3612, ABS, vece, a0, a1); | ||
157 | + } else { | ||
158 | + tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1); | ||
159 | + } | ||
160 | break; | ||
161 | case INDEX_op_and_vec: | ||
162 | if (const_args[2]) { | ||
163 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
164 | tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2); | ||
165 | break; | ||
166 | case INDEX_op_ssadd_vec: | ||
167 | - tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2); | ||
168 | + if (is_scalar) { | ||
169 | + tcg_out_insn(s, 3611, SQADD, vece, a0, a1, a2); | ||
170 | + } else { | ||
171 | + tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2); | ||
172 | + } | ||
173 | break; | ||
174 | case INDEX_op_sssub_vec: | ||
175 | - tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2); | ||
176 | + if (is_scalar) { | ||
177 | + tcg_out_insn(s, 3611, SQSUB, vece, a0, a1, a2); | ||
178 | + } else { | ||
179 | + tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2); | ||
180 | + } | ||
181 | break; | ||
182 | case INDEX_op_usadd_vec: | ||
183 | - tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2); | ||
184 | + if (is_scalar) { | ||
185 | + tcg_out_insn(s, 3611, UQADD, vece, a0, a1, a2); | ||
186 | + } else { | ||
187 | + tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2); | ||
188 | + } | ||
189 | break; | ||
190 | case INDEX_op_ussub_vec: | ||
191 | - tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2); | ||
192 | + if (is_scalar) { | ||
193 | + tcg_out_insn(s, 3611, UQSUB, vece, a0, a1, a2); | ||
194 | + } else { | ||
195 | + tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2); | ||
196 | + } | ||
197 | break; | ||
198 | case INDEX_op_smax_vec: | ||
199 | tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2); | ||
200 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
201 | tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1); | ||
202 | break; | ||
203 | case INDEX_op_shli_vec: | ||
204 | - tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece)); | ||
205 | + if (is_scalar) { | ||
206 | + tcg_out_insn(s, 3609, SHL, a0, a1, a2 + (8 << vece)); | ||
207 | + } else { | ||
208 | + tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece)); | ||
209 | + } | ||
210 | break; | ||
211 | case INDEX_op_shri_vec: | ||
212 | - tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2); | ||
213 | + if (is_scalar) { | ||
214 | + tcg_out_insn(s, 3609, USHR, a0, a1, (16 << vece) - a2); | ||
215 | + } else { | ||
216 | + tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2); | ||
217 | + } | ||
218 | break; | ||
219 | case INDEX_op_sari_vec: | ||
220 | - tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2); | ||
221 | + if (is_scalar) { | ||
222 | + tcg_out_insn(s, 3609, SSHR, a0, a1, (16 << vece) - a2); | ||
223 | + } else { | ||
224 | + tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2); | ||
225 | + } | ||
226 | break; | ||
227 | case INDEX_op_aa64_sli_vec: | ||
228 | - tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece)); | ||
229 | + if (is_scalar) { | ||
230 | + tcg_out_insn(s, 3609, SLI, a0, a2, args[3] + (8 << vece)); | ||
231 | + } else { | ||
232 | + tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece)); | ||
233 | + } | ||
234 | break; | ||
235 | case INDEX_op_shlv_vec: | ||
236 | - tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2); | ||
237 | + if (is_scalar) { | ||
238 | + tcg_out_insn(s, 3611, USHL, vece, a0, a1, a2); | ||
239 | + } else { | ||
240 | + tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2); | ||
241 | + } | ||
242 | break; | ||
243 | case INDEX_op_aa64_sshl_vec: | ||
244 | - tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2); | ||
245 | + if (is_scalar) { | ||
246 | + tcg_out_insn(s, 3611, SSHL, vece, a0, a1, a2); | ||
247 | + } else { | ||
248 | + tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2); | ||
249 | + } | ||
250 | break; | ||
251 | case INDEX_op_cmp_vec: | ||
252 | { | ||
253 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
254 | |||
255 | if (cond == TCG_COND_NE) { | ||
256 | if (const_args[2]) { | ||
257 | - tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1); | ||
258 | + if (is_scalar) { | ||
259 | + tcg_out_insn(s, 3611, CMTST, vece, a0, a1, a1); | ||
260 | + } else { | ||
261 | + tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1); | ||
262 | + } | ||
263 | } else { | ||
264 | - tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2); | ||
265 | + if (is_scalar) { | ||
266 | + tcg_out_insn(s, 3611, CMEQ, vece, a0, a1, a2); | ||
267 | + } else { | ||
268 | + tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2); | ||
269 | + } | ||
270 | tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0); | ||
271 | } | ||
272 | } else { | ||
273 | if (const_args[2]) { | ||
274 | - insn = cmp0_insn[cond]; | ||
275 | - if (insn) { | ||
276 | - tcg_out_insn_3617(s, insn, is_q, vece, a0, a1); | ||
277 | - break; | ||
278 | + if (is_scalar) { | ||
279 | + insn = cmp0_scalar_insn[cond]; | ||
280 | + if (insn) { | ||
281 | + tcg_out_insn_3612(s, insn, vece, a0, a1); | ||
282 | + break; | ||
283 | + } | ||
284 | + } else { | ||
285 | + insn = cmp0_vec_insn[cond]; | ||
286 | + if (insn) { | ||
287 | + tcg_out_insn_3617(s, insn, is_q, vece, a0, a1); | ||
288 | + break; | ||
289 | + } | ||
290 | } | ||
291 | tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0); | ||
292 | a2 = TCG_VEC_TMP; | ||
293 | } | ||
294 | - insn = cmp_insn[cond]; | ||
295 | - if (insn == 0) { | ||
296 | - TCGArg t; | ||
297 | - t = a1, a1 = a2, a2 = t; | ||
298 | - cond = tcg_swap_cond(cond); | ||
299 | - insn = cmp_insn[cond]; | ||
300 | - tcg_debug_assert(insn != 0); | ||
301 | + if (is_scalar) { | ||
302 | + insn = cmp_scalar_insn[cond]; | ||
303 | + if (insn == 0) { | ||
304 | + TCGArg t; | ||
305 | + t = a1, a1 = a2, a2 = t; | ||
306 | + cond = tcg_swap_cond(cond); | ||
307 | + insn = cmp_scalar_insn[cond]; | ||
308 | + tcg_debug_assert(insn != 0); | ||
309 | + } | ||
310 | + tcg_out_insn_3611(s, insn, vece, a0, a1, a2); | ||
311 | + } else { | ||
312 | + insn = cmp_vec_insn[cond]; | ||
313 | + if (insn == 0) { | ||
314 | + TCGArg t; | ||
315 | + t = a1, a1 = a2, a2 = t; | ||
316 | + cond = tcg_swap_cond(cond); | ||
317 | + insn = cmp_vec_insn[cond]; | ||
318 | + tcg_debug_assert(insn != 0); | ||
319 | + } | ||
320 | + tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2); | ||
321 | } | ||
322 | - tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2); | ||
323 | } | ||
324 | } | ||
325 | break; | ||
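
The hunks above hinge on the new is_scalar predicate: a 64-bit operation on a
64-bit (non-q) register can use the AdvSIMD scalar encodings (the 3611/3612
groups added here, e.g. "add d0, d1, d2") instead of the vector ones
(3616/3617, e.g. "add v0.2d, v1.2d, v2.2d"). A standalone sketch of just that
predicate; the MemOp size codes are stubbed in for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    enum { MO_8, MO_16, MO_32, MO_64 };   /* stand-ins for QEMU's MemOp sizes */

    /* The rule from the patch: scalar forms apply only to a 64-bit
     * element held in a 64-bit (non-q) vector register. */
    static bool is_scalar_op(unsigned is_q, unsigned vece)
    {
        return !is_q && vece == MO_64;
    }

    int main(void)
    {
        printf("%d %d\n", is_scalar_op(0, MO_64), is_scalar_op(1, MO_64));
        return 0;                         /* prints "1 0" */
    }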
326 | -- | 65 | -- |
327 | 2.25.1 | 66 | 2.34.1 |
328 | 67 | ||
Deleted patch | |||
---|---|---|---|
1 | Use the provided cpu_ldst.h interfaces. This fixes the build against
2 | the unconverted uses of g2h(), adds missed memory trace events, | ||
3 | and correctly recognizes when a SIGSEGV belongs to the guest via | ||
4 | set_helper_retaddr(). | ||
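
As a fragment sketch of the interface being adopted, assuming the
interpreter's env, taddr, oi and tb_ptr locals visible in the diff below:

    /* One guest byte load via cpu_ldst.h: the mmu index is unpacked
     * from the memop/index pair, and the bytecode pointer passed as the
     * return address ties a faulting access back to the guest, per the
     * set_helper_retaddr() note above. */
    uint8_t val = cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi),
                                     (uintptr_t)tb_ptr);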
5 | 1 | ||
6 | Fixes: 3e8f1628e864 | ||
7 | Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | tcg/tci.c | 73 +++++++++++++++++++++---------------------------------- | ||
11 | 1 file changed, 28 insertions(+), 45 deletions(-) | ||
12 | |||
13 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/tcg/tci.c | ||
16 | +++ b/tcg/tci.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) | ||
18 | return result; | ||
19 | } | ||
20 | |||
21 | -#ifdef CONFIG_SOFTMMU | ||
22 | -# define qemu_ld_ub \ | ||
23 | - helper_ret_ldub_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
24 | -# define qemu_ld_leuw \ | ||
25 | - helper_le_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
26 | -# define qemu_ld_leul \ | ||
27 | - helper_le_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
28 | -# define qemu_ld_leq \ | ||
29 | - helper_le_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
30 | -# define qemu_ld_beuw \ | ||
31 | - helper_be_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
32 | -# define qemu_ld_beul \ | ||
33 | - helper_be_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
34 | -# define qemu_ld_beq \ | ||
35 | - helper_be_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
36 | -# define qemu_st_b(X) \ | ||
37 | - helper_ret_stb_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
38 | -# define qemu_st_lew(X) \ | ||
39 | - helper_le_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
40 | -# define qemu_st_lel(X) \ | ||
41 | - helper_le_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
42 | -# define qemu_st_leq(X) \ | ||
43 | - helper_le_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
44 | -# define qemu_st_bew(X) \ | ||
45 | - helper_be_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
46 | -# define qemu_st_bel(X) \ | ||
47 | - helper_be_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
48 | -# define qemu_st_beq(X) \ | ||
49 | - helper_be_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
50 | -#else | ||
51 | -# define qemu_ld_ub ldub_p(g2h(taddr)) | ||
52 | -# define qemu_ld_leuw lduw_le_p(g2h(taddr)) | ||
53 | -# define qemu_ld_leul (uint32_t)ldl_le_p(g2h(taddr)) | ||
54 | -# define qemu_ld_leq ldq_le_p(g2h(taddr)) | ||
55 | -# define qemu_ld_beuw lduw_be_p(g2h(taddr)) | ||
56 | -# define qemu_ld_beul (uint32_t)ldl_be_p(g2h(taddr)) | ||
57 | -# define qemu_ld_beq ldq_be_p(g2h(taddr)) | ||
58 | -# define qemu_st_b(X) stb_p(g2h(taddr), X) | ||
59 | -# define qemu_st_lew(X) stw_le_p(g2h(taddr), X) | ||
60 | -# define qemu_st_lel(X) stl_le_p(g2h(taddr), X) | ||
61 | -# define qemu_st_leq(X) stq_le_p(g2h(taddr), X) | ||
62 | -# define qemu_st_bew(X) stw_be_p(g2h(taddr), X) | ||
63 | -# define qemu_st_bel(X) stl_be_p(g2h(taddr), X) | ||
64 | -# define qemu_st_beq(X) stq_be_p(g2h(taddr), X) | ||
65 | -#endif | ||
66 | +#define qemu_ld_ub \ | ||
67 | + cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
68 | +#define qemu_ld_leuw \ | ||
69 | + cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
70 | +#define qemu_ld_leul \ | ||
71 | + cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
72 | +#define qemu_ld_leq \ | ||
73 | + cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
74 | +#define qemu_ld_beuw \ | ||
75 | + cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
76 | +#define qemu_ld_beul \ | ||
77 | + cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
78 | +#define qemu_ld_beq \ | ||
79 | + cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
80 | +#define qemu_st_b(X) \ | ||
81 | + cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
82 | +#define qemu_st_lew(X) \ | ||
83 | + cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
84 | +#define qemu_st_lel(X) \ | ||
85 | + cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
86 | +#define qemu_st_leq(X) \ | ||
87 | + cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
88 | +#define qemu_st_bew(X) \ | ||
89 | + cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
90 | +#define qemu_st_bel(X) \ | ||
91 | + cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
92 | +#define qemu_st_beq(X) \ | ||
93 | + cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
94 | |||
95 | #if TCG_TARGET_REG_BITS == 64 | ||
96 | # define CASE_32_64(x) \ | ||
97 | -- | ||
98 | 2.25.1 | ||
99 | |||
Deleted patch | |||
---|---|---|---|
1 | Allow other places in tcg to restart with a smaller tb. | ||
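
A hypothetical call site, to illustrate the intended use (the overflow
predicate below is made up; the real check is the temp-count test in the
diff):

    /* Hypothetical caller: code running under the translation sigsetjmp
     * can now bail out and have the TB retried with fewer guest insns. */
    if (some_table_is_full(s)) {       /* made-up overflow condition */
        tcg_raise_tb_overflow(s);      /* siglongjmp; does not return */
    }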
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tcg.c | 9 +++++++-- | ||
7 | 1 file changed, 7 insertions(+), 2 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tcg.c | ||
12 | +++ b/tcg/tcg.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static void set_jmp_reset_offset(TCGContext *s, int which) | ||
14 | s->tb_jmp_reset_offset[which] = tcg_current_code_size(s); | ||
15 | } | ||
16 | |||
17 | +/* Signal overflow, starting over with fewer guest insns. */ | ||
18 | +static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s) | ||
19 | +{ | ||
20 | + siglongjmp(s->jmp_trans, -2); | ||
21 | +} | ||
22 | + | ||
23 | #define C_PFX1(P, A) P##A | ||
24 | #define C_PFX2(P, A, B) P##A##_##B | ||
25 | #define C_PFX3(P, A, B, C) P##A##_##B##_##C | ||
26 | @@ -XXX,XX +XXX,XX @@ static TCGTemp *tcg_temp_alloc(TCGContext *s) | ||
27 | int n = s->nb_temps++; | ||
28 | |||
29 | if (n >= TCG_MAX_TEMPS) { | ||
30 | - /* Signal overflow, starting over with fewer guest insns. */ | ||
31 | - siglongjmp(s->jmp_trans, -2); | ||
32 | + tcg_raise_tb_overflow(s); | ||
33 | } | ||
34 | return memset(&s->temps[n], 0, sizeof(TCGTemp)); | ||
35 | } | ||
36 | -- | ||
37 | 2.25.1 | ||
38 | |||
Deleted patch | |||
---|---|---|---|
1 | The use in tcg_tb_lookup is given an arbitrary pc that comes from the pc
2 | of a signal handler. Do not assert that the pointer is already within | ||
3 | the code gen buffer at all, much less the writable mirror of it. | ||
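
The caller-side effect, as a sketch (fault_pc is a hypothetical host pc
taken from a signal frame):

    /* With this change a signal handler may probe any host pc; a NULL
     * result means "not generated code" rather than a failed assert. */
    TranslationBlock *tb = tcg_tb_lookup(fault_pc);
    if (tb == NULL) {
        /* The fault did not occur inside a translated block. */
    }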
4 | 1 | ||
5 | Fixes: db0c51a3803 | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | tcg/tcg.c | 20 ++++++++++++++++++-- | ||
9 | 1 file changed, 18 insertions(+), 2 deletions(-) | ||
10 | |||
11 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/tcg/tcg.c | ||
14 | +++ b/tcg/tcg.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static void tcg_region_trees_init(void) | ||
16 | } | ||
17 | } | ||
18 | |||
19 | -static struct tcg_region_tree *tc_ptr_to_region_tree(const void *cp) | ||
20 | +static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p) | ||
21 | { | ||
22 | - void *p = tcg_splitwx_to_rw(cp); | ||
23 | size_t region_idx; | ||
24 | |||
25 | + /* | ||
26 | + * Like tcg_splitwx_to_rw, with no assert. The pc may come from | ||
27 | + * a signal handler over which the caller has no control. | ||
28 | + */ | ||
29 | + if (!in_code_gen_buffer(p)) { | ||
30 | + p -= tcg_splitwx_diff; | ||
31 | + if (!in_code_gen_buffer(p)) { | ||
32 | + return NULL; | ||
33 | + } | ||
34 | + } | ||
35 | + | ||
36 | if (p < region.start_aligned) { | ||
37 | region_idx = 0; | ||
38 | } else { | ||
39 | @@ -XXX,XX +XXX,XX @@ void tcg_tb_insert(TranslationBlock *tb) | ||
40 | { | ||
41 | struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr); | ||
42 | |||
43 | + g_assert(rt != NULL); | ||
44 | qemu_mutex_lock(&rt->lock); | ||
45 | g_tree_insert(rt->tree, &tb->tc, tb); | ||
46 | qemu_mutex_unlock(&rt->lock); | ||
47 | @@ -XXX,XX +XXX,XX @@ void tcg_tb_remove(TranslationBlock *tb) | ||
48 | { | ||
49 | struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr); | ||
50 | |||
51 | + g_assert(rt != NULL); | ||
52 | qemu_mutex_lock(&rt->lock); | ||
53 | g_tree_remove(rt->tree, &tb->tc); | ||
54 | qemu_mutex_unlock(&rt->lock); | ||
55 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr) | ||
56 | TranslationBlock *tb; | ||
57 | struct tb_tc s = { .ptr = (void *)tc_ptr }; | ||
58 | |||
59 | + if (rt == NULL) { | ||
60 | + return NULL; | ||
61 | + } | ||
62 | + | ||
63 | qemu_mutex_lock(&rt->lock); | ||
64 | tb = g_tree_lookup(rt->tree, &s); | ||
65 | qemu_mutex_unlock(&rt->lock); | ||
66 | -- | ||
67 | 2.25.1 | ||
68 | |||
Deleted patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
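
For readers new to the idiom, a standalone sketch of how CASE_32_64 (defined
in the hunk below) folds the two case labels into one on a 64-bit host; the
enum, the glue() stand-in and classify() are illustrative only:

    #include <stdio.h>

    #define glue_(a, b) a##b
    #define glue(a, b)  glue_(a, b)     /* stand-in for qemu/compiler.h */

    enum { INDEX_op_add_i32, INDEX_op_add_i64,
           INDEX_op_sub_i32, INDEX_op_sub_i64 };

    #define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):

    static const char *classify(int opc)
    {
        switch (opc) {
        CASE_32_64(add)                 /* add_i64 and add_i32 land here */
            return "add";
        CASE_32_64(sub)
            return "sub";
        default:
            return "other";
        }
    }

    int main(void)
    {
        printf("%s %s\n",
               classify(INDEX_op_add_i32), classify(INDEX_op_sub_i64));
        return 0;                       /* prints "add sub" */
    }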
3 | 1 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 1/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-2-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 85 +++++++++++++++++----------------------- | ||
13 | 1 file changed, 37 insertions(+), 48 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg) | ||
20 | old_code_ptr[1] = s->code_ptr - old_code_ptr; | ||
21 | } | ||
22 | |||
23 | +#if TCG_TARGET_REG_BITS == 64 | ||
24 | +# define CASE_32_64(x) \ | ||
25 | + case glue(glue(INDEX_op_, x), _i64): \ | ||
26 | + case glue(glue(INDEX_op_, x), _i32): | ||
27 | +# define CASE_64(x) \ | ||
28 | + case glue(glue(INDEX_op_, x), _i64): | ||
29 | +#else | ||
30 | +# define CASE_32_64(x) \ | ||
31 | + case glue(glue(INDEX_op_, x), _i32): | ||
32 | +# define CASE_64(x) | ||
33 | +#endif | ||
34 | + | ||
35 | static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
36 | const int *const_args) | ||
37 | { | ||
38 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
39 | case INDEX_op_exit_tb: | ||
40 | tcg_out64(s, args[0]); | ||
41 | break; | ||
42 | + | ||
43 | case INDEX_op_goto_tb: | ||
44 | if (s->tb_jmp_insn_offset) { | ||
45 | /* Direct jump method. */ | ||
46 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
47 | tcg_debug_assert(args[2] == (int32_t)args[2]); | ||
48 | tcg_out32(s, args[2]); | ||
49 | break; | ||
50 | - case INDEX_op_add_i32: | ||
51 | - case INDEX_op_sub_i32: | ||
52 | - case INDEX_op_mul_i32: | ||
53 | - case INDEX_op_and_i32: | ||
54 | - case INDEX_op_andc_i32: /* Optional (TCG_TARGET_HAS_andc_i32). */ | ||
55 | - case INDEX_op_eqv_i32: /* Optional (TCG_TARGET_HAS_eqv_i32). */ | ||
56 | - case INDEX_op_nand_i32: /* Optional (TCG_TARGET_HAS_nand_i32). */ | ||
57 | - case INDEX_op_nor_i32: /* Optional (TCG_TARGET_HAS_nor_i32). */ | ||
58 | - case INDEX_op_or_i32: | ||
59 | - case INDEX_op_orc_i32: /* Optional (TCG_TARGET_HAS_orc_i32). */ | ||
60 | - case INDEX_op_xor_i32: | ||
61 | - case INDEX_op_shl_i32: | ||
62 | - case INDEX_op_shr_i32: | ||
63 | - case INDEX_op_sar_i32: | ||
64 | - case INDEX_op_rotl_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */ | ||
65 | - case INDEX_op_rotr_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */ | ||
66 | + | ||
67 | + CASE_32_64(add) | ||
68 | + CASE_32_64(sub) | ||
69 | + CASE_32_64(mul) | ||
70 | + CASE_32_64(and) | ||
71 | + CASE_32_64(or) | ||
72 | + CASE_32_64(xor) | ||
73 | + CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */ | ||
74 | + CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */ | ||
75 | + CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */ | ||
76 | + CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */ | ||
77 | + CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */ | ||
78 | + CASE_32_64(shl) | ||
79 | + CASE_32_64(shr) | ||
80 | + CASE_32_64(sar) | ||
81 | + CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */ | ||
82 | + CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */ | ||
83 | + CASE_32_64(div) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
84 | + CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
85 | + CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
86 | + CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
87 | tcg_out_r(s, args[0]); | ||
88 | tcg_out_r(s, args[1]); | ||
89 | tcg_out_r(s, args[2]); | ||
90 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
91 | break; | ||
92 | |||
93 | #if TCG_TARGET_REG_BITS == 64 | ||
94 | - case INDEX_op_add_i64: | ||
95 | - case INDEX_op_sub_i64: | ||
96 | - case INDEX_op_mul_i64: | ||
97 | - case INDEX_op_and_i64: | ||
98 | - case INDEX_op_andc_i64: /* Optional (TCG_TARGET_HAS_andc_i64). */ | ||
99 | - case INDEX_op_eqv_i64: /* Optional (TCG_TARGET_HAS_eqv_i64). */ | ||
100 | - case INDEX_op_nand_i64: /* Optional (TCG_TARGET_HAS_nand_i64). */ | ||
101 | - case INDEX_op_nor_i64: /* Optional (TCG_TARGET_HAS_nor_i64). */ | ||
102 | - case INDEX_op_or_i64: | ||
103 | - case INDEX_op_orc_i64: /* Optional (TCG_TARGET_HAS_orc_i64). */ | ||
104 | - case INDEX_op_xor_i64: | ||
105 | - case INDEX_op_shl_i64: | ||
106 | - case INDEX_op_shr_i64: | ||
107 | - case INDEX_op_sar_i64: | ||
108 | - case INDEX_op_rotl_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */ | ||
109 | - case INDEX_op_rotr_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */ | ||
110 | - case INDEX_op_div_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
111 | - case INDEX_op_divu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
112 | - case INDEX_op_rem_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
113 | - case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
114 | - tcg_out_r(s, args[0]); | ||
115 | - tcg_out_r(s, args[1]); | ||
116 | - tcg_out_r(s, args[2]); | ||
117 | - break; | ||
118 | case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */ | ||
119 | tcg_out_r(s, args[0]); | ||
120 | tcg_out_r(s, args[1]); | ||
121 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
122 | tcg_out_r(s, args[0]); | ||
123 | tcg_out_r(s, args[1]); | ||
124 | break; | ||
125 | - case INDEX_op_div_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
126 | - case INDEX_op_divu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
127 | - case INDEX_op_rem_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
128 | - case INDEX_op_remu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
129 | - tcg_out_r(s, args[0]); | ||
130 | - tcg_out_r(s, args[1]); | ||
131 | - tcg_out_r(s, args[2]); | ||
132 | - break; | ||
133 | + | ||
134 | #if TCG_TARGET_REG_BITS == 32 | ||
135 | case INDEX_op_add2_i32: | ||
136 | case INDEX_op_sub2_i32: | ||
137 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
138 | } | ||
139 | tcg_out_i(s, *args++); | ||
140 | break; | ||
141 | + | ||
142 | case INDEX_op_mb: | ||
143 | break; | ||
144 | + | ||
145 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | ||
146 | case INDEX_op_mov_i64: | ||
147 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | ||
148 | -- | ||
149 | 2.25.1 | ||
150 | |||
Deleted patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
3 | 1 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 2/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-3-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 35 ++++++++++++++--------------------- | ||
13 | 1 file changed, 14 insertions(+), 21 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | tcg_out8(s, args[2]); /* condition */ | ||
21 | tci_out_label(s, arg_label(args[3])); | ||
22 | break; | ||
23 | - case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */ | ||
24 | - case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */ | ||
25 | - case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */ | ||
26 | - case INDEX_op_not_i64: /* Optional (TCG_TARGET_HAS_not_i64). */ | ||
27 | - case INDEX_op_neg_i64: /* Optional (TCG_TARGET_HAS_neg_i64). */ | ||
28 | - case INDEX_op_ext8s_i64: /* Optional (TCG_TARGET_HAS_ext8s_i64). */ | ||
29 | - case INDEX_op_ext8u_i64: /* Optional (TCG_TARGET_HAS_ext8u_i64). */ | ||
30 | - case INDEX_op_ext16s_i64: /* Optional (TCG_TARGET_HAS_ext16s_i64). */ | ||
31 | - case INDEX_op_ext16u_i64: /* Optional (TCG_TARGET_HAS_ext16u_i64). */ | ||
32 | - case INDEX_op_ext32s_i64: /* Optional (TCG_TARGET_HAS_ext32s_i64). */ | ||
33 | - case INDEX_op_ext32u_i64: /* Optional (TCG_TARGET_HAS_ext32u_i64). */ | ||
34 | - case INDEX_op_ext_i32_i64: | ||
35 | - case INDEX_op_extu_i32_i64: | ||
36 | #endif /* TCG_TARGET_REG_BITS == 64 */ | ||
37 | - case INDEX_op_neg_i32: /* Optional (TCG_TARGET_HAS_neg_i32). */ | ||
38 | - case INDEX_op_not_i32: /* Optional (TCG_TARGET_HAS_not_i32). */ | ||
39 | - case INDEX_op_ext8s_i32: /* Optional (TCG_TARGET_HAS_ext8s_i32). */ | ||
40 | - case INDEX_op_ext16s_i32: /* Optional (TCG_TARGET_HAS_ext16s_i32). */ | ||
41 | - case INDEX_op_ext8u_i32: /* Optional (TCG_TARGET_HAS_ext8u_i32). */ | ||
42 | - case INDEX_op_ext16u_i32: /* Optional (TCG_TARGET_HAS_ext16u_i32). */ | ||
43 | - case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */ | ||
44 | - case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */ | ||
45 | + | ||
46 | + CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */ | ||
47 | + CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */ | ||
48 | + CASE_32_64(ext8s) /* Optional (TCG_TARGET_HAS_ext8s_*). */ | ||
49 | + CASE_32_64(ext8u) /* Optional (TCG_TARGET_HAS_ext8u_*). */ | ||
50 | + CASE_32_64(ext16s) /* Optional (TCG_TARGET_HAS_ext16s_*). */ | ||
51 | + CASE_32_64(ext16u) /* Optional (TCG_TARGET_HAS_ext16u_*). */ | ||
52 | + CASE_64(ext32s) /* Optional (TCG_TARGET_HAS_ext32s_i64). */ | ||
53 | + CASE_64(ext32u) /* Optional (TCG_TARGET_HAS_ext32u_i64). */ | ||
54 | + CASE_64(ext_i32) | ||
55 | + CASE_64(extu_i32) | ||
56 | + CASE_32_64(bswap16) /* Optional (TCG_TARGET_HAS_bswap16_*). */ | ||
57 | + CASE_32_64(bswap32) /* Optional (TCG_TARGET_HAS_bswap32_*). */ | ||
58 | + CASE_64(bswap64) /* Optional (TCG_TARGET_HAS_bswap64_i64). */ | ||
59 | tcg_out_r(s, args[0]); | ||
60 | tcg_out_r(s, args[1]); | ||
61 | break; | ||
62 | -- | ||
63 | 2.25.1 | ||
64 | |||
Deleted patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
3 | 1 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 3/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-4-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 12 ++---------- | ||
13 | 1 file changed, 2 insertions(+), 10 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | tcg_out_r(s, args[1]); | ||
21 | tcg_out_r(s, args[2]); | ||
22 | break; | ||
23 | - case INDEX_op_deposit_i32: /* Optional (TCG_TARGET_HAS_deposit_i32). */ | ||
24 | + | ||
25 | + CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */ | ||
26 | tcg_out_r(s, args[0]); | ||
27 | tcg_out_r(s, args[1]); | ||
28 | tcg_out_r(s, args[2]); | ||
29 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
30 | break; | ||
31 | |||
32 | #if TCG_TARGET_REG_BITS == 64 | ||
33 | - case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */ | ||
34 | - tcg_out_r(s, args[0]); | ||
35 | - tcg_out_r(s, args[1]); | ||
36 | - tcg_out_r(s, args[2]); | ||
37 | - tcg_debug_assert(args[3] <= UINT8_MAX); | ||
38 | - tcg_out8(s, args[3]); | ||
39 | - tcg_debug_assert(args[4] <= UINT8_MAX); | ||
40 | - tcg_out8(s, args[4]); | ||
41 | - break; | ||
42 | case INDEX_op_brcond_i64: | ||
43 | tcg_out_r(s, args[0]); | ||
44 | tcg_out_r(s, args[1]); | ||
45 | -- | ||
46 | 2.25.1 | ||
47 | |||
Deleted patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
3 | 1 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 4/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-5-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 23 ++++++----------------- | ||
13 | 1 file changed, 6 insertions(+), 17 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | } | ||
21 | set_jmp_reset_offset(s, args[0]); | ||
22 | break; | ||
23 | + | ||
24 | case INDEX_op_br: | ||
25 | tci_out_label(s, arg_label(args[0])); | ||
26 | break; | ||
27 | - case INDEX_op_setcond_i32: | ||
28 | + | ||
29 | + CASE_32_64(setcond) | ||
30 | tcg_out_r(s, args[0]); | ||
31 | tcg_out_r(s, args[1]); | ||
32 | tcg_out_r(s, args[2]); | ||
33 | tcg_out8(s, args[3]); /* condition */ | ||
34 | break; | ||
35 | + | ||
36 | #if TCG_TARGET_REG_BITS == 32 | ||
37 | case INDEX_op_setcond2_i32: | ||
38 | /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */ | ||
39 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
40 | tcg_out_r(s, args[4]); | ||
41 | tcg_out8(s, args[5]); /* condition */ | ||
42 | break; | ||
43 | -#elif TCG_TARGET_REG_BITS == 64 | ||
44 | - case INDEX_op_setcond_i64: | ||
45 | - tcg_out_r(s, args[0]); | ||
46 | - tcg_out_r(s, args[1]); | ||
47 | - tcg_out_r(s, args[2]); | ||
48 | - tcg_out8(s, args[3]); /* condition */ | ||
49 | - break; | ||
50 | #endif | ||
51 | case INDEX_op_ld8u_i32: | ||
52 | case INDEX_op_ld8s_i32: | ||
53 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
54 | tcg_out8(s, args[4]); | ||
55 | break; | ||
56 | |||
57 | -#if TCG_TARGET_REG_BITS == 64 | ||
58 | - case INDEX_op_brcond_i64: | ||
59 | + CASE_32_64(brcond) | ||
60 | tcg_out_r(s, args[0]); | ||
61 | tcg_out_r(s, args[1]); | ||
62 | tcg_out8(s, args[2]); /* condition */ | ||
63 | tci_out_label(s, arg_label(args[3])); | ||
64 | break; | ||
65 | -#endif /* TCG_TARGET_REG_BITS == 64 */ | ||
66 | |||
67 | CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */ | ||
68 | CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */ | ||
69 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
70 | tcg_out_r(s, args[3]); | ||
71 | break; | ||
72 | #endif | ||
73 | - case INDEX_op_brcond_i32: | ||
74 | - tcg_out_r(s, args[0]); | ||
75 | - tcg_out_r(s, args[1]); | ||
76 | - tcg_out8(s, args[2]); /* condition */ | ||
77 | - tci_out_label(s, arg_label(args[3])); | ||
78 | - break; | ||
79 | + | ||
80 | case INDEX_op_qemu_ld_i32: | ||
81 | tcg_out_r(s, *args++); | ||
82 | tcg_out_r(s, *args++); | ||
83 | -- | ||
84 | 2.25.1 | ||
85 | |||
Deleted patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
3 | 1 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 5/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-6-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 49 ++++++++++++---------------------------- | ||
13 | 1 file changed, 14 insertions(+), 35 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | tcg_out8(s, args[5]); /* condition */ | ||
21 | break; | ||
22 | #endif | ||
23 | - case INDEX_op_ld8u_i32: | ||
24 | - case INDEX_op_ld8s_i32: | ||
25 | - case INDEX_op_ld16u_i32: | ||
26 | - case INDEX_op_ld16s_i32: | ||
27 | + | ||
28 | + CASE_32_64(ld8u) | ||
29 | + CASE_32_64(ld8s) | ||
30 | + CASE_32_64(ld16u) | ||
31 | + CASE_32_64(ld16s) | ||
32 | case INDEX_op_ld_i32: | ||
33 | - case INDEX_op_st8_i32: | ||
34 | - case INDEX_op_st16_i32: | ||
35 | + CASE_64(ld32u) | ||
36 | + CASE_64(ld32s) | ||
37 | + CASE_64(ld) | ||
38 | + CASE_32_64(st8) | ||
39 | + CASE_32_64(st16) | ||
40 | case INDEX_op_st_i32: | ||
41 | - case INDEX_op_ld8u_i64: | ||
42 | - case INDEX_op_ld8s_i64: | ||
43 | - case INDEX_op_ld16u_i64: | ||
44 | - case INDEX_op_ld16s_i64: | ||
45 | - case INDEX_op_ld32u_i64: | ||
46 | - case INDEX_op_ld32s_i64: | ||
47 | - case INDEX_op_ld_i64: | ||
48 | - case INDEX_op_st8_i64: | ||
49 | - case INDEX_op_st16_i64: | ||
50 | - case INDEX_op_st32_i64: | ||
51 | - case INDEX_op_st_i64: | ||
52 | + CASE_64(st32) | ||
53 | + CASE_64(st) | ||
54 | stack_bounds_check(args[1], args[2]); | ||
55 | tcg_out_r(s, args[0]); | ||
56 | tcg_out_r(s, args[1]); | ||
57 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
58 | #endif | ||
59 | |||
60 | case INDEX_op_qemu_ld_i32: | ||
61 | - tcg_out_r(s, *args++); | ||
62 | - tcg_out_r(s, *args++); | ||
63 | - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { | ||
64 | - tcg_out_r(s, *args++); | ||
65 | - } | ||
66 | - tcg_out_i(s, *args++); | ||
67 | - break; | ||
68 | - case INDEX_op_qemu_ld_i64: | ||
69 | - tcg_out_r(s, *args++); | ||
70 | - if (TCG_TARGET_REG_BITS == 32) { | ||
71 | - tcg_out_r(s, *args++); | ||
72 | - } | ||
73 | - tcg_out_r(s, *args++); | ||
74 | - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { | ||
75 | - tcg_out_r(s, *args++); | ||
76 | - } | ||
77 | - tcg_out_i(s, *args++); | ||
78 | - break; | ||
79 | case INDEX_op_qemu_st_i32: | ||
80 | tcg_out_r(s, *args++); | ||
81 | tcg_out_r(s, *args++); | ||
82 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
83 | } | ||
84 | tcg_out_i(s, *args++); | ||
85 | break; | ||
86 | + | ||
87 | + case INDEX_op_qemu_ld_i64: | ||
88 | case INDEX_op_qemu_st_i64: | ||
89 | tcg_out_r(s, *args++); | ||
90 | if (TCG_TARGET_REG_BITS == 32) { | ||
91 | -- | ||
92 | 2.25.1 | ||
93 | |||
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext8u opcodes, and allow truncation | ||
2 | to happen with the store for st8 opcodes. | ||
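
A standalone sketch of the two C properties this relies on (the value is
illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t reg = 0xffffff80;      /* full-width "register" value */
        uint8_t mem;

        /* ext8u: an explicit cast narrows at the point of use. */
        uintptr_t ext8u = (uint8_t)reg;  /* 0x80 */

        /* st8: an 8-bit store truncates by itself, no cast needed. */
        mem = reg;                       /* also 0x80 */

        printf("%#lx %#x\n", (unsigned long)ext8u, mem);
        return 0;
    }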
3 | 1 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | tcg/tci.c | 23 +++++------------------ | ||
8 | 1 file changed, 5 insertions(+), 18 deletions(-) | ||
9 | |||
10 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/tcg/tci.c | ||
13 | +++ b/tcg/tci.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
15 | } | ||
16 | #endif | ||
17 | |||
18 | -static uint8_t tci_read_reg8(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (uint8_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | - | ||
23 | static uint16_t tci_read_reg16(const tcg_target_ulong *regs, TCGReg index) | ||
24 | { | ||
25 | return (uint16_t)tci_read_reg(regs, index); | ||
26 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
27 | return value; | ||
28 | } | ||
29 | |||
30 | -/* Read indexed register (8 bit) from bytecode. */ | ||
31 | -static uint8_t tci_read_r8(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
32 | -{ | ||
33 | - uint8_t value = tci_read_reg8(regs, **tb_ptr); | ||
34 | - *tb_ptr += 1; | ||
35 | - return value; | ||
36 | -} | ||
37 | - | ||
38 | #if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 | ||
39 | /* Read indexed register (8 bit signed) from bytecode. */ | ||
40 | static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
41 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
42 | tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2)); | ||
43 | break; | ||
44 | CASE_32_64(st8) | ||
45 | - t0 = tci_read_r8(regs, &tb_ptr); | ||
46 | + t0 = tci_read_r(regs, &tb_ptr); | ||
47 | t1 = tci_read_r(regs, &tb_ptr); | ||
48 | t2 = tci_read_s32(&tb_ptr); | ||
49 | *(uint8_t *)(t1 + t2) = t0; | ||
50 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
51 | #if TCG_TARGET_HAS_ext8u_i32 | ||
52 | case INDEX_op_ext8u_i32: | ||
53 | t0 = *tb_ptr++; | ||
54 | - t1 = tci_read_r8(regs, &tb_ptr); | ||
55 | - tci_write_reg(regs, t0, t1); | ||
56 | + t1 = tci_read_r(regs, &tb_ptr); | ||
57 | + tci_write_reg(regs, t0, (uint8_t)t1); | ||
58 | break; | ||
59 | #endif | ||
60 | #if TCG_TARGET_HAS_ext16u_i32 | ||
61 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
62 | #if TCG_TARGET_HAS_ext8u_i64 | ||
63 | case INDEX_op_ext8u_i64: | ||
64 | t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r8(regs, &tb_ptr); | ||
66 | - tci_write_reg(regs, t0, t1); | ||
67 | + t1 = tci_read_r(regs, &tb_ptr); | ||
68 | + tci_write_reg(regs, t0, (uint8_t)t1); | ||
69 | break; | ||
70 | #endif | ||
71 | #if TCG_TARGET_HAS_ext8s_i64 | ||
72 | -- | ||
73 | 2.25.1 | ||
74 | |||
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext8s opcodes. | ||
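
The signed counterpart of the ext8u change, sketched standalone (the value
is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t reg = 0x1234f0;        /* low byte 0xf0, i.e. -16 */
        intptr_t ext8s = (int8_t)reg;    /* the cast sign-extends on widening */
        printf("%ld\n", (long)ext8s);    /* prints -16 */
        return 0;
    }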
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 25 ++++--------------------- | ||
7 | 1 file changed, 4 insertions(+), 21 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) | ||
14 | return regs[index]; | ||
15 | } | ||
16 | |||
17 | -#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 | ||
18 | -static int8_t tci_read_reg8s(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (int8_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | -#endif | ||
23 | - | ||
24 | #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
25 | static int16_t tci_read_reg16s(const tcg_target_ulong *regs, TCGReg index) | ||
26 | { | ||
27 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
28 | return value; | ||
29 | } | ||
30 | |||
31 | -#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 | ||
32 | -/* Read indexed register (8 bit signed) from bytecode. */ | ||
33 | -static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
34 | -{ | ||
35 | - int8_t value = tci_read_reg8s(regs, **tb_ptr); | ||
36 | - *tb_ptr += 1; | ||
37 | - return value; | ||
38 | -} | ||
39 | -#endif | ||
40 | - | ||
41 | /* Read indexed register (16 bit) from bytecode. */ | ||
42 | static uint16_t tci_read_r16(const tcg_target_ulong *regs, | ||
43 | const uint8_t **tb_ptr) | ||
44 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
45 | #if TCG_TARGET_HAS_ext8s_i32 | ||
46 | case INDEX_op_ext8s_i32: | ||
47 | t0 = *tb_ptr++; | ||
48 | - t1 = tci_read_r8s(regs, &tb_ptr); | ||
49 | - tci_write_reg(regs, t0, t1); | ||
50 | + t1 = tci_read_r(regs, &tb_ptr); | ||
51 | + tci_write_reg(regs, t0, (int8_t)t1); | ||
52 | break; | ||
53 | #endif | ||
54 | #if TCG_TARGET_HAS_ext16s_i32 | ||
55 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
56 | #if TCG_TARGET_HAS_ext8s_i64 | ||
57 | case INDEX_op_ext8s_i64: | ||
58 | t0 = *tb_ptr++; | ||
59 | - t1 = tci_read_r8s(regs, &tb_ptr); | ||
60 | - tci_write_reg(regs, t0, t1); | ||
61 | + t1 = tci_read_r(regs, &tb_ptr); | ||
62 | + tci_write_reg(regs, t0, (int8_t)t1); | ||
63 | break; | ||
64 | #endif | ||
65 | #if TCG_TARGET_HAS_ext16s_i64 | ||
66 | -- | ||
67 | 2.25.1 | ||
68 | |||
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext16u opcodes, and allow truncation | ||
2 | to happen with the store for st16 opcodes, and with the call | ||
3 | for bswap16 opcodes. | ||
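
The bswap16 part deserves a standalone sketch: no cast is needed before the
call because the uint16_t parameter narrows the argument by itself (the
bswap16 below stands in for the one in qemu/bswap.h):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t bswap16(uint16_t x)   /* stand-in for qemu/bswap.h */
    {
        return (uint16_t)((x >> 8) | (x << 8));
    }

    int main(void)
    {
        uint64_t reg = 0xdeadbeef1234ULL; /* full-width register value */
        printf("%#x\n", bswap16(reg));    /* 0x3412: truncated, then swapped */
        return 0;
    }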
4 | 1 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | tcg/tci.c | 28 +++++++--------------------- | ||
9 | 1 file changed, 7 insertions(+), 21 deletions(-) | ||
10 | |||
11 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/tcg/tci.c | ||
14 | +++ b/tcg/tci.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
16 | } | ||
17 | #endif | ||
18 | |||
19 | -static uint16_t tci_read_reg16(const tcg_target_ulong *regs, TCGReg index) | ||
20 | -{ | ||
21 | - return (uint16_t)tci_read_reg(regs, index); | ||
22 | -} | ||
23 | - | ||
24 | static uint32_t tci_read_reg32(const tcg_target_ulong *regs, TCGReg index) | ||
25 | { | ||
26 | return (uint32_t)tci_read_reg(regs, index); | ||
27 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
28 | return value; | ||
29 | } | ||
30 | |||
31 | -/* Read indexed register (16 bit) from bytecode. */ | ||
32 | -static uint16_t tci_read_r16(const tcg_target_ulong *regs, | ||
33 | - const uint8_t **tb_ptr) | ||
34 | -{ | ||
35 | - uint16_t value = tci_read_reg16(regs, **tb_ptr); | ||
36 | - *tb_ptr += 1; | ||
37 | - return value; | ||
38 | -} | ||
39 | - | ||
40 | #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
41 | /* Read indexed register (16 bit signed) from bytecode. */ | ||
42 | static int16_t tci_read_r16s(const tcg_target_ulong *regs, | ||
43 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
44 | *(uint8_t *)(t1 + t2) = t0; | ||
45 | break; | ||
46 | CASE_32_64(st16) | ||
47 | - t0 = tci_read_r16(regs, &tb_ptr); | ||
48 | + t0 = tci_read_r(regs, &tb_ptr); | ||
49 | t1 = tci_read_r(regs, &tb_ptr); | ||
50 | t2 = tci_read_s32(&tb_ptr); | ||
51 | *(uint16_t *)(t1 + t2) = t0; | ||
52 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
53 | #if TCG_TARGET_HAS_ext16u_i32 | ||
54 | case INDEX_op_ext16u_i32: | ||
55 | t0 = *tb_ptr++; | ||
56 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
57 | - tci_write_reg(regs, t0, t1); | ||
58 | + t1 = tci_read_r(regs, &tb_ptr); | ||
59 | + tci_write_reg(regs, t0, (uint16_t)t1); | ||
60 | break; | ||
61 | #endif | ||
62 | #if TCG_TARGET_HAS_bswap16_i32 | ||
63 | case INDEX_op_bswap16_i32: | ||
64 | t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
66 | + t1 = tci_read_r(regs, &tb_ptr); | ||
67 | tci_write_reg(regs, t0, bswap16(t1)); | ||
68 | break; | ||
69 | #endif | ||
70 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
71 | #if TCG_TARGET_HAS_ext16u_i64 | ||
72 | case INDEX_op_ext16u_i64: | ||
73 | t0 = *tb_ptr++; | ||
74 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
75 | - tci_write_reg(regs, t0, t1); | ||
76 | + t1 = tci_read_r(regs, &tb_ptr); | ||
77 | + tci_write_reg(regs, t0, (uint16_t)t1); | ||
78 | break; | ||
79 | #endif | ||
80 | #if TCG_TARGET_HAS_ext32s_i64 | ||
81 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
82 | #if TCG_TARGET_HAS_bswap16_i64 | ||
83 | case INDEX_op_bswap16_i64: | ||
84 | t0 = *tb_ptr++; | ||
85 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
86 | + t1 = tci_read_r(regs, &tb_ptr); | ||
87 | tci_write_reg(regs, t0, bswap16(t1)); | ||
88 | break; | ||
89 | #endif | ||
90 | -- | ||
91 | 2.25.1 | ||
92 | |||
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext16s opcodes. | ||
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 26 ++++---------------------- | ||
7 | 1 file changed, 4 insertions(+), 22 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) | ||
14 | return regs[index]; | ||
15 | } | ||
16 | |||
17 | -#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
18 | -static int16_t tci_read_reg16s(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (int16_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | -#endif | ||
23 | - | ||
24 | #if TCG_TARGET_REG_BITS == 64 | ||
25 | static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
26 | { | ||
27 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
28 | return value; | ||
29 | } | ||
30 | |||
31 | -#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
32 | -/* Read indexed register (16 bit signed) from bytecode. */ | ||
33 | -static int16_t tci_read_r16s(const tcg_target_ulong *regs, | ||
34 | - const uint8_t **tb_ptr) | ||
35 | -{ | ||
36 | - int16_t value = tci_read_reg16s(regs, **tb_ptr); | ||
37 | - *tb_ptr += 1; | ||
38 | - return value; | ||
39 | -} | ||
40 | -#endif | ||
41 | - | ||
42 | /* Read indexed register (32 bit) from bytecode. */ | ||
43 | static uint32_t tci_read_r32(const tcg_target_ulong *regs, | ||
44 | const uint8_t **tb_ptr) | ||
45 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
46 | #if TCG_TARGET_HAS_ext16s_i32 | ||
47 | case INDEX_op_ext16s_i32: | ||
48 | t0 = *tb_ptr++; | ||
49 | - t1 = tci_read_r16s(regs, &tb_ptr); | ||
50 | - tci_write_reg(regs, t0, t1); | ||
51 | + t1 = tci_read_r(regs, &tb_ptr); | ||
52 | + tci_write_reg(regs, t0, (int16_t)t1); | ||
53 | break; | ||
54 | #endif | ||
55 | #if TCG_TARGET_HAS_ext8u_i32 | ||
56 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
57 | #if TCG_TARGET_HAS_ext16s_i64 | ||
58 | case INDEX_op_ext16s_i64: | ||
59 | t0 = *tb_ptr++; | ||
60 | - t1 = tci_read_r16s(regs, &tb_ptr); | ||
61 | - tci_write_reg(regs, t0, t1); | ||
62 | + t1 = tci_read_r(regs, &tb_ptr); | ||
63 | + tci_write_reg(regs, t0, (int16_t)t1); | ||
64 | break; | ||
65 | #endif | ||
66 | #if TCG_TARGET_HAS_ext16u_i64 | ||
67 | -- | ||
68 | 2.25.1 | ||
69 | |||
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext32u opcodes, and allow truncation | ||
2 | to happen for other users. | ||
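
A standalone sketch of why the casts matter once values are read at full
register width, using the shr case from the diff below (values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t t1 = 0x1ffffffffULL, t2 = 1;

        /* Without a cast the shift happens at full register width... */
        uint64_t wide   = t1 >> (t2 & 31);            /* 0xffffffff */

        /* ...with the cast, the op sees 32-bit values, as a guest
         * shr_i32 requires. */
        uint64_t narrow = (uint32_t)t1 >> (t2 & 31);  /* 0x7fffffff */

        printf("%llx %llx\n", (unsigned long long)wide,
                              (unsigned long long)narrow);
        return 0;
    }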
3 | 1 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | tcg/tci.c | 122 ++++++++++++++++++++++++------------------------------ | ||
8 | 1 file changed, 54 insertions(+), 68 deletions(-) | ||
9 | |||
10 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/tcg/tci.c | ||
13 | +++ b/tcg/tci.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
15 | } | ||
16 | #endif | ||
17 | |||
18 | -static uint32_t tci_read_reg32(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (uint32_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | - | ||
23 | #if TCG_TARGET_REG_BITS == 64 | ||
24 | static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index) | ||
25 | { | ||
26 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
27 | return value; | ||
28 | } | ||
29 | |||
30 | -/* Read indexed register (32 bit) from bytecode. */ | ||
31 | -static uint32_t tci_read_r32(const tcg_target_ulong *regs, | ||
32 | - const uint8_t **tb_ptr) | ||
33 | -{ | ||
34 | - uint32_t value = tci_read_reg32(regs, **tb_ptr); | ||
35 | - *tb_ptr += 1; | ||
36 | - return value; | ||
37 | -} | ||
38 | - | ||
39 | #if TCG_TARGET_REG_BITS == 32 | ||
40 | /* Read two indexed registers (2 * 32 bit) from bytecode. */ | ||
41 | static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
42 | const uint8_t **tb_ptr) | ||
43 | { | ||
44 | - uint32_t low = tci_read_r32(regs, tb_ptr); | ||
45 | - return tci_uint64(tci_read_r32(regs, tb_ptr), low); | ||
46 | + uint32_t low = tci_read_r(regs, tb_ptr); | ||
47 | + return tci_uint64(tci_read_r(regs, tb_ptr), low); | ||
48 | } | ||
49 | #elif TCG_TARGET_REG_BITS == 64 | ||
50 | /* Read indexed register (32 bit signed) from bytecode. */ | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | continue; | ||
53 | case INDEX_op_setcond_i32: | ||
54 | t0 = *tb_ptr++; | ||
55 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
56 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
57 | + t1 = tci_read_r(regs, &tb_ptr); | ||
58 | + t2 = tci_read_r(regs, &tb_ptr); | ||
59 | condition = *tb_ptr++; | ||
60 | tci_write_reg(regs, t0, tci_compare32(t1, t2, condition)); | ||
61 | break; | ||
62 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
63 | #endif | ||
64 | case INDEX_op_mov_i32: | ||
65 | t0 = *tb_ptr++; | ||
66 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
67 | + t1 = tci_read_r(regs, &tb_ptr); | ||
68 | tci_write_reg(regs, t0, t1); | ||
69 | break; | ||
70 | case INDEX_op_tci_movi_i32: | ||
71 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
72 | break; | ||
73 | case INDEX_op_st_i32: | ||
74 | CASE_64(st32) | ||
75 | - t0 = tci_read_r32(regs, &tb_ptr); | ||
76 | + t0 = tci_read_r(regs, &tb_ptr); | ||
77 | t1 = tci_read_r(regs, &tb_ptr); | ||
78 | t2 = tci_read_s32(&tb_ptr); | ||
79 | *(uint32_t *)(t1 + t2) = t0; | ||
80 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
81 | |||
82 | case INDEX_op_add_i32: | ||
83 | t0 = *tb_ptr++; | ||
84 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
85 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
86 | + t1 = tci_read_r(regs, &tb_ptr); | ||
87 | + t2 = tci_read_r(regs, &tb_ptr); | ||
88 | tci_write_reg(regs, t0, t1 + t2); | ||
89 | break; | ||
90 | case INDEX_op_sub_i32: | ||
91 | t0 = *tb_ptr++; | ||
92 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
93 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
94 | + t1 = tci_read_r(regs, &tb_ptr); | ||
95 | + t2 = tci_read_r(regs, &tb_ptr); | ||
96 | tci_write_reg(regs, t0, t1 - t2); | ||
97 | break; | ||
98 | case INDEX_op_mul_i32: | ||
99 | t0 = *tb_ptr++; | ||
100 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
101 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
102 | + t1 = tci_read_r(regs, &tb_ptr); | ||
103 | + t2 = tci_read_r(regs, &tb_ptr); | ||
104 | tci_write_reg(regs, t0, t1 * t2); | ||
105 | break; | ||
106 | case INDEX_op_div_i32: | ||
107 | t0 = *tb_ptr++; | ||
108 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
109 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
110 | + t1 = tci_read_r(regs, &tb_ptr); | ||
111 | + t2 = tci_read_r(regs, &tb_ptr); | ||
112 | tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2); | ||
113 | break; | ||
114 | case INDEX_op_divu_i32: | ||
115 | t0 = *tb_ptr++; | ||
116 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
117 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
118 | - tci_write_reg(regs, t0, t1 / t2); | ||
119 | + t1 = tci_read_r(regs, &tb_ptr); | ||
120 | + t2 = tci_read_r(regs, &tb_ptr); | ||
121 | + tci_write_reg(regs, t0, (uint32_t)t1 / (uint32_t)t2); | ||
122 | break; | ||
123 | case INDEX_op_rem_i32: | ||
124 | t0 = *tb_ptr++; | ||
125 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
126 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
127 | + t1 = tci_read_r(regs, &tb_ptr); | ||
128 | + t2 = tci_read_r(regs, &tb_ptr); | ||
129 | tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2); | ||
130 | break; | ||
131 | case INDEX_op_remu_i32: | ||
132 | t0 = *tb_ptr++; | ||
133 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
134 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
135 | - tci_write_reg(regs, t0, t1 % t2); | ||
136 | + t1 = tci_read_r(regs, &tb_ptr); | ||
137 | + t2 = tci_read_r(regs, &tb_ptr); | ||
138 | + tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2); | ||
139 | break; | ||
140 | case INDEX_op_and_i32: | ||
141 | t0 = *tb_ptr++; | ||
142 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
143 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
144 | + t1 = tci_read_r(regs, &tb_ptr); | ||
145 | + t2 = tci_read_r(regs, &tb_ptr); | ||
146 | tci_write_reg(regs, t0, t1 & t2); | ||
147 | break; | ||
148 | case INDEX_op_or_i32: | ||
149 | t0 = *tb_ptr++; | ||
150 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
151 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
152 | + t1 = tci_read_r(regs, &tb_ptr); | ||
153 | + t2 = tci_read_r(regs, &tb_ptr); | ||
154 | tci_write_reg(regs, t0, t1 | t2); | ||
155 | break; | ||
156 | case INDEX_op_xor_i32: | ||
157 | t0 = *tb_ptr++; | ||
158 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
159 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
160 | + t1 = tci_read_r(regs, &tb_ptr); | ||
161 | + t2 = tci_read_r(regs, &tb_ptr); | ||
162 | tci_write_reg(regs, t0, t1 ^ t2); | ||
163 | break; | ||
164 | |||
165 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
166 | |||
167 | case INDEX_op_shl_i32: | ||
168 | t0 = *tb_ptr++; | ||
169 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
170 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
171 | - tci_write_reg(regs, t0, t1 << (t2 & 31)); | ||
172 | + t1 = tci_read_r(regs, &tb_ptr); | ||
173 | + t2 = tci_read_r(regs, &tb_ptr); | ||
174 | + tci_write_reg(regs, t0, (uint32_t)t1 << (t2 & 31)); | ||
175 | break; | ||
176 | case INDEX_op_shr_i32: | ||
177 | t0 = *tb_ptr++; | ||
178 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
179 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
180 | - tci_write_reg(regs, t0, t1 >> (t2 & 31)); | ||
181 | + t1 = tci_read_r(regs, &tb_ptr); | ||
182 | + t2 = tci_read_r(regs, &tb_ptr); | ||
183 | + tci_write_reg(regs, t0, (uint32_t)t1 >> (t2 & 31)); | ||
184 | break; | ||
185 | case INDEX_op_sar_i32: | ||
186 | t0 = *tb_ptr++; | ||
187 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
188 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
189 | - tci_write_reg(regs, t0, ((int32_t)t1 >> (t2 & 31))); | ||
190 | + t1 = tci_read_r(regs, &tb_ptr); | ||
191 | + t2 = tci_read_r(regs, &tb_ptr); | ||
192 | + tci_write_reg(regs, t0, (int32_t)t1 >> (t2 & 31)); | ||
193 | break; | ||
194 | #if TCG_TARGET_HAS_rot_i32 | ||
195 | case INDEX_op_rotl_i32: | ||
196 | t0 = *tb_ptr++; | ||
197 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
198 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
199 | + t1 = tci_read_r(regs, &tb_ptr); | ||
200 | + t2 = tci_read_r(regs, &tb_ptr); | ||
201 | tci_write_reg(regs, t0, rol32(t1, t2 & 31)); | ||
202 | break; | ||
203 | case INDEX_op_rotr_i32: | ||
204 | t0 = *tb_ptr++; | ||
205 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
206 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
207 | + t1 = tci_read_r(regs, &tb_ptr); | ||
208 | + t2 = tci_read_r(regs, &tb_ptr); | ||
209 | tci_write_reg(regs, t0, ror32(t1, t2 & 31)); | ||
210 | break; | ||
211 | #endif | ||
212 | #if TCG_TARGET_HAS_deposit_i32 | ||
213 | case INDEX_op_deposit_i32: | ||
214 | t0 = *tb_ptr++; | ||
215 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
216 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
217 | + t1 = tci_read_r(regs, &tb_ptr); | ||
218 | + t2 = tci_read_r(regs, &tb_ptr); | ||
219 | tmp16 = *tb_ptr++; | ||
220 | tmp8 = *tb_ptr++; | ||
221 | tmp32 = (((1 << tmp8) - 1) << tmp16); | ||
222 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
223 | break; | ||
224 | #endif | ||
225 | case INDEX_op_brcond_i32: | ||
226 | - t0 = tci_read_r32(regs, &tb_ptr); | ||
227 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
228 | + t0 = tci_read_r(regs, &tb_ptr); | ||
229 | + t1 = tci_read_r(regs, &tb_ptr); | ||
230 | condition = *tb_ptr++; | ||
231 | label = tci_read_label(&tb_ptr); | ||
232 | if (tci_compare32(t0, t1, condition)) { | ||
233 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
234 | case INDEX_op_mulu2_i32: | ||
235 | t0 = *tb_ptr++; | ||
236 | t1 = *tb_ptr++; | ||
237 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
238 | - tmp64 = tci_read_r32(regs, &tb_ptr); | ||
239 | - tci_write_reg64(regs, t1, t0, t2 * tmp64); | ||
240 | + t2 = tci_read_r(regs, &tb_ptr); | ||
241 | + tmp64 = (uint32_t)tci_read_r(regs, &tb_ptr); | ||
242 | + tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64); | ||
243 | break; | ||
244 | #endif /* TCG_TARGET_REG_BITS == 32 */ | ||
245 | #if TCG_TARGET_HAS_ext8s_i32 | ||
246 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
247 | #if TCG_TARGET_HAS_bswap32_i32 | ||
248 | case INDEX_op_bswap32_i32: | ||
249 | t0 = *tb_ptr++; | ||
250 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
251 | + t1 = tci_read_r(regs, &tb_ptr); | ||
252 | tci_write_reg(regs, t0, bswap32(t1)); | ||
253 | break; | ||
254 | #endif | ||
255 | #if TCG_TARGET_HAS_not_i32 | ||
256 | case INDEX_op_not_i32: | ||
257 | t0 = *tb_ptr++; | ||
258 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
259 | + t1 = tci_read_r(regs, &tb_ptr); | ||
260 | tci_write_reg(regs, t0, ~t1); | ||
261 | break; | ||
262 | #endif | ||
263 | #if TCG_TARGET_HAS_neg_i32 | ||
264 | case INDEX_op_neg_i32: | ||
265 | t0 = *tb_ptr++; | ||
266 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
267 | + t1 = tci_read_r(regs, &tb_ptr); | ||
268 | tci_write_reg(regs, t0, -t1); | ||
269 | break; | ||
270 | #endif | ||
271 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
272 | #endif | ||
273 | case INDEX_op_extu_i32_i64: | ||
274 | t0 = *tb_ptr++; | ||
275 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
276 | - tci_write_reg(regs, t0, t1); | ||
277 | + t1 = tci_read_r(regs, &tb_ptr); | ||
278 | + tci_write_reg(regs, t0, (uint32_t)t1); | ||
279 | break; | ||
280 | #if TCG_TARGET_HAS_bswap16_i64 | ||
281 | case INDEX_op_bswap16_i64: | ||
282 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
283 | #if TCG_TARGET_HAS_bswap32_i64 | ||
284 | case INDEX_op_bswap32_i64: | ||
285 | t0 = *tb_ptr++; | ||
286 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
287 | + t1 = tci_read_r(regs, &tb_ptr); | ||
288 | tci_write_reg(regs, t0, bswap32(t1)); | ||
289 | break; | ||
290 | #endif | ||
291 | -- | ||
292 | 2.25.1 | ||
293 | |||
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext32s opcodes. | ||
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 20 ++------------------ | ||
7 | 1 file changed, 2 insertions(+), 18 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) | ||
14 | return regs[index]; | ||
15 | } | ||
16 | |||
17 | -#if TCG_TARGET_REG_BITS == 64 | ||
18 | -static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (int32_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | -#endif | ||
23 | - | ||
24 | #if TCG_TARGET_REG_BITS == 64 | ||
25 | static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index) | ||
26 | { | ||
27 | @@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
28 | return tci_uint64(tci_read_r(regs, tb_ptr), low); | ||
29 | } | ||
30 | #elif TCG_TARGET_REG_BITS == 64 | ||
31 | -/* Read indexed register (32 bit signed) from bytecode. */ | ||
32 | -static int32_t tci_read_r32s(const tcg_target_ulong *regs, | ||
33 | - const uint8_t **tb_ptr) | ||
34 | -{ | ||
35 | - int32_t value = tci_read_reg32s(regs, **tb_ptr); | ||
36 | - *tb_ptr += 1; | ||
37 | - return value; | ||
38 | -} | ||
39 | - | ||
40 | /* Read indexed register (64 bit) from bytecode. */ | ||
41 | static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
42 | const uint8_t **tb_ptr) | ||
43 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
44 | #endif | ||
45 | case INDEX_op_ext_i32_i64: | ||
46 | t0 = *tb_ptr++; | ||
47 | - t1 = tci_read_r32s(regs, &tb_ptr); | ||
48 | - tci_write_reg(regs, t0, t1); | ||
49 | + t1 = tci_read_r(regs, &tb_ptr); | ||
50 | + tci_write_reg(regs, t0, (int32_t)t1); | ||
51 | break; | ||
52 | #if TCG_TARGET_HAS_ext32u_i64 | ||
53 | case INDEX_op_ext32u_i64: | ||
54 | -- | ||
55 | 2.25.1 | ||
56 | |||
Deleted patch | |||
---|---|---|---|
1 | In all cases restricted to 64-bit hosts, tci_read_r64 is | ||
2 | identical to tci_read_r. We retain the 64-bit symbol for the | ||
3 | single case of INDEX_op_qemu_st_i64. | ||
4 | 1 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | tcg/tci.c | 93 +++++++++++++++++++++++++------------------------------ | ||
9 | 1 file changed, 42 insertions(+), 51 deletions(-) | ||
10 | |||
11 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/tcg/tci.c | ||
14 | +++ b/tcg/tci.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) | ||
16 | return regs[index]; | ||
17 | } | ||
18 | |||
19 | -#if TCG_TARGET_REG_BITS == 64 | ||
20 | -static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index) | ||
21 | -{ | ||
22 | - return tci_read_reg(regs, index); | ||
23 | -} | ||
24 | -#endif | ||
25 | - | ||
26 | static void | ||
27 | tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value) | ||
28 | { | ||
29 | @@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
30 | static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
31 | const uint8_t **tb_ptr) | ||
32 | { | ||
33 | - uint64_t value = tci_read_reg64(regs, **tb_ptr); | ||
34 | - *tb_ptr += 1; | ||
35 | - return value; | ||
36 | + return tci_read_r(regs, tb_ptr); | ||
37 | } | ||
38 | #endif | ||
39 | |||
40 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
41 | #elif TCG_TARGET_REG_BITS == 64 | ||
42 | case INDEX_op_setcond_i64: | ||
43 | t0 = *tb_ptr++; | ||
44 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
45 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
46 | + t1 = tci_read_r(regs, &tb_ptr); | ||
47 | + t2 = tci_read_r(regs, &tb_ptr); | ||
48 | condition = *tb_ptr++; | ||
49 | tci_write_reg(regs, t0, tci_compare64(t1, t2, condition)); | ||
50 | break; | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | #if TCG_TARGET_REG_BITS == 64 | ||
53 | case INDEX_op_mov_i64: | ||
54 | t0 = *tb_ptr++; | ||
55 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
56 | + t1 = tci_read_r(regs, &tb_ptr); | ||
57 | tci_write_reg(regs, t0, t1); | ||
58 | break; | ||
59 | case INDEX_op_tci_movi_i64: | ||
60 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
61 | tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2)); | ||
62 | break; | ||
63 | case INDEX_op_st_i64: | ||
64 | - t0 = tci_read_r64(regs, &tb_ptr); | ||
65 | + t0 = tci_read_r(regs, &tb_ptr); | ||
66 | t1 = tci_read_r(regs, &tb_ptr); | ||
67 | t2 = tci_read_s32(&tb_ptr); | ||
68 | *(uint64_t *)(t1 + t2) = t0; | ||
69 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
70 | |||
71 | case INDEX_op_add_i64: | ||
72 | t0 = *tb_ptr++; | ||
73 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
74 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
75 | + t1 = tci_read_r(regs, &tb_ptr); | ||
76 | + t2 = tci_read_r(regs, &tb_ptr); | ||
77 | tci_write_reg(regs, t0, t1 + t2); | ||
78 | break; | ||
79 | case INDEX_op_sub_i64: | ||
80 | t0 = *tb_ptr++; | ||
81 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
82 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
83 | + t1 = tci_read_r(regs, &tb_ptr); | ||
84 | + t2 = tci_read_r(regs, &tb_ptr); | ||
85 | tci_write_reg(regs, t0, t1 - t2); | ||
86 | break; | ||
87 | case INDEX_op_mul_i64: | ||
88 | t0 = *tb_ptr++; | ||
89 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
90 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
91 | + t1 = tci_read_r(regs, &tb_ptr); | ||
92 | + t2 = tci_read_r(regs, &tb_ptr); | ||
93 | tci_write_reg(regs, t0, t1 * t2); | ||
94 | break; | ||
95 | case INDEX_op_div_i64: | ||
96 | t0 = *tb_ptr++; | ||
97 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
98 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
99 | + t1 = tci_read_r(regs, &tb_ptr); | ||
100 | + t2 = tci_read_r(regs, &tb_ptr); | ||
101 | tci_write_reg(regs, t0, (int64_t)t1 / (int64_t)t2); | ||
102 | break; | ||
103 | case INDEX_op_divu_i64: | ||
104 | t0 = *tb_ptr++; | ||
105 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
106 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
107 | + t1 = tci_read_r(regs, &tb_ptr); | ||
108 | + t2 = tci_read_r(regs, &tb_ptr); | ||
109 | tci_write_reg(regs, t0, (uint64_t)t1 / (uint64_t)t2); | ||
110 | break; | ||
111 | case INDEX_op_rem_i64: | ||
112 | t0 = *tb_ptr++; | ||
113 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
114 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
115 | + t1 = tci_read_r(regs, &tb_ptr); | ||
116 | + t2 = tci_read_r(regs, &tb_ptr); | ||
117 | tci_write_reg(regs, t0, (int64_t)t1 % (int64_t)t2); | ||
118 | break; | ||
119 | case INDEX_op_remu_i64: | ||
120 | t0 = *tb_ptr++; | ||
121 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
122 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
123 | + t1 = tci_read_r(regs, &tb_ptr); | ||
124 | + t2 = tci_read_r(regs, &tb_ptr); | ||
125 | tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2); | ||
126 | break; | ||
127 | case INDEX_op_and_i64: | ||
128 | t0 = *tb_ptr++; | ||
129 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
130 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
131 | + t1 = tci_read_r(regs, &tb_ptr); | ||
132 | + t2 = tci_read_r(regs, &tb_ptr); | ||
133 | tci_write_reg(regs, t0, t1 & t2); | ||
134 | break; | ||
135 | case INDEX_op_or_i64: | ||
136 | t0 = *tb_ptr++; | ||
137 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
138 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
139 | + t1 = tci_read_r(regs, &tb_ptr); | ||
140 | + t2 = tci_read_r(regs, &tb_ptr); | ||
141 | tci_write_reg(regs, t0, t1 | t2); | ||
142 | break; | ||
143 | case INDEX_op_xor_i64: | ||
144 | t0 = *tb_ptr++; | ||
145 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
146 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
147 | + t1 = tci_read_r(regs, &tb_ptr); | ||
148 | + t2 = tci_read_r(regs, &tb_ptr); | ||
149 | tci_write_reg(regs, t0, t1 ^ t2); | ||
150 | break; | ||
151 | |||
152 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
153 | |||
154 | case INDEX_op_shl_i64: | ||
155 | t0 = *tb_ptr++; | ||
156 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
157 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
158 | + t1 = tci_read_r(regs, &tb_ptr); | ||
159 | + t2 = tci_read_r(regs, &tb_ptr); | ||
160 | tci_write_reg(regs, t0, t1 << (t2 & 63)); | ||
161 | break; | ||
162 | case INDEX_op_shr_i64: | ||
163 | t0 = *tb_ptr++; | ||
164 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
165 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
166 | + t1 = tci_read_r(regs, &tb_ptr); | ||
167 | + t2 = tci_read_r(regs, &tb_ptr); | ||
168 | tci_write_reg(regs, t0, t1 >> (t2 & 63)); | ||
169 | break; | ||
170 | case INDEX_op_sar_i64: | ||
171 | t0 = *tb_ptr++; | ||
172 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
173 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
174 | + t1 = tci_read_r(regs, &tb_ptr); | ||
175 | + t2 = tci_read_r(regs, &tb_ptr); | ||
176 | tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63))); | ||
177 | break; | ||
178 | #if TCG_TARGET_HAS_rot_i64 | ||
179 | case INDEX_op_rotl_i64: | ||
180 | t0 = *tb_ptr++; | ||
181 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
182 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
183 | + t1 = tci_read_r(regs, &tb_ptr); | ||
184 | + t2 = tci_read_r(regs, &tb_ptr); | ||
185 | tci_write_reg(regs, t0, rol64(t1, t2 & 63)); | ||
186 | break; | ||
187 | case INDEX_op_rotr_i64: | ||
188 | t0 = *tb_ptr++; | ||
189 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
190 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
191 | + t1 = tci_read_r(regs, &tb_ptr); | ||
192 | + t2 = tci_read_r(regs, &tb_ptr); | ||
193 | tci_write_reg(regs, t0, ror64(t1, t2 & 63)); | ||
194 | break; | ||
195 | #endif | ||
196 | #if TCG_TARGET_HAS_deposit_i64 | ||
197 | case INDEX_op_deposit_i64: | ||
198 | t0 = *tb_ptr++; | ||
199 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
200 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
201 | + t1 = tci_read_r(regs, &tb_ptr); | ||
202 | + t2 = tci_read_r(regs, &tb_ptr); | ||
203 | tmp16 = *tb_ptr++; | ||
204 | tmp8 = *tb_ptr++; | ||
205 | tmp64 = (((1ULL << tmp8) - 1) << tmp16); | ||
206 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
207 | break; | ||
208 | #endif | ||
209 | case INDEX_op_brcond_i64: | ||
210 | - t0 = tci_read_r64(regs, &tb_ptr); | ||
211 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
212 | + t0 = tci_read_r(regs, &tb_ptr); | ||
213 | + t1 = tci_read_r(regs, &tb_ptr); | ||
214 | condition = *tb_ptr++; | ||
215 | label = tci_read_label(&tb_ptr); | ||
216 | if (tci_compare64(t0, t1, condition)) { | ||
217 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
218 | #if TCG_TARGET_HAS_bswap64_i64 | ||
219 | case INDEX_op_bswap64_i64: | ||
220 | t0 = *tb_ptr++; | ||
221 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
222 | + t1 = tci_read_r(regs, &tb_ptr); | ||
223 | tci_write_reg(regs, t0, bswap64(t1)); | ||
224 | break; | ||
225 | #endif | ||
226 | #if TCG_TARGET_HAS_not_i64 | ||
227 | case INDEX_op_not_i64: | ||
228 | t0 = *tb_ptr++; | ||
229 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
230 | + t1 = tci_read_r(regs, &tb_ptr); | ||
231 | tci_write_reg(regs, t0, ~t1); | ||
232 | break; | ||
233 | #endif | ||
234 | #if TCG_TARGET_HAS_neg_i64 | ||
235 | case INDEX_op_neg_i64: | ||
236 | t0 = *tb_ptr++; | ||
237 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
238 | + t1 = tci_read_r(regs, &tb_ptr); | ||
239 | tci_write_reg(regs, t0, -t1); | ||
240 | break; | ||
241 | #endif | ||
242 | -- | ||
243 | 2.25.1 | ||
244 | |||
Deleted patch | |||
---|---|---|---|
1 | This includes add, sub, mul, and, or, xor. | ||
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 83 +++++++++++++++++-------------------------------------- | ||
7 | 1 file changed, 25 insertions(+), 58 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
14 | *(uint32_t *)(t1 + t2) = t0; | ||
15 | break; | ||
16 | |||
17 | - /* Arithmetic operations (32 bit). */ | ||
18 | + /* Arithmetic operations (mixed 32/64 bit). */ | ||
19 | |||
20 | - case INDEX_op_add_i32: | ||
21 | + CASE_32_64(add) | ||
22 | t0 = *tb_ptr++; | ||
23 | t1 = tci_read_r(regs, &tb_ptr); | ||
24 | t2 = tci_read_r(regs, &tb_ptr); | ||
25 | tci_write_reg(regs, t0, t1 + t2); | ||
26 | break; | ||
27 | - case INDEX_op_sub_i32: | ||
28 | + CASE_32_64(sub) | ||
29 | t0 = *tb_ptr++; | ||
30 | t1 = tci_read_r(regs, &tb_ptr); | ||
31 | t2 = tci_read_r(regs, &tb_ptr); | ||
32 | tci_write_reg(regs, t0, t1 - t2); | ||
33 | break; | ||
34 | - case INDEX_op_mul_i32: | ||
35 | + CASE_32_64(mul) | ||
36 | t0 = *tb_ptr++; | ||
37 | t1 = tci_read_r(regs, &tb_ptr); | ||
38 | t2 = tci_read_r(regs, &tb_ptr); | ||
39 | tci_write_reg(regs, t0, t1 * t2); | ||
40 | break; | ||
41 | + CASE_32_64(and) | ||
42 | + t0 = *tb_ptr++; | ||
43 | + t1 = tci_read_r(regs, &tb_ptr); | ||
44 | + t2 = tci_read_r(regs, &tb_ptr); | ||
45 | + tci_write_reg(regs, t0, t1 & t2); | ||
46 | + break; | ||
47 | + CASE_32_64(or) | ||
48 | + t0 = *tb_ptr++; | ||
49 | + t1 = tci_read_r(regs, &tb_ptr); | ||
50 | + t2 = tci_read_r(regs, &tb_ptr); | ||
51 | + tci_write_reg(regs, t0, t1 | t2); | ||
52 | + break; | ||
53 | + CASE_32_64(xor) | ||
54 | + t0 = *tb_ptr++; | ||
55 | + t1 = tci_read_r(regs, &tb_ptr); | ||
56 | + t2 = tci_read_r(regs, &tb_ptr); | ||
57 | + tci_write_reg(regs, t0, t1 ^ t2); | ||
58 | + break; | ||
59 | + | ||
60 | + /* Arithmetic operations (32 bit). */ | ||
61 | + | ||
62 | case INDEX_op_div_i32: | ||
63 | t0 = *tb_ptr++; | ||
64 | t1 = tci_read_r(regs, &tb_ptr); | ||
65 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
66 | t2 = tci_read_r(regs, &tb_ptr); | ||
67 | tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2); | ||
68 | break; | ||
69 | - case INDEX_op_and_i32: | ||
70 | - t0 = *tb_ptr++; | ||
71 | - t1 = tci_read_r(regs, &tb_ptr); | ||
72 | - t2 = tci_read_r(regs, &tb_ptr); | ||
73 | - tci_write_reg(regs, t0, t1 & t2); | ||
74 | - break; | ||
75 | - case INDEX_op_or_i32: | ||
76 | - t0 = *tb_ptr++; | ||
77 | - t1 = tci_read_r(regs, &tb_ptr); | ||
78 | - t2 = tci_read_r(regs, &tb_ptr); | ||
79 | - tci_write_reg(regs, t0, t1 | t2); | ||
80 | - break; | ||
81 | - case INDEX_op_xor_i32: | ||
82 | - t0 = *tb_ptr++; | ||
83 | - t1 = tci_read_r(regs, &tb_ptr); | ||
84 | - t2 = tci_read_r(regs, &tb_ptr); | ||
85 | - tci_write_reg(regs, t0, t1 ^ t2); | ||
86 | - break; | ||
87 | |||
88 | /* Shift/rotate operations (32 bit). */ | ||
89 | |||
90 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
91 | |||
92 | /* Arithmetic operations (64 bit). */ | ||
93 | |||
94 | - case INDEX_op_add_i64: | ||
95 | - t0 = *tb_ptr++; | ||
96 | - t1 = tci_read_r(regs, &tb_ptr); | ||
97 | - t2 = tci_read_r(regs, &tb_ptr); | ||
98 | - tci_write_reg(regs, t0, t1 + t2); | ||
99 | - break; | ||
100 | - case INDEX_op_sub_i64: | ||
101 | - t0 = *tb_ptr++; | ||
102 | - t1 = tci_read_r(regs, &tb_ptr); | ||
103 | - t2 = tci_read_r(regs, &tb_ptr); | ||
104 | - tci_write_reg(regs, t0, t1 - t2); | ||
105 | - break; | ||
106 | - case INDEX_op_mul_i64: | ||
107 | - t0 = *tb_ptr++; | ||
108 | - t1 = tci_read_r(regs, &tb_ptr); | ||
109 | - t2 = tci_read_r(regs, &tb_ptr); | ||
110 | - tci_write_reg(regs, t0, t1 * t2); | ||
111 | - break; | ||
112 | case INDEX_op_div_i64: | ||
113 | t0 = *tb_ptr++; | ||
114 | t1 = tci_read_r(regs, &tb_ptr); | ||
115 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
116 | t2 = tci_read_r(regs, &tb_ptr); | ||
117 | tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2); | ||
118 | break; | ||
119 | - case INDEX_op_and_i64: | ||
120 | - t0 = *tb_ptr++; | ||
121 | - t1 = tci_read_r(regs, &tb_ptr); | ||
122 | - t2 = tci_read_r(regs, &tb_ptr); | ||
123 | - tci_write_reg(regs, t0, t1 & t2); | ||
124 | - break; | ||
125 | - case INDEX_op_or_i64: | ||
126 | - t0 = *tb_ptr++; | ||
127 | - t1 = tci_read_r(regs, &tb_ptr); | ||
128 | - t2 = tci_read_r(regs, &tb_ptr); | ||
129 | - tci_write_reg(regs, t0, t1 | t2); | ||
130 | - break; | ||
131 | - case INDEX_op_xor_i64: | ||
132 | - t0 = *tb_ptr++; | ||
133 | - t1 = tci_read_r(regs, &tb_ptr); | ||
134 | - t2 = tci_read_r(regs, &tb_ptr); | ||
135 | - tci_write_reg(regs, t0, t1 ^ t2); | ||
136 | - break; | ||
137 | |||
138 | /* Shift/rotate operations (64 bit). */ | ||
139 | |||
140 | -- | ||
141 | 2.25.1 | ||
142 | |||
Deleted patch | |||
---|---|---|---|
1 | This includes ext8s, ext8u, ext16s, ext16u. | ||
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 44 ++++++++------------------------------------ | ||
7 | 1 file changed, 8 insertions(+), 36 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
14 | tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64); | ||
15 | break; | ||
16 | #endif /* TCG_TARGET_REG_BITS == 32 */ | ||
17 | -#if TCG_TARGET_HAS_ext8s_i32 | ||
18 | - case INDEX_op_ext8s_i32: | ||
19 | +#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 | ||
20 | + CASE_32_64(ext8s) | ||
21 | t0 = *tb_ptr++; | ||
22 | t1 = tci_read_r(regs, &tb_ptr); | ||
23 | tci_write_reg(regs, t0, (int8_t)t1); | ||
24 | break; | ||
25 | #endif | ||
26 | -#if TCG_TARGET_HAS_ext16s_i32 | ||
27 | - case INDEX_op_ext16s_i32: | ||
28 | +#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
29 | + CASE_32_64(ext16s) | ||
30 | t0 = *tb_ptr++; | ||
31 | t1 = tci_read_r(regs, &tb_ptr); | ||
32 | tci_write_reg(regs, t0, (int16_t)t1); | ||
33 | break; | ||
34 | #endif | ||
35 | -#if TCG_TARGET_HAS_ext8u_i32 | ||
36 | - case INDEX_op_ext8u_i32: | ||
37 | +#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64 | ||
38 | + CASE_32_64(ext8u) | ||
39 | t0 = *tb_ptr++; | ||
40 | t1 = tci_read_r(regs, &tb_ptr); | ||
41 | tci_write_reg(regs, t0, (uint8_t)t1); | ||
42 | break; | ||
43 | #endif | ||
44 | -#if TCG_TARGET_HAS_ext16u_i32 | ||
45 | - case INDEX_op_ext16u_i32: | ||
46 | +#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64 | ||
47 | + CASE_32_64(ext16u) | ||
48 | t0 = *tb_ptr++; | ||
49 | t1 = tci_read_r(regs, &tb_ptr); | ||
50 | tci_write_reg(regs, t0, (uint16_t)t1); | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | continue; | ||
53 | } | ||
54 | break; | ||
55 | -#if TCG_TARGET_HAS_ext8u_i64 | ||
56 | - case INDEX_op_ext8u_i64: | ||
57 | - t0 = *tb_ptr++; | ||
58 | - t1 = tci_read_r(regs, &tb_ptr); | ||
59 | - tci_write_reg(regs, t0, (uint8_t)t1); | ||
60 | - break; | ||
61 | -#endif | ||
62 | -#if TCG_TARGET_HAS_ext8s_i64 | ||
63 | - case INDEX_op_ext8s_i64: | ||
64 | - t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r(regs, &tb_ptr); | ||
66 | - tci_write_reg(regs, t0, (int8_t)t1); | ||
67 | - break; | ||
68 | -#endif | ||
69 | -#if TCG_TARGET_HAS_ext16s_i64 | ||
70 | - case INDEX_op_ext16s_i64: | ||
71 | - t0 = *tb_ptr++; | ||
72 | - t1 = tci_read_r(regs, &tb_ptr); | ||
73 | - tci_write_reg(regs, t0, (int16_t)t1); | ||
74 | - break; | ||
75 | -#endif | ||
76 | -#if TCG_TARGET_HAS_ext16u_i64 | ||
77 | - case INDEX_op_ext16u_i64: | ||
78 | - t0 = *tb_ptr++; | ||
79 | - t1 = tci_read_r(regs, &tb_ptr); | ||
80 | - tci_write_reg(regs, t0, (uint16_t)t1); | ||
81 | - break; | ||
82 | -#endif | ||
83 | #if TCG_TARGET_HAS_ext32s_i64 | ||
84 | case INDEX_op_ext32s_i64: | ||
85 | #endif | ||
86 | -- | ||
87 | 2.25.1 | ||
88 | |||
Deleted patch | |||
---|---|---|---|
1 | This includes bswap16 and bswap32. | ||
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 22 ++++------------------ | ||
7 | 1 file changed, 4 insertions(+), 18 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
14 | tci_write_reg(regs, t0, (uint16_t)t1); | ||
15 | break; | ||
16 | #endif | ||
17 | -#if TCG_TARGET_HAS_bswap16_i32 | ||
18 | - case INDEX_op_bswap16_i32: | ||
19 | +#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 | ||
20 | + CASE_32_64(bswap16) | ||
21 | t0 = *tb_ptr++; | ||
22 | t1 = tci_read_r(regs, &tb_ptr); | ||
23 | tci_write_reg(regs, t0, bswap16(t1)); | ||
24 | break; | ||
25 | #endif | ||
26 | -#if TCG_TARGET_HAS_bswap32_i32 | ||
27 | - case INDEX_op_bswap32_i32: | ||
28 | +#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64 | ||
29 | + CASE_32_64(bswap32) | ||
30 | t0 = *tb_ptr++; | ||
31 | t1 = tci_read_r(regs, &tb_ptr); | ||
32 | tci_write_reg(regs, t0, bswap32(t1)); | ||
33 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
34 | t1 = tci_read_r(regs, &tb_ptr); | ||
35 | tci_write_reg(regs, t0, (uint32_t)t1); | ||
36 | break; | ||
37 | -#if TCG_TARGET_HAS_bswap16_i64 | ||
38 | - case INDEX_op_bswap16_i64: | ||
39 | - t0 = *tb_ptr++; | ||
40 | - t1 = tci_read_r(regs, &tb_ptr); | ||
41 | - tci_write_reg(regs, t0, bswap16(t1)); | ||
42 | - break; | ||
43 | -#endif | ||
44 | -#if TCG_TARGET_HAS_bswap32_i64 | ||
45 | - case INDEX_op_bswap32_i64: | ||
46 | - t0 = *tb_ptr++; | ||
47 | - t1 = tci_read_r(regs, &tb_ptr); | ||
48 | - tci_write_reg(regs, t0, bswap32(t1)); | ||
49 | - break; | ||
50 | -#endif | ||
51 | #if TCG_TARGET_HAS_bswap64_i64 | ||
52 | case INDEX_op_bswap64_i64: | ||
53 | t0 = *tb_ptr++; | ||
54 | -- | ||
55 | 2.25.1 | ||
56 | |||
Deleted patch | |||
---|---|---|---|
1 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
3 | --- | ||
4 | tcg/tci.c | 29 +++++------------------------ | ||
5 | 1 file changed, 5 insertions(+), 24 deletions(-) | ||
6 | 1 | ||
7 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
8 | index XXXXXXX..XXXXXXX 100644 | ||
9 | --- a/tcg/tci.c | ||
10 | +++ b/tcg/tci.c | ||
11 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
12 | tci_write_reg(regs, t0, tci_compare64(t1, t2, condition)); | ||
13 | break; | ||
14 | #endif | ||
15 | - case INDEX_op_mov_i32: | ||
16 | + CASE_32_64(mov) | ||
17 | t0 = *tb_ptr++; | ||
18 | t1 = tci_read_r(regs, &tb_ptr); | ||
19 | tci_write_reg(regs, t0, t1); | ||
20 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
21 | tci_write_reg(regs, t0, bswap32(t1)); | ||
22 | break; | ||
23 | #endif | ||
24 | -#if TCG_TARGET_HAS_not_i32 | ||
25 | - case INDEX_op_not_i32: | ||
26 | +#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64 | ||
27 | + CASE_32_64(not) | ||
28 | t0 = *tb_ptr++; | ||
29 | t1 = tci_read_r(regs, &tb_ptr); | ||
30 | tci_write_reg(regs, t0, ~t1); | ||
31 | break; | ||
32 | #endif | ||
33 | -#if TCG_TARGET_HAS_neg_i32 | ||
34 | - case INDEX_op_neg_i32: | ||
35 | +#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64 | ||
36 | + CASE_32_64(neg) | ||
37 | t0 = *tb_ptr++; | ||
38 | t1 = tci_read_r(regs, &tb_ptr); | ||
39 | tci_write_reg(regs, t0, -t1); | ||
40 | break; | ||
41 | #endif | ||
42 | #if TCG_TARGET_REG_BITS == 64 | ||
43 | - case INDEX_op_mov_i64: | ||
44 | - t0 = *tb_ptr++; | ||
45 | - t1 = tci_read_r(regs, &tb_ptr); | ||
46 | - tci_write_reg(regs, t0, t1); | ||
47 | - break; | ||
48 | case INDEX_op_tci_movi_i64: | ||
49 | t0 = *tb_ptr++; | ||
50 | t1 = tci_read_i64(&tb_ptr); | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | tci_write_reg(regs, t0, bswap64(t1)); | ||
53 | break; | ||
54 | #endif | ||
55 | -#if TCG_TARGET_HAS_not_i64 | ||
56 | - case INDEX_op_not_i64: | ||
57 | - t0 = *tb_ptr++; | ||
58 | - t1 = tci_read_r(regs, &tb_ptr); | ||
59 | - tci_write_reg(regs, t0, ~t1); | ||
60 | - break; | ||
61 | -#endif | ||
62 | -#if TCG_TARGET_HAS_neg_i64 | ||
63 | - case INDEX_op_neg_i64: | ||
64 | - t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r(regs, &tb_ptr); | ||
66 | - tci_write_reg(regs, t0, -t1); | ||
67 | - break; | ||
68 | -#endif | ||
69 | #endif /* TCG_TARGET_REG_BITS == 64 */ | ||
70 | |||
71 | /* QEMU specific operations. */ | ||
72 | -- | ||
73 | 2.25.1 | ||
74 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Alex Bennée <alex.bennee@linaro.org> | ||
2 | 1 | ||
3 | Having a function return both a valid TB and some system state | ||
4 | seems excessive. It will make the subsequent refactoring easier if we | ||
5 | look up the current state where we are. | ||
6 | |||
7 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> | ||
8 | Message-Id: <20210224165811.11567-2-alex.bennee@linaro.org> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | --- | ||
11 | include/exec/tb-lookup.h | 18 ++++++++---------- | ||
12 | accel/tcg/cpu-exec.c | 10 ++++++++-- | ||
13 | accel/tcg/tcg-runtime.c | 4 +++- | ||
14 | 3 files changed, 19 insertions(+), 13 deletions(-) | ||
15 | |||
16 | diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/include/exec/tb-lookup.h | ||
19 | +++ b/include/exec/tb-lookup.h | ||
20 | @@ -XXX,XX +XXX,XX @@ | ||
21 | #include "exec/tb-hash.h" | ||
22 | |||
23 | /* Might cause an exception, so have a longjmp destination ready */ | ||
24 | -static inline TranslationBlock * | ||
25 | -tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base, | ||
26 | - uint32_t *flags, uint32_t cf_mask) | ||
27 | +static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | ||
28 | + target_ulong cs_base, | ||
29 | + uint32_t flags, uint32_t cf_mask) | ||
30 | { | ||
31 | - CPUArchState *env = (CPUArchState *)cpu->env_ptr; | ||
32 | TranslationBlock *tb; | ||
33 | uint32_t hash; | ||
34 | |||
35 | - cpu_get_tb_cpu_state(env, pc, cs_base, flags); | ||
36 | - hash = tb_jmp_cache_hash_func(*pc); | ||
37 | + hash = tb_jmp_cache_hash_func(pc); | ||
38 | tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); | ||
39 | |||
40 | cf_mask &= ~CF_CLUSTER_MASK; | ||
41 | cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT; | ||
42 | |||
43 | if (likely(tb && | ||
44 | - tb->pc == *pc && | ||
45 | - tb->cs_base == *cs_base && | ||
46 | - tb->flags == *flags && | ||
47 | + tb->pc == pc && | ||
48 | + tb->cs_base == cs_base && | ||
49 | + tb->flags == flags && | ||
50 | tb->trace_vcpu_dstate == *cpu->trace_dstate && | ||
51 | (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) { | ||
52 | return tb; | ||
53 | } | ||
54 | - tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask); | ||
55 | + tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
56 | if (tb == NULL) { | ||
57 | return NULL; | ||
58 | } | ||
59 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/accel/tcg/cpu-exec.c | ||
62 | +++ b/accel/tcg/cpu-exec.c | ||
63 | @@ -XXX,XX +XXX,XX @@ static void cpu_exec_exit(CPUState *cpu) | ||
64 | |||
65 | void cpu_exec_step_atomic(CPUState *cpu) | ||
66 | { | ||
67 | + CPUArchState *env = (CPUArchState *)cpu->env_ptr; | ||
68 | TranslationBlock *tb; | ||
69 | target_ulong cs_base, pc; | ||
70 | uint32_t flags; | ||
71 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) | ||
72 | g_assert(!cpu->running); | ||
73 | cpu->running = true; | ||
74 | |||
75 | - tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask); | ||
76 | + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
77 | + tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
78 | + | ||
79 | if (tb == NULL) { | ||
80 | mmap_lock(); | ||
81 | tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); | ||
82 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_find(CPUState *cpu, | ||
83 | TranslationBlock *last_tb, | ||
84 | int tb_exit, uint32_t cf_mask) | ||
85 | { | ||
86 | + CPUArchState *env = (CPUArchState *)cpu->env_ptr; | ||
87 | TranslationBlock *tb; | ||
88 | target_ulong cs_base, pc; | ||
89 | uint32_t flags; | ||
90 | |||
91 | - tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask); | ||
92 | + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
93 | + | ||
94 | + tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
95 | if (tb == NULL) { | ||
96 | mmap_lock(); | ||
97 | tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask); | ||
98 | diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/accel/tcg/tcg-runtime.c | ||
101 | +++ b/accel/tcg/tcg-runtime.c | ||
102 | @@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) | ||
103 | target_ulong cs_base, pc; | ||
104 | uint32_t flags; | ||
105 | |||
106 | - tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags()); | ||
107 | + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
108 | + | ||
109 | + tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags()); | ||
110 | if (tb == NULL) { | ||
111 | return tcg_code_gen_epilogue; | ||
112 | } | ||
113 | -- | ||
114 | 2.25.1 | ||
115 | |||