The following changes since commit 813bac3d8d70d85cb7835f7945eb9eed84c2d8d0:

  Merge tag '2023q3-bsd-user-pull-request' of https://gitlab.com/bsdimp/qemu into staging (2023-08-29 08:58:00 -0400)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230829

for you to fetch changes up to dad2f2f5afbaf58d6056f31dfd4b9edd0854b8ab:

  tcg/sparc64: Disable TCG_TARGET_HAS_extr_i64_i32 (2023-08-29 09:57:39 -0700)

----------------------------------------------------------------
softmmu: Use async_run_on_cpu in tcg_commit
tcg: Remove vecop_list check from tcg_gen_not_vec
tcg/sparc64: Disable TCG_TARGET_HAS_extr_i64_i32

----------------------------------------------------------------
Richard Henderson (4):
      softmmu: Assert data in bounds in iotlb_to_section
      softmmu: Use async_run_on_cpu in tcg_commit
      tcg: Remove vecop_list check from tcg_gen_not_vec
      tcg/sparc64: Disable TCG_TARGET_HAS_extr_i64_i32

 include/exec/cpu-common.h    |  1 -
 tcg/sparc64/tcg-target.h     |  2 +-
 accel/tcg/cpu-exec-common.c  | 30 --------------------------
 softmmu/physmem.c            | 50 ++++++++++++++++++++++++++++++++------------
 tcg/tcg-op-vec.c             |  7 +++----
 tcg/sparc64/tcg-target.c.inc | 11 -----------
 6 files changed, 41 insertions(+), 60 deletions(-)
Acked-by: Alex Bennée <alex.bennee@linaro.org>
Suggested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 softmmu/physmem.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -XXX,XX +XXX,XX @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
     AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
-    MemoryRegionSection *sections = d->map.sections;
+    int section_index = index & ~TARGET_PAGE_MASK;
+    MemoryRegionSection *ret;

-    return &sections[index & ~TARGET_PAGE_MASK];
+    assert(section_index < d->map.sections_nb);
+    ret = d->map.sections + section_index;
+    assert(ret->mr);
+    assert(ret->mr->ops);
+
+    return ret;
 }

 static void io_mem_init(void)
--
2.34.1
After system startup, run the update to memory_dispatch
and the tlb_flush on the cpu.  This eliminates a race,
wherein a running cpu sees the memory_dispatch change
but has not yet seen the tlb_flush.

Since the update now happens on the cpu, we need not use
qatomic_rcu_read to protect the read of memory_dispatch.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1826
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1834
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1846
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-common.h   |  1 -
 accel/tcg/cpu-exec-common.c | 30 ----------------------------
 softmmu/physmem.c           | 40 +++++++++++++++++++++++++++----------
 3 files changed, 29 insertions(+), 42 deletions(-)

diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_physical_memory_write(hwaddr addr,
 {
     cpu_physical_memory_rw(addr, (void *)buf, len, true);
 }
-void cpu_reloading_memory_map(void);
 void *cpu_physical_memory_map(hwaddr addr,
                               hwaddr *plen,
                               bool is_write);
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -XXX,XX +XXX,XX @@ void cpu_loop_exit_noexc(CPUState *cpu)
     cpu_loop_exit(cpu);
 }

-#if defined(CONFIG_SOFTMMU)
-void cpu_reloading_memory_map(void)
-{
-    if (qemu_in_vcpu_thread() && current_cpu->running) {
-        /* The guest can in theory prolong the RCU critical section as long
-         * as it feels like. The major problem with this is that because it
-         * can do multiple reconfigurations of the memory map within the
-         * critical section, we could potentially accumulate an unbounded
-         * collection of memory data structures awaiting reclamation.
-         *
-         * Because the only thing we're currently protecting with RCU is the
-         * memory data structures, it's sufficient to break the critical section
-         * in this callback, which we know will get called every time the
-         * memory map is rearranged.
-         *
-         * (If we add anything else in the system that uses RCU to protect
-         * its data structures, we will need to implement some other mechanism
-         * to force TCG CPUs to exit the critical section, at which point this
-         * part of this callback might become unnecessary.)
-         *
-         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
-         * only protects cpu->as->dispatch. Since we know our caller is about
-         * to reload it, it's safe to split the critical section.
-         */
-        rcu_read_unlock();
-        rcu_read_lock();
-    }
-}
-#endif
-
 void cpu_loop_exit(CPUState *cpu)
 {
     /* Undo the setting in cpu_tb_exec. */
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -XXX,XX +XXX,XX @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
     IOMMUTLBEntry iotlb;
     int iommu_idx;
     hwaddr addr = orig_addr;
-    AddressSpaceDispatch *d =
-        qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

     for (;;) {
         section = address_space_translate_internal(d, addr, &addr, plen, false);
@@ -XXX,XX +XXX,XX @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
 {
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
-    AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
+    AddressSpaceDispatch *d = cpuas->memory_dispatch;
     int section_index = index & ~TARGET_PAGE_MASK;
     MemoryRegionSection *ret;

@@ -XXX,XX +XXX,XX @@ static void tcg_log_global_after_sync(MemoryListener *listener)
     }
 }

+static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
+{
+    CPUAddressSpace *cpuas = data.host_ptr;
+
+    cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
+    tlb_flush(cpu);
+}
+
 static void tcg_commit(MemoryListener *listener)
 {
     CPUAddressSpace *cpuas;
-    AddressSpaceDispatch *d;
+    CPUState *cpu;

     assert(tcg_enabled());
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
-    cpu_reloading_memory_map();
-    /* The CPU and TLB are protected by the iothread lock.
-     * We reload the dispatch pointer now because cpu_reloading_memory_map()
-     * may have split the RCU critical section.
+    cpu = cpuas->cpu;
+
+    /*
+     * Defer changes to as->memory_dispatch until the cpu is quiescent.
+     * Otherwise we race between (1) other cpu threads and (2) ongoing
+     * i/o for the current cpu thread, with data cached by mmu_lookup().
+     *
+     * In addition, queueing the work function will kick the cpu back to
+     * the main loop, which will end the RCU critical section and reclaim
+     * the memory data structures.
+     *
+     * That said, the listener is also called during realize, before
+     * all of the tcg machinery for run-on is initialized: thus halt_cond.
      */
-    d = address_space_to_dispatch(cpuas->as);
-    qatomic_rcu_set(&cpuas->memory_dispatch, d);
-    tlb_flush(cpuas->cpu);
+    if (cpu->halt_cond) {
+        async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+    } else {
+        tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+    }
 }

 static void memory_map_init(void)
--
2.34.1
The not pattern is always available via generic expansion.
See debug block in tcg_can_emit_vecop_list.

Fixes: 11978f6f58 ("tcg: Fix expansion of INDEX_op_not_vec")
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op-vec.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -XXX,XX +XXX,XX @@ static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)

 void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
 {
-    const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
-
-    if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
+    if (TCG_TARGET_HAS_not_vec) {
+        vec_gen_op2(INDEX_op_not_vec, 0, r, a);
+    } else {
         tcg_gen_xor_vec(0, r, a, tcg_constant_vec_matching(r, 0, -1));
     }
-    tcg_swap_vecop_list(hold_list);
 }

 void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
--
2.34.1
Since a59a29312660 ("tcg/sparc64: Remove sparc32plus constraints")
we no longer distinguish registers with 32 vs 64 bits.
Therefore we can remove support for the backend-specific
type change opcodes.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target.h     |  2 +-
 tcg/sparc64/tcg-target.c.inc | 11 -----------
 2 files changed, 1 insertion(+), 12 deletions(-)

diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_mulsh_i32        0
 #define TCG_TARGET_HAS_qemu_st8_i32     0

-#define TCG_TARGET_HAS_extr_i64_i32     1
+#define TCG_TARGET_HAS_extr_i64_i32     0
 #define TCG_TARGET_HAS_div_i64          1
 #define TCG_TARGET_HAS_rem_i64          0
 #define TCG_TARGET_HAS_rot_i64          0
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_ext32u(s, rd, rs);
 }

-static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
-{
-    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
-}
-
 static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
 {
     return false;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_divu_i64:
         c = ARITH_UDIVX;
         goto gen_arith;
-    case INDEX_op_extrh_i64_i32:
-        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
-        break;

     case INDEX_op_brcond_i64:
         tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext32u_i64:
     case INDEX_op_ext_i32_i64:
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_extrl_i64_i32:
     default:
         g_assert_not_reached();
     }
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_ext32u_i64:
     case INDEX_op_ext_i32_i64:
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_extrl_i64_i32:
-    case INDEX_op_extrh_i64_i32:
     case INDEX_op_qemu_ld_a32_i32:
     case INDEX_op_qemu_ld_a64_i32:
     case INDEX_op_qemu_ld_a32_i64:
--
2.34.1