The following changes since commit 75d30fde55485b965a1168a21d016dd07b50ed32:

  Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging (2022-10-30 15:07:25 -0400)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20221031

for you to fetch changes up to cb375590983fc3d23600d02ba05a05d34fe44150:

  target/i386: Expand eflags updates inline (2022-10-31 11:39:10 +1100)

----------------------------------------------------------------
Remove sparc32plus support from tcg/sparc.
target/i386: Use cpu_unwind_state_data for tpr access.
target/i386: Expand eflags updates inline

----------------------------------------------------------------
Icenowy Zheng (1):
      tcg/tci: fix logic error when registering helpers via FFI

Richard Henderson (10):
      tcg/sparc: Remove support for sparc32plus
      tcg/sparc64: Rename from tcg/sparc
      tcg/sparc64: Remove sparc32plus constraints
      accel/tcg: Introduce cpu_unwind_state_data
      target/i386: Use cpu_unwind_state_data for tpr access
      target/openrisc: Always exit after mtspr npc
      target/openrisc: Use cpu_unwind_state_data for mfspr
      accel/tcg: Remove will_exit argument from cpu_restore_state
      accel/tcg: Remove reset_icount argument from cpu_restore_state_from_tb
      target/i386: Expand eflags updates inline

 meson.build                                 |   4 +-
 accel/tcg/internal.h                        |   4 +-
 include/exec/exec-all.h                     |  24 ++-
 target/i386/helper.h                        |   5 -
 tcg/{sparc => sparc64}/tcg-target-con-set.h |  16 +-
 tcg/{sparc => sparc64}/tcg-target-con-str.h |   3 -
 tcg/{sparc => sparc64}/tcg-target.h         |  11 --
 accel/tcg/cpu-exec-common.c                 |   2 +-
 accel/tcg/tb-maint.c                        |   4 +-
 accel/tcg/translate-all.c                   |  91 +++++----
 target/alpha/helper.c                       |   2 +-
 target/alpha/mem_helper.c                   |   2 +-
 target/arm/op_helper.c                      |   2 +-
 target/arm/tlb_helper.c                     |   8 +-
 target/cris/helper.c                        |   2 +-
 target/i386/helper.c                        |  21 ++-
 target/i386/tcg/cc_helper.c                 |  41 -----
 target/i386/tcg/sysemu/svm_helper.c         |   2 +-
 target/i386/tcg/translate.c                 |  30 ++-
 target/m68k/op_helper.c                     |   4 +-
 target/microblaze/helper.c                  |   2 +-
 target/nios2/op_helper.c                    |   2 +-
 target/openrisc/sys_helper.c                |  17 +-
 target/ppc/excp_helper.c                    |   2 +-
 target/s390x/tcg/excp_helper.c              |   2 +-
 target/tricore/op_helper.c                  |   2 +-
 target/xtensa/helper.c                      |   6 +-
 tcg/tcg.c                                   |  81 +-------
 tcg/{sparc => sparc64}/tcg-target.c.inc     | 275 ++++++++--------------------
 MAINTAINERS                                 |   2 +-
 30 files changed, 232 insertions(+), 437 deletions(-)
 rename tcg/{sparc => sparc64}/tcg-target-con-set.h (69%)
 rename tcg/{sparc => sparc64}/tcg-target-con-str.h (77%)
 rename tcg/{sparc => sparc64}/tcg-target.h (95%)
 rename tcg/{sparc => sparc64}/tcg-target.c.inc (91%)
Since 9b9c37c36439, we have only supported sparc64 cpus.
Debian and Gentoo now only support 64-bit sparc64 userland,
so it is time to drop the 32-bit sparc64 userland: sparc32plus.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc/tcg-target.h     |  11 ---
 tcg/tcg.c                  |  75 +----------------
 tcg/sparc/tcg-target.c.inc | 166 +++++++------------------------------
 3 files changed, 33 insertions(+), 219 deletions(-)

diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc/tcg-target.h
+++ b/tcg/sparc/tcg-target.h
@@ -XXX,XX +XXX,XX @@
 #ifndef SPARC_TCG_TARGET_H
 #define SPARC_TCG_TARGET_H
 
-#define TCG_TARGET_REG_BITS 64
-
 #define TCG_TARGET_INSN_UNIT_SIZE 4
 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 32
 #define TCG_TARGET_NB_REGS 32
@@ -XXX,XX +XXX,XX @@ typedef enum {
 /* used for function call generation */
 #define TCG_REG_CALL_STACK TCG_REG_O6
 
-#ifdef __arch64__
 #define TCG_TARGET_STACK_BIAS 2047
 #define TCG_TARGET_STACK_ALIGN 16
 #define TCG_TARGET_CALL_STACK_OFFSET (128 + 6*8 + TCG_TARGET_STACK_BIAS)
-#else
-#define TCG_TARGET_STACK_BIAS 0
-#define TCG_TARGET_STACK_ALIGN 8
-#define TCG_TARGET_CALL_STACK_OFFSET (64 + 4 + 6*4)
-#endif
-
-#ifdef __arch64__
 #define TCG_TARGET_EXTEND_ARGS 1
-#endif
 
 #if defined(__VIS__) && __VIS__ >= 0x300
 #define use_vis3_instructions 1
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
 }
 #endif
 
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* We have 64-bit values in one register, but need to pass as two
-       separate parameters. Split them. */
-    int orig_typemask = typemask;
-    int orig_nargs = nargs;
-    TCGv_i64 retl, reth;
-    TCGTemp *split_args[MAX_OPC_PARAM];
-
-    retl = NULL;
-    reth = NULL;
-    typemask = 0;
-    for (i = real_args = 0; i < nargs; ++i) {
-        int argtype = extract32(orig_typemask, (i + 1) * 3, 3);
-        bool is_64bit = (argtype & ~1) == dh_typecode_i64;
-
-        if (is_64bit) {
-            TCGv_i64 orig = temp_tcgv_i64(args[i]);
-            TCGv_i32 h = tcg_temp_new_i32();
-            TCGv_i32 l = tcg_temp_new_i32();
-            tcg_gen_extr_i64_i32(l, h, orig);
-            split_args[real_args++] = tcgv_i32_temp(h);
-            typemask |= dh_typecode_i32 << (real_args * 3);
-            split_args[real_args++] = tcgv_i32_temp(l);
-            typemask |= dh_typecode_i32 << (real_args * 3);
-        } else {
-            split_args[real_args++] = args[i];
-            typemask |= argtype << (real_args * 3);
-        }
-    }
-    nargs = real_args;
-    args = split_args;
-#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+#if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
     for (i = 0; i < nargs; ++i) {
         int argtype = extract32(typemask, (i + 1) * 3, 3);
         bool is_32bit = (argtype & ~1) == dh_typecode_i32;
@@ -XXX,XX +XXX,XX @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
 
     pi = 0;
     if (ret != NULL) {
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-        if ((typemask & 6) == dh_typecode_i64) {
-            /* The 32-bit ABI is going to return the 64-bit value in
-               the %o0/%o1 register pair. Prepare for this by using
-               two return temporaries, and reassemble below. */
-            retl = tcg_temp_new_i64();
-            reth = tcg_temp_new_i64();
-            op->args[pi++] = tcgv_i64_arg(reth);
-            op->args[pi++] = tcgv_i64_arg(retl);
-            nb_rets = 2;
-        } else {
-            op->args[pi++] = temp_arg(ret);
-            nb_rets = 1;
-        }
-#else
         if (TCG_TARGET_REG_BITS < 64 && (typemask & 6) == dh_typecode_i64) {
 #if HOST_BIG_ENDIAN
             op->args[pi++] = temp_arg(ret + 1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
             op->args[pi++] = temp_arg(ret);
             nb_rets = 1;
         }
-#endif
     } else {
         nb_rets = 0;
     }
@@ -XXX,XX +XXX,XX @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
     tcg_debug_assert(TCGOP_CALLI(op) == real_args);
     tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
 
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* Free all of the parts we allocated above. */
-    for (i = real_args = 0; i < orig_nargs; ++i) {
-        int argtype = extract32(orig_typemask, (i + 1) * 3, 3);
-        bool is_64bit = (argtype & ~1) == dh_typecode_i64;
-
-        if (is_64bit) {
-            tcg_temp_free_internal(args[real_args++]);
-            tcg_temp_free_internal(args[real_args++]);
-        } else {
-            real_args++;
-        }
-    }
-    if ((orig_typemask & 6) == dh_typecode_i64) {
-        /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
-           Note that describing these as TCGv_i64 eliminates an unnecessary
-           zero-extension that tcg_gen_concat_i32_i64 would create. */
-        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
-        tcg_temp_free_i64(retl);
-        tcg_temp_free_i64(reth);
-    }
-#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+#if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
     for (i = 0; i < nargs; ++i) {
         int argtype = extract32(typemask, (i + 1) * 3, 3);
         bool is_32bit = (argtype & ~1) == dh_typecode_i32;
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc/tcg-target.c.inc
+++ b/tcg/sparc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
  * THE SOFTWARE.
  */
 
+/* We only support generating code for 64-bit mode. */
+#ifndef __arch64__
+#error "unsupported code generation mode"
+#endif
+
 #include "../tcg-pool.c.inc"
 
 #ifdef CONFIG_DEBUG_TCG
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 };
 #endif
 
-#ifdef __arch64__
-# define SPARC64 1
-#else
-# define SPARC64 0
-#endif
-
 #define TCG_CT_CONST_S11 0x100
 #define TCG_CT_CONST_S13 0x200
 #define TCG_CT_CONST_ZERO 0x400
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
  * high bits of the %i and %l registers garbage at all times.
  */
 #define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
-#if SPARC64
 # define ALL_GENERAL_REGS64 ALL_GENERAL_REGS
-#else
-# define ALL_GENERAL_REGS64 MAKE_64BIT_MASK(0, 16)
-#endif
 #define ALL_QLDST_REGS      (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
 #define ALL_QLDST_REGS64    (ALL_GENERAL_REGS64 & ~SOFTMMU_RESERVE_REGS)
 
@@ -XXX,XX +XXX,XX @@ static bool check_fit_i32(int32_t val, unsigned int bits)
 }
 
 #define check_fit_tl    check_fit_i64
-#if SPARC64
-# define check_fit_ptr  check_fit_i64
-#else
-# define check_fit_ptr  check_fit_i32
-#endif
+#define check_fit_ptr   check_fit_i64
 
 static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                         intptr_t value, intptr_t addend)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_sety(TCGContext *s, TCGReg rs)
     tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
 }
 
-static void tcg_out_rdy(TCGContext *s, TCGReg rd)
-{
-    tcg_out32(s, RDY | INSN_RD(rd));
-}
-
 static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int uns)
 {
@@ -XXX,XX +XXX,XX @@ static void emit_extend(TCGContext *s, TCGReg r, int op)
         tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
         break;
     case MO_32:
-        if (SPARC64) {
-            tcg_out_arith(s, r, r, 0, SHIFT_SRL);
-        }
+        tcg_out_arith(s, r, r, 0, SHIFT_SRL);
         break;
     case MO_64:
         break;
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
     };
 
     int i;
-    TCGReg ra;
 
     for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
         if (qemu_ld_helpers[i] == NULL) {
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
         }
         qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
 
-        if (SPARC64 || TARGET_LONG_BITS == 32) {
-            ra = TCG_REG_O3;
-        } else {
-            /* Install the high part of the address. */
-            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
-            ra = TCG_REG_O4;
-        }
-
         /* Set the retaddr operand. */
-        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
+        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
         /* Tail call. */
         tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
         /* delay slot -- set the env argument */
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
         }
         qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
 
-        if (SPARC64) {
-            emit_extend(s, TCG_REG_O2, i);
-            ra = TCG_REG_O4;
-        } else {
-            ra = TCG_REG_O1;
-            if (TARGET_LONG_BITS == 64) {
-                /* Install the high part of the address. */
-                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
-                ra += 2;
-            } else {
-                ra += 1;
-            }
-            if ((i & MO_SIZE) == MO_64) {
-                /* Install the high part of the data. */
-                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
-                ra += 2;
-            } else {
-                emit_extend(s, ra, i);
-                ra += 1;
-            }
-            /* Skip the oi argument. */
-            ra += 1;
-        }
-
+        emit_extend(s, TCG_REG_O2, i);
+
         /* Set the retaddr operand. */
-        if (ra >= TCG_REG_O6) {
-            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
-                       TCG_TARGET_CALL_STACK_OFFSET);
-        } else {
-            tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
-        }
+        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);
 
         /* Tail call. */
         tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
         qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
     }
 
-    if (!SPARC64 && TARGET_LONG_BITS == 64) {
-        /* Install the high part of the address. */
-        tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
-    }
-
     /* Tail call. */
     tcg_out_jmpl_const(s, helper, true, true);
     /* delay slot -- set the env argument */
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
     tcg_out_cmp(s, r0, r2, 0);
 
     /* If the guest address must be zero-extended, do so now. */
-    if (SPARC64 && TARGET_LONG_BITS == 32) {
+    if (TARGET_LONG_BITS == 32) {
         tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
         return r0;
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
 
 #ifdef CONFIG_SOFTMMU
     unsigned memi = get_mmuidx(oi);
-    TCGReg addrz, param;
+    TCGReg addrz;
    const tcg_insn_unit *func;
 
     addrz = tcg_out_tlb_load(s, addr, memi, memop,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
 
     /* TLB Miss. */
 
-    param = TCG_REG_O1;
-    if (!SPARC64 && TARGET_LONG_BITS == 64) {
-        /* Skip the high-part; we'll perform the extract in the trampoline. */
-        param++;
-    }
-    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
+    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
 
     /* We use the helpers to extend SB and SW data, leaving the case
        of SL needing explicit extending below. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
     tcg_debug_assert(func != NULL);
     tcg_out_call_nodelay(s, func, false);
     /* delay slot */
-    tcg_out_movi(s, TCG_TYPE_I32, param, oi);
+    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);
 
-    /* Recall that all of the helpers return 64-bit results.
-       Which complicates things for sparcv8plus. */
-    if (SPARC64) {
-        /* We let the helper sign-extend SB and SW, but leave SL for here. */
-        if (is_64 && (memop & MO_SSIZE) == MO_SL) {
-            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
-        } else {
-            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
-        }
+    /* We let the helper sign-extend SB and SW, but leave SL for here. */
+    if (is_64 && (memop & MO_SSIZE) == MO_SL) {
+        tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
     } else {
-        if ((memop & MO_SIZE) == MO_64) {
-            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
-            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
-            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
-        } else if (is_64) {
-            /* Re-extend from 32-bit rather than reassembling when we
-               know the high register must be an extension. */
-            tcg_out_arithi(s, data, TCG_REG_O1, 0,
-                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
-        } else {
-            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
-        }
+        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
     }
 
     *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
     unsigned s_bits = memop & MO_SIZE;
     unsigned t_bits;
 
-    if (SPARC64 && TARGET_LONG_BITS == 32) {
+    if (TARGET_LONG_BITS == 32) {
         tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
         addr = TCG_REG_T1;
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
          * operation in the delay slot, and failure need only invoke the
          * handler for SIGBUS.
          */
-        TCGReg arg_low = TCG_REG_O1 + (!SPARC64 && TARGET_LONG_BITS == 64);
         tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
         /* delay slot -- move to low part of argument reg */
-        tcg_out_mov_delay(s, arg_low, addr);
+        tcg_out_mov_delay(s, TCG_REG_O1, addr);
     } else {
         /* Underalignment: load by pieces of minimum alignment. */
         int ld_opc, a_size, s_size, i;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
 
 #ifdef CONFIG_SOFTMMU
     unsigned memi = get_mmuidx(oi);
-    TCGReg addrz, param;
+    TCGReg addrz;
     const tcg_insn_unit *func;
 
     addrz = tcg_out_tlb_load(s, addr, memi, memop,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
 
     /* TLB Miss. */
 
-    param = TCG_REG_O1;
-    if (!SPARC64 && TARGET_LONG_BITS == 64) {
-        /* Skip the high-part; we'll perform the extract in the trampoline. */
-        param++;
-    }
-    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
-    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
-        /* Skip the high-part; we'll perform the extract in the trampoline. */
-        param++;
-    }
-    tcg_out_mov(s, TCG_TYPE_REG, param++, data);
+    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
+    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);
 
     func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
     tcg_debug_assert(func != NULL);
     tcg_out_call_nodelay(s, func, false);
     /* delay slot */
-    tcg_out_movi(s, TCG_TYPE_I32, param, oi);
+    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);
 
     *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
 #else
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
     unsigned s_bits = memop & MO_SIZE;
     unsigned t_bits;
 
-    if (SPARC64 && TARGET_LONG_BITS == 32) {
+    if (TARGET_LONG_BITS == 32) {
         tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
         addr = TCG_REG_T1;
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
          * operation in the delay slot, and failure need only invoke the
          * handler for SIGBUS.
          */
-        TCGReg arg_low = TCG_REG_O1 + (!SPARC64 && TARGET_LONG_BITS == 64);
         tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
         /* delay slot -- move to low part of argument reg */
-        tcg_out_mov_delay(s, arg_low, addr);
+        tcg_out_mov_delay(s, TCG_REG_O1, addr);
     } else {
         /* Underalignment: store by pieces of minimum alignment. */
         int st_opc, a_size, s_size, i;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_muls2_i32:
         c = ARITH_SMUL;
     do_mul2:
-        /* The 32-bit multiply insns produce a full 64-bit result. If the
-           destination register can hold it, we can avoid the slower RDY. */
+        /* The 32-bit multiply insns produce a full 64-bit result. */
         tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
-        if (SPARC64 || a0 <= TCG_REG_O7) {
-            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
-        } else {
-            tcg_out_rdy(s, a1);
-        }
+        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
         break;
 
     case INDEX_op_qemu_ld_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
 }
 
-#if SPARC64
-# define ELF_HOST_MACHINE EM_SPARCV9
-#else
-# define ELF_HOST_MACHINE EM_SPARC32PLUS
-# define ELF_HOST_FLAGS EF_SPARC_32PLUS
-#endif
+#define ELF_HOST_MACHINE EM_SPARCV9
 
 typedef struct {
     DebugFrameHeader h;
-    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
+    uint8_t fde_def_cfa[4];
     uint8_t fde_win_save;
     uint8_t fde_ret_save[3];
 } DebugFrame;
@@ -XXX,XX +XXX,XX @@ static const DebugFrame debug_frame = {
     .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
 
     .fde_def_cfa = {
-#if SPARC64
         12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
         (2047 & 0x7f) | 0x80, (2047 >> 7)
-#else
-        13, 30                          /* DW_CFA_def_cfa_register i6 */
-#endif
     },
     .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
--
2.34.1
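For readers unfamiliar with the convention being removed here: a 32-bit host
ABI passes a 64-bit helper argument as two 32-bit register halves, and returns
a 64-bit result in the %o0/%o1 pair, which is exactly the split/reassemble
work the deleted tcg_gen_callN() code performed. A stand-alone sketch of that
step follows; it is illustrative only, the function names are made up, and
nothing in it is QEMU code:

    #include <inttypes.h>
    #include <stdio.h>

    /* Split a 64-bit value into the two 32-bit halves that a 32-bit ABI
     * would pass in separate registers, then reassemble the pair -- the
     * same idea as the tcg_gen_extr_i64_i32()/tcg_gen_concat32_i64()
     * calls deleted by the patch above. */
    static void split_u64(uint64_t v, uint32_t *hi, uint32_t *lo)
    {
        *hi = (uint32_t)(v >> 32);      /* high half, e.g. %o0 */
        *lo = (uint32_t)v;              /* low half,  e.g. %o1 */
    }

    static uint64_t concat_u64(uint32_t hi, uint32_t lo)
    {
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        uint32_t hi, lo;

        split_u64(UINT64_C(0x1122334455667788), &hi, &lo);
        printf("hi=%08" PRIx32 " lo=%08" PRIx32 " round-trip=%016" PRIx64 "\n",
               hi, lo, concat_u64(hi, lo));
        return 0;
    }

With only 64-bit hosts supported, every helper argument fits in one register,
which is why the patch can delete the whole splitting path rather than guard it.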
Emphasize that we only support full 64-bit code generation.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 meson.build                                 | 4 +---
 tcg/{sparc => sparc64}/tcg-target-con-set.h | 0
 tcg/{sparc => sparc64}/tcg-target-con-str.h | 0
 tcg/{sparc => sparc64}/tcg-target.h         | 0
 tcg/{sparc => sparc64}/tcg-target.c.inc     | 0
 MAINTAINERS                                 | 2 +-
 6 files changed, 2 insertions(+), 4 deletions(-)
 rename tcg/{sparc => sparc64}/tcg-target-con-set.h (100%)
 rename tcg/{sparc => sparc64}/tcg-target-con-str.h (100%)
 rename tcg/{sparc => sparc64}/tcg-target.h (100%)
 rename tcg/{sparc => sparc64}/tcg-target.c.inc (100%)

diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ qapi_trace_events = []
 bsd_oses = ['gnu/kfreebsd', 'freebsd', 'netbsd', 'openbsd', 'dragonfly', 'darwin']
 supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux']
 supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv', 'x86', 'x86_64',
-                  'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc', 'sparc64']
+                  'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc64']
 
 cpu = host_machine.cpu_family()
 
@@ -XXX,XX +XXX,XX @@ if get_option('tcg').allowed()
   endif
   if get_option('tcg_interpreter')
     tcg_arch = 'tci'
-  elif host_arch == 'sparc64'
-    tcg_arch = 'sparc'
   elif host_arch == 'x86_64'
     tcg_arch = 'i386'
   elif host_arch == 'ppc64'
diff --git a/tcg/sparc/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
similarity index 100%
rename from tcg/sparc/tcg-target-con-set.h
rename to tcg/sparc64/tcg-target-con-set.h
diff --git a/tcg/sparc/tcg-target-con-str.h b/tcg/sparc64/tcg-target-con-str.h
similarity index 100%
rename from tcg/sparc/tcg-target-con-str.h
rename to tcg/sparc64/tcg-target-con-str.h
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc64/tcg-target.h
similarity index 100%
rename from tcg/sparc/tcg-target.h
rename to tcg/sparc64/tcg-target.h
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
similarity index 100%
rename from tcg/sparc/tcg-target.c.inc
rename to tcg/sparc64/tcg-target.c.inc
diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ L: qemu-s390x@nongnu.org
 
 SPARC TCG target
 S: Odd Fixes
-F: tcg/sparc/
+F: tcg/sparc64/
 F: disas/sparc.c
 
 TCI TCG target
--
2.34.1
With sparc64 we need not distinguish between registers that
can hold 32-bit values and those that can hold 64-bit values.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target-con-set.h |  16 +----
 tcg/sparc64/tcg-target-con-str.h |   3 -
 tcg/sparc64/tcg-target.c.inc     | 109 ++++++++++++-------------
 3 files changed, 44 insertions(+), 84 deletions(-)

diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-set.h
+++ b/tcg/sparc64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
  */
 C_O0_I1(r)
 C_O0_I2(rZ, r)
-C_O0_I2(RZ, r)
 C_O0_I2(rZ, rJ)
-C_O0_I2(RZ, RJ)
-C_O0_I2(sZ, A)
-C_O0_I2(SZ, A)
-C_O1_I1(r, A)
-C_O1_I1(R, A)
+C_O0_I2(sZ, s)
+C_O1_I1(r, s)
 C_O1_I1(r, r)
-C_O1_I1(r, R)
-C_O1_I1(R, r)
-C_O1_I1(R, R)
-C_O1_I2(R, R, R)
+C_O1_I2(r, r, r)
 C_O1_I2(r, rZ, rJ)
-C_O1_I2(R, RZ, RJ)
 C_O1_I4(r, rZ, rJ, rI, 0)
-C_O1_I4(R, RZ, RJ, RI, 0)
 C_O2_I2(r, r, rZ, rJ)
-C_O2_I4(R, R, RZ, RZ, RJ, RI)
 C_O2_I4(r, r, rZ, rZ, rJ, rJ)
diff --git a/tcg/sparc64/tcg-target-con-str.h b/tcg/sparc64/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-str.h
+++ b/tcg/sparc64/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@
 * REGS(letter, register_mask)
 */
 REGS('r', ALL_GENERAL_REGS)
-REGS('R', ALL_GENERAL_REGS64)
 REGS('s', ALL_QLDST_REGS)
-REGS('S', ALL_QLDST_REGS64)
-REGS('A', TARGET_LONG_BITS == 64 ? ALL_QLDST_REGS64 : ALL_QLDST_REGS)
 
 /*
  * Define constraint letters for constants:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 #else
 #define SOFTMMU_RESERVE_REGS 0
 #endif
-
-/*
- * Note that sparcv8plus can only hold 64 bit quantities in %g and %o
- * registers.  These are saved manually by the kernel in full 64-bit
- * slots.  The %i and %l registers are saved by the register window
- * mechanism, which only allocates space for 32 bits.  Given that this
- * window spill/fill can happen on any signal, we must consider the
- * high bits of the %i and %l registers garbage at all times.
- */
 #define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
-# define ALL_GENERAL_REGS64 ALL_GENERAL_REGS
 #define ALL_QLDST_REGS      (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
-#define ALL_QLDST_REGS64    (ALL_GENERAL_REGS64 & ~SOFTMMU_RESERVE_REGS)
 
 /* Define some temporary registers.  T2 is used for constant generation. */
 #define TCG_REG_T1 TCG_REG_G1
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
         return C_O0_I1(r);
 
     case INDEX_op_ld8u_i32:
+    case INDEX_op_ld8u_i64:
     case INDEX_op_ld8s_i32:
+    case INDEX_op_ld8s_i64:
     case INDEX_op_ld16u_i32:
+    case INDEX_op_ld16u_i64:
     case INDEX_op_ld16s_i32:
+    case INDEX_op_ld16s_i64:
     case INDEX_op_ld_i32:
+    case INDEX_op_ld32u_i64:
+    case INDEX_op_ld32s_i64:
+    case INDEX_op_ld_i64:
     case INDEX_op_neg_i32:
+    case INDEX_op_neg_i64:
     case INDEX_op_not_i32:
+    case INDEX_op_not_i64:
+    case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
+    case INDEX_op_extu_i32_i64:
+    case INDEX_op_extrl_i64_i32:
+    case INDEX_op_extrh_i64_i32:
         return C_O1_I1(r, r);
 
     case INDEX_op_st8_i32:
+    case INDEX_op_st8_i64:
     case INDEX_op_st16_i32:
+    case INDEX_op_st16_i64:
     case INDEX_op_st_i32:
+    case INDEX_op_st32_i64:
+    case INDEX_op_st_i64:
         return C_O0_I2(rZ, r);
 
     case INDEX_op_add_i32:
+    case INDEX_op_add_i64:
     case INDEX_op_mul_i32:
+    case INDEX_op_mul_i64:
     case INDEX_op_div_i32:
+    case INDEX_op_div_i64:
     case INDEX_op_divu_i32:
+    case INDEX_op_divu_i64:
     case INDEX_op_sub_i32:
+    case INDEX_op_sub_i64:
     case INDEX_op_and_i32:
+    case INDEX_op_and_i64:
     case INDEX_op_andc_i32:
+    case INDEX_op_andc_i64:
     case INDEX_op_or_i32:
+    case INDEX_op_or_i64:
     case INDEX_op_orc_i32:
+    case INDEX_op_orc_i64:
     case INDEX_op_xor_i32:
+    case INDEX_op_xor_i64:
     case INDEX_op_shl_i32:
+    case INDEX_op_shl_i64:
     case INDEX_op_shr_i32:
+    case INDEX_op_shr_i64:
     case INDEX_op_sar_i32:
+    case INDEX_op_sar_i64:
     case INDEX_op_setcond_i32:
+    case INDEX_op_setcond_i64:
         return C_O1_I2(r, rZ, rJ);
 
     case INDEX_op_brcond_i32:
+    case INDEX_op_brcond_i64:
         return C_O0_I2(rZ, rJ);
     case INDEX_op_movcond_i32:
+    case INDEX_op_movcond_i64:
         return C_O1_I4(r, rZ, rJ, rI, 0);
     case INDEX_op_add2_i32:
+    case INDEX_op_add2_i64:
     case INDEX_op_sub2_i32:
+    case INDEX_op_sub2_i64:
         return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
     case INDEX_op_mulu2_i32:
     case INDEX_op_muls2_i32:
         return C_O2_I2(r, r, rZ, rJ);
-
-    case INDEX_op_ld8u_i64:
-    case INDEX_op_ld8s_i64:
-    case INDEX_op_ld16u_i64:
-    case INDEX_op_ld16s_i64:
-    case INDEX_op_ld32u_i64:
-    case INDEX_op_ld32s_i64:
-    case INDEX_op_ld_i64:
-    case INDEX_op_ext_i32_i64:
-    case INDEX_op_extu_i32_i64:
-        return C_O1_I1(R, r);
-
-    case INDEX_op_st8_i64:
-    case INDEX_op_st16_i64:
-    case INDEX_op_st32_i64:
-    case INDEX_op_st_i64:
-        return C_O0_I2(RZ, r);
-
-    case INDEX_op_add_i64:
-    case INDEX_op_mul_i64:
-    case INDEX_op_div_i64:
-    case INDEX_op_divu_i64:
-    case INDEX_op_sub_i64:
-    case INDEX_op_and_i64:
-    case INDEX_op_andc_i64:
-    case INDEX_op_or_i64:
-    case INDEX_op_orc_i64:
-    case INDEX_op_xor_i64:
-    case INDEX_op_shl_i64:
-    case INDEX_op_shr_i64:
-    case INDEX_op_sar_i64:
-    case INDEX_op_setcond_i64:
-        return C_O1_I2(R, RZ, RJ);
-
-    case INDEX_op_neg_i64:
-    case INDEX_op_not_i64:
-    case INDEX_op_ext32s_i64:
-    case INDEX_op_ext32u_i64:
-        return C_O1_I1(R, R);
-
-    case INDEX_op_extrl_i64_i32:
-    case INDEX_op_extrh_i64_i32:
-        return C_O1_I1(r, R);
-
-    case INDEX_op_brcond_i64:
-        return C_O0_I2(RZ, RJ);
-    case INDEX_op_movcond_i64:
-        return C_O1_I4(R, RZ, RJ, RI, 0);
-    case INDEX_op_add2_i64:
-    case INDEX_op_sub2_i64:
-        return C_O2_I4(R, R, RZ, RZ, RJ, RI);
     case INDEX_op_muluh_i64:
-        return C_O1_I2(R, R, R);
+        return C_O1_I2(r, r, r);
 
     case INDEX_op_qemu_ld_i32:
-        return C_O1_I1(r, A);
     case INDEX_op_qemu_ld_i64:
-        return C_O1_I1(R, A);
+        return C_O1_I1(r, s);
     case INDEX_op_qemu_st_i32:
-        return C_O0_I2(sZ, A);
     case INDEX_op_qemu_st_i64:
-        return C_O0_I2(SZ, A);
+        return C_O0_I2(sZ, s);
 
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
 #endif
 
     tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
-    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS64;
+    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
 
     tcg_target_call_clobber_regs = 0;
     tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
--
2.34.1
From: Icenowy Zheng <uwu@icenowy.me>

When registering helpers via FFI for TCI, the inner loop that iterates
parameters of the helper reuses (and thus pollutes) the same variable
used by the outer loop that iterates all helpers, thus leaving some
helpers unregistered.

Fix this logic error by using a dedicated temporary variable for the
inner loop.

Fixes: 22f15579fa ("tcg: Build ffi data structures for helpers")
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Icenowy Zheng <uwu@icenowy.me>
Message-Id: <20221028072145.1593205-1-uwu@icenowy.me>
[rth: Move declaration of j to the for loop itself]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(unsigned max_cpus)
 
         if (nargs != 0) {
             ca->cif.arg_types = ca->args;
-            for (i = 0; i < nargs; ++i) {
-                int typecode = extract32(typemask, (i + 1) * 3, 3);
-                ca->args[i] = typecode_to_ffi[typecode];
+            for (int j = 0; j < nargs; ++j) {
+                int typecode = extract32(typemask, (j + 1) * 3, 3);
+                ca->args[j] = typecode_to_ffi[typecode];
             }
         }
 
--
2.34.1
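The failure mode is easy to reproduce in isolation. A stand-alone sketch
(illustrative only, not the actual tcg.c code) contrasting the buggy loop
shape with the fixed shape used by the patch:

    #include <stdio.h>

    /* Buggy shape: the inner loop reuses the outer index 'i'.  After the
     * first helper's parameters are walked, 'i' has already run up to
     * nparams, so with these counts the outer loop exits after a single
     * iteration and the remaining helpers are never registered. */
    static int register_buggy(int nhelpers, int nparams)
    {
        int registered = 0;
        for (int i = 0; i < nhelpers; ++i) {
            for (i = 0; i < nparams; ++i) {
                /* ... record parameter type i of the current helper ... */
            }
            ++registered;
        }
        return registered;
    }

    /* Fixed shape, as in the patch: a dedicated inner variable 'j'. */
    static int register_fixed(int nhelpers, int nparams)
    {
        int registered = 0;
        for (int i = 0; i < nhelpers; ++i) {
            for (int j = 0; j < nparams; ++j) {
                /* ... record parameter type j of helper i ... */
            }
            ++registered;
        }
        return registered;
    }

    int main(void)
    {
        printf("buggy: %d, fixed: %d (expect 4)\n",
               register_buggy(4, 3), register_fixed(4, 3));
        return 0;
    }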
Add a way to examine the unwind data without actually
restoring the data back into env.

Reviewed-by: Claudio Fontana <cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/internal.h      |  4 +--
 include/exec/exec-all.h   | 21 ++++++++---
 accel/tcg/translate-all.c | 74 ++++++++++++++++++++++++++-------------
 3 files changed, 68 insertions(+), 31 deletions(-)

diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -XXX,XX +XXX,XX @@ void tb_reset_jump(TranslationBlock *tb, int n);
 TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                                tb_page_addr_t phys_page2);
 bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
-int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
-                              uintptr_t searched_pc, bool reset_icount);
+void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
+                               uintptr_t host_pc, bool reset_icount);
 
 /* Return the current PC from CPU, which may be cached in TB. */
 static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ typedef ram_addr_t tb_page_addr_t;
 #define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
 #endif
 
+/**
+ * cpu_unwind_state_data:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @data: output data
+ *
+ * Attempt to load the the unwind state for a host pc occurring in
+ * translated code.  If @host_pc is not in translated code, the
+ * function returns false; otherwise @data is loaded.
+ * This is the same unwind info as given to restore_state_to_opc.
+ */
+bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
+
 /**
  * cpu_restore_state:
- * @cpu: the vCPU state is to be restore to
- * @searched_pc: the host PC the fault occurred at
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
  * @will_exit: true if the TB executed will be interrupted after some
                cpu adjustments. Required for maintaining the correct
                icount valus
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
- * code. If the searched_pc is not in translated code no state is
+ * code. If @host_pc is not in translated code no state is
 * restored and the function returns false.
 */
-bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit);
 
 G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
 G_NORETURN void cpu_loop_exit(CPUState *cpu);
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
     return p - block;
 }
 
-/* The cpu state corresponding to 'searched_pc' is restored.
- * When reset_icount is true, current TB will be interrupted and
- * icount should be recalculated.
- */
-int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
-                              uintptr_t searched_pc, bool reset_icount)
+static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
+                                   uint64_t *data)
 {
-    uint64_t data[TARGET_INSN_START_WORDS];
-    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
+    uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;
     const uint8_t *p = tb->tc.ptr + tb->tc.size;
     int i, j, num_insns = tb->icount;
-#ifdef CONFIG_PROFILER
-    TCGProfile *prof = &tcg_ctx->prof;
-    int64_t ti = profile_getclock();
-#endif
 
-    searched_pc -= GETPC_ADJ;
+    host_pc -= GETPC_ADJ;
 
-    if (searched_pc < host_pc) {
+    if (host_pc < iter_pc) {
         return -1;
     }
 
-    memset(data, 0, sizeof(data));
+    memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
     if (!TARGET_TB_PCREL) {
         data[0] = tb_pc(tb);
     }
 
-    /* Reconstruct the stored insn data while looking for the point at
-       which the end of the insn exceeds the searched_pc. */
+    /*
+     * Reconstruct the stored insn data while looking for the point
+     * at which the end of the insn exceeds host_pc.
+     */
     for (i = 0; i < num_insns; ++i) {
         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
             data[j] += decode_sleb128(&p);
         }
-        host_pc += decode_sleb128(&p);
-        if (host_pc > searched_pc) {
-            goto found;
+        iter_pc += decode_sleb128(&p);
+        if (iter_pc > host_pc) {
+            return num_insns - i;
         }
     }
     return -1;
339
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
340
+ * THE SOFTWARE.
341
+ */
342
+
343
+#include "qemu/osdep.h"
344
+#include "qemu-common.h"
345
+#include "sysemu/tcg.h"
346
+#include "sysemu/replay.h"
347
+#include "qemu/main-loop.h"
348
+#include "qemu/guest-random.h"
349
+#include "exec/exec-all.h"
350
+#include "hw/boards.h"
351
+
352
+#include "tcg-cpus.h"
353
+#include "tcg-cpus-mttcg.h"
354
+
355
+/*
356
+ * In the multi-threaded case each vCPU has its own thread. The TLS
357
+ * variable current_cpu can be used deep in the code to find the
358
+ * current CPUState for a given thread.
359
+ */
360
+
361
+void *tcg_cpu_thread_fn(void *arg)
362
+{
363
+ CPUState *cpu = arg;
364
+
365
+ assert(tcg_enabled());
366
+ g_assert(!icount_enabled());
367
+
368
+ rcu_register_thread();
369
+ tcg_register_thread();
370
+
371
+ qemu_mutex_lock_iothread();
372
+ qemu_thread_get_self(cpu->thread);
373
+
374
+ cpu->thread_id = qemu_get_thread_id();
375
+ cpu->can_do_io = 1;
376
+ current_cpu = cpu;
377
+ cpu_thread_signal_created(cpu);
378
+ qemu_guest_random_seed_thread_part2(cpu->random_seed);
379
+
380
+ /* process any pending work */
381
+ cpu->exit_request = 1;
382
+
383
+ do {
384
+ if (cpu_can_run(cpu)) {
385
+ int r;
386
+ qemu_mutex_unlock_iothread();
387
+ r = tcg_cpu_exec(cpu);
388
+ qemu_mutex_lock_iothread();
389
+ switch (r) {
390
+ case EXCP_DEBUG:
391
+ cpu_handle_guest_debug(cpu);
392
+ break;
393
+ case EXCP_HALTED:
394
+ /*
395
+ * during start-up the vCPU is reset and the thread is
396
+ * kicked several times. If we don't ensure we go back
397
+ * to sleep in the halted state we won't cleanly
398
+ * start-up when the vCPU is enabled.
399
+ *
400
+ * cpu->halted should ensure we sleep in wait_io_event
401
+ */
402
+ g_assert(cpu->halted);
403
+ break;
404
+ case EXCP_ATOMIC:
405
+ qemu_mutex_unlock_iothread();
406
+ cpu_exec_step_atomic(cpu);
407
+ qemu_mutex_lock_iothread();
408
+ default:
409
+ /* Ignore everything else? */
410
+ break;
411
+ }
412
+ }
413
+
414
+ qatomic_mb_set(&cpu->exit_request, 0);
415
+ qemu_wait_io_event(cpu);
416
+ } while (!cpu->unplug || cpu_can_run(cpu));
417
+
418
+ qemu_tcg_destroy_vcpu(cpu);
419
+ qemu_mutex_unlock_iothread();
420
+ rcu_unregister_thread();
421
+ return NULL;
422
+}
423
+
424
+static void mttcg_kick_vcpu_thread(CPUState *cpu)
425
+{
426
+ cpu_exit(cpu);
427
+}
428
+
429
+const CpusAccel tcg_cpus_mttcg = {
430
+ .create_vcpu_thread = tcg_start_vcpu_thread,
431
+ .kick_vcpu_thread = mttcg_kick_vcpu_thread,
432
+
433
+ .handle_interrupt = tcg_handle_interrupt,
434
+};
435
diff --git a/accel/tcg/tcg-cpus-rr.c b/accel/tcg/tcg-cpus-rr.c
436
new file mode 100644
437
index XXXXXXX..XXXXXXX
438
--- /dev/null
439
+++ b/accel/tcg/tcg-cpus-rr.c
440
@@ -XXX,XX +XXX,XX @@
441
+/*
442
+ * QEMU TCG Single Threaded vCPUs implementation
443
+ *
444
+ * Copyright (c) 2003-2008 Fabrice Bellard
445
+ * Copyright (c) 2014 Red Hat Inc.
446
+ *
447
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
448
+ * of this software and associated documentation files (the "Software"), to deal
449
+ * in the Software without restriction, including without limitation the rights
450
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
451
+ * copies of the Software, and to permit persons to whom the Software is
452
+ * furnished to do so, subject to the following conditions:
453
+ *
454
+ * The above copyright notice and this permission notice shall be included in
455
+ * all copies or substantial portions of the Software.
456
+ *
457
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
458
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
459
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
460
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
461
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
462
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
463
+ * THE SOFTWARE.
464
+ */
465
+
466
+#include "qemu/osdep.h"
467
+#include "qemu-common.h"
468
+#include "sysemu/tcg.h"
469
+#include "sysemu/replay.h"
470
+#include "qemu/main-loop.h"
471
+#include "qemu/guest-random.h"
472
+#include "exec/exec-all.h"
473
+#include "hw/boards.h"
474
+
475
+#include "tcg-cpus.h"
476
+#include "tcg-cpus-rr.h"
477
+#include "tcg-cpus-icount.h"
478
+
479
+/* Kick all RR vCPUs */
480
+void qemu_cpu_kick_rr_cpus(CPUState *unused)
481
+{
482
+ CPUState *cpu;
483
+
484
+ CPU_FOREACH(cpu) {
485
+ cpu_exit(cpu);
486
+ };
487
+}
129
+}
488
+
130
+
489
+/*
131
+/*
490
+ * TCG vCPU kick timer
132
+ * The cpu state corresponding to 'host_pc' is restored.
491
+ *
133
+ * When reset_icount is true, current TB will be interrupted and
492
+ * The kick timer is responsible for moving single threaded vCPU
134
+ * icount should be recalculated.
493
+ * emulation on to the next vCPU. If more than one vCPU is running a
494
+ * timer event with force a cpu->exit so the next vCPU can get
495
+ * scheduled.
496
+ *
497
+ * The timer is removed if all vCPUs are idle and restarted again once
498
+ * idleness is complete.
499
+ */
135
+ */
136
+void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
137
+ uintptr_t host_pc, bool reset_icount)
138
+{
139
+ uint64_t data[TARGET_INSN_START_WORDS];
140
+#ifdef CONFIG_PROFILER
141
+ TCGProfile *prof = &tcg_ctx->prof;
142
+ int64_t ti = profile_getclock();
143
+#endif
144
+ int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
500
+
145
+
501
+static QEMUTimer *tcg_kick_vcpu_timer;
146
+ if (insns_left < 0) {
502
+static CPUState *tcg_current_rr_cpu;
147
+ return;
503
+
148
+ }
504
+#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
149
505
+
150
- found:
506
+static inline int64_t qemu_tcg_next_kick(void)
151
if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
152
assert(icount_enabled());
153
- /* Reset the cycle counter to the start of the block
154
- and shift if to the number of actually executed instructions */
155
- cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
156
+ /*
157
+ * Reset the cycle counter to the start of the block and
158
+ * shift if to the number of actually executed instructions.
159
+ */
160
+ cpu_neg(cpu)->icount_decr.u16.low += insns_left;
161
}
162
163
cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
164
@@ -XXX,XX +XXX,XX @@ int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
165
prof->restore_time + profile_getclock() - ti);
166
qatomic_set(&prof->restore_count, prof->restore_count + 1);
167
#endif
168
- return 0;
169
}
170
171
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
172
@@ -XXX,XX +XXX,XX @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
173
return false;
174
}
175
176
+bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
507
+{
177
+{
508
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
178
+ if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
179
+ TranslationBlock *tb = tcg_tb_lookup(host_pc);
180
+ if (tb) {
181
+ return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0;
182
+ }
183
+ }
184
+ return false;
509
+}
185
+}
510
+
186
+
511
+/* Kick the currently round-robin scheduled vCPU to next */
187
void page_init(void)
512
+static void qemu_cpu_kick_rr_next_cpu(void)
513
+{
514
+ CPUState *cpu;
515
+ do {
516
+ cpu = qatomic_mb_read(&tcg_current_rr_cpu);
517
+ if (cpu) {
518
+ cpu_exit(cpu);
519
+ }
520
+ } while (cpu != qatomic_mb_read(&tcg_current_rr_cpu));
521
+}
522
+
523
+static void kick_tcg_thread(void *opaque)
524
+{
525
+ timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
526
+ qemu_cpu_kick_rr_next_cpu();
527
+}
528
+
529
+static void start_tcg_kick_timer(void)
530
+{
531
+ if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
532
+ tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
533
+ kick_tcg_thread, NULL);
534
+ }
535
+ if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
536
+ timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
537
+ }
538
+}
539
+
540
+static void stop_tcg_kick_timer(void)
541
+{
542
+ if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
543
+ timer_del(tcg_kick_vcpu_timer);
544
+ }
545
+}
546
+
547
+static void qemu_tcg_rr_wait_io_event(void)
548
+{
549
+ CPUState *cpu;
550
+
551
+ while (all_cpu_threads_idle()) {
552
+ stop_tcg_kick_timer();
553
+ qemu_cond_wait_iothread(first_cpu->halt_cond);
554
+ }
555
+
556
+ start_tcg_kick_timer();
557
+
558
+ CPU_FOREACH(cpu) {
559
+ qemu_wait_io_event_common(cpu);
560
+ }
561
+}
562
+
563
+/*
564
+ * Destroy any remaining vCPUs which have been unplugged and have
565
+ * finished running
566
+ */
567
+static void deal_with_unplugged_cpus(void)
568
+{
569
+ CPUState *cpu;
570
+
571
+ CPU_FOREACH(cpu) {
572
+ if (cpu->unplug && !cpu_can_run(cpu)) {
573
+ qemu_tcg_destroy_vcpu(cpu);
574
+ break;
575
+ }
576
+ }
577
+}
578
+
579
+/*
580
+ * In the single-threaded case each vCPU is simulated in turn. If
581
+ * there is more than a single vCPU we create a simple timer to kick
582
+ * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
583
+ * This is done explicitly rather than relying on side-effects
584
+ * elsewhere.
585
+ */
586
+
587
+void *tcg_rr_cpu_thread_fn(void *arg)
588
+{
589
+ CPUState *cpu = arg;
590
+
591
+ assert(tcg_enabled());
592
+ rcu_register_thread();
593
+ tcg_register_thread();
594
+
595
+ qemu_mutex_lock_iothread();
596
+ qemu_thread_get_self(cpu->thread);
597
+
598
+ cpu->thread_id = qemu_get_thread_id();
599
+ cpu->can_do_io = 1;
600
+ cpu_thread_signal_created(cpu);
601
+ qemu_guest_random_seed_thread_part2(cpu->random_seed);
602
+
603
+ /* wait for initial kick-off after machine start */
604
+ while (first_cpu->stopped) {
605
+ qemu_cond_wait_iothread(first_cpu->halt_cond);
606
+
607
+ /* process any pending work */
608
+ CPU_FOREACH(cpu) {
609
+ current_cpu = cpu;
610
+ qemu_wait_io_event_common(cpu);
611
+ }
612
+ }
613
+
614
+ start_tcg_kick_timer();
615
+
616
+ cpu = first_cpu;
617
+
618
+ /* process any pending work */
619
+ cpu->exit_request = 1;
620
+
621
+ while (1) {
622
+ qemu_mutex_unlock_iothread();
623
+ replay_mutex_lock();
624
+ qemu_mutex_lock_iothread();
625
+
626
+ if (icount_enabled()) {
627
+ /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
628
+ icount_account_warp_timer();
629
+ /*
630
+ * Run the timers here. This is much more efficient than
631
+ * waking up the I/O thread and waiting for completion.
632
+ */
633
+ handle_icount_deadline();
634
+ }
635
+
636
+ replay_mutex_unlock();
637
+
638
+ if (!cpu) {
639
+ cpu = first_cpu;
640
+ }
641
+
642
+ while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
643
+
644
+ qatomic_mb_set(&tcg_current_rr_cpu, cpu);
645
+ current_cpu = cpu;
646
+
647
+ qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
648
+ (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
649
+
650
+ if (cpu_can_run(cpu)) {
651
+ int r;
652
+
653
+ qemu_mutex_unlock_iothread();
654
+ if (icount_enabled()) {
655
+ prepare_icount_for_run(cpu);
656
+ }
657
+ r = tcg_cpu_exec(cpu);
658
+ if (icount_enabled()) {
659
+ process_icount_data(cpu);
660
+ }
661
+ qemu_mutex_lock_iothread();
662
+
663
+ if (r == EXCP_DEBUG) {
664
+ cpu_handle_guest_debug(cpu);
665
+ break;
666
+ } else if (r == EXCP_ATOMIC) {
667
+ qemu_mutex_unlock_iothread();
668
+ cpu_exec_step_atomic(cpu);
669
+ qemu_mutex_lock_iothread();
670
+ break;
671
+ }
672
+ } else if (cpu->stop) {
673
+ if (cpu->unplug) {
674
+ cpu = CPU_NEXT(cpu);
675
+ }
676
+ break;
677
+ }
678
+
679
+ cpu = CPU_NEXT(cpu);
680
+ } /* while (cpu && !cpu->exit_request).. */
681
+
682
+ /* Does not need qatomic_mb_set because a spurious wakeup is okay. */
683
+ qatomic_set(&tcg_current_rr_cpu, NULL);
684
+
685
+ if (cpu && cpu->exit_request) {
686
+ qatomic_mb_set(&cpu->exit_request, 0);
687
+ }
688
+
689
+ if (icount_enabled() && all_cpu_threads_idle()) {
690
+ /*
691
+ * When all cpus are sleeping (e.g in WFI), to avoid a deadlock
692
+ * in the main_loop, wake it up in order to start the warp timer.
693
+ */
694
+ qemu_notify_event();
695
+ }
696
+
697
+ qemu_tcg_rr_wait_io_event();
698
+ deal_with_unplugged_cpus();
699
+ }
700
+
701
+ rcu_unregister_thread();
702
+ return NULL;
703
+}
704
+
705
+const CpusAccel tcg_cpus_rr = {
706
+ .create_vcpu_thread = tcg_start_vcpu_thread,
707
+ .kick_vcpu_thread = qemu_cpu_kick_rr_cpus,
708
+
709
+ .handle_interrupt = tcg_handle_interrupt,
710
+};
711
diff --git a/accel/tcg/tcg-cpus.c b/accel/tcg/tcg-cpus.c
712
index XXXXXXX..XXXXXXX 100644
713
--- a/accel/tcg/tcg-cpus.c
714
+++ b/accel/tcg/tcg-cpus.c
715
@@ -XXX,XX +XXX,XX @@
716
/*
717
- * QEMU System Emulator
718
+ * QEMU TCG vCPU common functionality
719
+ *
720
+ * Functionality common to all TCG vCPU variants: mttcg, rr and icount.
721
*
722
* Copyright (c) 2003-2008 Fabrice Bellard
723
* Copyright (c) 2014 Red Hat Inc.
724
@@ -XXX,XX +XXX,XX @@
725
#include "hw/boards.h"
726
727
#include "tcg-cpus.h"
728
+#include "tcg-cpus-mttcg.h"
729
+#include "tcg-cpus-rr.h"
730
731
-/* Kick all RR vCPUs */
732
-static void qemu_cpu_kick_rr_cpus(void)
733
-{
734
- CPUState *cpu;
735
+/* common functionality among all TCG variants */
736
737
- CPU_FOREACH(cpu) {
738
- cpu_exit(cpu);
739
- };
740
-}
741
-
742
-static void tcg_kick_vcpu_thread(CPUState *cpu)
743
-{
744
- if (qemu_tcg_mttcg_enabled()) {
745
- cpu_exit(cpu);
746
- } else {
747
- qemu_cpu_kick_rr_cpus();
748
- }
749
-}
750
-
751
-/*
752
- * TCG vCPU kick timer
753
- *
754
- * The kick timer is responsible for moving single threaded vCPU
755
- * emulation on to the next vCPU. If more than one vCPU is running a
756
- * timer event with force a cpu->exit so the next vCPU can get
757
- * scheduled.
758
- *
759
- * The timer is removed if all vCPUs are idle and restarted again once
760
- * idleness is complete.
761
- */
762
-
763
-static QEMUTimer *tcg_kick_vcpu_timer;
764
-static CPUState *tcg_current_rr_cpu;
765
-
766
-#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
767
-
768
-static inline int64_t qemu_tcg_next_kick(void)
769
-{
770
- return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
771
-}
772
-
773
-/* Kick the currently round-robin scheduled vCPU to next */
774
-static void qemu_cpu_kick_rr_next_cpu(void)
775
-{
776
- CPUState *cpu;
777
- do {
778
- cpu = qatomic_mb_read(&tcg_current_rr_cpu);
779
- if (cpu) {
780
- cpu_exit(cpu);
781
- }
782
- } while (cpu != qatomic_mb_read(&tcg_current_rr_cpu));
783
-}
784
-
785
-static void kick_tcg_thread(void *opaque)
786
-{
787
- timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
788
- qemu_cpu_kick_rr_next_cpu();
789
-}
790
-
791
-static void start_tcg_kick_timer(void)
792
-{
793
- assert(!mttcg_enabled);
794
- if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
795
- tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
796
- kick_tcg_thread, NULL);
797
- }
798
- if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
799
- timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
800
- }
801
-}
802
-
803
-static void stop_tcg_kick_timer(void)
804
-{
805
- assert(!mttcg_enabled);
806
- if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
807
- timer_del(tcg_kick_vcpu_timer);
808
- }
809
-}
810
-
811
-static void qemu_tcg_destroy_vcpu(CPUState *cpu)
812
-{
813
-}
814
-
815
-static void qemu_tcg_rr_wait_io_event(void)
816
-{
817
- CPUState *cpu;
818
-
819
- while (all_cpu_threads_idle()) {
820
- stop_tcg_kick_timer();
821
- qemu_cond_wait_iothread(first_cpu->halt_cond);
822
- }
823
-
824
- start_tcg_kick_timer();
825
-
826
- CPU_FOREACH(cpu) {
827
- qemu_wait_io_event_common(cpu);
828
- }
829
-}
830
-
831
-static int64_t tcg_get_icount_limit(void)
832
-{
833
- int64_t deadline;
834
-
835
- if (replay_mode != REPLAY_MODE_PLAY) {
836
- /*
837
- * Include all the timers, because they may need an attention.
838
- * Too long CPU execution may create unnecessary delay in UI.
839
- */
840
- deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
841
- QEMU_TIMER_ATTR_ALL);
842
- /* Check realtime timers, because they help with input processing */
843
- deadline = qemu_soonest_timeout(deadline,
844
- qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME,
845
- QEMU_TIMER_ATTR_ALL));
846
-
847
- /*
848
- * Maintain prior (possibly buggy) behaviour where if no deadline
849
- * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
850
- * INT32_MAX nanoseconds ahead, we still use INT32_MAX
851
- * nanoseconds.
852
- */
853
- if ((deadline < 0) || (deadline > INT32_MAX)) {
854
- deadline = INT32_MAX;
855
- }
856
-
857
- return icount_round(deadline);
858
- } else {
859
- return replay_get_instructions();
860
- }
861
-}
862
-
863
-static void notify_aio_contexts(void)
864
-{
865
- /* Wake up other AioContexts. */
866
- qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
867
- qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
868
-}
869
-
870
-static void handle_icount_deadline(void)
871
-{
872
- assert(qemu_in_vcpu_thread());
873
- if (icount_enabled()) {
874
- int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
875
- QEMU_TIMER_ATTR_ALL);
876
-
877
- if (deadline == 0) {
878
- notify_aio_contexts();
879
- }
880
- }
881
-}
882
-
883
-static void prepare_icount_for_run(CPUState *cpu)
884
-{
885
- if (icount_enabled()) {
886
- int insns_left;
887
-
888
- /*
889
- * These should always be cleared by process_icount_data after
890
- * each vCPU execution. However u16.high can be raised
891
- * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
892
- */
893
- g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
894
- g_assert(cpu->icount_extra == 0);
895
-
896
- cpu->icount_budget = tcg_get_icount_limit();
897
- insns_left = MIN(0xffff, cpu->icount_budget);
898
- cpu_neg(cpu)->icount_decr.u16.low = insns_left;
899
- cpu->icount_extra = cpu->icount_budget - insns_left;
900
-
901
- replay_mutex_lock();
902
-
903
- if (cpu->icount_budget == 0 && replay_has_checkpoint()) {
904
- notify_aio_contexts();
905
- }
906
- }
907
-}
908
-
909
-static void process_icount_data(CPUState *cpu)
910
-{
911
- if (icount_enabled()) {
912
- /* Account for executed instructions */
913
- icount_update(cpu);
914
-
915
- /* Reset the counters */
916
- cpu_neg(cpu)->icount_decr.u16.low = 0;
917
- cpu->icount_extra = 0;
918
- cpu->icount_budget = 0;
919
-
920
- replay_account_executed_instructions();
921
-
922
- replay_mutex_unlock();
923
- }
924
-}
925
-
926
-static int tcg_cpu_exec(CPUState *cpu)
927
-{
928
- int ret;
929
-#ifdef CONFIG_PROFILER
930
- int64_t ti;
931
-#endif
932
-
933
- assert(tcg_enabled());
934
-#ifdef CONFIG_PROFILER
935
- ti = profile_getclock();
936
-#endif
937
- cpu_exec_start(cpu);
938
- ret = cpu_exec(cpu);
939
- cpu_exec_end(cpu);
940
-#ifdef CONFIG_PROFILER
941
- qatomic_set(&tcg_ctx->prof.cpu_exec_time,
942
- tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
943
-#endif
944
- return ret;
945
-}
946
-
947
-/*
948
- * Destroy any remaining vCPUs which have been unplugged and have
949
- * finished running
950
- */
951
-static void deal_with_unplugged_cpus(void)
952
-{
953
- CPUState *cpu;
954
-
955
- CPU_FOREACH(cpu) {
956
- if (cpu->unplug && !cpu_can_run(cpu)) {
957
- qemu_tcg_destroy_vcpu(cpu);
958
- cpu_thread_signal_destroyed(cpu);
959
- break;
960
- }
961
- }
962
-}
963
-
964
-/*
965
- * Single-threaded TCG
966
- *
967
- * In the single-threaded case each vCPU is simulated in turn. If
968
- * there is more than a single vCPU we create a simple timer to kick
969
- * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
970
- * This is done explicitly rather than relying on side-effects
971
- * elsewhere.
972
- */
973
-
974
-static void *tcg_rr_cpu_thread_fn(void *arg)
975
-{
976
- CPUState *cpu = arg;
977
-
978
- assert(tcg_enabled());
979
- rcu_register_thread();
980
- tcg_register_thread();
981
-
982
- qemu_mutex_lock_iothread();
983
- qemu_thread_get_self(cpu->thread);
984
-
985
- cpu->thread_id = qemu_get_thread_id();
986
- cpu->can_do_io = 1;
987
- cpu_thread_signal_created(cpu);
988
- qemu_guest_random_seed_thread_part2(cpu->random_seed);
989
-
990
- /* wait for initial kick-off after machine start */
991
- while (first_cpu->stopped) {
992
- qemu_cond_wait_iothread(first_cpu->halt_cond);
993
-
994
- /* process any pending work */
995
- CPU_FOREACH(cpu) {
996
- current_cpu = cpu;
997
- qemu_wait_io_event_common(cpu);
998
- }
999
- }
1000
-
1001
- start_tcg_kick_timer();
1002
-
1003
- cpu = first_cpu;
1004
-
1005
- /* process any pending work */
1006
- cpu->exit_request = 1;
1007
-
1008
- while (1) {
1009
- qemu_mutex_unlock_iothread();
1010
- replay_mutex_lock();
1011
- qemu_mutex_lock_iothread();
1012
- /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1013
- icount_account_warp_timer();
1014
-
1015
- /*
1016
- * Run the timers here. This is much more efficient than
1017
- * waking up the I/O thread and waiting for completion.
1018
- */
1019
- handle_icount_deadline();
1020
-
1021
- replay_mutex_unlock();
1022
-
1023
- if (!cpu) {
1024
- cpu = first_cpu;
1025
- }
1026
-
1027
- while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
1028
-
1029
- qatomic_mb_set(&tcg_current_rr_cpu, cpu);
1030
- current_cpu = cpu;
1031
-
1032
- qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1033
- (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1034
-
1035
- if (cpu_can_run(cpu)) {
1036
- int r;
1037
-
1038
- qemu_mutex_unlock_iothread();
1039
- prepare_icount_for_run(cpu);
1040
-
1041
- r = tcg_cpu_exec(cpu);
1042
-
1043
- process_icount_data(cpu);
1044
- qemu_mutex_lock_iothread();
1045
-
1046
- if (r == EXCP_DEBUG) {
1047
- cpu_handle_guest_debug(cpu);
1048
- break;
1049
- } else if (r == EXCP_ATOMIC) {
1050
- qemu_mutex_unlock_iothread();
1051
- cpu_exec_step_atomic(cpu);
1052
- qemu_mutex_lock_iothread();
1053
- break;
1054
- }
1055
- } else if (cpu->stop) {
1056
- if (cpu->unplug) {
1057
- cpu = CPU_NEXT(cpu);
1058
- }
1059
- break;
1060
- }
1061
-
1062
- cpu = CPU_NEXT(cpu);
1063
- } /* while (cpu && !cpu->exit_request).. */
1064
-
1065
- /* Does not need qatomic_mb_set because a spurious wakeup is okay. */
1066
- qatomic_set(&tcg_current_rr_cpu, NULL);
1067
-
1068
- if (cpu && cpu->exit_request) {
1069
- qatomic_mb_set(&cpu->exit_request, 0);
1070
- }
1071
-
1072
- if (icount_enabled() && all_cpu_threads_idle()) {
1073
- /*
1074
- * When all cpus are sleeping (e.g in WFI), to avoid a deadlock
1075
- * in the main_loop, wake it up in order to start the warp timer.
1076
- */
1077
- qemu_notify_event();
1078
- }
1079
-
1080
- qemu_tcg_rr_wait_io_event();
1081
- deal_with_unplugged_cpus();
1082
- }
1083
-
1084
- rcu_unregister_thread();
1085
- return NULL;
1086
-}
1087
-
1088
-/*
1089
- * Multi-threaded TCG
1090
- *
1091
- * In the multi-threaded case each vCPU has its own thread. The TLS
1092
- * variable current_cpu can be used deep in the code to find the
1093
- * current CPUState for a given thread.
1094
- */
1095
-
1096
-static void *tcg_cpu_thread_fn(void *arg)
1097
-{
1098
- CPUState *cpu = arg;
1099
-
1100
- assert(tcg_enabled());
1101
- g_assert(!icount_enabled());
1102
-
1103
- rcu_register_thread();
1104
- tcg_register_thread();
1105
-
1106
- qemu_mutex_lock_iothread();
1107
- qemu_thread_get_self(cpu->thread);
1108
-
1109
- cpu->thread_id = qemu_get_thread_id();
1110
- cpu->can_do_io = 1;
1111
- current_cpu = cpu;
1112
- cpu_thread_signal_created(cpu);
1113
- qemu_guest_random_seed_thread_part2(cpu->random_seed);
1114
-
1115
- /* process any pending work */
1116
- cpu->exit_request = 1;
1117
-
1118
- do {
1119
- if (cpu_can_run(cpu)) {
1120
- int r;
1121
- qemu_mutex_unlock_iothread();
1122
- r = tcg_cpu_exec(cpu);
1123
- qemu_mutex_lock_iothread();
1124
- switch (r) {
1125
- case EXCP_DEBUG:
1126
- cpu_handle_guest_debug(cpu);
1127
- break;
1128
- case EXCP_HALTED:
1129
- /*
1130
- * during start-up the vCPU is reset and the thread is
1131
- * kicked several times. If we don't ensure we go back
1132
- * to sleep in the halted state we won't cleanly
1133
- * start-up when the vCPU is enabled.
1134
- *
1135
- * cpu->halted should ensure we sleep in wait_io_event
1136
- */
1137
- g_assert(cpu->halted);
1138
- break;
1139
- case EXCP_ATOMIC:
1140
- qemu_mutex_unlock_iothread();
1141
- cpu_exec_step_atomic(cpu);
1142
- qemu_mutex_lock_iothread();
1143
- default:
1144
- /* Ignore everything else? */
1145
- break;
1146
- }
1147
- }
1148
-
1149
- qatomic_mb_set(&cpu->exit_request, 0);
1150
- qemu_wait_io_event(cpu);
1151
- } while (!cpu->unplug || cpu_can_run(cpu));
1152
-
1153
- qemu_tcg_destroy_vcpu(cpu);
1154
- cpu_thread_signal_destroyed(cpu);
1155
- qemu_mutex_unlock_iothread();
1156
- rcu_unregister_thread();
1157
- return NULL;
1158
-}
1159
-
1160
-static void tcg_start_vcpu_thread(CPUState *cpu)
1161
+void tcg_start_vcpu_thread(CPUState *cpu)
1162
{
188
{
1163
char thread_name[VCPU_THREAD_NAME_SIZE];
189
page_size_init();
1164
static QemuCond *single_tcg_halt_cond;
1165
@@ -XXX,XX +XXX,XX @@ static void tcg_start_vcpu_thread(CPUState *cpu)
1166
}
1167
}
1168
1169
-static int64_t tcg_get_virtual_clock(void)
1170
+void qemu_tcg_destroy_vcpu(CPUState *cpu)
1171
{
1172
- if (icount_enabled()) {
1173
- return icount_get();
1174
- }
1175
- return cpu_get_clock();
1176
+ cpu_thread_signal_destroyed(cpu);
1177
}
1178
1179
-static int64_t tcg_get_elapsed_ticks(void)
1180
+int tcg_cpu_exec(CPUState *cpu)
1181
{
1182
- if (icount_enabled()) {
1183
- return icount_get();
1184
- }
1185
- return cpu_get_ticks();
1186
+ int ret;
1187
+#ifdef CONFIG_PROFILER
1188
+ int64_t ti;
1189
+#endif
1190
+ assert(tcg_enabled());
1191
+#ifdef CONFIG_PROFILER
1192
+ ti = profile_getclock();
1193
+#endif
1194
+ cpu_exec_start(cpu);
1195
+ ret = cpu_exec(cpu);
1196
+ cpu_exec_end(cpu);
1197
+#ifdef CONFIG_PROFILER
1198
+ qatomic_set(&tcg_ctx->prof.cpu_exec_time,
1199
+ tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
1200
+#endif
1201
+ return ret;
1202
}
1203
1204
/* mask must never be zero, except for A20 change call */
1205
-static void tcg_handle_interrupt(CPUState *cpu, int mask)
1206
+void tcg_handle_interrupt(CPUState *cpu, int mask)
1207
{
1208
- int old_mask;
1209
g_assert(qemu_mutex_iothread_locked());
1210
1211
- old_mask = cpu->interrupt_request;
1212
cpu->interrupt_request |= mask;
1213
1214
/*
1215
@@ -XXX,XX +XXX,XX @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
1216
qemu_cpu_kick(cpu);
1217
} else {
1218
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
1219
- if (icount_enabled() &&
1220
- !cpu->can_do_io
1221
- && (mask & ~old_mask) != 0) {
1222
- cpu_abort(cpu, "Raised interrupt while not in I/O function");
1223
- }
1224
}
1225
}
1226
-
1227
-const CpusAccel tcg_cpus = {
1228
- .create_vcpu_thread = tcg_start_vcpu_thread,
1229
- .kick_vcpu_thread = tcg_kick_vcpu_thread,
1230
-
1231
- .handle_interrupt = tcg_handle_interrupt,
1232
-
1233
- .get_virtual_clock = tcg_get_virtual_clock,
1234
- .get_elapsed_ticks = tcg_get_elapsed_ticks,
1235
-};
1236
diff --git a/softmmu/icount.c b/softmmu/icount.c
1237
index XXXXXXX..XXXXXXX 100644
1238
--- a/softmmu/icount.c
1239
+++ b/softmmu/icount.c
1240
@@ -XXX,XX +XXX,XX @@ void icount_start_warp_timer(void)
1241
1242
void icount_account_warp_timer(void)
1243
{
1244
- if (!icount_enabled() || !icount_sleep) {
1245
+ if (!icount_sleep) {
1246
return;
1247
}
1248
1249
diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build
1250
index XXXXXXX..XXXXXXX 100644
1251
--- a/accel/tcg/meson.build
1252
+++ b/accel/tcg/meson.build
1253
@@ -XXX,XX +XXX,XX @@ tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c'))
1254
tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c'), libdl])
1255
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
1256
1257
-specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files('tcg-all.c', 'cputlb.c', 'tcg-cpus.c'))
1258
+specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
1259
+ 'tcg-all.c',
1260
+ 'cputlb.c',
1261
+ 'tcg-cpus.c',
1262
+ 'tcg-cpus-mttcg.c',
1263
+ 'tcg-cpus-icount.c',
1264
+ 'tcg-cpus-rr.c'
1265
+))
1266
--
190
--
1267
2.25.1
191
2.34.1
1268
1269
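The series on the 2020 side of this view splits the monolithic tcg-cpus
CpusAccel into the three variants seen above (tcg_cpus_mttcg,
tcg_cpus_icount, tcg_cpus_rr). As a reading aid only: the selection
between them lives in the tcg-all.c hunk that this view truncates,
presumably along the lines of the sketch below. cpus_register_accel()
and the three struct names come from the patch itself; the surrounding
control flow here is an assumption.

    /* Sketch: one CpusAccel is registered per TCG threading mode. */
    if (icount_enabled()) {
        /* icount implies the single-threaded round-robin loop */
        cpus_register_accel(&tcg_cpus_icount);
    } else if (qemu_tcg_mttcg_enabled()) {
        /* one host thread per vCPU */
        cpus_register_accel(&tcg_cpus_mttcg);
    } else {
        /* single-threaded round robin without icount */
        cpus_register_accel(&tcg_cpus_rr);
    }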
New patch
1
Avoid cpu_restore_state, and avoid modifying env->eip out from
2
underneath the translator with TARGET_TB_PCREL. There is
3
some slight duplication from x86_restore_state_to_opc,
4
but it's just a few lines.
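For readability outside the diff: cs->mem_io_pc is a host return
address recorded while emulating the MMIO access, and the new helper
maps it back to a guest eip using only the unwind data, without
writing anything into the CPU state. A minimal sketch of the pattern,
using the same APIs as the hunk below:

    /* Sketch: recover the insn_start words for a host pc without
     * touching env; cpu_unwind_state_data() returns false when
     * host_pc does not point into translated code. */
    uint64_t data[TARGET_INSN_START_WORDS];
    if (cpu_unwind_state_data(cs, cs->mem_io_pc, data)) {
        /* data[0] is the recorded pc; with TARGET_TB_PCREL it is
         * page-relative, otherwise the CS base must be subtracted. */
    }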
1
5
6
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1269
7
Reviewed-by: Claudio Fontana <cfontana@suse.de>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
target/i386/helper.c | 21 +++++++++++++++++++--
11
1 file changed, 19 insertions(+), 2 deletions(-)
12
13
diff --git a/target/i386/helper.c b/target/i386/helper.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/i386/helper.c
16
+++ b/target/i386/helper.c
17
@@ -XXX,XX +XXX,XX @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
18
}
19
}
20
21
+static target_ulong get_memio_eip(CPUX86State *env)
22
+{
23
+ uint64_t data[TARGET_INSN_START_WORDS];
24
+ CPUState *cs = env_cpu(env);
25
+
26
+ if (!cpu_unwind_state_data(cs, cs->mem_io_pc, data)) {
27
+ return env->eip;
28
+ }
29
+
30
+ /* Per x86_restore_state_to_opc. */
31
+ if (TARGET_TB_PCREL) {
32
+ return (env->eip & TARGET_PAGE_MASK) | data[0];
33
+ } else {
34
+ return data[0] - env->segs[R_CS].base;
35
+ }
36
+}
37
+
38
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
39
{
40
X86CPU *cpu = env_archcpu(env);
41
@@ -XXX,XX +XXX,XX @@ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
42
43
cpu_interrupt(cs, CPU_INTERRUPT_TPR);
44
} else if (tcg_enabled()) {
45
- cpu_restore_state(cs, cs->mem_io_pc, false);
46
+ target_ulong eip = get_memio_eip(env);
47
48
- apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
49
+ apic_handle_tpr_access_report(cpu->apic_state, eip, access);
50
}
51
}
52
#endif /* !CONFIG_USER_ONLY */
53
--
54
2.34.1
New patch
1
We have called cpu_restore_state asserting will_exit.
2
Do not go back on that promise. This affects icount.
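The promise in question: once a helper has called cpu_restore_state()
(which rewinds the icount budget to the start of the interrupted
instruction), the TB must not be resumed. A condensed rendering of the
mtspr NPC case being fixed -- the function name is hypothetical and
the unwind call is shown inline for brevity, but the statements match
the hunk below:

    /* Hypothetical condensation of the HELPER(mtspr) NPC case. */
    static void write_npc(CPUOpenRISCState *env, target_ulong rb)
    {
        CPUState *cs = env_cpu(env);

        cpu_restore_state(cs, GETPC(), true);  /* promises an exit */
        if (env->pc != rb) {
            env->pc = rb;
            env->dflag = 0;
        }
        cpu_loop_exit(cs);  /* after the fix: taken on every path */
    }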
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/openrisc/sys_helper.c | 2 +-
8
1 file changed, 1 insertion(+), 1 deletion(-)
9
10
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/openrisc/sys_helper.c
13
+++ b/target/openrisc/sys_helper.c
14
@@ -XXX,XX +XXX,XX @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
15
if (env->pc != rb) {
16
env->pc = rb;
17
env->dflag = 0;
18
- cpu_loop_exit(cs);
19
}
20
+ cpu_loop_exit(cs);
21
break;
22
23
case TO_SPR(0, 17): /* SR */
24
--
25
2.34.1
26
27
New patch
1
Since we do not plan to exit, use cpu_unwind_state_data
2
and extract exactly the data requested.
1
3
4
This is a bug fix, in that we no longer clobber dflag.
5
6
Consider:
7
8
l.j L2 // branch
9
l.mfspr r1, ppc // delay
10
11
L1: boom
12
L2: l.lwa r3, (r4)
13
14
Here, dflag would be set by cpu_restore_state (because that is the current
15
state of the cpu), but not cleared by tb_stop on exiting the TB
16
(because DisasContext has recorded the current value as zero).
17
18
The next TB begins at L2 with dflag incorrectly set. If the load has a
19
tlb miss, then the exception will be delivered as per a delay slot:
20
with DSX set in the status register and PC decremented (delay slots
21
restart by re-executing the branch). This will cause the return from
22
interrupt to go to L1, and boom!
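The fix below therefore reads the unwind data without the write-back.
In outline (assumed shapes, not the literal implementations):

    /* cpu_restore_state(): unwind host_pc -> data[], then call the
     * target's restore_state_to_opc(), which writes env->pc and
     * env->dflag -- the clobber described above.
     *
     * cpu_unwind_state_data(): unwind host_pc -> data[] only; env is
     * untouched and the caller extracts what it needs (data[0] is
     * the pc, data[1] the target-specific delay-slot flags). */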
23
24
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
25
---
26
target/openrisc/sys_helper.c | 11 +++++++++--
27
1 file changed, 9 insertions(+), 2 deletions(-)
28
29
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/openrisc/sys_helper.c
32
+++ b/target/openrisc/sys_helper.c
33
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
34
target_ulong spr)
35
{
36
#ifndef CONFIG_USER_ONLY
37
+ uint64_t data[TARGET_INSN_START_WORDS];
38
MachineState *ms = MACHINE(qdev_get_machine());
39
OpenRISCCPU *cpu = env_archcpu(env);
40
CPUState *cs = env_cpu(env);
41
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
42
return env->evbar;
43
44
case TO_SPR(0, 16): /* NPC (equals PC) */
45
- cpu_restore_state(cs, GETPC(), false);
46
+ if (cpu_unwind_state_data(cs, GETPC(), data)) {
47
+ return data[0];
48
+ }
49
return env->pc;
50
51
case TO_SPR(0, 17): /* SR */
52
return cpu_get_sr(env);
53
54
case TO_SPR(0, 18): /* PPC */
55
- cpu_restore_state(cs, GETPC(), false);
56
+ if (cpu_unwind_state_data(cs, GETPC(), data)) {
57
+ if (data[1] & 2) {
58
+ return data[0] - 4;
59
+ }
60
+ }
61
return env->ppc;
62
63
case TO_SPR(0, 32): /* EPCR */
64
--
65
2.34.1
New patch
1
The value passed is always true, and if the target's
2
synchronize_from_tb hook is non-trivial, not exiting
3
may be erroneous.
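With the argument gone, the API itself promises the exit; the
canonical caller, as updated in the cpu-exec-common.c hunk below,
reduces to:

    void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
    {
        if (pc) {
            cpu_restore_state(cpu, pc);  /* always treated as exiting */
        }
        cpu_loop_exit(cpu);
    }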
1
4
5
Reviewed-by: Claudio Fontana <cfontana@suse.de>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
include/exec/exec-all.h | 5 +----
9
accel/tcg/cpu-exec-common.c | 2 +-
10
accel/tcg/translate-all.c | 12 ++----------
11
target/alpha/helper.c | 2 +-
12
target/alpha/mem_helper.c | 2 +-
13
target/arm/op_helper.c | 2 +-
14
target/arm/tlb_helper.c | 8 ++++----
15
target/cris/helper.c | 2 +-
16
target/i386/tcg/sysemu/svm_helper.c | 2 +-
17
target/m68k/op_helper.c | 4 ++--
18
target/microblaze/helper.c | 2 +-
19
target/nios2/op_helper.c | 2 +-
20
target/openrisc/sys_helper.c | 4 ++--
21
target/ppc/excp_helper.c | 2 +-
22
target/s390x/tcg/excp_helper.c | 2 +-
23
target/tricore/op_helper.c | 2 +-
24
target/xtensa/helper.c | 6 +++---
25
17 files changed, 25 insertions(+), 36 deletions(-)
26
27
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
28
index XXXXXXX..XXXXXXX 100644
29
--- a/include/exec/exec-all.h
30
+++ b/include/exec/exec-all.h
31
@@ -XXX,XX +XXX,XX @@ bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
32
* cpu_restore_state:
33
* @cpu: the cpu context
34
* @host_pc: the host pc within the translation
35
- * @will_exit: true if the TB executed will be interrupted after some
36
- cpu adjustments. Required for maintaining the correct
37
- icount valus
38
* @return: true if state was restored, false otherwise
39
*
40
* Attempt to restore the state for a fault occurring in translated
41
* code. If @host_pc is not in translated code no state is
42
* restored and the function returns false.
43
*/
44
-bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit);
45
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
46
47
G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
48
G_NORETURN void cpu_loop_exit(CPUState *cpu);
49
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/accel/tcg/cpu-exec-common.c
52
+++ b/accel/tcg/cpu-exec-common.c
53
@@ -XXX,XX +XXX,XX @@ void cpu_loop_exit(CPUState *cpu)
54
void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
55
{
56
if (pc) {
57
- cpu_restore_state(cpu, pc, true);
58
+ cpu_restore_state(cpu, pc);
59
}
60
cpu_loop_exit(cpu);
61
}
62
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/accel/tcg/translate-all.c
65
+++ b/accel/tcg/translate-all.c
66
@@ -XXX,XX +XXX,XX @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
67
#endif
68
}
69
70
-bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
71
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
72
{
73
- /*
74
- * The pc update associated with restore without exit will
75
- * break the relative pc adjustments performed by TARGET_TB_PCREL.
76
- */
77
- if (TARGET_TB_PCREL) {
78
- assert(will_exit);
79
- }
80
-
81
/*
82
* The host_pc has to be in the rx region of the code buffer.
83
* If it is not we will not be able to resolve it here.
84
@@ -XXX,XX +XXX,XX @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
85
if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
86
TranslationBlock *tb = tcg_tb_lookup(host_pc);
87
if (tb) {
88
- cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
89
+ cpu_restore_state_from_tb(cpu, tb, host_pc, true);
90
return true;
91
}
92
}
93
diff --git a/target/alpha/helper.c b/target/alpha/helper.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/alpha/helper.c
96
+++ b/target/alpha/helper.c
97
@@ -XXX,XX +XXX,XX @@ G_NORETURN void dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
98
cs->exception_index = excp;
99
env->error_code = error;
100
if (retaddr) {
101
- cpu_restore_state(cs, retaddr, true);
102
+ cpu_restore_state(cs, retaddr);
103
/* Floating-point exceptions (our only users) point to the next PC. */
104
env->pc += 4;
105
}
106
diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c
107
index XXXXXXX..XXXXXXX 100644
108
--- a/target/alpha/mem_helper.c
109
+++ b/target/alpha/mem_helper.c
110
@@ -XXX,XX +XXX,XX @@ static void do_unaligned_access(CPUAlphaState *env, vaddr addr, uintptr_t retadd
111
uint64_t pc;
112
uint32_t insn;
113
114
- cpu_restore_state(env_cpu(env), retaddr, true);
115
+ cpu_restore_state(env_cpu(env), retaddr);
116
117
pc = env->pc;
118
insn = cpu_ldl_code(env, pc);
119
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/target/arm/op_helper.c
122
+++ b/target/arm/op_helper.c
123
@@ -XXX,XX +XXX,XX @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
124
* we must restore CPU state here before setting the syndrome
125
* the caller passed us, and cannot use cpu_loop_exit_restore().
126
*/
127
- cpu_restore_state(cs, ra, true);
128
+ cpu_restore_state(cs, ra);
129
raise_exception(env, excp, syndrome, target_el);
130
}
131
132
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
133
index XXXXXXX..XXXXXXX 100644
134
--- a/target/arm/tlb_helper.c
135
+++ b/target/arm/tlb_helper.c
136
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
137
ARMMMUFaultInfo fi = {};
138
139
/* now we have a real cpu fault */
140
- cpu_restore_state(cs, retaddr, true);
141
+ cpu_restore_state(cs, retaddr);
142
143
fi.type = ARMFault_Alignment;
144
arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
145
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
146
ARMMMUFaultInfo fi = {};
147
148
/* now we have a real cpu fault */
149
- cpu_restore_state(cs, retaddr, true);
150
+ cpu_restore_state(cs, retaddr);
151
152
fi.ea = arm_extabort_type(response);
153
fi.type = ARMFault_SyncExternal;
154
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
155
return false;
156
} else {
157
/* now we have a real cpu fault */
158
- cpu_restore_state(cs, retaddr, true);
159
+ cpu_restore_state(cs, retaddr);
160
arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
161
}
162
}
163
@@ -XXX,XX +XXX,XX @@ void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
164
* We report both ESR and FAR to signal handlers.
165
* For now, it's easiest to deliver the fault normally.
166
*/
167
- cpu_restore_state(cs, ra, true);
168
+ cpu_restore_state(cs, ra);
169
arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
170
}
171
172
diff --git a/target/cris/helper.c b/target/cris/helper.c
173
index XXXXXXX..XXXXXXX 100644
174
--- a/target/cris/helper.c
175
+++ b/target/cris/helper.c
176
@@ -XXX,XX +XXX,XX @@ bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
177
cs->exception_index = EXCP_BUSFAULT;
178
env->fault_vector = res.bf_vec;
179
if (retaddr) {
180
- if (cpu_restore_state(cs, retaddr, true)) {
181
+ if (cpu_restore_state(cs, retaddr)) {
182
/* Evaluate flags after retranslation. */
183
helper_top_evaluate_flags(env);
184
}
185
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
186
index XXXXXXX..XXXXXXX 100644
187
--- a/target/i386/tcg/sysemu/svm_helper.c
188
+++ b/target/i386/tcg/sysemu/svm_helper.c
189
@@ -XXX,XX +XXX,XX @@ void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
190
{
191
CPUState *cs = env_cpu(env);
192
193
- cpu_restore_state(cs, retaddr, true);
194
+ cpu_restore_state(cs, retaddr);
195
196
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
197
PRIx64 ", " TARGET_FMT_lx ")!\n",
198
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
199
index XXXXXXX..XXXXXXX 100644
200
--- a/target/m68k/op_helper.c
201
+++ b/target/m68k/op_helper.c
202
@@ -XXX,XX +XXX,XX @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
203
M68kCPU *cpu = M68K_CPU(cs);
204
CPUM68KState *env = &cpu->env;
205
206
- cpu_restore_state(cs, retaddr, true);
207
+ cpu_restore_state(cs, retaddr);
208
209
if (m68k_feature(env, M68K_FEATURE_M68040)) {
210
env->mmu.mmusr = 0;
211
@@ -XXX,XX +XXX,XX @@ raise_exception_format2(CPUM68KState *env, int tt, int ilen, uintptr_t raddr)
212
cs->exception_index = tt;
213
214
/* Recover PC and CC_OP for the beginning of the insn. */
215
- cpu_restore_state(cs, raddr, true);
216
+ cpu_restore_state(cs, raddr);
217
218
/* Flags are current in env->cc_*, or are undefined. */
219
env->cc_op = CC_OP_FLAGS;
220
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
221
index XXXXXXX..XXXXXXX 100644
222
--- a/target/microblaze/helper.c
223
+++ b/target/microblaze/helper.c
224
@@ -XXX,XX +XXX,XX @@ void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
225
uint32_t esr, iflags;
226
227
/* Recover the pc and iflags from the corresponding insn_start. */
228
- cpu_restore_state(cs, retaddr, true);
229
+ cpu_restore_state(cs, retaddr);
230
iflags = cpu->env.iflags;
231
232
qemu_log_mask(CPU_LOG_INT,
233
diff --git a/target/nios2/op_helper.c b/target/nios2/op_helper.c
234
index XXXXXXX..XXXXXXX 100644
235
--- a/target/nios2/op_helper.c
236
+++ b/target/nios2/op_helper.c
237
@@ -XXX,XX +XXX,XX @@ void nios2_cpu_loop_exit_advance(CPUNios2State *env, uintptr_t retaddr)
238
* Do this here, rather than in restore_state_to_opc(),
239
* lest we affect QEMU internal exceptions, like EXCP_DEBUG.
240
*/
241
- cpu_restore_state(cs, retaddr, true);
242
+ cpu_restore_state(cs, retaddr);
243
env->pc += 4;
244
cpu_loop_exit(cs);
245
}
246
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
247
index XXXXXXX..XXXXXXX 100644
248
--- a/target/openrisc/sys_helper.c
249
+++ b/target/openrisc/sys_helper.c
250
@@ -XXX,XX +XXX,XX @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
251
break;
252
253
case TO_SPR(0, 16): /* NPC */
254
- cpu_restore_state(cs, GETPC(), true);
255
+ cpu_restore_state(cs, GETPC());
256
/* ??? Mirror or1ksim in not trashing delayed branch state
257
when "jumping" to the current instruction. */
258
if (env->pc != rb) {
259
@@ -XXX,XX +XXX,XX @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
260
case TO_SPR(8, 0): /* PMR */
261
env->pmr = rb;
262
if (env->pmr & PMR_DME || env->pmr & PMR_SME) {
263
- cpu_restore_state(cs, GETPC(), true);
264
+ cpu_restore_state(cs, GETPC());
265
env->pc += 4;
266
cs->halted = 1;
267
raise_exception(cpu, EXCP_HALTED);
268
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
269
index XXXXXXX..XXXXXXX 100644
270
--- a/target/ppc/excp_helper.c
271
+++ b/target/ppc/excp_helper.c
272
@@ -XXX,XX +XXX,XX @@ void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
273
uint32_t insn;
274
275
/* Restore state and reload the insn we executed, for filling in DSISR. */
276
- cpu_restore_state(cs, retaddr, true);
277
+ cpu_restore_state(cs, retaddr);
278
insn = cpu_ldl_code(env, env->nip);
279
280
switch (env->mmu_model) {
281
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
282
index XXXXXXX..XXXXXXX 100644
283
--- a/target/s390x/tcg/excp_helper.c
284
+++ b/target/s390x/tcg/excp_helper.c
285
@@ -XXX,XX +XXX,XX @@ G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env,
286
{
287
CPUState *cs = env_cpu(env);
288
289
- cpu_restore_state(cs, ra, true);
290
+ cpu_restore_state(cs, ra);
291
qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
292
env->psw.addr);
293
trigger_pgm_exception(env, code);
294
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
295
index XXXXXXX..XXXXXXX 100644
296
--- a/target/tricore/op_helper.c
297
+++ b/target/tricore/op_helper.c
298
@@ -XXX,XX +XXX,XX @@ void raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin
299
{
300
CPUState *cs = env_cpu(env);
301
/* in case we come from a helper-call we need to restore the PC */
302
- cpu_restore_state(cs, pc, true);
303
+ cpu_restore_state(cs, pc);
304
305
/* Tin is loaded into d[15] */
306
env->gpr_d[15] = tin;
307
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
308
index XXXXXXX..XXXXXXX 100644
309
--- a/target/xtensa/helper.c
310
+++ b/target/xtensa/helper.c
311
@@ -XXX,XX +XXX,XX @@ void xtensa_cpu_do_unaligned_access(CPUState *cs,
312
313
assert(xtensa_option_enabled(env->config,
314
XTENSA_OPTION_UNALIGNED_EXCEPTION));
315
- cpu_restore_state(CPU(cpu), retaddr, true);
316
+ cpu_restore_state(CPU(cpu), retaddr);
317
HELPER(exception_cause_vaddr)(env,
318
env->pc, LOAD_STORE_ALIGNMENT_CAUSE,
319
addr);
320
@@ -XXX,XX +XXX,XX @@ bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
321
} else if (probe) {
322
return false;
323
} else {
324
- cpu_restore_state(cs, retaddr, true);
325
+ cpu_restore_state(cs, retaddr);
326
HELPER(exception_cause_vaddr)(env, env->pc, ret, address);
327
}
328
}
329
@@ -XXX,XX +XXX,XX @@ void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
330
XtensaCPU *cpu = XTENSA_CPU(cs);
331
CPUXtensaState *env = &cpu->env;
332
333
- cpu_restore_state(cs, retaddr, true);
334
+ cpu_restore_state(cs, retaddr);
335
HELPER(exception_cause_vaddr)(env, env->pc,
336
access_type == MMU_INST_FETCH ?
337
INSTR_PIF_ADDR_ERROR_CAUSE :
338
--
339
2.34.1
New patch
1
The value passed is always true.
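For reference, the resulting internal API, condensed from the
internal.h hunk below:

    void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                   uintptr_t host_pc);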
1
2
3
Reviewed-by: Claudio Fontana <cfontana@suse.de>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
accel/tcg/internal.h | 2 +-
7
accel/tcg/tb-maint.c | 4 ++--
8
accel/tcg/translate-all.c | 15 +++++++--------
9
3 files changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/internal.h
14
+++ b/accel/tcg/internal.h
15
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
16
tb_page_addr_t phys_page2);
17
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
18
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
19
- uintptr_t host_pc, bool reset_icount);
20
+ uintptr_t host_pc);
21
22
/* Return the current PC from CPU, which may be cached in TB. */
23
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
24
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/accel/tcg/tb-maint.c
27
+++ b/accel/tcg/tb-maint.c
28
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
29
* restore the CPU state.
30
*/
31
current_tb_modified = true;
32
- cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
33
+ cpu_restore_state_from_tb(cpu, current_tb, retaddr);
34
}
35
#endif /* TARGET_HAS_PRECISE_SMC */
36
tb_phys_invalidate__locked(tb);
37
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
38
* function to partially restore the CPU state.
39
*/
40
current_tb_modified = true;
41
- cpu_restore_state_from_tb(cpu, current_tb, pc, true);
42
+ cpu_restore_state_from_tb(cpu, current_tb, pc);
43
}
44
#endif /* TARGET_HAS_PRECISE_SMC */
45
tb_phys_invalidate(tb, addr);
46
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/accel/tcg/translate-all.c
49
+++ b/accel/tcg/translate-all.c
50
@@ -XXX,XX +XXX,XX @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
51
}
52
53
/*
54
- * The cpu state corresponding to 'host_pc' is restored.
55
- * When reset_icount is true, current TB will be interrupted and
56
- * icount should be recalculated.
57
+ * The cpu state corresponding to 'host_pc' is restored in
58
+ * preparation for exiting the TB.
59
*/
60
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
61
- uintptr_t host_pc, bool reset_icount)
62
+ uintptr_t host_pc)
63
{
64
uint64_t data[TARGET_INSN_START_WORDS];
65
#ifdef CONFIG_PROFILER
66
@@ -XXX,XX +XXX,XX @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
67
return;
68
}
69
70
- if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
71
+ if (tb_cflags(tb) & CF_USE_ICOUNT) {
72
assert(icount_enabled());
73
/*
74
* Reset the cycle counter to the start of the block and
75
@@ -XXX,XX +XXX,XX @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
76
if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
77
TranslationBlock *tb = tcg_tb_lookup(host_pc);
78
if (tb) {
79
- cpu_restore_state_from_tb(cpu, tb, host_pc, true);
80
+ cpu_restore_state_from_tb(cpu, tb, host_pc);
81
return true;
82
}
83
}
84
@@ -XXX,XX +XXX,XX @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
85
tb = tcg_tb_lookup(retaddr);
86
if (tb) {
87
/* We can use retranslation to find the PC. */
88
- cpu_restore_state_from_tb(cpu, tb, retaddr, true);
89
+ cpu_restore_state_from_tb(cpu, tb, retaddr);
90
tb_phys_invalidate(tb, -1);
91
} else {
92
/* The exception probably happened in a helper. The CPU state should
93
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
94
cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
95
(void *)retaddr);
96
}
97
- cpu_restore_state_from_tb(cpu, tb, retaddr, true);
98
+ cpu_restore_state_from_tb(cpu, tb, retaddr);
99
100
/*
101
* Some guests must re-execute the branch when re-executing a delay
102
--
103
2.34.1
1
From: Claudio Fontana <cfontana@suse.de>
1
The helpers for reset_rf, cli, sti, clac, stac are
2
completely trivial; implement them inline.
2
3
3
Signed-off-by: Claudio Fontana <cfontana@suse.de>
4
Drop some nearby #if 0 code.
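With gen_set_eflags()/gen_reset_eflags() from the translate.c hunk
below, the decoder can flip these bits without a helper call. The
call sites fall in the part of the patch this view truncates; they
presumably look like the following sketch (IF_MASK is the real eflags
bit, the case labels are illustrative only):

    case 0xfa: /* cli */
        gen_reset_eflags(s, IF_MASK);
        break;
    case 0xfb: /* sti */
        gen_set_eflags(s, IF_MASK);
        /* the real decoder also handles the STI interrupt shadow */
        break;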
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
5
Message-Id: <20201015143217.29337-4-cfontana@suse.de>
6
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
9
---
8
accel/tcg/tcg-cpus-icount.h | 6 +--
10
target/i386/helper.h | 5 -----
9
accel/tcg/tcg-cpus-rr.h | 2 +-
11
target/i386/tcg/cc_helper.c | 41 -------------------------------------
10
accel/tcg/tcg-cpus.h | 6 +--
12
target/i386/tcg/translate.c | 30 ++++++++++++++++++++++-----
11
accel/tcg/tcg-cpus-icount.c | 24 ++++++------
13
3 files changed, 25 insertions(+), 51 deletions(-)
12
accel/tcg/tcg-cpus-mttcg.c | 10 ++---
13
accel/tcg/tcg-cpus-rr.c | 74 ++++++++++++++++++-------------------
14
accel/tcg/tcg-cpus.c | 6 +--
15
7 files changed, 64 insertions(+), 64 deletions(-)
16
14
17
diff --git a/accel/tcg/tcg-cpus-icount.h b/accel/tcg/tcg-cpus-icount.h
15
diff --git a/target/i386/helper.h b/target/i386/helper.h
18
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
19
--- a/accel/tcg/tcg-cpus-icount.h
17
--- a/target/i386/helper.h
20
+++ b/accel/tcg/tcg-cpus-icount.h
18
+++ b/target/i386/helper.h
21
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(syscall, void, env, int)
22
#ifndef TCG_CPUS_ICOUNT_H
20
DEF_HELPER_2(sysret, void, env, int)
23
#define TCG_CPUS_ICOUNT_H
21
#endif
24
22
DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int)
25
-void handle_icount_deadline(void);
23
-DEF_HELPER_1(reset_rf, void, env)
26
-void prepare_icount_for_run(CPUState *cpu);
24
DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int)
27
-void process_icount_data(CPUState *cpu);
25
DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int)
28
+void icount_handle_deadline(void);
26
-DEF_HELPER_1(cli, void, env)
29
+void icount_prepare_for_run(CPUState *cpu);
27
-DEF_HELPER_1(sti, void, env)
30
+void icount_process_data(CPUState *cpu);
28
-DEF_HELPER_1(clac, void, env)
31
29
-DEF_HELPER_1(stac, void, env)
32
#endif /* TCG_CPUS_ICOUNT_H */
30
DEF_HELPER_3(boundw, void, env, tl, int)
33
diff --git a/accel/tcg/tcg-cpus-rr.h b/accel/tcg/tcg-cpus-rr.h
31
DEF_HELPER_3(boundl, void, env, tl, int)
32
33
diff --git a/target/i386/tcg/cc_helper.c b/target/i386/tcg/cc_helper.c
34
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
35
--- a/accel/tcg/tcg-cpus-rr.h
35
--- a/target/i386/tcg/cc_helper.c
36
+++ b/accel/tcg/tcg-cpus-rr.h
36
+++ b/target/i386/tcg/cc_helper.c
37
@@ -XXX,XX +XXX,XX @@
37
@@ -XXX,XX +XXX,XX @@ void helper_clts(CPUX86State *env)
38
#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
38
env->cr[0] &= ~CR0_TS_MASK;
39
39
env->hflags &= ~HF_TS_MASK;
40
/* Kick all RR vCPUs. */
40
}
41
-void qemu_cpu_kick_rr_cpus(CPUState *unused);
41
-
42
+void rr_kick_vcpu_thread(CPUState *unused);
42
-void helper_reset_rf(CPUX86State *env)
43
43
-{
44
/* start the round robin vcpu thread */
44
- env->eflags &= ~RF_MASK;
45
void rr_start_vcpu_thread(CPUState *cpu);
45
-}
46
diff --git a/accel/tcg/tcg-cpus.h b/accel/tcg/tcg-cpus.h
46
-
47
-void helper_cli(CPUX86State *env)
48
-{
49
- env->eflags &= ~IF_MASK;
50
-}
51
-
52
-void helper_sti(CPUX86State *env)
53
-{
54
- env->eflags |= IF_MASK;
55
-}
56
-
57
-void helper_clac(CPUX86State *env)
58
-{
59
- env->eflags &= ~AC_MASK;
60
-}
61
-
62
-void helper_stac(CPUX86State *env)
63
-{
64
- env->eflags |= AC_MASK;
65
-}
66
-
67
-#if 0
68
-/* vm86plus instructions */
69
-void helper_cli_vm(CPUX86State *env)
70
-{
71
- env->eflags &= ~VIF_MASK;
72
-}
73
-
74
-void helper_sti_vm(CPUX86State *env)
75
-{
76
- env->eflags |= VIF_MASK;
77
- if (env->eflags & VIP_MASK) {
78
- raise_exception_ra(env, EXCP0D_GPF, GETPC());
79
- }
80
-}
81
-#endif
82
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
47
index XXXXXXX..XXXXXXX 100644
83
index XXXXXXX..XXXXXXX 100644
48
--- a/accel/tcg/tcg-cpus.h
84
--- a/target/i386/tcg/translate.c
49
+++ b/accel/tcg/tcg-cpus.h
85
+++ b/target/i386/tcg/translate.c
50
@@ -XXX,XX +XXX,XX @@ extern const CpusAccel tcg_cpus_mttcg;
86
@@ -XXX,XX +XXX,XX @@ static void gen_reset_hflag(DisasContext *s, uint32_t mask)
51
extern const CpusAccel tcg_cpus_icount;
52
extern const CpusAccel tcg_cpus_rr;
53
54
-void qemu_tcg_destroy_vcpu(CPUState *cpu);
55
-int tcg_cpu_exec(CPUState *cpu);
56
-void tcg_handle_interrupt(CPUState *cpu, int mask);
57
+void tcg_cpus_destroy(CPUState *cpu);
58
+int tcg_cpus_exec(CPUState *cpu);
59
+void tcg_cpus_handle_interrupt(CPUState *cpu, int mask);
60
61
#endif /* TCG_CPUS_H */
62
diff --git a/accel/tcg/tcg-cpus-icount.c b/accel/tcg/tcg-cpus-icount.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/accel/tcg/tcg-cpus-icount.c
65
+++ b/accel/tcg/tcg-cpus-icount.c
66
@@ -XXX,XX +XXX,XX @@
67
#include "tcg-cpus-icount.h"
68
#include "tcg-cpus-rr.h"
69
70
-static int64_t tcg_get_icount_limit(void)
71
+static int64_t icount_get_limit(void)
72
{
73
int64_t deadline;
74
75
@@ -XXX,XX +XXX,XX @@ static int64_t tcg_get_icount_limit(void)
76
}
87
}
77
}
88
}
78
89
79
-static void notify_aio_contexts(void)
90
+static void gen_set_eflags(DisasContext *s, target_ulong mask)
80
+static void icount_notify_aio_contexts(void)
91
+{
92
+ TCGv t = tcg_temp_new();
93
+
94
+ tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
95
+ tcg_gen_ori_tl(t, t, mask);
96
+ tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
97
+ tcg_temp_free(t);
98
+}
99
+
100
+static void gen_reset_eflags(DisasContext *s, target_ulong mask)
101
+{
102
+ TCGv t = tcg_temp_new();
103
+
104
+ tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
105
+ tcg_gen_andi_tl(t, t, ~mask);
106
+ tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
107
+ tcg_temp_free(t);
108
+}
109
+
110
/* Clear BND registers during legacy branches. */
111
static void gen_bnd_jmp(DisasContext *s)
81
{
112
{
82
/* Wake up other AioContexts. */
113
@@ -XXX,XX +XXX,XX @@ do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
83
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
84
qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
85
}
86
87
-void handle_icount_deadline(void)
88
+void icount_handle_deadline(void)
89
{
90
assert(qemu_in_vcpu_thread());
91
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
92
QEMU_TIMER_ATTR_ALL);
93
94
if (deadline == 0) {
95
- notify_aio_contexts();
96
+ icount_notify_aio_contexts();
97
}
114
}
98
}
115
99
116
if (s->base.tb->flags & HF_RF_MASK) {
100
-void prepare_icount_for_run(CPUState *cpu)
117
- gen_helper_reset_rf(cpu_env);
101
+void icount_prepare_for_run(CPUState *cpu)
118
+ gen_reset_eflags(s, RF_MASK);
102
{
103
int insns_left;
104
105
/*
106
- * These should always be cleared by process_icount_data after
107
+ * These should always be cleared by icount_process_data after
108
* each vCPU execution. However u16.high can be raised
109
- * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
110
+ * asynchronously by cpu_exit/cpu_interrupt/tcg_cpus_handle_interrupt
111
*/
112
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
113
g_assert(cpu->icount_extra == 0);
114
115
- cpu->icount_budget = tcg_get_icount_limit();
116
+ cpu->icount_budget = icount_get_limit();
117
insns_left = MIN(0xffff, cpu->icount_budget);
118
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
119
cpu->icount_extra = cpu->icount_budget - insns_left;
120
@@ -XXX,XX +XXX,XX @@ void prepare_icount_for_run(CPUState *cpu)
121
replay_mutex_lock();
122
123
if (cpu->icount_budget == 0 && replay_has_checkpoint()) {
124
- notify_aio_contexts();
125
+ icount_notify_aio_contexts();
126
}
119
}
127
}
120
if (recheck_tf) {
128
121
gen_helper_rechecking_single_step(cpu_env);
129
-void process_icount_data(CPUState *cpu)
122
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
130
+void icount_process_data(CPUState *cpu)
123
#endif
131
{
124
case 0xfa: /* cli */
132
/* Account for executed instructions */
125
if (check_iopl(s)) {
133
icount_update(cpu);
126
- gen_helper_cli(cpu_env);
134
@@ -XXX,XX +XXX,XX @@ static void icount_handle_interrupt(CPUState *cpu, int mask)
127
+ gen_reset_eflags(s, IF_MASK);
135
{
136
int old_mask = cpu->interrupt_request;
137
138
- tcg_handle_interrupt(cpu, mask);
139
+ tcg_cpus_handle_interrupt(cpu, mask);
140
if (qemu_cpu_is_self(cpu) &&
141
!cpu->can_do_io
142
&& (mask & ~old_mask) != 0) {
143
@@ -XXX,XX +XXX,XX @@ static void icount_handle_interrupt(CPUState *cpu, int mask)
144
145
const CpusAccel tcg_cpus_icount = {
146
.create_vcpu_thread = rr_start_vcpu_thread,
147
- .kick_vcpu_thread = qemu_cpu_kick_rr_cpus,
148
+ .kick_vcpu_thread = rr_kick_vcpu_thread,
149
150
.handle_interrupt = icount_handle_interrupt,
151
.get_virtual_clock = icount_get,
152
diff --git a/accel/tcg/tcg-cpus-mttcg.c b/accel/tcg/tcg-cpus-mttcg.c
153
index XXXXXXX..XXXXXXX 100644
154
--- a/accel/tcg/tcg-cpus-mttcg.c
155
+++ b/accel/tcg/tcg-cpus-mttcg.c
156
@@ -XXX,XX +XXX,XX @@
157
* current CPUState for a given thread.
158
*/
159
160
-static void *tcg_cpu_thread_fn(void *arg)
161
+static void *mttcg_cpu_thread_fn(void *arg)
162
{
163
CPUState *cpu = arg;
164
165
@@ -XXX,XX +XXX,XX @@ static void *tcg_cpu_thread_fn(void *arg)
166
if (cpu_can_run(cpu)) {
167
int r;
168
qemu_mutex_unlock_iothread();
169
- r = tcg_cpu_exec(cpu);
170
+ r = tcg_cpus_exec(cpu);
171
qemu_mutex_lock_iothread();
172
switch (r) {
173
case EXCP_DEBUG:
174
@@ -XXX,XX +XXX,XX @@ static void *tcg_cpu_thread_fn(void *arg)
175
qemu_wait_io_event(cpu);
176
} while (!cpu->unplug || cpu_can_run(cpu));
177
178
- qemu_tcg_destroy_vcpu(cpu);
179
+ tcg_cpus_destroy(cpu);
180
qemu_mutex_unlock_iothread();
181
rcu_unregister_thread();
182
return NULL;
183
@@ -XXX,XX +XXX,XX @@ static void mttcg_start_vcpu_thread(CPUState *cpu)
184
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
185
cpu->cpu_index);
186
187
- qemu_thread_create(cpu->thread, thread_name, tcg_cpu_thread_fn,
188
+ qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn,
189
cpu, QEMU_THREAD_JOINABLE);
190
191
#ifdef _WIN32
192
@@ -XXX,XX +XXX,XX @@ const CpusAccel tcg_cpus_mttcg = {
193
.create_vcpu_thread = mttcg_start_vcpu_thread,
194
.kick_vcpu_thread = mttcg_kick_vcpu_thread,
195
196
- .handle_interrupt = tcg_handle_interrupt,
197
+ .handle_interrupt = tcg_cpus_handle_interrupt,
198
};
199
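With the renames applied, each variant reads as one module: rr_*, mttcg_* and icount_* entry points behind the three CpusAccel tables, over the shared tcg_cpus_* core. As a minimal sketch of how one table is chosen at accelerator setup (paraphrased, not quoted from the series; tcg_choose_cpus_accel is a hypothetical name, and the registration call assumes the CpusAccel API this series builds on):

    /* Sketch: pick the TCG thread model once, at accel init time. */
    static void tcg_choose_cpus_accel(void)
    {
        if (qemu_tcg_mttcg_enabled()) {
            cpus_register_accel(&tcg_cpus_mttcg);   /* one thread per vCPU */
        } else if (icount_enabled()) {
            cpus_register_accel(&tcg_cpus_icount);  /* round-robin + icount */
        } else {
            cpus_register_accel(&tcg_cpus_rr);      /* plain round-robin */
        }
    }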
diff --git a/accel/tcg/tcg-cpus-rr.c b/accel/tcg/tcg-cpus-rr.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-cpus-rr.c
+++ b/accel/tcg/tcg-cpus-rr.c
@@ -XXX,XX +XXX,XX @@
 #include "tcg-cpus-icount.h"

 /* Kick all RR vCPUs */
-void qemu_cpu_kick_rr_cpus(CPUState *unused)
+void rr_kick_vcpu_thread(CPUState *unused)
 {
     CPUState *cpu;

@@ -XXX,XX +XXX,XX @@ void qemu_cpu_kick_rr_cpus(CPUState *unused)
  * idleness is complete.
  */

-static QEMUTimer *tcg_kick_vcpu_timer;
-static CPUState *tcg_current_rr_cpu;
+static QEMUTimer *rr_kick_vcpu_timer;
+static CPUState *rr_current_cpu;

 #define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

-static inline int64_t qemu_tcg_next_kick(void)
+static inline int64_t rr_next_kick_time(void)
 {
     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
 }

 /* Kick the currently round-robin scheduled vCPU to next */
-static void qemu_cpu_kick_rr_next_cpu(void)
+static void rr_kick_next_cpu(void)
 {
     CPUState *cpu;
     do {
-        cpu = qatomic_mb_read(&tcg_current_rr_cpu);
+        cpu = qatomic_mb_read(&rr_current_cpu);
         if (cpu) {
             cpu_exit(cpu);
         }
-    } while (cpu != qatomic_mb_read(&tcg_current_rr_cpu));
+    } while (cpu != qatomic_mb_read(&rr_current_cpu));
 }

-static void kick_tcg_thread(void *opaque)
+static void rr_kick_thread(void *opaque)
 {
-    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
-    qemu_cpu_kick_rr_next_cpu();
+    timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
+    rr_kick_next_cpu();
 }

-static void start_tcg_kick_timer(void)
+static void rr_start_kick_timer(void)
 {
-    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
-        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
-                                           kick_tcg_thread, NULL);
+    if (!rr_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
+        rr_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                          rr_kick_thread, NULL);
     }
-    if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
-        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
+    if (rr_kick_vcpu_timer && !timer_pending(rr_kick_vcpu_timer)) {
+        timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
     }
 }

-static void stop_tcg_kick_timer(void)
+static void rr_stop_kick_timer(void)
 {
-    if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
-        timer_del(tcg_kick_vcpu_timer);
+    if (rr_kick_vcpu_timer && timer_pending(rr_kick_vcpu_timer)) {
+        timer_del(rr_kick_vcpu_timer);
     }
 }

-static void qemu_tcg_rr_wait_io_event(void)
+static void rr_wait_io_event(void)
 {
     CPUState *cpu;

     while (all_cpu_threads_idle()) {
-        stop_tcg_kick_timer();
+        rr_stop_kick_timer();
         qemu_cond_wait_iothread(first_cpu->halt_cond);
     }

-    start_tcg_kick_timer();
+    rr_start_kick_timer();

     CPU_FOREACH(cpu) {
         qemu_wait_io_event_common(cpu);
@@ -XXX,XX +XXX,XX @@ static void qemu_tcg_rr_wait_io_event(void)
 * Destroy any remaining vCPUs which have been unplugged and have
 * finished running
 */
-static void deal_with_unplugged_cpus(void)
+static void rr_deal_with_unplugged_cpus(void)
 {
     CPUState *cpu;

     CPU_FOREACH(cpu) {
         if (cpu->unplug && !cpu_can_run(cpu)) {
-            qemu_tcg_destroy_vcpu(cpu);
+            tcg_cpus_destroy(cpu);
             break;
         }
     }
@@ -XXX,XX +XXX,XX @@ static void deal_with_unplugged_cpus(void)
 * elsewhere.
 */

-static void *tcg_rr_cpu_thread_fn(void *arg)
+static void *rr_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;

@@ -XXX,XX +XXX,XX @@ static void *tcg_rr_cpu_thread_fn(void *arg)
         }
     }

-    start_tcg_kick_timer();
+    rr_start_kick_timer();

     cpu = first_cpu;

@@ -XXX,XX +XXX,XX @@ static void *tcg_rr_cpu_thread_fn(void *arg)
             * Run the timers here. This is much more efficient than
             * waking up the I/O thread and waiting for completion.
             */
-            handle_icount_deadline();
+            icount_handle_deadline();
         }

         replay_mutex_unlock();
@@ -XXX,XX +XXX,XX @@ static void *tcg_rr_cpu_thread_fn(void *arg)

         while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {

-            qatomic_mb_set(&tcg_current_rr_cpu, cpu);
+            qatomic_mb_set(&rr_current_cpu, cpu);
             current_cpu = cpu;

             qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@@ -XXX,XX +XXX,XX @@ static void *tcg_rr_cpu_thread_fn(void *arg)

                 qemu_mutex_unlock_iothread();
                 if (icount_enabled()) {
-                    prepare_icount_for_run(cpu);
+                    icount_prepare_for_run(cpu);
                 }
-                r = tcg_cpu_exec(cpu);
+                r = tcg_cpus_exec(cpu);
                 if (icount_enabled()) {
-                    process_icount_data(cpu);
+                    icount_process_data(cpu);
                 }
                 qemu_mutex_lock_iothread();
@@ -XXX,XX +XXX,XX @@ static void *tcg_rr_cpu_thread_fn(void *arg)
         } /* while (cpu && !cpu->exit_request).. */

         /* Does not need qatomic_mb_set because a spurious wakeup is okay. */
-        qatomic_set(&tcg_current_rr_cpu, NULL);
+        qatomic_set(&rr_current_cpu, NULL);

         if (cpu && cpu->exit_request) {
             qatomic_mb_set(&cpu->exit_request, 0);
@@ -XXX,XX +XXX,XX @@ static void *tcg_rr_cpu_thread_fn(void *arg)
             qemu_notify_event();
         }

-        qemu_tcg_rr_wait_io_event();
-        deal_with_unplugged_cpus();
+        rr_wait_io_event();
+        rr_deal_with_unplugged_cpus();
     }

     rcu_unregister_thread();
@@ -XXX,XX +XXX,XX @@ void rr_start_vcpu_thread(CPUState *cpu)
         /* share a single thread for all cpus with TCG */
         snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
         qemu_thread_create(cpu->thread, thread_name,
-                           tcg_rr_cpu_thread_fn,
+                           rr_cpu_thread_fn,
                            cpu, QEMU_THREAD_JOINABLE);

         single_tcg_halt_cond = cpu->halt_cond;
@@ -XXX,XX +XXX,XX @@ void rr_start_vcpu_thread(CPUState *cpu)

 const CpusAccel tcg_cpus_rr = {
     .create_vcpu_thread = rr_start_vcpu_thread,
-    .kick_vcpu_thread = qemu_cpu_kick_rr_cpus,
+    .kick_vcpu_thread = rr_kick_vcpu_thread,

-    .handle_interrupt = tcg_handle_interrupt,
+    .handle_interrupt = tcg_cpus_handle_interrupt,
 };
diff --git a/accel/tcg/tcg-cpus.c b/accel/tcg/tcg-cpus.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-cpus.c
+++ b/accel/tcg/tcg-cpus.c
@@ -XXX,XX +XXX,XX @@

 /* common functionality among all TCG variants */

-void qemu_tcg_destroy_vcpu(CPUState *cpu)
+void tcg_cpus_destroy(CPUState *cpu)
 {
     cpu_thread_signal_destroyed(cpu);
 }

-int tcg_cpu_exec(CPUState *cpu)
+int tcg_cpus_exec(CPUState *cpu)
 {
     int ret;
 #ifdef CONFIG_PROFILER
@@ -XXX,XX +XXX,XX @@ int tcg_cpu_exec(CPUState *cpu)
 }

 /* mask must never be zero, except for A20 change call */
-void tcg_handle_interrupt(CPUState *cpu, int mask)
+void tcg_cpus_handle_interrupt(CPUState *cpu, int mask)
 {
     g_assert(qemu_mutex_iothread_locked());

--
2.25.1

The helpers for reset_rf, cli, sti, clac, stac are
completely trivial; implement them inline.

Drop some nearby #if 0 code.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/helper.h        |  5 -----
 target/i386/tcg/cc_helper.c | 41 -------------------------------------
 target/i386/tcg/translate.c | 30 ++++++++++++++++++++++-----
 3 files changed, 25 insertions(+), 51 deletions(-)

diff --git a/target/i386/helper.h b/target/i386/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/helper.h
+++ b/target/i386/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(syscall, void, env, int)
 DEF_HELPER_2(sysret, void, env, int)
 #endif
 DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int)
-DEF_HELPER_1(reset_rf, void, env)
 DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int)
 DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int)
-DEF_HELPER_1(cli, void, env)
-DEF_HELPER_1(sti, void, env)
-DEF_HELPER_1(clac, void, env)
-DEF_HELPER_1(stac, void, env)
 DEF_HELPER_3(boundw, void, env, tl, int)
 DEF_HELPER_3(boundl, void, env, tl, int)
diff --git a/target/i386/tcg/cc_helper.c b/target/i386/tcg/cc_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/cc_helper.c
+++ b/target/i386/tcg/cc_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_clts(CPUX86State *env)
     env->cr[0] &= ~CR0_TS_MASK;
     env->hflags &= ~HF_TS_MASK;
 }
-
-void helper_reset_rf(CPUX86State *env)
-{
-    env->eflags &= ~RF_MASK;
-}
-
-void helper_cli(CPUX86State *env)
-{
-    env->eflags &= ~IF_MASK;
-}
-
-void helper_sti(CPUX86State *env)
-{
-    env->eflags |= IF_MASK;
-}
-
-void helper_clac(CPUX86State *env)
-{
-    env->eflags &= ~AC_MASK;
-}
-
-void helper_stac(CPUX86State *env)
-{
-    env->eflags |= AC_MASK;
-}
-
-#if 0
-/* vm86plus instructions */
-void helper_cli_vm(CPUX86State *env)
-{
-    env->eflags &= ~VIF_MASK;
-}
-
-void helper_sti_vm(CPUX86State *env)
-{
-    env->eflags |= VIF_MASK;
-    if (env->eflags & VIP_MASK) {
-        raise_exception_ra(env, EXCP0D_GPF, GETPC());
-    }
-}
-#endif
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_reset_hflag(DisasContext *s, uint32_t mask)
     }
 }

+static void gen_set_eflags(DisasContext *s, target_ulong mask)
+{
+    TCGv t = tcg_temp_new();
+
+    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
+    tcg_gen_ori_tl(t, t, mask);
+    tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
+    tcg_temp_free(t);
+}
+
+static void gen_reset_eflags(DisasContext *s, target_ulong mask)
+{
+    TCGv t = tcg_temp_new();
+
+    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
+    tcg_gen_andi_tl(t, t, ~mask);
+    tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
+    tcg_temp_free(t);
+}
+
 /* Clear BND registers during legacy branches. */
 static void gen_bnd_jmp(DisasContext *s)
 {
@@ -XXX,XX +XXX,XX @@ do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
     }

     if (s->base.tb->flags & HF_RF_MASK) {
-        gen_helper_reset_rf(cpu_env);
+        gen_reset_eflags(s, RF_MASK);
     }
     if (recheck_tf) {
         gen_helper_rechecking_single_step(cpu_env);
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
 #endif
     case 0xfa: /* cli */
         if (check_iopl(s)) {
-            gen_helper_cli(cpu_env);
+            gen_reset_eflags(s, IF_MASK);
         }
         break;
     case 0xfb: /* sti */
         if (check_iopl(s)) {
-            gen_helper_sti(cpu_env);
+            gen_set_eflags(s, IF_MASK);
             /* interruptions are enabled only the first insn after sti */
             gen_update_eip_next(s);
             gen_eob_inhibit_irq(s, true);
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
                 || CPL(s) != 0) {
                 goto illegal_op;
             }
-            gen_helper_clac(cpu_env);
+            gen_reset_eflags(s, AC_MASK);
             s->base.is_jmp = DISAS_EOB_NEXT;
             break;
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
                 || CPL(s) != 0) {
                 goto illegal_op;
             }
-            gen_helper_stac(cpu_env);
+            gen_set_eflags(s, AC_MASK);
             s->base.is_jmp = DISAS_EOB_NEXT;
             break;
--
2.34.1
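To make the before/after concrete, here is the translation-time difference for a single instruction; a minimal sketch, not part of the patch, using only the functions shown above:

    /* Before: generated code called out to a C helper at run time. */
    gen_helper_cli(cpu_env);       /* call -> helper_cli(): eflags &= ~IF_MASK */

    /* After: the same eflags update is emitted inline as three TCG ops,
     * avoiding the call: load eflags, andi ~IF_MASK, store eflags. */
    gen_reset_eflags(s, IF_MASK);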