This is v4 of my notdirty + rom patch set, with two suggested name
changes (qemu_build_not_reached, TLB_DISCARD_WRITE) from David and Alex.

r~

The following changes since commit 240ab11fb72049d6373cbbec8d788f8e411a00bc:

  Merge remote-tracking branch 'remotes/aperard/tags/pull-xen-20190924' into staging (2019-09-24 15:36:31 +0100)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20190925

for you to fetch changes up to ae57db63acf5a0399232f852acc5c1d83ef63400:

  cputlb: Pass retaddr to tb_check_watchpoint (2019-09-25 10:56:28 -0700)

----------------------------------------------------------------
Fixes for TLB_BSWAP
Conversion of NOTDIRTY and ROM handling to cputlb
Follow-up cleanups to cputlb

----------------------------------------------------------------
Richard Henderson (16):
      exec: Use TARGET_PAGE_BITS_MIN for TLB flags
      cputlb: Disable __always_inline__ without optimization
      qemu/compiler.h: Add qemu_build_not_reached
      cputlb: Use qemu_build_not_reached in load/store_helpers
      cputlb: Split out load/store_memop
      cputlb: Introduce TLB_BSWAP
      exec: Adjust notdirty tracing
      cputlb: Move ROM handling from I/O path to TLB path
      cputlb: Move NOTDIRTY handling from I/O path to TLB path
      cputlb: Partially inline memory_region_section_get_iotlb
      cputlb: Merge and move memory_notdirty_write_{prepare,complete}
      cputlb: Handle TLB_NOTDIRTY in probe_access
      cputlb: Remove cpu->mem_io_vaddr
      cputlb: Remove tb_invalidate_phys_page_range is_cpu_write_access
      cputlb: Pass retaddr to tb_invalidate_phys_page_fast
      cputlb: Pass retaddr to tb_check_watchpoint

 accel/tcg/translate-all.h      |   8 +-
 include/exec/cpu-all.h         |  23 ++-
 include/exec/cpu-common.h      |   3 -
 include/exec/exec-all.h        |   6 +-
 include/exec/memory-internal.h |  65 --------
 include/hw/core/cpu.h          |   2 -
 include/qemu/compiler.h        |  26 +++
 accel/tcg/cputlb.c             | 348 +++++++++++++++++++++++++----------------
 accel/tcg/translate-all.c      |  51 +++---
 exec.c                         | 158 +------------------
 hw/core/cpu.c                  |   1 -
 memory.c                       |  20 ---
 trace-events                   |   4 +-
 13 files changed, 288 insertions(+), 427 deletions(-)


The following changes since commit 6587b0c1331d427b0939c37e763842550ed581db:

  Merge remote-tracking branch 'remotes/ericb/tags/pull-nbd-2021-10-15' into staging (2021-10-15 14:16:28 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211016

for you to fetch changes up to 995b87dedc78b0467f5f18bbc3546072ba97516a:

  Revert "cpu: Move cpu_common_props to hw/core/cpu.c" (2021-10-15 16:39:15 -0700)

----------------------------------------------------------------
Move gdb singlestep to generic code
Fix cpu_common_props

----------------------------------------------------------------
Richard Henderson (24):
      accel/tcg: Handle gdb singlestep in cpu_tb_exec
      target/alpha: Drop checks for singlestep_enabled
      target/avr: Drop checks for singlestep_enabled
      target/cris: Drop checks for singlestep_enabled
      target/hexagon: Drop checks for singlestep_enabled
      target/arm: Drop checks for singlestep_enabled
      target/hppa: Drop checks for singlestep_enabled
      target/i386: Check CF_NO_GOTO_TB for dc->jmp_opt
      target/i386: Drop check for singlestep_enabled
      target/m68k: Drop checks for singlestep_enabled
      target/microblaze: Check CF_NO_GOTO_TB for DISAS_JUMP
      target/microblaze: Drop checks for singlestep_enabled
      target/mips: Fix single stepping
      target/mips: Drop exit checks for singlestep_enabled
      target/openrisc: Drop checks for singlestep_enabled
      target/ppc: Drop exit checks for singlestep_enabled
      target/riscv: Remove dead code after exception
      target/riscv: Remove exit_tb and lookup_and_goto_ptr
      target/rx: Drop checks for singlestep_enabled
      target/s390x: Drop check for singlestep_enabled
      target/sh4: Drop check for singlestep_enabled
      target/tricore: Drop check for singlestep_enabled
      target/xtensa: Drop check for singlestep_enabled
      Revert "cpu: Move cpu_common_props to hw/core/cpu.c"

 include/hw/core/cpu.h                          |  1 +
 target/i386/helper.h                           |  1 -
 target/rx/helper.h                             |  1 -
 target/sh4/helper.h                            |  1 -
 target/tricore/helper.h                        |  1 -
 accel/tcg/cpu-exec.c                           | 11 ++++
 cpu.c                                          | 21 ++++++++
 hw/core/cpu-common.c                           | 17 +-----
 target/alpha/translate.c                       | 13 ++---
 target/arm/translate-a64.c                     | 10 +---
 target/arm/translate.c                         | 36 +++----------
 target/avr/translate.c                         | 19 ++-----
 target/cris/translate.c                        | 16 ------
 target/hexagon/translate.c                     | 12 +----
 target/hppa/translate.c                        | 17 ++----
 target/i386/tcg/misc_helper.c                  |  8 ---
 target/i386/tcg/translate.c                    |  9 ++--
 target/m68k/translate.c                        | 44 ++++-----------
 target/microblaze/translate.c                  | 18 ++-----
 target/mips/tcg/translate.c                    | 75 ++++++++++++--------------
 target/openrisc/translate.c                    | 18 ++-----
 target/ppc/translate.c                         | 38 +++----------
 target/riscv/translate.c                       | 27 +---------
 target/rx/op_helper.c                          |  8 ---
 target/rx/translate.c                          | 12 +----
 target/s390x/tcg/translate.c                   |  8 +--
 target/sh4/op_helper.c                         |  5 --
 target/sh4/translate.c                         | 14 ++---
 target/tricore/op_helper.c                     |  7 ---
 target/tricore/translate.c                     | 14 +----
 target/xtensa/translate.c                      | 25 +++------
 target/riscv/insn_trans/trans_privileged.c.inc | 10 ++--
 target/riscv/insn_trans/trans_rvi.c.inc        |  8 ++-
 target/riscv/insn_trans/trans_rvv.c.inc        |  2 +-
 34 files changed, 141 insertions(+), 386 deletions(-)
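To make the shape of the singlestep series concrete, here is a composite
sketch (hypothetical function names, not taken from any single target) of
the per-target pattern that the 2021 patches below remove:

    /* Before: every target open-coded the gdb single-step check. */
    static void example_tr_tb_stop_before(DisasContext *dc)
    {
        if (dc->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);     /* raise EXCP_DEBUG by hand */
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
    }

    /* After: the generic loop raises EXCP_DEBUG, so targets simply exit. */
    static void example_tr_tb_stop_after(DisasContext *dc)
    {
        tcg_gen_exit_tb(NULL, 0);
    }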
There is only one caller, tlb_set_page_with_attrs.  We cannot
inline the entire function because the AddressSpaceDispatch
structure is private to exec.c, and cannot easily be moved to
include/exec/memory-internal.h.

Compute is_ram and is_romd once within tlb_set_page_with_attrs.
Fold the number of tests against these predicates.  Compute
cpu_physical_memory_is_clean outside of the tlb lock region.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h |  6 +---
 accel/tcg/cputlb.c      | 68 ++++++++++++++++++++++++++---------------
 exec.c                  | 22 ++-----------
 3 files changed, 47 insertions(+), 49 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                   hwaddr *xlat, hwaddr *plen,
                                   MemTxAttrs attrs, int *prot);
 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
-                                       MemoryRegionSection *section,
-                                       target_ulong vaddr,
-                                       hwaddr paddr, hwaddr xlat,
-                                       int prot,
-                                       target_ulong *address);
+                                       MemoryRegionSection *section);
 #endif
 
 /* vl.c */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     MemoryRegionSection *section;
     unsigned int index;
     target_ulong address;
-    target_ulong code_address;
+    target_ulong write_address;
     uintptr_t addend;
     CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
     target_ulong vaddr_page;
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     int wp_flags;
+    bool is_ram, is_romd;
 
     assert_cpu_is_self(cpu);
 
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     if (attrs.byte_swap) {
         address |= TLB_BSWAP;
     }
-    if (!memory_region_is_ram(section->mr) &&
-        !memory_region_is_romd(section->mr)) {
-        /* IO memory case */
-        address |= TLB_MMIO;
-        addend = 0;
-    } else {
+
+    is_ram = memory_region_is_ram(section->mr);
+    is_romd = memory_region_is_romd(section->mr);
+
+    if (is_ram || is_romd) {
+        /* RAM and ROMD both have associated host memory. */
         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
+    } else {
+        /* I/O does not; force the host address to NULL. */
+        addend = 0;
+    }
+
+    write_address = address;
+    if (is_ram) {
+        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
+        /*
+         * Computing is_clean is expensive; avoid all that unless
+         * the page is actually writable.
+         */
+        if (prot & PAGE_WRITE) {
+            if (section->readonly) {
+                write_address |= TLB_DISCARD_WRITE;
+            } else if (cpu_physical_memory_is_clean(iotlb)) {
+                write_address |= TLB_NOTDIRTY;
+            }
+        }
+    } else {
+        /* I/O or ROMD */
+        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
+        /*
+         * Writes to romd devices must go through MMIO to enable write.
+         * Reads to romd devices go through the ram_ptr found above,
+         * but of course reads to I/O must go through MMIO.
+         */
+        write_address |= TLB_MMIO;
+        if (!is_romd) {
+            address = write_address;
+        }
     }
 
-    code_address = address;
-    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
-                                            paddr_page, xlat, prot, &address);
     wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                               TARGET_PAGE_SIZE);
 
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     /*
      * At this point iotlb contains a physical section number in the lower
      * TARGET_PAGE_BITS, and either
-     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
-     *  + the offset within section->mr of the page base (otherwise)
+     *  + the ram_addr_t of the page base of the target RAM (RAM)
+     *  + the offset within section->mr of the page base (I/O, ROMD)
      * We subtract the vaddr_page (which is page aligned and thus won't
      * disturb the low bits) to give an offset which can be added to the
      * (non-page-aligned) vaddr of the eventual memory access to get
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     }
 
     if (prot & PAGE_EXEC) {
-        tn.addr_code = code_address;
+        tn.addr_code = address;
     } else {
         tn.addr_code = -1;
     }
 
     tn.addr_write = -1;
     if (prot & PAGE_WRITE) {
-        tn.addr_write = address;
-        if (memory_region_is_romd(section->mr)) {
-            /* Use the MMIO path so that the device can switch states. */
-            tn.addr_write |= TLB_MMIO;
-        } else if (memory_region_is_ram(section->mr)) {
-            if (section->readonly) {
-                tn.addr_write |= TLB_DISCARD_WRITE;
-            } else if (cpu_physical_memory_is_clean(
-                        memory_region_get_ram_addr(section->mr) + xlat)) {
-                tn.addr_write |= TLB_NOTDIRTY;
-            }
-        }
+        tn.addr_write = write_address;
         if (prot & PAGE_WRITE_INV) {
             tn.addr_write |= TLB_INVALID_MASK;
         }
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
 
 /* Called from RCU critical section */
 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
-                                       MemoryRegionSection *section,
-                                       target_ulong vaddr,
-                                       hwaddr paddr, hwaddr xlat,
-                                       int prot,
-                                       target_ulong *address)
+                                       MemoryRegionSection *section)
 {
-    hwaddr iotlb;
-
-    if (memory_region_is_ram(section->mr)) {
-        /* Normal RAM.  */
-        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
-    } else {
-        AddressSpaceDispatch *d;
-
-        d = flatview_to_dispatch(section->fv);
-        iotlb = section - d->map.sections;
-        iotlb += xlat;
-    }
-
-    return iotlb;
+    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
+    return section - d->map.sections;
 }
 #endif /* defined(CONFIG_USER_ONLY) */
 
--
2.17.1


Currently the change in cpu_tb_exec is masked by the debug exception
being raised by the translators.  But this allows us to remove that code.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
             cc->set_pc(cpu, last_tb->pc);
         }
     }
+
+    /*
+     * If gdb single-step, and we haven't raised another exception,
+     * raise a debug exception.  Single-step with another exception
+     * is handled in cpu_handle_exception.
+     */
+    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
+        cpu->exception_index = EXCP_DEBUG;
+        cpu_loop_exit(cpu);
+    }
+
     return last_tb;
 }
--
2.25.1
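The locking point in the first message above generalizes: evaluate the
expensive predicate before entering the critical section, then publish
only the precomputed result while the lock is held.  A minimal sketch,
assuming the usual QEMU spinlock API (set_entry_locked is a hypothetical
helper, not code from the patch):

    static void set_entry_locked(CPUTLBEntry *te, target_ulong write_address,
                                 bool page_is_clean, QemuSpin *lock)
    {
        if (page_is_clean) {
            /* Decided outside the lock, as in the patch above. */
            write_address |= TLB_NOTDIRTY;
        }
        qemu_spin_lock(lock);
        te->addr_write = write_address;   /* short critical section */
        qemu_spin_unlock(lock);
    }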
GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/alpha/translate.c | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
         /* FALLTHRU */
     case DISAS_PC_UPDATED:
-        if (!ctx->base.singlestep_enabled) {
-            tcg_gen_lookup_and_goto_ptr();
-            break;
-        }
-        /* FALLTHRU */
+        tcg_gen_lookup_and_goto_ptr();
+        break;
     case DISAS_PC_UPDATED_NOCHAIN:
-        if (ctx->base.singlestep_enabled) {
-            gen_excp_1(EXCP_DEBUG, 0);
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+        tcg_gen_exit_tb(NULL, 0);
         break;
     default:
         g_assert_not_reached();
--
2.25.1
GDB single-stepping is now handled generically.

Tested-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/avr/translate.c | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)

diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
         tcg_gen_exit_tb(tb, n);
     } else {
         tcg_gen_movi_i32(cpu_pc, dest);
-        if (ctx->base.singlestep_enabled) {
-            gen_helper_debug(cpu_env);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
     ctx->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         tcg_gen_movi_tl(cpu_pc, ctx->npc);
         /* fall through */
     case DISAS_LOOKUP:
-        if (!ctx->base.singlestep_enabled) {
-            tcg_gen_lookup_and_goto_ptr();
-            break;
-        }
-        /* fall through */
+        tcg_gen_lookup_and_goto_ptr();
+        break;
     case DISAS_EXIT:
-        if (ctx->base.singlestep_enabled) {
-            gen_helper_debug(cpu_env);
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+        tcg_gen_exit_tb(NULL, 0);
         break;
     default:
         g_assert_not_reached();
--
2.25.1
We can use notdirty_write for the write and return a valid host
pointer for this case.

Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
             return NULL;
         }
     }
 
-    /* Handle watchpoints.  */
-    if (tlb_addr & TLB_WATCHPOINT) {
-        cpu_check_watchpoint(env_cpu(env), addr, size,
-                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
-                             wp_access, retaddr);
-    }
+    if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
+        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
 
-    /* Reject I/O access, or other required slow-path.  */
-    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
-        return NULL;
+        /* Reject I/O access, or other required slow-path.  */
+        if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
+            return NULL;
+        }
+
+        /* Handle watchpoints.  */
+        if (tlb_addr & TLB_WATCHPOINT) {
+            cpu_check_watchpoint(env_cpu(env), addr, size,
+                                 iotlbentry->attrs, wp_access, retaddr);
+        }
+
+        /* Handle clean RAM pages.  */
+        if (tlb_addr & TLB_NOTDIRTY) {
+            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+        }
     }
 
     return (void *)((uintptr_t)addr + entry->addend);
--
2.17.1


GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/cris/translate.c | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/target/cris/translate.c b/target/cris/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -XXX,XX +XXX,XX @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         }
     }
 
-    if (unlikely(dc->base.singlestep_enabled)) {
-        switch (is_jmp) {
-        case DISAS_TOO_MANY:
-        case DISAS_UPDATE_NEXT:
-            tcg_gen_movi_tl(env_pc, npc);
-            /* fall through */
-        case DISAS_JUMP:
-        case DISAS_UPDATE:
-            t_gen_raise_exception(EXCP_DEBUG);
-            return;
-        default:
-            break;
-        }
-        g_assert_not_reached();
-    }
-
     switch (is_jmp) {
     case DISAS_TOO_MANY:
         gen_goto_tb(dc, 0, npc);
--
2.25.1
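A hedged usage sketch of the probe_access behaviour described above;
do_direct_store is hypothetical, while the probe_access signature matches
the hunk header in the patch:

    static void do_direct_store(CPUArchState *env, target_ulong addr,
                                uint64_t val, int mmu_idx, uintptr_t ra)
    {
        void *host = probe_access(env, addr, sizeof(val), MMU_DATA_STORE,
                                  mmu_idx, ra);
        if (host) {
            /* Dirty tracking was already handled inside probe_access. */
            memcpy(host, &val, sizeof(val));
        }
        /* NULL means MMIO or another required slow path. */
    }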
GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/translate.c | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_end_tb(DisasContext *ctx)
 {
     gen_exec_counters(ctx);
     tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC);
-    if (ctx->base.singlestep_enabled) {
-        gen_exception_raw(EXCP_DEBUG);
-    } else {
-        tcg_gen_exit_tb(NULL, 0);
-    }
+    tcg_gen_exit_tb(NULL, 0);
     ctx->base.is_jmp = DISAS_NORETURN;
 }
 
@@ -XXX,XX +XXX,XX @@ static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
     case DISAS_TOO_MANY:
         gen_exec_counters(ctx);
         tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
-        if (ctx->base.singlestep_enabled) {
-            gen_exception_raw(EXCP_DEBUG);
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+        tcg_gen_exit_tb(NULL, 0);
         break;
     case DISAS_NORETURN:
         break;
--
2.25.1
Since 9458a9a1df1a, all readers of the dirty bitmaps wait
for the rcu lock, which means that they wait until the end
of any executing TranslationBlock.

As a consequence, there is no need for the actual access
to happen in between the _prepare and _complete.  Therefore,
we can improve things by merging the two functions into
notdirty_write and dropping the NotDirtyInfo structure.

In addition, the only users of notdirty_write are in cputlb.c,
so move the merged function there.  Pass in the CPUIOTLBEntry
from which the ram_addr_t may be computed.

Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/memory-internal.h | 65 -----------------------------
 accel/tcg/cputlb.c             | 76 +++++++++++++++++++---------------
 exec.c                         | 44 --------------------
 3 files changed, 42 insertions(+), 143 deletions(-)

diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memory-internal.h
+++ b/include/exec/memory-internal.h
@@ -XXX,XX +XXX,XX @@ void address_space_dispatch_free(AddressSpaceDispatch *d);
 
 void mtree_print_dispatch(struct AddressSpaceDispatch *d,
                           MemoryRegion *root);
-
-struct page_collection;
-
-/* Opaque struct for passing info from memory_notdirty_write_prepare()
- * to memory_notdirty_write_complete(). Callers should treat all fields
- * as private, with the exception of @active.
- *
- * @active is a field which is not touched by either the prepare or
- * complete functions, but which the caller can use if it wishes to
- * track whether it has called prepare for this struct and so needs
- * to later call the complete function.
- */
-typedef struct {
-    CPUState *cpu;
-    struct page_collection *pages;
-    ram_addr_t ram_addr;
-    vaddr mem_vaddr;
-    unsigned size;
-    bool active;
-} NotDirtyInfo;
-
-/**
- * memory_notdirty_write_prepare: call before writing to non-dirty memory
- * @ndi: pointer to opaque NotDirtyInfo struct
- * @cpu: CPU doing the write
- * @mem_vaddr: virtual address of write
- * @ram_addr: the ram address of the write
- * @size: size of write in bytes
- *
- * Any code which writes to the host memory corresponding to
- * guest RAM which has been marked as NOTDIRTY must wrap those
- * writes in calls to memory_notdirty_write_prepare() and
- * memory_notdirty_write_complete():
- *
- *  NotDirtyInfo ndi;
- *  memory_notdirty_write_prepare(&ndi, ....);
- *  ... perform write here ...
- *  memory_notdirty_write_complete(&ndi);
- *
- * These calls will ensure that we flush any TCG translated code for
- * the memory being written, update the dirty bits and (if possible)
- * remove the slowpath callback for writing to the memory.
- *
- * This must only be called if we are using TCG; it will assert otherwise.
- *
- * We may take locks in the prepare call, so callers must ensure that
- * they don't exit (via longjump or otherwise) without calling complete.
- *
- * This call must only be made inside an RCU critical section.
- * (Note that while we're executing a TCG TB we're always in an
- * RCU critical section, which is likely to be the case for callers
- * of these functions.)
- */
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
-                                   CPUState *cpu,
-                                   vaddr mem_vaddr,
-                                   ram_addr_t ram_addr,
-                                   unsigned size);
-/**
- * memory_notdirty_write_complete: finish write to non-dirty memory
- * @ndi: pointer to the opaque NotDirtyInfo struct which was initialized
- * by memory_not_dirty_write_prepare().
- */
-void memory_notdirty_write_complete(NotDirtyInfo *ndi);
-
 #endif
 #endif
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/helper-proto.h"
 #include "qemu/atomic.h"
 #include "qemu/atomic128.h"
+#include "translate-all.h"
 
 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
     return qemu_ram_addr_from_host_nofail(p);
 }
 
+static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
+                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
+{
+    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
+
+    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
+
+    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
+        struct page_collection *pages
+            = page_collection_lock(ram_addr, ram_addr + size);
+
+        /* We require mem_io_pc in tb_invalidate_phys_page_range.  */
+        cpu->mem_io_pc = retaddr;
+
+        tb_invalidate_phys_page_fast(pages, ram_addr, size);
+        page_collection_unlock(pages);
+    }
+
+    /*
+     * Set both VGA and migration bits for simplicity and to remove
+     * the notdirty callback faster.
+     */
+    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
+
+    /* We remove the notdirty callback only if the code has been flushed. */
+    if (!cpu_physical_memory_is_clean(ram_addr)) {
+        trace_memory_notdirty_set_dirty(mem_vaddr);
+        tlb_set_dirty(cpu, mem_vaddr);
+    }
+}
+
 /*
  * Probe for whether the specified guest access is permitted. If it is not
  * permitted then an exception will be taken in the same way as if this
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
 /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
  * operations, or io operations to proceed.  Return the host address.  */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               TCGMemOpIdx oi, uintptr_t retaddr,
-                               NotDirtyInfo *ndi)
+                               TCGMemOpIdx oi, uintptr_t retaddr)
 {
     size_t mmu_idx = get_mmuidx(oi);
     uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 
     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
 
-    ndi->active = false;
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
-        ndi->active = true;
-        memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
-                                      qemu_ram_addr_from_host_nofail(hostaddr),
-                                      1 << s_bits);
+        notdirty_write(env_cpu(env), addr, 1 << s_bits,
+                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
     }
 
     return hostaddr;
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         return;
     }
 
-    haddr = (void *)((uintptr_t)addr + entry->addend);
-
     /* Handle clean RAM pages.  */
     if (tlb_addr & TLB_NOTDIRTY) {
-        NotDirtyInfo ndi;
-
-        /* We require mem_io_pc in tb_invalidate_phys_page_range.  */
-        env_cpu(env)->mem_io_pc = retaddr;
-
-        memory_notdirty_write_prepare(&ndi, env_cpu(env), addr,
-                                      addr + iotlbentry->addr, size);
-
-        if (unlikely(need_swap)) {
-            store_memop(haddr, val, op ^ MO_BSWAP);
-        } else {
-            store_memop(haddr, val, op);
-        }
-
-        memory_notdirty_write_complete(&ndi);
-        return;
+        notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
     }
 
+    haddr = (void *)((uintptr_t)addr + entry->addend);
+
     /*
      * Keep these two store_memop separate to ensure that the compiler
      * is able to fold the entire function to a single instruction.
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
 #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
 #define ATOMIC_NAME(X) \
     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
-#define ATOMIC_MMU_CLEANUP                              \
-    do {                                                \
-        if (unlikely(ndi.active)) {                     \
-            memory_notdirty_write_complete(&ndi);       \
-        }                                               \
-    } while (0)
+#define ATOMIC_MMU_DECLS
+#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
+#define ATOMIC_MMU_CLEANUP
 
 #define DATA_SIZE 1
 #include "atomic_template.h"
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
 #undef ATOMIC_MMU_LOOKUP
 #define EXTRA_ARGS         , TCGMemOpIdx oi
 #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
+#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
 
 #define DATA_SIZE 1
 #include "atomic_template.h"
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
     return block->offset + offset;
 }
 
-/* Called within RCU critical section. */
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
-                                   CPUState *cpu,
-                                   vaddr mem_vaddr,
-                                   ram_addr_t ram_addr,
-                                   unsigned size)
-{
-    ndi->cpu = cpu;
-    ndi->ram_addr = ram_addr;
-    ndi->mem_vaddr = mem_vaddr;
-    ndi->size = size;
-    ndi->pages = NULL;
-
-    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
-
-    assert(tcg_enabled());
-    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
-        ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
-        tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size);
-    }
-}
-
-/* Called within RCU critical section. */
-void memory_notdirty_write_complete(NotDirtyInfo *ndi)
-{
-    if (ndi->pages) {
-        assert(tcg_enabled());
-        page_collection_unlock(ndi->pages);
-        ndi->pages = NULL;
-    }
-
-    /* Set both VGA and migration bits for simplicity and to remove
-     * the notdirty callback faster.
-     */
-    cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
-                                        DIRTY_CLIENTS_NOCODE);
-    /* we remove the notdirty callback only if the code has been
-       flushed */
-    if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
-        trace_memory_notdirty_set_dirty(ndi->mem_vaddr);
-        tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
-    }
-}
-
 /* Generate a debug exception if a watchpoint has been hit.  */
 void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                           MemTxAttrs attrs, int flags, uintptr_t ra)
--
2.17.1


GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-a64.c | 10 ++--------
 target/arm/translate.c     | 36 ++++++------------------------------
 2 files changed, 8 insertions(+), 38 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
         gen_a64_set_pc_im(dest);
         if (s->ss_active) {
             gen_step_complete_exception(s);
-        } else if (s->base.singlestep_enabled) {
-            gen_exception_internal(EXCP_DEBUG);
         } else {
             tcg_gen_lookup_and_goto_ptr();
             s->base.is_jmp = DISAS_NORETURN;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
 
-    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
+    if (unlikely(dc->ss_active)) {
         /* Note that this means single stepping WFI doesn't halt the CPU.
          * For conditional branch insns this is harmless unreachable code as
          * gen_goto_tb() has already handled emitting the debug exception
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
             /* fall through */
         case DISAS_EXIT:
         case DISAS_JUMP:
-            if (dc->base.singlestep_enabled) {
-                gen_exception_internal(EXCP_DEBUG);
-            } else {
-                gen_step_complete_exception(dc);
-            }
+            gen_step_complete_exception(dc);
             break;
         case DISAS_NORETURN:
             break;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
     tcg_temp_free_i32(tcg_excp);
 }
 
-static void gen_step_complete_exception(DisasContext *s)
+static void gen_singlestep_exception(DisasContext *s)
 {
     /* We just completed step of an insn. Move from Active-not-pending
      * to Active-pending, and then also take the swstep exception.
@@ -XXX,XX +XXX,XX @@ static void gen_step_complete_exception(DisasContext *s)
     s->base.is_jmp = DISAS_NORETURN;
 }
 
-static void gen_singlestep_exception(DisasContext *s)
-{
-    /* Generate the right kind of exception for singlestep, which is
-     * either the architectural singlestep or EXCP_DEBUG for QEMU's
-     * gdb singlestepping.
-     */
-    if (s->ss_active) {
-        gen_step_complete_exception(s);
-    } else {
-        gen_exception_internal(EXCP_DEBUG);
-    }
-}
-
-static inline bool is_singlestepping(DisasContext *s)
-{
-    /* Return true if we are singlestepping either because of
-     * architectural singlestep or QEMU gdbstub singlestep. This does
-     * not include the command line '-singlestep' mode which is rather
-     * misnamed as it only means "one instruction per TB" and doesn't
-     * affect the code we generate.
-     */
-    return s->base.singlestep_enabled || s->ss_active;
-}
-
 void clear_eci_state(DisasContext *s)
 {
     /*
@@ -XXX,XX +XXX,XX @@ static inline void gen_bx_excret_final_code(DisasContext *s)
     /* Is the new PC value in the magic range indicating exception return? */
     tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
     /* No: end the TB as we would for a DISAS_JMP */
-    if (is_singlestepping(s)) {
+    if (s->ss_active) {
         gen_singlestep_exception(s);
     } else {
         tcg_gen_exit_tb(NULL, 0);
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
 /* Jump, specifying which TB number to use if we gen_goto_tb() */
 static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
 {
-    if (unlikely(is_singlestepping(s))) {
+    if (unlikely(s->ss_active)) {
         /* An indirect jump so that we still trigger the debug exception.  */
         gen_set_pc_im(s, dest);
         s->base.is_jmp = DISAS_JUMP;
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
 
     /* If architectural single step active, limit to 1.  */
-    if (is_singlestepping(dc)) {
+    if (dc->ss_active) {
         dc->base.max_insns = 1;
     }
 
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
          * insn codepath itself.
          */
         gen_bx_excret_final_code(dc);
-    } else if (unlikely(is_singlestepping(dc))) {
+    } else if (unlikely(dc->ss_active)) {
         /* Unconditional and "condition passed" instruction codepath. */
         switch (dc->base.is_jmp) {
         case DISAS_SWI:
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
     /* "Condition failed" instruction codepath for the branch/trap insn */
     gen_set_label(dc->condlabel);
     gen_set_condexec(dc);
-    if (unlikely(is_singlestepping(dc))) {
+    if (unlikely(dc->ss_active)) {
         gen_set_pc_im(dc, dc->base.pc_next);
         gen_singlestep_exception(dc);
     } else {
--
2.25.1
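The change in calling convention described in the first message above can
be summarized with a small sketch (slow_store_sketch is a hypothetical
wrapper, not code from the patch):

    static void slow_store_sketch(CPUState *cpu, vaddr addr, unsigned size,
                                  CPUIOTLBEntry *entry, uintptr_t ra,
                                  void *host, uint64_t val)
    {
        /* One call replaces the former prepare/complete bracket ... */
        notdirty_write(cpu, addr, size, entry, ra);
        /* ... so the store itself needs no surrounding lock or cleanup. */
        memcpy(host, &val, size);
    }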
GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hppa/translate.c | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int which,
     } else {
         copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
         copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
-        if (ctx->base.singlestep_enabled) {
-            gen_excp_1(EXCP_DEBUG);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static bool do_rfi(DisasContext *ctx, bool rfi_r)
         gen_helper_rfi(cpu_env);
     }
     /* Exit the TB to recognize new interrupts.  */
-    if (ctx->base.singlestep_enabled) {
-        gen_excp_1(EXCP_DEBUG);
-    } else {
-        tcg_gen_exit_tb(NULL, 0);
-    }
+    tcg_gen_exit_tb(NULL, 0);
     ctx->base.is_jmp = DISAS_NORETURN;
 
     return nullify_end(ctx);
@@ -XXX,XX +XXX,XX @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         nullify_save(ctx);
         /* FALLTHRU */
     case DISAS_IAQ_N_UPDATED:
-        if (ctx->base.singlestep_enabled) {
-            gen_excp_1(EXCP_DEBUG);
-        } else if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
+        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
             tcg_gen_lookup_and_goto_ptr();
+            break;
         }
         /* FALLTHRU */
     case DISAS_EXIT:
--
2.25.1
We were using singlestep_enabled as a proxy for whether
translator_use_goto_tb would always return false.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/tcg/translate.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUX86State *env = cpu->env_ptr;
     uint32_t flags = dc->base.tb->flags;
+    uint32_t cflags = tb_cflags(dc->base.tb);
     int cpl = (flags >> HF_CPL_SHIFT) & 3;
     int iopl = (flags >> IOPL_SHIFT) & 3;
 
@@ -XXX,XX +XXX,XX @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
-    dc->jmp_opt = !(dc->base.singlestep_enabled ||
+    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
     /*
      * If jmp_opt, we want to handle each string instruction individually.
      * For icount also disable repz optimization so that each iteration
      * is accounted separately.
      */
-    dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
+    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
 
     dc->T0 = tcg_temp_new();
     dc->T1 = tcg_temp_new();
--
2.25.1
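A simplified sketch (not the actual QEMU implementation) of the kind of
predicate the commit message refers to: direct chaining is usable only if
nothing forced it off via cflags and the destination stays on the same
guest page as the TB being translated:

    static bool example_use_goto_tb(const TranslationBlock *tb,
                                    target_ulong dest)
    {
        if (tb_cflags(tb) & CF_NO_GOTO_TB) {
            return false;                   /* e.g. single-stepping */
        }
        /* Direct chaining is only valid within the same guest page. */
        return (tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
    }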
GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/helper.h          | 1 -
 target/i386/tcg/misc_helper.c | 8 --------
 target/i386/tcg/translate.c   | 4 +---
 3 files changed, 1 insertion(+), 12 deletions(-)

diff --git a/target/i386/helper.h b/target/i386/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/helper.h
+++ b/target/i386/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(syscall, void, env, int)
 DEF_HELPER_2(sysret, void, env, int)
 #endif
 DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int)
-DEF_HELPER_FLAGS_1(debug, TCG_CALL_NO_WG, noreturn, env)
 DEF_HELPER_1(reset_rf, void, env)
 DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int)
 DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int)
diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/misc_helper.c
+++ b/target/i386/tcg/misc_helper.c
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN helper_pause(CPUX86State *env, int next_eip_addend)
     do_pause(env);
 }
 
-void QEMU_NORETURN helper_debug(CPUX86State *env)
-{
-    CPUState *cs = env_cpu(env);
-
-    cs->exception_index = EXCP_DEBUG;
-    cpu_loop_exit(cs);
-}
-
 uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
 {
     if ((env->cr[4] & CR4_PKE_MASK) == 0) {
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
     if (s->base.tb->flags & HF_RF_MASK) {
         gen_helper_reset_rf(cpu_env);
     }
-    if (s->base.singlestep_enabled) {
-        gen_helper_debug(cpu_env);
-    } else if (recheck_tf) {
+    if (recheck_tf) {
         gen_helper_rechecking_single_step(cpu_env);
         tcg_gen_exit_tb(NULL, 0);
     } else if (s->flags & HF_TF_MASK) {
--
2.25.1
Pages that we want to track for NOTDIRTY are RAM.  We do not
really need to go through the I/O path to handle them.

Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-common.h |  2 --
 accel/tcg/cputlb.c        | 26 +++++++++++++++++---
 exec.c                    | 50 ---------------------------------
 memory.c                  | 16 -------------
 4 files changed, 23 insertions(+), 71 deletions(-)

diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -XXX,XX +XXX,XX @@ void qemu_flush_coalesced_mmio_buffer(void);
 
 void cpu_flush_icache_range(hwaddr start, hwaddr len);
 
-extern struct MemoryRegion io_mem_notdirty;
-
 typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
 
 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
-    if (mr != &io_mem_notdirty && !cpu->can_do_io) {
+    if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
 
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
-    if (mr != &io_mem_notdirty && !cpu->can_do_io) {
+    if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
     cpu->mem_io_vaddr = addr;
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
     need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
 
     /* Handle I/O access.  */
-    if (likely(tlb_addr & (TLB_MMIO | TLB_NOTDIRTY))) {
+    if (tlb_addr & TLB_MMIO) {
         io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                   op ^ (need_swap * MO_BSWAP));
         return;
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
     haddr = (void *)((uintptr_t)addr + entry->addend);
 
+    /* Handle clean RAM pages.  */
+    if (tlb_addr & TLB_NOTDIRTY) {
+        NotDirtyInfo ndi;
+
+        /* We require mem_io_pc in tb_invalidate_phys_page_range.  */
+        env_cpu(env)->mem_io_pc = retaddr;
+
+        memory_notdirty_write_prepare(&ndi, env_cpu(env), addr,
+                                      addr + iotlbentry->addr, size);
+
+        if (unlikely(need_swap)) {
+            store_memop(haddr, val, op ^ MO_BSWAP);
+        } else {
+            store_memop(haddr, val, op);
+        }
+
+        memory_notdirty_write_complete(&ndi);
+        return;
+    }
+
     /*
      * Keep these two store_memop separate to ensure that the compiler
      * is able to fold the entire function to a single instruction.
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ static MemoryRegion *system_io;
 AddressSpace address_space_io;
 AddressSpace address_space_memory;
 
-MemoryRegion io_mem_notdirty;
 static MemoryRegion io_mem_unassigned;
 #endif
 
@@ -XXX,XX +XXX,XX @@ typedef struct subpage_t {
 } subpage_t;
 
 #define PHYS_SECTION_UNASSIGNED 0
-#define PHYS_SECTION_NOTDIRTY 1
 
 static void io_mem_init(void);
 static void memory_map_init(void);
@@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
     if (memory_region_is_ram(section->mr)) {
         /* Normal RAM.  */
         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
-        if (!section->readonly) {
-            iotlb |= PHYS_SECTION_NOTDIRTY;
-        }
     } else {
         AddressSpaceDispatch *d;
 
@@ -XXX,XX +XXX,XX @@ void memory_notdirty_write_complete(NotDirtyInfo *ndi)
     }
 }
 
-static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
-                               uint64_t val, unsigned size)
-{
-    NotDirtyInfo ndi;
-
-    memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
-                                  ram_addr, size);
-
-    stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val);
-    memory_notdirty_write_complete(&ndi);
-}
-
-static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
-                                 unsigned size, bool is_write,
-                                 MemTxAttrs attrs)
-{
-    return is_write;
-}
-
-static const MemoryRegionOps notdirty_mem_ops = {
-    .write = notdirty_mem_write,
-    .valid.accepts = notdirty_mem_accepts,
-    .endianness = DEVICE_NATIVE_ENDIAN,
-    .valid = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-        .unaligned = false,
-    },
-    .impl = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-        .unaligned = false,
-    },
-};
-
 /* Generate a debug exception if a watchpoint has been hit.  */
 void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                           MemTxAttrs attrs, int flags, uintptr_t ra)
@@ -XXX,XX +XXX,XX @@ static void io_mem_init(void)
 {
     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                           NULL, UINT64_MAX);
-
-    /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
-     * which can be called without the iothread mutex.
-     */
-    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
-                          NULL, UINT64_MAX);
-    memory_region_clear_global_locking(&io_mem_notdirty);
 }
 
 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
@@ -XXX,XX +XXX,XX @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
 
     n = dummy_section(&d->map, fv, &io_mem_unassigned);
     assert(n == PHYS_SECTION_UNASSIGNED);
-    n = dummy_section(&d->map, fv, &io_mem_notdirty);
-    assert(n == PHYS_SECTION_NOTDIRTY);
 
     d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
 
diff --git a/memory.c b/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/memory.c
+++ b/memory.c
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
     tmp = mr->ops->read(mr->opaque, addr, size);
     if (mr->subpage) {
         trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
-    } else if (mr == &io_mem_notdirty) {
-        /* Accesses to code which has previously been translated into a TB show
-         * up in the MMIO path, as accesses to the io_mem_notdirty
-         * MemoryRegion. */
     } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
     r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
     if (mr->subpage) {
         trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
-    } else if (mr == &io_mem_notdirty) {
-        /* Accesses to code which has previously been translated into a TB show
-         * up in the MMIO path, as accesses to the io_mem_notdirty
-         * MemoryRegion. */
     } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
 
     if (mr->subpage) {
         trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
-    } else if (mr == &io_mem_notdirty) {
-        /* Accesses to code which has previously been translated into a TB show
-         * up in the MMIO path, as accesses to the io_mem_notdirty
-         * MemoryRegion. */
     } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
 
     if (mr->subpage) {
         trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
-    } else if (mr == &io_mem_notdirty) {
-        /* Accesses to code which has previously been translated into a TB show
-         * up in the MMIO path, as accesses to the io_mem_notdirty
-         * MemoryRegion. */
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
--
2.17.1


GDB single-stepping is now handled generically.

Acked-by: Laurent Vivier <laurent@vivier.eu>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/m68k/translate.c | 44 +++++++-----------------------------------
 1 file changed, 9 insertions(+), 35 deletions(-)

diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static void do_writebacks(DisasContext *s)
     }
 }
 
-static bool is_singlestepping(DisasContext *s)
-{
-    /*
-     * Return true if we are singlestepping either because of
-     * architectural singlestep or QEMU gdbstub singlestep. This does
-     * not include the command line '-singlestep' mode which is rather
-     * misnamed as it only means "one instruction per TB" and doesn't
-     * affect the code we generate.
-     */
-    return s->base.singlestep_enabled || s->ss_active;
-}
-
 /* is_jmp field values */
 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
 #define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */
@@ -XXX,XX +XXX,XX @@ static void gen_exception(DisasContext *s, uint32_t dest, int nr)
     s->base.is_jmp = DISAS_NORETURN;
 }
 
-static void gen_singlestep_exception(DisasContext *s)
-{
-    /*
-     * Generate the right kind of exception for singlestep, which is
-     * either the architectural singlestep or EXCP_DEBUG for QEMU's
-     * gdb singlestepping.
-     */
-    if (s->ss_active) {
-        gen_raise_exception(EXCP_TRACE);
-    } else {
-        gen_raise_exception(EXCP_DEBUG);
-    }
-}
-
 static inline void gen_addr_fault(DisasContext *s)
 {
     gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
@@ -XXX,XX +XXX,XX @@ static void gen_exit_tb(DisasContext *s)
 /* Generate a jump to an immediate address.  */
 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
 {
-    if (unlikely(is_singlestepping(s))) {
+    if (unlikely(s->ss_active)) {
         update_cc_op(s);
         tcg_gen_movi_i32(QREG_PC, dest);
-        gen_singlestep_exception(s);
+        gen_raise_exception(EXCP_TRACE);
     } else if (translator_use_goto_tb(&s->base, dest)) {
         tcg_gen_goto_tb(n);
         tcg_gen_movi_i32(QREG_PC, dest);
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
 
     dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
     /* If architectural single step active, limit to 1 */
-    if (is_singlestepping(dc)) {
+    if (dc->ss_active) {
         dc->base.max_insns = 1;
     }
 
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         break;
     case DISAS_TOO_MANY:
         update_cc_op(dc);
-        if (is_singlestepping(dc)) {
+        if (dc->ss_active) {
             tcg_gen_movi_i32(QREG_PC, dc->pc);
-            gen_singlestep_exception(dc);
+            gen_raise_exception(EXCP_TRACE);
         } else {
             gen_jmp_tb(dc, 0, dc->pc);
         }
         break;
     case DISAS_JUMP:
         /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.  */
-        if (is_singlestepping(dc)) {
-            gen_singlestep_exception(dc);
+        if (dc->ss_active) {
+            gen_raise_exception(EXCP_TRACE);
         } else {
             tcg_gen_lookup_and_goto_ptr();
         }
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
          * We updated CC_OP and PC in gen_exit_tb, but also modified
          * other state that may require returning to the main loop.
          */
-        if (is_singlestepping(dc)) {
-            gen_singlestep_exception(dc);
+        if (dc->ss_active) {
+            gen_raise_exception(EXCP_TRACE);
         } else {
             tcg_gen_exit_tb(NULL, 0);
         }
--
2.25.1
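The resulting dispatch in the store slow path can be summarized as
follows (hypothetical helper, not code from the patch):

    static bool store_takes_io_path(target_ulong tlb_addr)
    {
        /*
         * Only genuine MMIO still goes through io_writex();
         * NOTDIRTY pages are RAM and are handled inline.
         */
        return (tlb_addr & TLB_MMIO) != 0;
    }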
We were using singlestep_enabled as a proxy for whether
translator_use_goto_tb would always return false.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/microblaze/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
         break;
 
     case DISAS_JUMP:
-        if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
+        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
             /* Direct jump. */
             tcg_gen_discard_i32(cpu_btarget);
 
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
             return;
         }
 
-        /* Indirect jump (or direct jump w/ singlestep) */
+        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
         tcg_gen_discard_i32(cpu_btarget);
 
--
2.25.1
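A hedged sketch of why the cflags test is the right proxy: when the
gdbstub enables single-stepping, the generic code can request TBs whose
cflags forbid direct chaining, roughly along these lines (a hypothetical
simplification, not the exact QEMU function):

    static uint32_t example_curr_cflags(CPUState *cpu, uint32_t cflags)
    {
        if (unlikely(cpu->singlestep_enabled)) {
            /* One insn per TB, and no goto_tb/goto_ptr chaining. */
            cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        }
        return cflags;
    }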
GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/microblaze/translate.c | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
 
 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
 {
-    if (dc->base.singlestep_enabled) {
-        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
-        tcg_gen_movi_i32(cpu_pc, dest);
-        gen_helper_raise_exception(cpu_env, tmp);
-        tcg_temp_free_i32(tmp);
-    } else if (translator_use_goto_tb(&dc->base, dest)) {
+    if (translator_use_goto_tb(&dc->base, dest)) {
         tcg_gen_goto_tb(n);
         tcg_gen_movi_i32(cpu_pc, dest);
         tcg_gen_exit_tb(dc->base.tb, n);
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
         /* Indirect jump (or direct jump w/ goto_tb disabled) */
         tcg_gen_mov_i32(cpu_pc, cpu_btarget);
         tcg_gen_discard_i32(cpu_btarget);
-
-        if (unlikely(cs->singlestep_enabled)) {
-            gen_raise_exception(dc, EXCP_DEBUG);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
         return;
 
     default:
--
2.25.1
Rather than rely on cpu->mem_io_pc, pass retaddr down directly.

Within tb_invalidate_phys_page_range__locked, the is_cpu_write_access
parameter is non-zero exactly when retaddr would be non-zero, so that
is a simple replacement.

Recognize that current_tb_not_found is true only when mem_io_pc
(and now retaddr) are also non-zero, so remove a redundant test.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/translate-all.h |  3 ++-
 accel/tcg/cputlb.c        |  6 +-----
 accel/tcg/translate-all.c | 39 +++++++++++++++++++--------------------
 3 files changed, 22 insertions(+), 26 deletions(-)

diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.h
+++ b/accel/tcg/translate-all.h
@@ -XXX,XX +XXX,XX @@ struct page_collection *page_collection_lock(tb_page_addr_t start,
                                              tb_page_addr_t end);
 void page_collection_unlock(struct page_collection *set);
 void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len);
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr);
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
 void tb_check_watchpoint(CPUState *cpu);
 
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
         struct page_collection *pages
             = page_collection_lock(ram_addr, ram_addr + size);
-
-        /* We require mem_io_pc in tb_invalidate_phys_page_range. */
-        cpu->mem_io_pc = retaddr;
-
-        tb_invalidate_phys_page_fast(pages, ram_addr, size);
+        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
         page_collection_unlock(pages);
     }
 
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static void
 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                       PageDesc *p, tb_page_addr_t start,
                                       tb_page_addr_t end,
-                                      int is_cpu_write_access)
+                                      uintptr_t retaddr)
 {
     TranslationBlock *tb;
     tb_page_addr_t tb_start, tb_end;
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 #ifdef TARGET_HAS_PRECISE_SMC
     CPUState *cpu = current_cpu;
     CPUArchState *env = NULL;
-    int current_tb_not_found = is_cpu_write_access;
+    bool current_tb_not_found = retaddr != 0;
+    bool current_tb_modified = false;
     TranslationBlock *current_tb = NULL;
-    int current_tb_modified = 0;
     target_ulong current_pc = 0;
     target_ulong current_cs_base = 0;
     uint32_t current_flags = 0;
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
         if (!(tb_end <= start || tb_start >= end)) {
 #ifdef TARGET_HAS_PRECISE_SMC
             if (current_tb_not_found) {
-                current_tb_not_found = 0;
-                current_tb = NULL;
-                if (cpu->mem_io_pc) {
-                    /* now we have a real cpu fault */
-                    current_tb = tcg_tb_lookup(cpu->mem_io_pc);
-                }
+                current_tb_not_found = false;
+                /* now we have a real cpu fault */
+                current_tb = tcg_tb_lookup(retaddr);
             }
             if (current_tb == tb &&
                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
-                /* If we are modifying the current TB, we must stop
-                   its execution. We could be more precise by checking
-                   that the modification is after the current PC, but it
-                   would require a specialized function to partially
-                   restore the CPU state */
-
-                current_tb_modified = 1;
-                cpu_restore_state_from_tb(cpu, current_tb,
-                                          cpu->mem_io_pc, true);
+                /*
+                 * If we are modifying the current TB, we must stop
+                 * its execution. We could be more precise by checking
+                 * that the modification is after the current PC, but it
+                 * would require a specialized function to partially
+                 * restore the CPU state.
+                 */
+                current_tb_modified = true;
+                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                      &current_flags);
             }
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(target_ulong start, target_ulong end)
  * Call with all @pages in the range [@start, @start + len[ locked.
  */
 void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len)
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr)
 {
     PageDesc *p;
 
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
         }
     } else {
     do_invalidate:
-        tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
+        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
+                                              retaddr);
     }
 }
--
2.17.1


As per an ancient comment in mips_tr_translate_insn about the
expectations of gdb, when restarting the insn in a delay slot
we also re-execute the branch. Which means that we are
expected to execute two insns in this case.

This has been broken since 8b86d6d2580, where we forced max_insns
to 1 while single-stepping. This resulted in an exit from the
translator loop after the branch but before the delay slot is
translated.

Increase the max_insns to 2 for this case. In addition, bypass
the end-of-page check, for when the branch itself ends the page.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/translate.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->default_tcg_memop_mask = (ctx->insn_flags & (ISA_MIPS_R6 |
                                   INSN_LOONGSON3A)) ? MO_UNALN : MO_ALIGN;
 
+    /*
+     * Execute a branch and its delay slot as a single instruction.
+     * This is what GDB expects and is consistent with what the
+     * hardware does (e.g. if a delay slot instruction faults, the
+     * reported PC is the PC of the branch).
+     */
+    if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+        ctx->base.max_insns = 2;
+    }
+
     LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx,
               ctx->hflags);
 }
@@ -XXX,XX +XXX,XX @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
     if (ctx->base.is_jmp != DISAS_NEXT) {
         return;
     }
 
-    /*
-     * Execute a branch and its delay slot as a single instruction.
-     * This is what GDB expects and is consistent with what the
-     * hardware does (e.g. if a delay slot instruction faults, the
-     * reported PC is the PC of the branch).
-     */
-    if (ctx->base.singlestep_enabled &&
-        (ctx->hflags & MIPS_HFLAG_BMASK) == 0) {
-        ctx->base.is_jmp = DISAS_TOO_MANY;
-    }
-    if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE) {
+    /*
+     * End the TB on (most) page crossings.
+     * See mips_tr_init_disas_context about single-stepping a branch
+     * together with its delay slot.
+     */
+    if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE
+        && !ctx->base.singlestep_enabled) {
         ctx->base.is_jmp = DISAS_TOO_MANY;
     }
 }
--
2.25.1

diff view generated by jsdifflib
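As an illustration of the two-insn step (my example, not from the patch): with max_insns raised to 2, a single gdb "stepi" at a branch now executes both the branch and its delay slot, so the debugger never observes a PC pointing into the delay slot:

    /*
     *     beq   $a0, $a1, target    <- PC reported to gdb
     *     addiu $v0, $v0, 1         <- delay slot, run in the same step
     */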
We will shortly be using these more than once.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 107 +++++++++++++++++++++++----------------------
 1 file changed, 55 insertions(+), 52 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr);
 
+static inline uint64_t QEMU_ALWAYS_INLINE
+load_memop(const void *haddr, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        return ldub_p(haddr);
+    case MO_BEUW:
+        return lduw_be_p(haddr);
+    case MO_LEUW:
+        return lduw_le_p(haddr);
+    case MO_BEUL:
+        return (uint32_t)ldl_be_p(haddr);
+    case MO_LEUL:
+        return (uint32_t)ldl_le_p(haddr);
+    case MO_BEQ:
+        return ldq_be_p(haddr);
+    case MO_LEQ:
+        return ldq_le_p(haddr);
+    default:
+        qemu_build_not_reached();
+    }
+}
+
 static inline uint64_t QEMU_ALWAYS_INLINE
 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             uintptr_t retaddr, MemOp op, bool code_read,
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
 
  do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (op) {
-    case MO_UB:
-        res = ldub_p(haddr);
-        break;
-    case MO_BEUW:
-        res = lduw_be_p(haddr);
-        break;
-    case MO_LEUW:
-        res = lduw_le_p(haddr);
-        break;
-    case MO_BEUL:
-        res = (uint32_t)ldl_be_p(haddr);
-        break;
-    case MO_LEUL:
-        res = (uint32_t)ldl_le_p(haddr);
-        break;
-    case MO_BEQ:
-        res = ldq_be_p(haddr);
-        break;
-    case MO_LEQ:
-        res = ldq_le_p(haddr);
-        break;
-    default:
-        qemu_build_not_reached();
-    }
-
-    return res;
+    return load_memop(haddr, op);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
  * Store Helpers
  */
 
+static inline void QEMU_ALWAYS_INLINE
+store_memop(void *haddr, uint64_t val, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        stb_p(haddr, val);
+        break;
+    case MO_BEUW:
+        stw_be_p(haddr, val);
+        break;
+    case MO_LEUW:
+        stw_le_p(haddr, val);
+        break;
+    case MO_BEUL:
+        stl_be_p(haddr, val);
+        break;
+    case MO_LEUL:
+        stl_le_p(haddr, val);
+        break;
+    case MO_BEQ:
+        stq_be_p(haddr, val);
+        break;
+    case MO_LEQ:
+        stq_le_p(haddr, val);
+        break;
+    default:
+        qemu_build_not_reached();
+    }
+}
+
 static inline void QEMU_ALWAYS_INLINE
 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
              TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
  do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (op) {
-    case MO_UB:
-        stb_p(haddr, val);
-        break;
-    case MO_BEUW:
-        stw_be_p(haddr, val);
-        break;
-    case MO_LEUW:
-        stw_le_p(haddr, val);
-        break;
-    case MO_BEUL:
-        stl_be_p(haddr, val);
-        break;
-    case MO_LEUL:
-        stl_le_p(haddr, val);
-        break;
-    case MO_BEQ:
-        stq_be_p(haddr, val);
-        break;
-    case MO_LEQ:
-        stq_le_p(haddr, val);
-        break;
-    default:
-        qemu_build_not_reached();
-    }
+    store_memop(haddr, val, op);
 }
 
 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
--
2.17.1


GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/translate.c | 50 +++++++++++++------------------------
 1 file changed, 18 insertions(+), 32 deletions(-)

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
         tcg_gen_exit_tb(ctx->base.tb, n);
     } else {
         gen_save_pc(dest);
-        if (ctx->base.singlestep_enabled) {
-            save_cpu_state(ctx, 0);
-            gen_helper_raise_exception_debug(cpu_env);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void gen_branch(DisasContext *ctx, int insn_bytes)
         } else {
             tcg_gen_mov_tl(cpu_PC, btarget);
         }
-        if (ctx->base.singlestep_enabled) {
-            save_cpu_state(ctx, 0);
-            gen_helper_raise_exception_debug(cpu_env);
-        }
         tcg_gen_lookup_and_goto_ptr();
         break;
     default:
@@ -XXX,XX +XXX,XX @@ static void mips_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
 
-    if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) {
-        save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT);
-        gen_helper_raise_exception_debug(cpu_env);
-    } else {
-        switch (ctx->base.is_jmp) {
-        case DISAS_STOP:
-            gen_save_pc(ctx->base.pc_next);
-            tcg_gen_lookup_and_goto_ptr();
-            break;
-        case DISAS_NEXT:
-        case DISAS_TOO_MANY:
-            save_cpu_state(ctx, 0);
-            gen_goto_tb(ctx, 0, ctx->base.pc_next);
-            break;
-        case DISAS_EXIT:
-            tcg_gen_exit_tb(NULL, 0);
-            break;
-        case DISAS_NORETURN:
-            break;
-        default:
-            g_assert_not_reached();
-        }
+    switch (ctx->base.is_jmp) {
+    case DISAS_STOP:
+        gen_save_pc(ctx->base.pc_next);
+        tcg_gen_lookup_and_goto_ptr();
+        break;
+    case DISAS_NEXT:
+    case DISAS_TOO_MANY:
+        save_cpu_state(ctx, 0);
+        gen_goto_tb(ctx, 0, ctx->base.pc_next);
+        break;
+    case DISAS_EXIT:
+        tcg_gen_exit_tb(NULL, 0);
+        break;
+    case DISAS_NORETURN:
+        break;
+    default:
+        g_assert_not_reached();
+    }
 }
 
--
2.25.1

diff view generated by jsdifflib
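The point of the split is constant folding; a sketch (with a hypothetical wrapper name, not from the patch) of how a caller with a compile-time-constant MemOp collapses:

    static uint32_t sketch_ldul_le(const void *haddr)
    {
        /* After inlining, the switch folds to a single ldl_le_p() load. */
        return load_memop(haddr, MO_LEUL);
    }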
Fixes the previous TLB_WATCHPOINT patches because we are currently
failing to set cpu->mem_io_pc with the call to cpu_check_watchpoint.
Pass down the retaddr directly because it's readily available.

Fixes: 50b107c5d61
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/translate-all.h | 2 +-
 accel/tcg/translate-all.c | 6 +++---
 exec.c                    | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.h
+++ b/accel/tcg/translate-all.h
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                   tb_page_addr_t start, int len,
                                   uintptr_t retaddr);
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
-void tb_check_watchpoint(CPUState *cpu);
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
 
 #ifdef CONFIG_USER_ONLY
 int page_unprotect(target_ulong address, uintptr_t pc);
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
 #endif
 
 /* user-mode: call with mmap_lock held */
-void tb_check_watchpoint(CPUState *cpu)
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
 {
     TranslationBlock *tb;
 
     assert_memory_lock();
 
-    tb = tcg_tb_lookup(cpu->mem_io_pc);
+    tb = tcg_tb_lookup(retaddr);
     if (tb) {
         /* We can use retranslation to find the PC. */
-        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
+        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
         tb_phys_invalidate(tb, -1);
     } else {
         /* The exception probably happened in a helper. The CPU state should
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
             cpu->watchpoint_hit = wp;
 
             mmap_lock();
-            tb_check_watchpoint(cpu);
+            tb_check_watchpoint(cpu, ra);
             if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                 cpu->exception_index = EXCP_DEBUG;
                 mmap_unlock();
--
2.17.1


GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/openrisc/translate.c | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         /* The jump destination is indirect/computed; use jmp_pc. */
         tcg_gen_mov_tl(cpu_pc, jmp_pc);
         tcg_gen_discard_tl(jmp_pc);
-        if (unlikely(dc->base.singlestep_enabled)) {
-            gen_exception(dc, EXCP_DEBUG);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
         break;
     }
     /* The jump destination is direct; use jmp_pc_imm.
@@ -XXX,XX +XXX,XX @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
             break;
         }
         tcg_gen_movi_tl(cpu_pc, jmp_dest);
-        if (unlikely(dc->base.singlestep_enabled)) {
-            gen_exception(dc, EXCP_DEBUG);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
         break;
 
     case DISAS_EXIT:
-        if (unlikely(dc->base.singlestep_enabled)) {
-            gen_exception(dc, EXCP_DEBUG);
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+        tcg_gen_exit_tb(NULL, 0);
         break;
     default:
         g_assert_not_reached();
--
2.25.1

diff view generated by jsdifflib
Handle bswap on ram directly in load/store_helper. This fixes a
bug with the previous implementation in that one cannot use the
I/O path for RAM.

Fixes: a26fc6f5152b47f1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h |  4 ++-
 accel/tcg/cputlb.c     | 72 +++++++++++++++++++++++-----------------
 2 files changed, 46 insertions(+), 30 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
 /* Set if TLB entry contains a watchpoint. */
 #define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
+/* Set if TLB entry requires byte swap. */
+#define TLB_BSWAP           (1 << (TARGET_PAGE_BITS_MIN - 5))
 
 /* Use this mask to check interception with an alignment mask
  * in a TCG backend.
  */
 #define TLB_FLAGS_MASK \
-    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT)
+    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT | TLB_BSWAP)
 
 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         address |= TLB_INVALID_MASK;
     }
     if (attrs.byte_swap) {
-        /* Force the access through the I/O slow path. */
-        address |= TLB_MMIO;
+        address |= TLB_BSWAP;
     }
     if (!memory_region_is_ram(section->mr) &&
         !memory_region_is_romd(section->mr)) {
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
-    if (iotlbentry->attrs.byte_swap) {
-        op ^= MO_BSWAP;
-    }
-
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
-    if (iotlbentry->attrs.byte_swap) {
-        op ^= MO_BSWAP;
-    }
-
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
                            wp_access, retaddr);
     }
 
-    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) {
-        /* I/O access */
+    /* Reject I/O access, or other required slow-path. */
+    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP)) {
         return NULL;
     }
 
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
     /* Handle anything that isn't just a straight memory access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
+        bool need_swap;
 
         /* For anything that is unaligned, recurse through full_load. */
         if ((addr & (size - 1)) != 0) {
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             /* On watchpoint hit, this will longjmp out. */
             cpu_check_watchpoint(env_cpu(env), addr, size,
                                  iotlbentry->attrs, BP_MEM_READ, retaddr);
-
-            /* The backing page may or may not require I/O. */
-            tlb_addr &= ~TLB_WATCHPOINT;
-            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
-                goto do_aligned_access;
-            }
         }
 
+        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
+
         /* Handle I/O access. */
-        return io_readx(env, iotlbentry, mmu_idx, addr,
-                        retaddr, access_type, op);
+        if (likely(tlb_addr & TLB_MMIO)) {
+            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
+                            access_type, op ^ (need_swap * MO_BSWAP));
+        }
+
+        haddr = (void *)((uintptr_t)addr + entry->addend);
+
+        /*
+         * Keep these two load_memop separate to ensure that the compiler
+         * is able to fold the entire function to a single instruction.
+         * There is a build-time assert inside to remind you of this. ;-)
+         */
+        if (unlikely(need_swap)) {
+            return load_memop(haddr, op ^ MO_BSWAP);
+        }
+        return load_memop(haddr, op);
     }
 
     /* Handle slow unaligned access (it spans two pages or IO). */
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         return res & MAKE_64BIT_MASK(0, size * 8);
     }
 
- do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
     return load_memop(haddr, op);
 }
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
     /* Handle anything that isn't just a straight memory access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
+        bool need_swap;
 
         /* For anything that is unaligned, recurse through byte stores. */
         if ((addr & (size - 1)) != 0) {
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             /* On watchpoint hit, this will longjmp out. */
             cpu_check_watchpoint(env_cpu(env), addr, size,
                                  iotlbentry->attrs, BP_MEM_WRITE, retaddr);
-
-            /* The backing page may or may not require I/O. */
-            tlb_addr &= ~TLB_WATCHPOINT;
-            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
-                goto do_aligned_access;
-            }
         }
 
+        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
+
         /* Handle I/O access. */
-        io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
+        if (likely(tlb_addr & (TLB_MMIO | TLB_NOTDIRTY))) {
+            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
+                      op ^ (need_swap * MO_BSWAP));
+            return;
+        }
+
+        haddr = (void *)((uintptr_t)addr + entry->addend);
+
+        /*
+         * Keep these two store_memop separate to ensure that the compiler
+         * is able to fold the entire function to a single instruction.
+         * There is a build-time assert inside to remind you of this. ;-)
+         */
+        if (unlikely(need_swap)) {
+            store_memop(haddr, val, op ^ MO_BSWAP);
+        } else {
+            store_memop(haddr, val, op);
+        }
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         return;
     }
 
- do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
     store_memop(haddr, val, op);
 }
--
2.17.1


GDB single-stepping is now handled generically.
Reuse gen_debug_exception to handle architectural debug exceptions.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/ppc/translate.c | 38 ++++++++------------------------
 1 file changed, 8 insertions(+), 30 deletions(-)

diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@
 
 #define CPU_SINGLE_STEP 0x1
 #define CPU_BRANCH_STEP 0x2
-#define GDBSTUB_SINGLE_STEP 0x4
 
 /* Include definitions for instructions classes and implementations flags */
 /* #define PPC_DEBUG_DISAS */
@@ -XXX,XX +XXX,XX @@ static uint32_t gen_prep_dbgex(DisasContext *ctx)
 
 static void gen_debug_exception(DisasContext *ctx)
 {
-    gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
+    gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
     ctx->base.is_jmp = DISAS_NORETURN;
 }
 
@@ -XXX,XX +XXX,XX @@ static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
 
 static void gen_lookup_and_goto_ptr(DisasContext *ctx)
 {
-    int sse = ctx->singlestep_enabled;
-    if (unlikely(sse)) {
-        if (sse & GDBSTUB_SINGLE_STEP) {
-            gen_debug_exception(ctx);
-        } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) {
-            gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+    if (unlikely(ctx->singlestep_enabled)) {
+        gen_debug_exception(ctx);
     } else {
         tcg_gen_lookup_and_goto_ptr();
     }
 }
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->singlestep_enabled = 0;
     if ((hflags >> HFLAGS_SE) & 1) {
         ctx->singlestep_enabled |= CPU_SINGLE_STEP;
+        ctx->base.max_insns = 1;
     }
     if ((hflags >> HFLAGS_BE) & 1) {
         ctx->singlestep_enabled |= CPU_BRANCH_STEP;
     }
-    if (unlikely(ctx->base.singlestep_enabled)) {
-        ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP;
-    }
-
-    if (ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP)) {
-        ctx->base.max_insns = 1;
-    }
 }
 
 static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
     DisasJumpType is_jmp = ctx->base.is_jmp;
     target_ulong nip = ctx->base.pc_next;
-    int sse;
 
     if (is_jmp == DISAS_NORETURN) {
         /* We have already exited the TB. */
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
     }
 
     /* Honor single stepping. */
-    sse = ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP);
-    if (unlikely(sse)) {
+    if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)
+        && (nip <= 0x100 || nip > 0xf00)) {
         switch (is_jmp) {
         case DISAS_TOO_MANY:
         case DISAS_EXIT_UPDATE:
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
             g_assert_not_reached();
         }
 
-        if (sse & GDBSTUB_SINGLE_STEP) {
-            gen_debug_exception(ctx);
-            return;
-        }
-        /* else CPU_SINGLE_STEP... */
-        if (nip <= 0x100 || nip > 0xf00) {
-            gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
-            return;
-        }
+        gen_debug_exception(ctx);
+        return;
     }
 
     switch (is_jmp) {
--
2.25.1

diff view generated by jsdifflib
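A note on the branchless selection above, assuming my reading of MemOp is right: need_swap is 0 or 1 and MO_BSWAP is a single bit, so the expression selects the swapped operation without a branch:

    op ^ (need_swap * MO_BSWAP)
        == op              /* when need_swap == 0 */
        == op ^ MO_BSWAP   /* when need_swap == 1, e.g.
                              MO_LEUL ^ MO_BSWAP == MO_BEUL */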
Increase the current runtime assert to a compile-time assert.

Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         res = ldq_le_p(haddr);
         break;
     default:
-        g_assert_not_reached();
+        qemu_build_not_reached();
     }
 
     return res;
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         stq_le_p(haddr, val);
         break;
     default:
-        g_assert_not_reached();
-        break;
+        qemu_build_not_reached();
     }
 }
 
--
2.17.1


We have already set DISAS_NORETURN in generate_exception,
which makes the exit_tb unreachable.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/riscv/insn_trans/trans_privileged.c.inc | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_privileged.c.inc
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_ecall(DisasContext *ctx, arg_ecall *a)
 {
     /* always generates U-level ECALL, fixed in do_interrupt handler */
     generate_exception(ctx, RISCV_EXCP_U_ECALL);
-    exit_tb(ctx); /* no chaining */
-    ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a)
         post = opcode_at(&ctx->base, post_addr);
     }
 
-    if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) {
+    if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) {
         generate_exception(ctx, RISCV_EXCP_SEMIHOST);
     } else {
         generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
     }
-    exit_tb(ctx); /* no chaining */
-    ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 }
 
--
2.25.1

diff view generated by jsdifflib
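The practical difference, sketched (illustrative, not from the patch):

    default:
        g_assert_not_reached();    /* old: traps at run time if reached */
    default:
        qemu_build_not_reached();  /* new: the build fails unless the
                                      optimizer proves this case dead  */

Since load_helper/store_helper are always inlined with a constant 'op', the optimizer can, and now must, prove the default case dead.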
The memory_region_tb_read tracepoint is unreachable, since notdirty
is supposed to apply only to writes. The memory_region_tb_write
tracepoint is mis-named, because notdirty is not only used for TB
invalidation. It is also used for e.g. VGA RAM updates and migration.

Replace memory_region_tb_write with memory_notdirty_write_access,
and place it in memory_notdirty_write_prepare where it can catch
all of the instances. Add memory_notdirty_set_dirty to log when
we no longer intercept writes to a page.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 exec.c       | 3 +++
 memory.c     | 4 ----
 trace-events | 4 ++--
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
     ndi->size = size;
     ndi->pages = NULL;
 
+    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
+
     assert(tcg_enabled());
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
         ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
@@ -XXX,XX +XXX,XX @@ void memory_notdirty_write_complete(NotDirtyInfo *ndi)
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
+        trace_memory_notdirty_set_dirty(ndi->mem_vaddr);
         tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
     }
 }
diff --git a/memory.c b/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/memory.c
+++ b/memory.c
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
         /* Accesses to code which has previously been translated into a TB show
          * up in the MMIO path, as accesses to the io_mem_notdirty
          * MemoryRegion. */
-        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
         /* Accesses to code which has previously been translated into a TB show
          * up in the MMIO path, as accesses to the io_mem_notdirty
          * MemoryRegion. */
-        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
         /* Accesses to code which has previously been translated into a TB show
          * up in the MMIO path, as accesses to the io_mem_notdirty
          * MemoryRegion. */
-        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
         /* Accesses to code which has previously been translated into a TB show
          * up in the MMIO path, as accesses to the io_mem_notdirty
          * MemoryRegion. */
-        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
     } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
         trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
diff --git a/trace-events b/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/trace-events
+++ b/trace-events
@@ -XXX,XX +XXX,XX @@ dma_map_wait(void *dbs) "dbs=%p"
 find_ram_offset(uint64_t size, uint64_t offset) "size: 0x%" PRIx64 " @ 0x%" PRIx64
 find_ram_offset_loop(uint64_t size, uint64_t candidate, uint64_t offset, uint64_t next, uint64_t mingap) "trying size: 0x%" PRIx64 " @ 0x%" PRIx64 ", offset: 0x%" PRIx64" next: 0x%" PRIx64 " mingap: 0x%" PRIx64
 ram_block_discard_range(const char *rbname, void *hva, size_t length, bool need_madvise, bool need_fallocate, int ret) "%s@%p + 0x%zx: madvise: %d fallocate: %d ret: %d"
+memory_notdirty_write_access(uint64_t vaddr, uint64_t ram_addr, unsigned size) "0x%" PRIx64 " ram_addr 0x%" PRIx64 " size %u"
+memory_notdirty_set_dirty(uint64_t vaddr) "0x%" PRIx64
 
 # memory.c
 memory_region_ops_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
 memory_region_ops_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
 memory_region_subpage_read(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
 memory_region_subpage_write(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
-memory_region_tb_read(int cpu_index, uint64_t addr, uint64_t value, unsigned size) "cpu %d addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
-memory_region_tb_write(int cpu_index, uint64_t addr, uint64_t value, unsigned size) "cpu %d addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
 memory_region_ram_device_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
 memory_region_ram_device_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
 flatview_new(void *view, void *root) "%p (root %p)"
--
2.17.1


GDB single-stepping is now handled generically, which means
we don't need to do anything in the wrappers.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/riscv/translate.c                       | 27 +------------------
 .../riscv/insn_trans/trans_privileged.c.inc    |  4 +--
 target/riscv/insn_trans/trans_rvi.c.inc        |  8 +++---
 target/riscv/insn_trans/trans_rvv.c.inc        |  2 +-
 4 files changed, 7 insertions(+), 34 deletions(-)

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static void generate_exception_mtval(DisasContext *ctx, int excp)
     ctx->base.is_jmp = DISAS_NORETURN;
 }
 
-static void gen_exception_debug(void)
-{
-    gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
-}
-
-/* Wrapper around tcg_gen_exit_tb that handles single stepping */
-static void exit_tb(DisasContext *ctx)
-{
-    if (ctx->base.singlestep_enabled) {
-        gen_exception_debug();
-    } else {
-        tcg_gen_exit_tb(NULL, 0);
-    }
-}
-
-/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
-static void lookup_and_goto_ptr(DisasContext *ctx)
-{
-    if (ctx->base.singlestep_enabled) {
-        gen_exception_debug();
-    } else {
-        tcg_gen_lookup_and_goto_ptr();
-    }
-}
-
 static void gen_exception_illegal(DisasContext *ctx)
 {
     generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
         tcg_gen_exit_tb(ctx->base.tb, n);
     } else {
         tcg_gen_movi_tl(cpu_pc, dest);
-        lookup_and_goto_ptr(ctx);
+        tcg_gen_lookup_and_goto_ptr();
     }
 }
 
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_privileged.c.inc
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
 
     if (has_ext(ctx, RVS)) {
         gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
-        exit_tb(ctx); /* no chaining */
+        tcg_gen_exit_tb(NULL, 0); /* no chaining */
         ctx->base.is_jmp = DISAS_NORETURN;
     } else {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
 #ifndef CONFIG_USER_ONLY
     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
     gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
-    exit_tb(ctx); /* no chaining */
+    tcg_gen_exit_tb(NULL, 0); /* no chaining */
     ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 #else
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
     if (a->rd != 0) {
         tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
     }
-
-    /* No chaining with JALR. */
-    lookup_and_goto_ptr(ctx);
+    tcg_gen_lookup_and_goto_ptr();
 
     if (misaligned) {
         gen_set_label(misaligned);
@@ -XXX,XX +XXX,XX @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
      * however we need to end the translation block
      */
     tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
-    exit_tb(ctx);
+    tcg_gen_exit_tb(NULL, 0);
     ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool do_csr_post(DisasContext *ctx)
 {
     /* We may have changed important cpu state -- exit to main loop. */
     tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
-    exit_tb(ctx);
+    tcg_gen_exit_tb(NULL, 0);
     ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 }
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
     gen_set_gpr(ctx, a->rd, dst);
 
     tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
-    lookup_and_goto_ptr(ctx);
+    tcg_gen_lookup_and_goto_ptr();
     ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 }
--
2.25.1

diff view generated by jsdifflib
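Each trace-events line generates a trace_<name>() helper with matching arguments; the two new events are emitted from exec.c as:

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
    trace_memory_notdirty_set_dirty(ndi->mem_vaddr);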
Use this as a compile-time assert that a particular
code path is not reachable.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/compiler.h | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -XXX,XX +XXX,XX @@
 #define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__))
 #define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__))
 
+/**
+ * qemu_build_not_reached()
+ *
+ * The compiler, during optimization, is expected to prove that a call
+ * to this function cannot be reached and remove it. If the compiler
+ * supports QEMU_ERROR, this will be reported at compile time; otherwise
+ * this will be reported at link time due to the missing symbol.
+ */
+#ifdef __OPTIMIZE__
+extern void QEMU_NORETURN QEMU_ERROR("code path is reachable")
+    qemu_build_not_reached(void);
+#else
+#define qemu_build_not_reached()  g_assert_not_reached()
+#endif
+
 #endif /* COMPILER_H */
--
2.17.1


GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/rx/helper.h    |  1 -
 target/rx/op_helper.c |  8 --------
 target/rx/translate.c | 12 ++----------
 3 files changed, 2 insertions(+), 19 deletions(-)

diff --git a/target/rx/helper.h b/target/rx/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/helper.h
+++ b/target/rx/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
 DEF_HELPER_1(raise_access_fault, noreturn, env)
 DEF_HELPER_1(raise_privilege_violation, noreturn, env)
 DEF_HELPER_1(wait, noreturn, env)
-DEF_HELPER_1(debug, noreturn, env)
 DEF_HELPER_2(rxint, noreturn, env, i32)
 DEF_HELPER_1(rxbrk, noreturn, env)
 DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32)
diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/op_helper.c
+++ b/target/rx/op_helper.c
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN helper_wait(CPURXState *env)
     raise_exception(env, EXCP_HLT, 0);
 }
 
-void QEMU_NORETURN helper_debug(CPURXState *env)
-{
-    CPUState *cs = env_cpu(env);
-
-    cs->exception_index = EXCP_DEBUG;
-    cpu_loop_exit(cs);
-}
-
 void QEMU_NORETURN helper_rxint(CPURXState *env, uint32_t vec)
 {
     raise_exception(env, 0x100 + vec, 0);
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
         tcg_gen_exit_tb(dc->base.tb, n);
     } else {
         tcg_gen_movi_i32(cpu_pc, dest);
-        if (dc->base.singlestep_enabled) {
-            gen_helper_debug(cpu_env);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
     dc->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         gen_goto_tb(ctx, 0, dcbase->pc_next);
         break;
     case DISAS_JUMP:
-        if (ctx->base.singlestep_enabled) {
-            gen_helper_debug(cpu_env);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
         break;
     case DISAS_UPDATE:
         tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
--
2.25.1

diff view generated by jsdifflib
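A minimal usage sketch (the helper is illustrative, not part of the patch): in an always-inlined function whose argument is a compile-time constant at every call site, the default case must be optimized away, or the build fails instead of keeping a silent runtime abort:

    static inline int sketch_memop_size(MemOp op)
    {
        switch (op & MO_SIZE) {
        case MO_8:  return 1;
        case MO_16: return 2;
        case MO_32: return 4;
        case MO_64: return 8;
        default:
            qemu_build_not_reached();  /* must be proven dead */
        }
    }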
All callers pass false to this argument. Remove it and pass the
constant on to tb_invalidate_phys_page_range__locked.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/translate-all.h | 3 +--
 accel/tcg/translate-all.c | 6 ++----
 exec.c                    | 4 ++--
 3 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.h
+++ b/accel/tcg/translate-all.h
@@ -XXX,XX +XXX,XX @@ struct page_collection *page_collection_lock(tb_page_addr_t start,
 void page_collection_unlock(struct page_collection *set);
 void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                   tb_page_addr_t start, int len);
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
-                                   int is_cpu_write_access);
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
 void tb_check_watchpoint(CPUState *cpu);
 
 #ifdef CONFIG_USER_ONLY
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
  *
  * Called with mmap_lock held for user-mode emulation
  */
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
-                                   int is_cpu_write_access)
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
 {
     struct page_collection *pages;
     PageDesc *p;
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
         return;
     }
     pages = page_collection_lock(start, end);
-    tb_invalidate_phys_page_range__locked(pages, p, start, end,
-                                          is_cpu_write_access);
+    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
     page_collection_unlock(pages);
 }
 
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ const char *parse_cpu_option(const char *cpu_option)
 void tb_invalidate_phys_addr(target_ulong addr)
 {
     mmap_lock();
-    tb_invalidate_phys_page_range(addr, addr + 1, 0);
+    tb_invalidate_phys_page_range(addr, addr + 1);
     mmap_unlock();
 }
 
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
         return;
     }
     ram_addr = memory_region_get_ram_addr(mr) + addr;
-    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
     rcu_read_unlock();
 }
 
--
2.17.1


GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/tcg/translate.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ struct DisasContext {
     uint64_t pc_tmp;
     uint32_t ilen;
     enum cc_op cc_op;
-    bool do_debug;
 };
 
 /* Information carried about a condition to be evaluated. */
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 
     dc->cc_op = CC_OP_DYNAMIC;
     dc->ex_value = dc->base.tb->cs_base;
-    dc->do_debug = dc->base.singlestep_enabled;
 }
 
 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         /* FALLTHRU */
     case DISAS_PC_CC_UPDATED:
         /* Exit the TB, either by raising a debug exception or by return. */
-        if (dc->do_debug) {
-            gen_exception(EXCP_DEBUG);
-        } else if ((dc->base.tb->flags & FLAG_MASK_PER) ||
-                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
+        if ((dc->base.tb->flags & FLAG_MASK_PER) ||
+            dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
             tcg_gen_exit_tb(NULL, 0);
         } else {
             tcg_gen_lookup_and_goto_ptr();
--
2.25.1

diff view generated by jsdifflib
This forced inlining can result in missing symbols,
which makes a debugging build harder to follow.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reported-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/compiler.h | 11 +++++++++++
 accel/tcg/cputlb.c      |  4 ++--
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -XXX,XX +XXX,XX @@
 # define QEMU_NONSTRING
 #endif
 
+/*
+ * Forced inlining may be desired to encourage constant propagation
+ * of function parameters. However, it can also make debugging harder,
+ * so disable it for a non-optimizing build.
+ */
+#if defined(__OPTIMIZE__)
+#define QEMU_ALWAYS_INLINE  __attribute__((always_inline))
+#else
+#define QEMU_ALWAYS_INLINE
+#endif
+
 /* Implement C11 _Generic via GCC builtins. Example:
  *
  *    QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr);
 
-static inline uint64_t __attribute__((always_inline))
+static inline uint64_t QEMU_ALWAYS_INLINE
 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             uintptr_t retaddr, MemOp op, bool code_read,
             FullLoadHelper *full_load)
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
  * Store Helpers
  */
 
-static inline void __attribute__((always_inline))
+static inline void QEMU_ALWAYS_INLINE
 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
              TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
 {
--
2.17.1


GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sh4/helper.h    |  1 -
 target/sh4/op_helper.c |  5 -----
 target/sh4/translate.c | 14 +++-----------
 3 files changed, 3 insertions(+), 17 deletions(-)

diff --git a/target/sh4/helper.h b/target/sh4/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/helper.h
+++ b/target/sh4/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
 DEF_HELPER_1(raise_slot_illegal_instruction, noreturn, env)
 DEF_HELPER_1(raise_fpu_disable, noreturn, env)
 DEF_HELPER_1(raise_slot_fpu_disable, noreturn, env)
-DEF_HELPER_1(debug, noreturn, env)
 DEF_HELPER_1(sleep, noreturn, env)
 DEF_HELPER_2(trapa, noreturn, env, i32)
 DEF_HELPER_1(exclusive, noreturn, env)
diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/op_helper.c
+++ b/target/sh4/op_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_raise_slot_fpu_disable(CPUSH4State *env)
     raise_exception(env, 0x820, 0);
 }
 
-void helper_debug(CPUSH4State *env)
-{
-    raise_exception(env, EXCP_DEBUG, 0);
-}
-
 void helper_sleep(CPUSH4State *env)
 {
     CPUState *cs = env_cpu(env);
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
         tcg_gen_exit_tb(ctx->base.tb, n);
     } else {
         tcg_gen_movi_i32(cpu_pc, dest);
-        if (ctx->base.singlestep_enabled) {
-            gen_helper_debug(cpu_env);
-        } else if (use_exit_tb(ctx)) {
+        if (use_exit_tb(ctx)) {
             tcg_gen_exit_tb(NULL, 0);
         } else {
             tcg_gen_lookup_and_goto_ptr();
@@ -XXX,XX +XXX,XX @@ static void gen_jump(DisasContext * ctx)
        delayed jump as immediate jump are conditinal jumps */
     tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
     tcg_gen_discard_i32(cpu_delayed_pc);
-    if (ctx->base.singlestep_enabled) {
-        gen_helper_debug(cpu_env);
-    } else if (use_exit_tb(ctx)) {
+    if (use_exit_tb(ctx)) {
         tcg_gen_exit_tb(NULL, 0);
     } else {
         tcg_gen_lookup_and_goto_ptr();
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
     switch (ctx->base.is_jmp) {
     case DISAS_STOP:
         gen_save_cpu_state(ctx, true);
-        if (ctx->base.singlestep_enabled) {
-            gen_helper_debug(cpu_env);
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+        tcg_gen_exit_tb(NULL, 0);
         break;
     case DISAS_NEXT:
     case DISAS_TOO_MANY:
--
2.25.1

diff view generated by jsdifflib
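Roughly how the series relies on the attribute (simplified from the actual helpers; treat the signature as a sketch): each tiny full_*_mmu wrapper passes a constant MemOp, and forced inlining lets that constant propagate through load_helper:

    static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
    {
        /* MO_LEUL is constant here; with QEMU_ALWAYS_INLINE the body of
           load_helper specializes down to a few host instructions. */
        return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                           full_le_ldul_mmu);
    }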
It does not require going through the whole I/O path
in order to discard a write.

Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h    |  5 ++++-
 include/exec/cpu-common.h |  1 -
 accel/tcg/cputlb.c        | 36 ++++++++++++++++++++--------------
 exec.c                    | 41 +--------------------------------------
 4 files changed, 26 insertions(+), 57 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
 /* Set if TLB entry requires byte swap.  */
 #define TLB_BSWAP           (1 << (TARGET_PAGE_BITS_MIN - 5))
+/* Set if TLB entry writes ignored.  */
+#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 6))

 /* Use this mask to check interception with an alignment mask
  * in a TCG backend.
  */
 #define TLB_FLAGS_MASK \
-    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT | TLB_BSWAP)
+    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
+     | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)

 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -XXX,XX +XXX,XX @@ void qemu_flush_coalesced_mmio_buffer(void);

 void cpu_flush_icache_range(hwaddr start, hwaddr len);

-extern struct MemoryRegion io_mem_rom;
 extern struct MemoryRegion io_mem_notdirty;

 typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
 {
     uintptr_t addr = tlb_entry->addr_write;

-    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
+    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
+                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
         addr &= TARGET_PAGE_MASK;
         addr += tlb_entry->addend;
         if ((addr - start) < length) {
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         address |= TLB_MMIO;
         addend = 0;
     } else {
-        /* TLB_MMIO for rom/romd handled below */
         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
     }

@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,

     tn.addr_write = -1;
     if (prot & PAGE_WRITE) {
-        if ((memory_region_is_ram(section->mr) && section->readonly)
-            || memory_region_is_romd(section->mr)) {
-            /* Write access calls the I/O callback.  */
-            tn.addr_write = address | TLB_MMIO;
-        } else if (memory_region_is_ram(section->mr)
-                   && cpu_physical_memory_is_clean(
-                       memory_region_get_ram_addr(section->mr) + xlat)) {
-            tn.addr_write = address | TLB_NOTDIRTY;
-        } else {
-            tn.addr_write = address;
+        tn.addr_write = address;
+        if (memory_region_is_romd(section->mr)) {
+            /* Use the MMIO path so that the device can switch states. */
+            tn.addr_write |= TLB_MMIO;
+        } else if (memory_region_is_ram(section->mr)) {
+            if (section->readonly) {
+                tn.addr_write |= TLB_DISCARD_WRITE;
+            } else if (cpu_physical_memory_is_clean(
+                        memory_region_get_ram_addr(section->mr) + xlat)) {
+                tn.addr_write |= TLB_NOTDIRTY;
+            }
         }
         if (prot & PAGE_WRITE_INV) {
             tn.addr_write |= TLB_INVALID_MASK;
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
+    if (mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }

@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
     mr = section->mr;
     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
+    if (mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
     cpu->mem_io_vaddr = addr;
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
     }

     /* Reject I/O access, or other required slow-path.  */
-    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP)) {
+    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
         return NULL;
     }

@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         return;
     }

+    /* Ignore writes to ROM.  */
+    if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
+        return;
+    }
+
     haddr = (void *)((uintptr_t)addr + entry->addend);

     /*
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ static MemoryRegion *system_io;
 AddressSpace address_space_io;
 AddressSpace address_space_memory;

-MemoryRegion io_mem_rom, io_mem_notdirty;
+MemoryRegion io_mem_notdirty;
 static MemoryRegion io_mem_unassigned;
 #endif

@@ -XXX,XX +XXX,XX @@ typedef struct subpage_t {

 #define PHYS_SECTION_UNASSIGNED 0
 #define PHYS_SECTION_NOTDIRTY 1
-#define PHYS_SECTION_ROM 2

 static void io_mem_init(void);
 static void memory_map_init(void);
@@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
         if (!section->readonly) {
             iotlb |= PHYS_SECTION_NOTDIRTY;
-        } else {
-            iotlb |= PHYS_SECTION_ROM;
         }
     } else {
         AddressSpaceDispatch *d;
@@ -XXX,XX +XXX,XX @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
     return phys_section_add(map, &section);
 }

-static void readonly_mem_write(void *opaque, hwaddr addr,
-                               uint64_t val, unsigned size)
-{
-    /* Ignore any write to ROM. */
-}
-
-static bool readonly_mem_accepts(void *opaque, hwaddr addr,
-                                 unsigned size, bool is_write,
-                                 MemTxAttrs attrs)
-{
-    return is_write;
-}
-
-/* This will only be used for writes, because reads are special cased
- * to directly access the underlying host ram.
- */
-static const MemoryRegionOps readonly_mem_ops = {
-    .write = readonly_mem_write,
-    .valid.accepts = readonly_mem_accepts,
-    .endianness = DEVICE_NATIVE_ENDIAN,
-    .valid = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-        .unaligned = false,
-    },
-    .impl = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-        .unaligned = false,
-    },
-};
-
 MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                       hwaddr index, MemTxAttrs attrs)
 {
@@ -XXX,XX +XXX,XX @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,

 static void io_mem_init(void)
 {
-    memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
-                          NULL, NULL, UINT64_MAX);
     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                           NULL, UINT64_MAX);

@@ -XXX,XX +XXX,XX @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
     assert(n == PHYS_SECTION_UNASSIGNED);
     n = dummy_section(&d->map, fv, &io_mem_notdirty);
     assert(n == PHYS_SECTION_NOTDIRTY);
-    n = dummy_section(&d->map, fv, &io_mem_rom);
-    assert(n == PHYS_SECTION_ROM);

     d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

-- 
2.17.1

}
26
}
168
27
169
-static void readonly_mem_write(void *opaque, hwaddr addr,
28
-void helper_qemu_excp(CPUTriCoreState *env, uint32_t excp)
170
- uint64_t val, unsigned size)
171
-{
29
-{
172
- /* Ignore any write to ROM. */
30
- CPUState *cs = env_cpu(env);
31
- cs->exception_index = excp;
32
- cpu_loop_exit(cs);
173
-}
33
-}
174
-
34
-
175
-static bool readonly_mem_accepts(void *opaque, hwaddr addr,
35
/* Addressing mode helper */
176
- unsigned size, bool is_write,
36
177
- MemTxAttrs attrs)
37
static uint16_t reverse16(uint16_t val)
38
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/tricore/translate.c
41
+++ b/target/tricore/translate.c
42
@@ -XXX,XX +XXX,XX @@ static inline void gen_save_pc(target_ulong pc)
43
tcg_gen_movi_tl(cpu_PC, pc);
44
}
45
46
-static void generate_qemu_excp(DisasContext *ctx, int excp)
178
-{
47
-{
179
- return is_write;
48
- TCGv_i32 tmp = tcg_const_i32(excp);
49
- gen_helper_qemu_excp(cpu_env, tmp);
50
- ctx->base.is_jmp = DISAS_NORETURN;
51
- tcg_temp_free(tmp);
180
-}
52
-}
181
-
53
-
182
-/* This will only be used for writes, because reads are special cased
54
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
183
- * to directly access the underlying host ram.
184
- */
185
-static const MemoryRegionOps readonly_mem_ops = {
186
- .write = readonly_mem_write,
187
- .valid.accepts = readonly_mem_accepts,
188
- .endianness = DEVICE_NATIVE_ENDIAN,
189
- .valid = {
190
- .min_access_size = 1,
191
- .max_access_size = 8,
192
- .unaligned = false,
193
- },
194
- .impl = {
195
- .min_access_size = 1,
196
- .max_access_size = 8,
197
- .unaligned = false,
198
- },
199
-};
200
-
201
MemoryRegionSection *iotlb_to_section(CPUState *cpu,
202
hwaddr index, MemTxAttrs attrs)
203
{
55
{
204
@@ -XXX,XX +XXX,XX @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
56
if (translator_use_goto_tb(&ctx->base, dest)) {
205
57
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
206
static void io_mem_init(void)
58
tcg_gen_exit_tb(ctx->base.tb, n);
207
{
59
} else {
208
- memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
60
gen_save_pc(dest);
209
- NULL, NULL, UINT64_MAX);
61
- if (ctx->base.singlestep_enabled) {
210
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
62
- generate_qemu_excp(ctx, EXCP_DEBUG);
211
NULL, UINT64_MAX);
63
- } else {
212
64
- tcg_gen_lookup_and_goto_ptr();
213
@@ -XXX,XX +XXX,XX @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
65
- }
214
assert(n == PHYS_SECTION_UNASSIGNED);
66
+ tcg_gen_lookup_and_goto_ptr();
215
n = dummy_section(&d->map, fv, &io_mem_notdirty);
67
}
216
assert(n == PHYS_SECTION_NOTDIRTY);
68
}
217
- n = dummy_section(&d->map, fv, &io_mem_rom);
218
- assert(n == PHYS_SECTION_ROM);
219
220
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
221
69
222
--
70
--
223
2.17.1
71
2.25.1
224
72
225
73
These bits do not need to vary with the actual page size
used by the guest.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);

 #if !defined(CONFIG_USER_ONLY)

-/* Flags stored in the low bits of the TLB virtual address.  These are
- * defined so that fast path ram access is all zeros.
+/*
+ * Flags stored in the low bits of the TLB virtual address.
+ * These are defined so that fast path ram access is all zeros.
  * The flags all must be between TARGET_PAGE_BITS and
  * maximum address alignment bit.
+ *
+ * Use TARGET_PAGE_BITS_MIN so that these bits are constant
+ * when TARGET_PAGE_BITS_VARY is in effect.
  */
 /* Zero if TLB entry is valid.  */
-#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS - 1))
+#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
 /* Set if TLB entry references a clean RAM page.  The iotlb entry will
    contain the page physical address.  */
-#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS - 2))
+#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
 /* Set if TLB entry is an IO callback.  */
-#define TLB_MMIO            (1 << (TARGET_PAGE_BITS - 3))
+#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
 /* Set if TLB entry contains a watchpoint.  */
-#define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS - 4))
+#define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))

 /* Use this mask to check interception with an alignment mask
  * in a TCG backend.
-- 
2.17.1

/* Zero if TLB entry is valid. */
27
- }
32
-#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))
28
- if (slot >= 0) {
33
+#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
29
- tcg_gen_goto_tb(slot);
34
/* Set if TLB entry references a clean RAM page. The iotlb entry will
30
- tcg_gen_exit_tb(dc->base.tb, slot);
35
contain the page physical address. */
31
- } else {
36
-#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))
32
- tcg_gen_exit_tb(NULL, 0);
37
+#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
33
- }
38
/* Set if TLB entry is an IO callback. */
34
+ tcg_gen_exit_tb(NULL, 0);
39
-#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
35
}
40
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
36
dc->base.is_jmp = DISAS_NORETURN;
41
/* Set if TLB entry contains a watchpoint. */
37
}
42
-#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4))
38
@@ -XXX,XX +XXX,XX @@ static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
43
+#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
39
case DISAS_NORETURN:
44
40
break;
45
/* Use this mask to check interception with an alignment mask
41
case DISAS_TOO_MANY:
46
* in a TCG backend.
42
- if (dc->base.singlestep_enabled) {
43
- tcg_gen_movi_i32(cpu_pc, dc->pc);
44
- gen_exception(dc, EXCP_DEBUG);
45
- } else {
46
- gen_jumpi(dc, dc->pc, 0);
47
- }
48
+ gen_jumpi(dc, dc->pc, 0);
49
break;
50
default:
51
g_assert_not_reached();
47
--
52
--
48
2.17.1
53
2.25.1
49
54
50
55
With the merge of notdirty handling into store_helper,
the last user of cpu->mem_io_vaddr was removed.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h | 2 --
 accel/tcg/cputlb.c    | 2 --
 hw/core/cpu.c         | 1 -
 3 files changed, 5 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct qemu_work_item;
  * @next_cpu: Next CPU sharing TB cache.
  * @opaque: User data.
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
- * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
  * @work_mutex: Lock to prevent multiple access to queued_work_*.
  * @queued_work_first: First asynchronous work pending.
@@ -XXX,XX +XXX,XX @@ struct CPUState {
      * we store some rarely used information in the CPU context.
      */
     uintptr_t mem_io_pc;
-    vaddr mem_io_vaddr;
     /*
      * This is only needed for the legacy cpu_unassigned_access() hook;
      * when all targets using it have been converted to use
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         cpu_io_recompile(cpu, retaddr);
     }

-    cpu->mem_io_vaddr = addr;
     cpu->mem_io_access_type = access_type;

     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
-    cpu->mem_io_vaddr = addr;
     cpu->mem_io_pc = retaddr;

     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
diff --git a/hw/core/cpu.c b/hw/core/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu.c
+++ b/hw/core/cpu.c
@@ -XXX,XX +XXX,XX @@ static void cpu_common_reset(CPUState *cpu)
     cpu->interrupt_request = 0;
     cpu->halted = 0;
     cpu->mem_io_pc = 0;
-    cpu->mem_io_vaddr = 0;
     cpu->icount_extra = 0;
     atomic_set(&cpu->icount_decr_ptr->u32, 0);
     cpu->can_do_io = 1;
-- 
2.17.1

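A note on why nothing is lost here (my reading, not stated in the
patch): any path that still needs the guest virtual address of an
in-flight access can reconstruct full CPU state by unwinding from the
host return address of the memory helper, e.g.:

    /* Assumed usage: rebuild env state from the TB containing retaddr. */
    cpu_restore_state(cpu, retaddr, true);
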
This reverts commit 1b36e4f5a5de585210ea95f2257839c2312be28f.

Despite a comment saying why cpu_common_props cannot be placed in
a file that is compiled once, it was moved anyway.  Revert that.

Since then, Property is not defined in hw/core/cpu.h, so it is now
easier to declare a function to install the properties rather than
the Property array itself.

Cc: Eduardo Habkost <ehabkost@redhat.com>
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h |  1 +
 cpu.c                 | 21 +++++++++++++++++++++
 hw/core/cpu-common.c  | 17 +----------------
 3 files changed, 23 insertions(+), 16 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
     GCC_FMT_ATTR(2, 3);

 /* $(top_srcdir)/cpu.c */
+void cpu_class_init_props(DeviceClass *dc);
 void cpu_exec_initfn(CPUState *cpu);
 void cpu_exec_realizefn(CPUState *cpu, Error **errp);
 void cpu_exec_unrealizefn(CPUState *cpu);
diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_unrealizefn(CPUState *cpu)
     cpu_list_remove(cpu);
 }

+static Property cpu_common_props[] = {
+#ifndef CONFIG_USER_ONLY
+    /*
+     * Create a memory property for softmmu CPU object,
+     * so users can wire up its memory. (This can't go in hw/core/cpu.c
+     * because that file is compiled only once for both user-mode
+     * and system builds.) The default if no link is set up is to use
+     * the system address space.
+     */
+    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
+                     MemoryRegion *),
+#endif
+    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+void cpu_class_init_props(DeviceClass *dc)
+{
+    device_class_set_props(dc, cpu_common_props);
+}
+
 void cpu_exec_initfn(CPUState *cpu)
 {
     cpu->as = NULL;
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -XXX,XX +XXX,XX @@ static int64_t cpu_common_get_arch_id(CPUState *cpu)
     return cpu->cpu_index;
 }

-static Property cpu_common_props[] = {
-#ifndef CONFIG_USER_ONLY
-    /* Create a memory property for softmmu CPU object,
-     * so users can wire up its memory. (This can't go in hw/core/cpu.c
-     * because that file is compiled only once for both user-mode
-     * and system builds.) The default if no link is set up is to use
-     * the system address space.
-     */
-    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
-                     MemoryRegion *),
-#endif
-    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
-    DEFINE_PROP_END_OF_LIST(),
-};
-
 static void cpu_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
@@ -XXX,XX +XXX,XX @@ static void cpu_class_init(ObjectClass *klass, void *data)
     dc->realize = cpu_common_realizefn;
     dc->unrealize = cpu_common_unrealizefn;
     dc->reset = cpu_common_reset;
-    device_class_set_props(dc, cpu_common_props);
+    cpu_class_init_props(dc);
     /*
      * Reason: CPUs still need special care by board code: wiring up
      * IRQs, adding reset handlers, halting non-first CPUs, ...
-- 
2.25.1

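As a usage note (an illustration of mine, not part of the patch): the
"memory" link restored here is what lets board code hand a CPU its own
address space before realize.  Hypothetical board code, assuming "cpu"
and a MemoryRegion "mr" already exist:

    /* Wire the CPU to a private memory view via the link property. */
    object_property_set_link(OBJECT(cpu), "memory", OBJECT(mr),
                             &error_fatal);
    qdev_realize(DEVICE(cpu), NULL, &error_fatal);
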