This is v4 of my notdirty + rom patch set with two suggested name
changes (qemu_build_not_reached, TLB_DISCARD_WRITE) from David and Alex.

r~

The following changes since commit 240ab11fb72049d6373cbbec8d788f8e411a00bc:

  Merge remote-tracking branch 'remotes/aperard/tags/pull-xen-20190924' into staging (2019-09-24 15:36:31 +0100)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20190925

for you to fetch changes up to ae57db63acf5a0399232f852acc5c1d83ef63400:

  cputlb: Pass retaddr to tb_check_watchpoint (2019-09-25 10:56:28 -0700)

----------------------------------------------------------------
Fixes for TLB_BSWAP
Conversion of NOTDIRTY and ROM handling to cputlb
Followup cleanups to cputlb

----------------------------------------------------------------
Richard Henderson (16):
      exec: Use TARGET_PAGE_BITS_MIN for TLB flags
      cputlb: Disable __always_inline__ without optimization
      qemu/compiler.h: Add qemu_build_not_reached
      cputlb: Use qemu_build_not_reached in load/store_helpers
      cputlb: Split out load/store_memop
      cputlb: Introduce TLB_BSWAP
      exec: Adjust notdirty tracing
      cputlb: Move ROM handling from I/O path to TLB path
      cputlb: Move NOTDIRTY handling from I/O path to TLB path
      cputlb: Partially inline memory_region_section_get_iotlb
      cputlb: Merge and move memory_notdirty_write_{prepare,complete}
      cputlb: Handle TLB_NOTDIRTY in probe_access
      cputlb: Remove cpu->mem_io_vaddr
      cputlb: Remove tb_invalidate_phys_page_range is_cpu_write_access
      cputlb: Pass retaddr to tb_invalidate_phys_page_fast
      cputlb: Pass retaddr to tb_check_watchpoint

 accel/tcg/translate-all.h      |   8 +-
 include/exec/cpu-all.h         |  23 ++-
 include/exec/cpu-common.h      |   3 -
 include/exec/exec-all.h        |   6 +-
 include/exec/memory-internal.h |  65 --------
 include/hw/core/cpu.h          |   2 -
 include/qemu/compiler.h        |  26 +++
 accel/tcg/cputlb.c             | 348 +++++++++++++++++++++++++----------------
 accel/tcg/translate-all.c      |  51 +++---
 exec.c                         | 158 +------------------
 hw/core/cpu.c                  |   1 -
 memory.c                       |  20 ---
 trace-events                   |   4 +-
 13 files changed, 288 insertions(+), 427 deletions(-)


V2 fixes a build problem that affected win32.

r~

The following changes since commit 187f35512106501fe9a11057f4d8705431e0026d:

  Merge remote-tracking branch 'remotes/stsquad/tags/pull-testing-next-251019-3' into staging (2019-10-26 10:13:48 +0100)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20191028

for you to fetch changes up to fe9b676fb3160496b4b2bf0c57d33be724bf04c3:

  translate-all: Remove tb_alloc (2019-10-28 10:35:23 +0100)

----------------------------------------------------------------
Improvements for TARGET_PAGE_BITS_VARY
Fix for TCI ld16u_i64.
Fix for segv on icount execute from i/o memory.
Two misc cleanups.

----------------------------------------------------------------
Alex Bennée (1):
      cputlb: ensure _cmmu helper functions follow the naming standard

Clement Deschamps (1):
      translate-all: fix uninitialized tb->orig_tb

Richard Henderson (8):
      exec: Split out variable page size support to exec-vary.c
      configure: Detect compiler support for __attribute__((alias))
      exec: Use const alias for TARGET_PAGE_BITS_VARY
      exec: Restrict TARGET_PAGE_BITS_VARY assert to CONFIG_DEBUG_TCG
      exec: Promote TARGET_PAGE_MASK to target_long
      exec: Cache TARGET_PAGE_MASK for TARGET_PAGE_BITS_VARY
      cputlb: Fix tlb_vaddr_to_host
      translate-all: Remove tb_alloc

Stefan Weil (1):
      tci: Add implementation for INDEX_op_ld16u_i64

Wei Yang (1):
      cpu: use ROUND_UP() to define xxx_PAGE_ALIGN

 Makefile.target                  |   2 +-
 include/exec/cpu-all.h           |  33 ++++++++----
 include/exec/cpu_ldst_template.h |   4 +-
 include/qemu-common.h            |   6 +++
 tcg/tcg.h                        |  20 +++++---
 accel/tcg/cputlb.c               |  26 ++++++++--
 accel/tcg/translate-all.c        |  21 ++------
 exec-vary.c                      | 108 +++++++++++++++++++++++++++++++++++++++
 exec.c                           |  34 ------------
 target/cris/translate_v10.inc.c  |   3 +-
 tcg/tci.c                        |  15 ++++++
 configure                        |  19 +++++++
 12 files changed, 214 insertions(+), 77 deletions(-)
 create mode 100644 exec-vary.c

cputlb: Merge and move memory_notdirty_write_{prepare,complete}

Since 9458a9a1df1a, all readers of the dirty bitmaps wait
for the rcu lock, which means that they wait until the end
of any executing TranslationBlock.

As a consequence, there is no need for the actual access
to happen in between the _prepare and _complete.  Therefore,
we can improve things by merging the two functions into
notdirty_write and dropping the NotDirtyInfo structure.

In addition, the only users of notdirty_write are in cputlb.c,
so move the merged function there.  Pass in the CPUIOTLBEntry
from which the ram_addr_t may be computed.

Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/memory-internal.h | 65 -----------------------------
 accel/tcg/cputlb.c             | 76 +++++++++++++++++++---------------
 exec.c                         | 44 --------------------
 3 files changed, 42 insertions(+), 143 deletions(-)


tci: Add implementation for INDEX_op_ld16u_i64

From: Stefan Weil <sw@weilnetz.de>

This fixes "make check-tcg" on a Debian x86_64 host.

Signed-off-by: Stefan Weil <sw@weilnetz.de>
Tested-by: Thomas Huth <thuth@redhat.com>
Message-Id: <20190410194838.10123-1-sw@weilnetz.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
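
As a rough sketch of the calling convention described in the first message
above (drawn from the hunks below, not a drop-in snippet): the prepare/complete
pair that used to bracket the guest store collapses into a single helper,
called before the write, relying on the RCU guarantee that dirty-bitmap
readers wait out the currently executing TB.

    /* Before: callers had to wrap the host write themselves. */
    NotDirtyInfo ndi;
    memory_notdirty_write_prepare(&ndi, cpu, vaddr, ram_addr, size);
    /* ... perform the actual host write here ... */
    memory_notdirty_write_complete(&ndi);

    /* After: one call, made before the write. */
    notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
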
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
13
diff --git a/tcg/tci.c b/tcg/tci.c
24
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
25
--- a/include/exec/memory-internal.h
15
--- a/tcg/tci.c
26
+++ b/include/exec/memory-internal.h
16
+++ b/tcg/tci.c
27
@@ -XXX,XX +XXX,XX @@ void address_space_dispatch_free(AddressSpaceDispatch *d);
17
@@ -XXX,XX +XXX,XX @@ static void tci_write_reg8(tcg_target_ulong *regs, TCGReg index, uint8_t value)
28
18
tci_write_reg(regs, index, value);
29
void mtree_print_dispatch(struct AddressSpaceDispatch *d,
30
MemoryRegion *root);
31
-
32
-struct page_collection;
33
-
34
-/* Opaque struct for passing info from memory_notdirty_write_prepare()
35
- * to memory_notdirty_write_complete(). Callers should treat all fields
36
- * as private, with the exception of @active.
37
- *
38
- * @active is a field which is not touched by either the prepare or
39
- * complete functions, but which the caller can use if it wishes to
40
- * track whether it has called prepare for this struct and so needs
41
- * to later call the complete function.
42
- */
43
-typedef struct {
44
- CPUState *cpu;
45
- struct page_collection *pages;
46
- ram_addr_t ram_addr;
47
- vaddr mem_vaddr;
48
- unsigned size;
49
- bool active;
50
-} NotDirtyInfo;
51
-
52
-/**
53
- * memory_notdirty_write_prepare: call before writing to non-dirty memory
54
- * @ndi: pointer to opaque NotDirtyInfo struct
55
- * @cpu: CPU doing the write
56
- * @mem_vaddr: virtual address of write
57
- * @ram_addr: the ram address of the write
58
- * @size: size of write in bytes
59
- *
60
- * Any code which writes to the host memory corresponding to
61
- * guest RAM which has been marked as NOTDIRTY must wrap those
62
- * writes in calls to memory_notdirty_write_prepare() and
63
- * memory_notdirty_write_complete():
64
- *
65
- * NotDirtyInfo ndi;
66
- * memory_notdirty_write_prepare(&ndi, ....);
67
- * ... perform write here ...
68
- * memory_notdirty_write_complete(&ndi);
69
- *
70
- * These calls will ensure that we flush any TCG translated code for
71
- * the memory being written, update the dirty bits and (if possible)
72
- * remove the slowpath callback for writing to the memory.
73
- *
74
- * This must only be called if we are using TCG; it will assert otherwise.
75
- *
76
- * We may take locks in the prepare call, so callers must ensure that
77
- * they don't exit (via longjump or otherwise) without calling complete.
78
- *
79
- * This call must only be made inside an RCU critical section.
80
- * (Note that while we're executing a TCG TB we're always in an
81
- * RCU critical section, which is likely to be the case for callers
82
- * of these functions.)
83
- */
84
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
85
- CPUState *cpu,
86
- vaddr mem_vaddr,
87
- ram_addr_t ram_addr,
88
- unsigned size);
89
-/**
90
- * memory_notdirty_write_complete: finish write to non-dirty memory
91
- * @ndi: pointer to the opaque NotDirtyInfo struct which was initialized
92
- * by memory_not_dirty_write_prepare().
93
- */
94
-void memory_notdirty_write_complete(NotDirtyInfo *ndi);
95
-
96
#endif
97
#endif
98
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/accel/tcg/cputlb.c
101
+++ b/accel/tcg/cputlb.c
102
@@ -XXX,XX +XXX,XX @@
103
#include "exec/helper-proto.h"
104
#include "qemu/atomic.h"
105
#include "qemu/atomic128.h"
106
+#include "translate-all.h"
107
108
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
109
/* #define DEBUG_TLB */
110
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
111
return qemu_ram_addr_from_host_nofail(p);
112
}
19
}
113
20
114
+static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
21
+static void
115
+ CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
22
+tci_write_reg16(tcg_target_ulong *regs, TCGReg index, uint16_t value)
116
+{
23
+{
117
+ ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
24
+ tci_write_reg(regs, index, value);
118
+
119
+ trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
120
+
121
+ if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
122
+ struct page_collection *pages
123
+ = page_collection_lock(ram_addr, ram_addr + size);
124
+
125
+ /* We require mem_io_pc in tb_invalidate_phys_page_range. */
126
+ cpu->mem_io_pc = retaddr;
127
+
128
+ tb_invalidate_phys_page_fast(pages, ram_addr, size);
129
+ page_collection_unlock(pages);
130
+ }
131
+
132
+ /*
133
+ * Set both VGA and migration bits for simplicity and to remove
134
+ * the notdirty callback faster.
135
+ */
136
+ cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
137
+
138
+ /* We remove the notdirty callback only if the code has been flushed. */
139
+ if (!cpu_physical_memory_is_clean(ram_addr)) {
140
+ trace_memory_notdirty_set_dirty(mem_vaddr);
141
+ tlb_set_dirty(cpu, mem_vaddr);
142
+ }
143
+}
25
+}
144
+
26
+
145
/*
27
static void
146
* Probe for whether the specified guest access is permitted. If it is not
28
tci_write_reg32(tcg_target_ulong *regs, TCGReg index, uint32_t value)
147
* permitted then an exception will be taken in the same way as if this
148
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
149
/* Probe for a read-modify-write atomic operation. Do not allow unaligned
150
* operations, or io operations to proceed. Return the host address. */
151
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
152
- TCGMemOpIdx oi, uintptr_t retaddr,
153
- NotDirtyInfo *ndi)
154
+ TCGMemOpIdx oi, uintptr_t retaddr)
155
{
29
{
156
size_t mmu_idx = get_mmuidx(oi);
30
@@ -XXX,XX +XXX,XX @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
157
uintptr_t index = tlb_index(env, mmu_idx, addr);
31
tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
158
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
32
break;
159
33
case INDEX_op_ld8s_i32:
160
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
34
+ TODO();
161
35
+ break;
162
- ndi->active = false;
36
case INDEX_op_ld16u_i32:
163
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
37
TODO();
164
- ndi->active = true;
38
break;
165
- memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
39
@@ -XXX,XX +XXX,XX @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
166
- qemu_ram_addr_from_host_nofail(hostaddr),
40
tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
167
- 1 << s_bits);
41
break;
168
+ notdirty_write(env_cpu(env), addr, 1 << s_bits,
42
case INDEX_op_ld8s_i64:
169
+ &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
43
+ TODO();
170
}
44
+ break;
171
45
case INDEX_op_ld16u_i64:
172
return hostaddr;
46
+ t0 = *tb_ptr++;
173
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
47
+ t1 = tci_read_r(regs, &tb_ptr);
174
return;
48
+ t2 = tci_read_s32(&tb_ptr);
175
}
49
+ tci_write_reg16(regs, t0, *(uint16_t *)(t1 + t2));
176
50
+ break;
177
- haddr = (void *)((uintptr_t)addr + entry->addend);
51
case INDEX_op_ld16s_i64:
178
-
52
TODO();
179
/* Handle clean RAM pages. */
53
break;
180
if (tlb_addr & TLB_NOTDIRTY) {
181
- NotDirtyInfo ndi;
182
-
183
- /* We require mem_io_pc in tb_invalidate_phys_page_range. */
184
- env_cpu(env)->mem_io_pc = retaddr;
185
-
186
- memory_notdirty_write_prepare(&ndi, env_cpu(env), addr,
187
- addr + iotlbentry->addr, size);
188
-
189
- if (unlikely(need_swap)) {
190
- store_memop(haddr, val, op ^ MO_BSWAP);
191
- } else {
192
- store_memop(haddr, val, op);
193
- }
194
-
195
- memory_notdirty_write_complete(&ndi);
196
- return;
197
+ notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
198
}
199
200
+ haddr = (void *)((uintptr_t)addr + entry->addend);
201
+
202
/*
203
* Keep these two store_memop separate to ensure that the compiler
204
* is able to fold the entire function to a single instruction.
205
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
206
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
207
#define ATOMIC_NAME(X) \
208
HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
209
-#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
210
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
211
-#define ATOMIC_MMU_CLEANUP \
212
- do { \
213
- if (unlikely(ndi.active)) { \
214
- memory_notdirty_write_complete(&ndi); \
215
- } \
216
- } while (0)
217
+#define ATOMIC_MMU_DECLS
218
+#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
219
+#define ATOMIC_MMU_CLEANUP
220
221
#define DATA_SIZE 1
222
#include "atomic_template.h"
223
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
224
#undef ATOMIC_MMU_LOOKUP
225
#define EXTRA_ARGS , TCGMemOpIdx oi
226
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
227
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
228
+#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
229
230
#define DATA_SIZE 1
231
#include "atomic_template.h"
232
diff --git a/exec.c b/exec.c
233
index XXXXXXX..XXXXXXX 100644
234
--- a/exec.c
235
+++ b/exec.c
236
@@ -XXX,XX +XXX,XX @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
237
return block->offset + offset;
238
}
239
240
-/* Called within RCU critical section. */
241
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
242
- CPUState *cpu,
243
- vaddr mem_vaddr,
244
- ram_addr_t ram_addr,
245
- unsigned size)
246
-{
247
- ndi->cpu = cpu;
248
- ndi->ram_addr = ram_addr;
249
- ndi->mem_vaddr = mem_vaddr;
250
- ndi->size = size;
251
- ndi->pages = NULL;
252
-
253
- trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
254
-
255
- assert(tcg_enabled());
256
- if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
257
- ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
258
- tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size);
259
- }
260
-}
261
-
262
-/* Called within RCU critical section. */
263
-void memory_notdirty_write_complete(NotDirtyInfo *ndi)
264
-{
265
- if (ndi->pages) {
266
- assert(tcg_enabled());
267
- page_collection_unlock(ndi->pages);
268
- ndi->pages = NULL;
269
- }
270
-
271
- /* Set both VGA and migration bits for simplicity and to remove
272
- * the notdirty callback faster.
273
- */
274
- cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
275
- DIRTY_CLIENTS_NOCODE);
276
- /* we remove the notdirty callback only if the code has been
277
- flushed */
278
- if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
279
- trace_memory_notdirty_set_dirty(ndi->mem_vaddr);
280
- tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
281
- }
282
-}
283
-
284
/* Generate a debug exception if a watchpoint has been hit. */
285
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
286
MemTxAttrs attrs, int flags, uintptr_t ra)
287
-- 
2.17.1

cputlb: Split out load/store_memop

We will shortly be using these more than once.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 107 +++++++++++++++++++++++----------------------
 1 file changed, 55 insertions(+), 52 deletions(-)


cputlb: ensure _cmmu helper functions follow the naming standard

From: Alex Bennée <alex.bennee@linaro.org>

We document this in docs/devel/loads-stores.rst, so let's follow it.  The
32-bit and 64-bit access functions have historically not included the
sign, so we leave those as is.  We also introduce some signed helpers
which are used for loading immediate values in the translator.

Fixes: 282dffc8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20191021150910.23216-1-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst_template.h |  4 ++--
 tcg/tcg.h                        | 20 ++++++++++++++------
 accel/tcg/cputlb.c               | 24 +++++++++++++++++++++---
 target/cris/translate_v10.inc.c  |  3 +--
 4 files changed, 38 insertions(+), 13 deletions(-)
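
To illustrate the naming convention the second message refers to (prototypes
taken from the tcg/tcg.h hunk below; only a sketch, not the full set of
helpers): the unsigned 16-bit code load gains a "u", and a signed counterpart
is added alongside it.

    uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr);
    int16_t  helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr);

The 32-bit and 64-bit helpers (helper_be_ldl_cmmu, helper_be_ldq_cmmu) keep
their existing names, as the message notes.
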
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/include/exec/cpu_ldst_template.h
23
+++ b/include/exec/cpu_ldst_template.h
24
@@ -XXX,XX +XXX,XX @@
25
#ifdef SOFTMMU_CODE_ACCESS
26
#define ADDR_READ addr_code
27
#define MMUSUFFIX _cmmu
28
-#define URETSUFFIX SUFFIX
29
-#define SRETSUFFIX SUFFIX
30
+#define URETSUFFIX USUFFIX
31
+#define SRETSUFFIX glue(s, SUFFIX)
32
#else
33
#define ADDR_READ addr_read
34
#define MMUSUFFIX _mmu
35
diff --git a/tcg/tcg.h b/tcg/tcg.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/tcg/tcg.h
38
+++ b/tcg/tcg.h
39
@@ -XXX,XX +XXX,XX @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
40
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
41
TCGMemOpIdx oi, uintptr_t retaddr);
42
43
-uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
44
+uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
45
TCGMemOpIdx oi, uintptr_t retaddr);
46
-uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
47
+int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
48
TCGMemOpIdx oi, uintptr_t retaddr);
49
+uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
50
+ TCGMemOpIdx oi, uintptr_t retaddr);
51
+int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
52
+ TCGMemOpIdx oi, uintptr_t retaddr);
53
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
54
TCGMemOpIdx oi, uintptr_t retaddr);
55
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
56
TCGMemOpIdx oi, uintptr_t retaddr);
57
-uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
58
- TCGMemOpIdx oi, uintptr_t retaddr);
59
+uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
60
+ TCGMemOpIdx oi, uintptr_t retaddr);
61
+int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
62
+ TCGMemOpIdx oi, uintptr_t retaddr);
63
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
64
TCGMemOpIdx oi, uintptr_t retaddr);
65
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
66
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
67
# define helper_ret_stw_mmu helper_be_stw_mmu
68
# define helper_ret_stl_mmu helper_be_stl_mmu
69
# define helper_ret_stq_mmu helper_be_stq_mmu
70
-# define helper_ret_ldw_cmmu helper_be_ldw_cmmu
71
+# define helper_ret_lduw_cmmu helper_be_lduw_cmmu
72
+# define helper_ret_ldsw_cmmu helper_be_ldsw_cmmu
73
# define helper_ret_ldl_cmmu helper_be_ldl_cmmu
74
# define helper_ret_ldq_cmmu helper_be_ldq_cmmu
75
#else
76
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
77
# define helper_ret_stw_mmu helper_le_stw_mmu
78
# define helper_ret_stl_mmu helper_le_stl_mmu
79
# define helper_ret_stq_mmu helper_le_stq_mmu
80
-# define helper_ret_ldw_cmmu helper_le_ldw_cmmu
81
+# define helper_ret_lduw_cmmu helper_le_lduw_cmmu
82
+# define helper_ret_ldsw_cmmu helper_le_ldsw_cmmu
83
# define helper_ret_ldl_cmmu helper_le_ldl_cmmu
84
# define helper_ret_ldq_cmmu helper_le_ldq_cmmu
85
#endif
10
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
86
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
11
index XXXXXXX..XXXXXXX 100644
87
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/cputlb.c
88
--- a/accel/tcg/cputlb.c
13
+++ b/accel/tcg/cputlb.c
89
+++ b/accel/tcg/cputlb.c
14
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
90
@@ -XXX,XX +XXX,XX @@ static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
15
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
91
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
16
TCGMemOpIdx oi, uintptr_t retaddr);
92
}
17
93
18
+static inline uint64_t QEMU_ALWAYS_INLINE
94
-uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
19
+load_memop(const void *haddr, MemOp op)
95
+uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
96
TCGMemOpIdx oi, uintptr_t retaddr)
97
{
98
return full_ldub_cmmu(env, addr, oi, retaddr);
99
}
100
101
+int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
102
+ TCGMemOpIdx oi, uintptr_t retaddr)
20
+{
103
+{
21
+ switch (op) {
104
+ return (int8_t) full_ldub_cmmu(env, addr, oi, retaddr);
22
+ case MO_UB:
23
+ return ldub_p(haddr);
24
+ case MO_BEUW:
25
+ return lduw_be_p(haddr);
26
+ case MO_LEUW:
27
+ return lduw_le_p(haddr);
28
+ case MO_BEUL:
29
+ return (uint32_t)ldl_be_p(haddr);
30
+ case MO_LEUL:
31
+ return (uint32_t)ldl_le_p(haddr);
32
+ case MO_BEQ:
33
+ return ldq_be_p(haddr);
34
+ case MO_LEQ:
35
+ return ldq_le_p(haddr);
36
+ default:
37
+ qemu_build_not_reached();
38
+ }
39
+}
105
+}
40
+
106
+
41
static inline uint64_t QEMU_ALWAYS_INLINE
107
static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
42
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
108
TCGMemOpIdx oi, uintptr_t retaddr)
43
uintptr_t retaddr, MemOp op, bool code_read,
109
{
44
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
110
@@ -XXX,XX +XXX,XX @@ static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
45
111
full_le_lduw_cmmu);
46
do_aligned_access:
47
haddr = (void *)((uintptr_t)addr + entry->addend);
48
- switch (op) {
49
- case MO_UB:
50
- res = ldub_p(haddr);
51
- break;
52
- case MO_BEUW:
53
- res = lduw_be_p(haddr);
54
- break;
55
- case MO_LEUW:
56
- res = lduw_le_p(haddr);
57
- break;
58
- case MO_BEUL:
59
- res = (uint32_t)ldl_be_p(haddr);
60
- break;
61
- case MO_LEUL:
62
- res = (uint32_t)ldl_le_p(haddr);
63
- break;
64
- case MO_BEQ:
65
- res = ldq_be_p(haddr);
66
- break;
67
- case MO_LEQ:
68
- res = ldq_le_p(haddr);
69
- break;
70
- default:
71
- qemu_build_not_reached();
72
- }
73
-
74
- return res;
75
+ return load_memop(haddr, op);
76
}
112
}
77
113
78
/*
114
-uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
79
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
115
+uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
80
* Store Helpers
116
TCGMemOpIdx oi, uintptr_t retaddr)
81
*/
117
{
82
118
return full_le_lduw_cmmu(env, addr, oi, retaddr);
83
+static inline void QEMU_ALWAYS_INLINE
119
}
84
+store_memop(void *haddr, uint64_t val, MemOp op)
120
121
+int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
122
+ TCGMemOpIdx oi, uintptr_t retaddr)
85
+{
123
+{
86
+ switch (op) {
124
+ return (int16_t) full_le_lduw_cmmu(env, addr, oi, retaddr);
87
+ case MO_UB:
88
+ stb_p(haddr, val);
89
+ break;
90
+ case MO_BEUW:
91
+ stw_be_p(haddr, val);
92
+ break;
93
+ case MO_LEUW:
94
+ stw_le_p(haddr, val);
95
+ break;
96
+ case MO_BEUL:
97
+ stl_be_p(haddr, val);
98
+ break;
99
+ case MO_LEUL:
100
+ stl_le_p(haddr, val);
101
+ break;
102
+ case MO_BEQ:
103
+ stq_be_p(haddr, val);
104
+ break;
105
+ case MO_LEQ:
106
+ stq_le_p(haddr, val);
107
+ break;
108
+ default:
109
+ qemu_build_not_reached();
110
+ }
111
+}
125
+}
112
+
126
+
113
static inline void QEMU_ALWAYS_INLINE
127
static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
114
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
128
TCGMemOpIdx oi, uintptr_t retaddr)
115
TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
129
{
116
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
130
@@ -XXX,XX +XXX,XX @@ static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
117
131
full_be_lduw_cmmu);
118
do_aligned_access:
119
haddr = (void *)((uintptr_t)addr + entry->addend);
120
- switch (op) {
121
- case MO_UB:
122
- stb_p(haddr, val);
123
- break;
124
- case MO_BEUW:
125
- stw_be_p(haddr, val);
126
- break;
127
- case MO_LEUW:
128
- stw_le_p(haddr, val);
129
- break;
130
- case MO_BEUL:
131
- stl_be_p(haddr, val);
132
- break;
133
- case MO_LEUL:
134
- stl_le_p(haddr, val);
135
- break;
136
- case MO_BEQ:
137
- stq_be_p(haddr, val);
138
- break;
139
- case MO_LEQ:
140
- stq_le_p(haddr, val);
141
- break;
142
- default:
143
- qemu_build_not_reached();
144
- }
145
+ store_memop(haddr, val, op);
146
}
132
}
147
133
148
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
134
-uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
135
+uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
136
TCGMemOpIdx oi, uintptr_t retaddr)
137
{
138
return full_be_lduw_cmmu(env, addr, oi, retaddr);
139
}
140
141
+int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
142
+ TCGMemOpIdx oi, uintptr_t retaddr)
143
+{
144
+ return (int16_t) full_be_lduw_cmmu(env, addr, oi, retaddr);
145
+}
146
+
147
static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
148
TCGMemOpIdx oi, uintptr_t retaddr)
149
{
150
diff --git a/target/cris/translate_v10.inc.c b/target/cris/translate_v10.inc.c
151
index XXXXXXX..XXXXXXX 100644
152
--- a/target/cris/translate_v10.inc.c
153
+++ b/target/cris/translate_v10.inc.c
154
@@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
155
case CRISV10_IND_BCC_M:
156
157
cris_cc_mask(dc, 0);
158
- imm = cpu_ldsw_code(env, dc->pc + 2);
159
- simm = (int16_t)imm;
160
+ simm = cpu_ldsw_code(env, dc->pc + 2);
161
simm += 4;
162
163
LOG_DIS("bcc_m: b%s %x\n", cc_name(dc->cond), dc->pc + simm);
149
-- 
2.17.1

cputlb: Pass retaddr to tb_invalidate_phys_page_fast

Rather than rely on cpu->mem_io_pc, pass retaddr down directly.

Within tb_invalidate_phys_page_range__locked, the is_cpu_write_access
parameter is non-zero exactly when retaddr would be non-zero, so that
is a simple replacement.

Recognize that current_tb_not_found is true only when mem_io_pc
(and now retaddr) are also non-zero, so remove a redundant test.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/translate-all.h |  3 ++-
 accel/tcg/cputlb.c        |  6 +-----
 accel/tcg/translate-all.c | 39 +++++++++++++++++++--------------------
 3 files changed, 22 insertions(+), 26 deletions(-)


cpu: use ROUND_UP() to define xxx_PAGE_ALIGN

From: Wei Yang <richardw.yang@linux.intel.com>

Use ROUND_UP() for the definition, which is a little easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Message-Id: <20191013021145.16011-2-richardw.yang@linux.intel.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
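
A small worked example of the ROUND_UP() change (illustrative only; the macro
definitions actually touched are in the cpu-all.h hunk below): for a
power-of-two page size the old and new spellings compute the same value.

    /* old: (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK) */
    /* new: ROUND_UP((addr), TARGET_PAGE_SIZE)                   */

    /* Hypothetical check, assuming 4 KiB pages (TARGET_PAGE_BITS == 12): */
    assert(TARGET_PAGE_ALIGN(0x12345) == 0x13000);
    assert(TARGET_PAGE_ALIGN(0x13000) == 0x13000);
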
diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
16
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
20
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
21
--- a/accel/tcg/translate-all.h
18
--- a/include/exec/cpu-all.h
22
+++ b/accel/tcg/translate-all.h
19
+++ b/include/exec/cpu-all.h
23
@@ -XXX,XX +XXX,XX @@ struct page_collection *page_collection_lock(tb_page_addr_t start,
20
@@ -XXX,XX +XXX,XX @@ extern int target_page_bits;
24
tb_page_addr_t end);
21
25
void page_collection_unlock(struct page_collection *set);
22
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
26
void tb_invalidate_phys_page_fast(struct page_collection *pages,
23
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
27
- tb_page_addr_t start, int len);
24
-#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
28
+ tb_page_addr_t start, int len,
25
+#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
29
+ uintptr_t retaddr);
26
30
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
27
/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
31
void tb_check_watchpoint(CPUState *cpu);
28
* when intptr_t is 32-bit and we are aligning a long long.
32
29
@@ -XXX,XX +XXX,XX @@ extern int target_page_bits;
33
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
30
extern uintptr_t qemu_host_page_size;
34
index XXXXXXX..XXXXXXX 100644
31
extern intptr_t qemu_host_page_mask;
35
--- a/accel/tcg/cputlb.c
32
36
+++ b/accel/tcg/cputlb.c
33
-#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
37
@@ -XXX,XX +XXX,XX @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
34
-#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
38
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
35
- qemu_real_host_page_mask)
39
struct page_collection *pages
36
+#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
40
= page_collection_lock(ram_addr, ram_addr + size);
37
+#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)
41
-
38
42
- /* We require mem_io_pc in tb_invalidate_phys_page_range. */
39
/* same as PROT_xxx */
43
- cpu->mem_io_pc = retaddr;
40
#define PAGE_READ 0x0001
44
-
45
- tb_invalidate_phys_page_fast(pages, ram_addr, size);
46
+ tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
47
page_collection_unlock(pages);
48
}
49
50
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
51
index XXXXXXX..XXXXXXX 100644
52
--- a/accel/tcg/translate-all.c
53
+++ b/accel/tcg/translate-all.c
54
@@ -XXX,XX +XXX,XX @@ static void
55
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
56
PageDesc *p, tb_page_addr_t start,
57
tb_page_addr_t end,
58
- int is_cpu_write_access)
59
+ uintptr_t retaddr)
60
{
61
TranslationBlock *tb;
62
tb_page_addr_t tb_start, tb_end;
63
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
64
#ifdef TARGET_HAS_PRECISE_SMC
65
CPUState *cpu = current_cpu;
66
CPUArchState *env = NULL;
67
- int current_tb_not_found = is_cpu_write_access;
68
+ bool current_tb_not_found = retaddr != 0;
69
+ bool current_tb_modified = false;
70
TranslationBlock *current_tb = NULL;
71
- int current_tb_modified = 0;
72
target_ulong current_pc = 0;
73
target_ulong current_cs_base = 0;
74
uint32_t current_flags = 0;
75
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
76
if (!(tb_end <= start || tb_start >= end)) {
77
#ifdef TARGET_HAS_PRECISE_SMC
78
if (current_tb_not_found) {
79
- current_tb_not_found = 0;
80
- current_tb = NULL;
81
- if (cpu->mem_io_pc) {
82
- /* now we have a real cpu fault */
83
- current_tb = tcg_tb_lookup(cpu->mem_io_pc);
84
- }
85
+ current_tb_not_found = false;
86
+ /* now we have a real cpu fault */
87
+ current_tb = tcg_tb_lookup(retaddr);
88
}
89
if (current_tb == tb &&
90
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
91
- /* If we are modifying the current TB, we must stop
92
- its execution. We could be more precise by checking
93
- that the modification is after the current PC, but it
94
- would require a specialized function to partially
95
- restore the CPU state */
96
-
97
- current_tb_modified = 1;
98
- cpu_restore_state_from_tb(cpu, current_tb,
99
- cpu->mem_io_pc, true);
100
+ /*
101
+ * If we are modifying the current TB, we must stop
102
+ * its execution. We could be more precise by checking
103
+ * that the modification is after the current PC, but it
104
+ * would require a specialized function to partially
105
+ * restore the CPU state.
106
+ */
107
+ current_tb_modified = true;
108
+ cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
109
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
110
&current_flags);
111
}
112
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(target_ulong start, target_ulong end)
113
* Call with all @pages in the range [@start, @start + len[ locked.
114
*/
115
void tb_invalidate_phys_page_fast(struct page_collection *pages,
116
- tb_page_addr_t start, int len)
117
+ tb_page_addr_t start, int len,
118
+ uintptr_t retaddr)
119
{
120
PageDesc *p;
121
122
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
123
}
124
} else {
125
do_invalidate:
126
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
127
+ tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
128
+ retaddr);
129
}
130
}
131
#else
132
-- 
2.17.1

cputlb: Move NOTDIRTY handling from I/O path to TLB path

Pages that we want to track for NOTDIRTY are RAM.  We do not
really need to go through the I/O path to handle them.

Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-common.h |  2 --
 accel/tcg/cputlb.c        | 26 +++++++++++++++++---
 exec.c                    | 50 ---------------------------------------
 memory.c                  | 16 -------------
 4 files changed, 23 insertions(+), 71 deletions(-)


exec: Split out variable page size support to exec-vary.c

The next patch will play a trick with "const" that will
confuse the compiler about the uses of target_page_bits
within exec.c.  Moving everything to a new file prevents
this confusion.

No functional change so far.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 Makefile.target       |   2 +-
 include/qemu-common.h |   6 +++++
 exec-vary.c           |  57 +++++++++++++++++++++++++++++++++++++++
 exec.c                |  34 --------------------------
 4 files changed, 64 insertions(+), 35 deletions(-)
 create mode 100644 exec-vary.c
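
A sketch of how the two entry points from the second message fit together
(hypothetical caller; the functions themselves are in the exec-vary.c hunk
below, and the value 12 is only an example):

    /* During cpu realize, a target that can live with 4 KiB pages asks: */
    if (!set_preferred_target_page_bits(12)) {
        /* too late, or another CPU already committed to a smaller size */
    }

    /* Once, at the end of startup: */
    finalize_target_page_bits();
    /* From here on, TARGET_PAGE_BITS is fixed and may be read freely. */
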
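And for the NOTDIRTY move in the first message above, the interesting part of
store_helper ends up shaped roughly like this (condensed from the cputlb.c
hunk below; the byte-swapped store variant is omitted here):

    /* Only true I/O still takes the io_writex path. */
    if (tlb_addr & TLB_MMIO) {
        io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                  op ^ (need_swap * MO_BSWAP));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);

    /* Clean RAM pages: handled inline on the TLB path, not via io_mem_notdirty. */
    if (tlb_addr & TLB_NOTDIRTY) {
        NotDirtyInfo ndi;

        /* We require mem_io_pc in tb_invalidate_phys_page_range. */
        env_cpu(env)->mem_io_pc = retaddr;

        memory_notdirty_write_prepare(&ndi, env_cpu(env), addr,
                                      addr + iotlbentry->addr, size);
        store_memop(haddr, val, op);          /* the actual host write */
        memory_notdirty_write_complete(&ndi);
        return;
    }
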
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
20
diff --git a/Makefile.target b/Makefile.target
16
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
17
--- a/include/exec/cpu-common.h
22
--- a/Makefile.target
18
+++ b/include/exec/cpu-common.h
23
+++ b/Makefile.target
19
@@ -XXX,XX +XXX,XX @@ void qemu_flush_coalesced_mmio_buffer(void);
24
@@ -XXX,XX +XXX,XX @@ obj-y += trace/
20
25
21
void cpu_flush_icache_range(hwaddr start, hwaddr len);
26
#########################################################
22
27
# cpu emulator library
23
-extern struct MemoryRegion io_mem_notdirty;
28
-obj-y += exec.o
24
-
29
+obj-y += exec.o exec-vary.o
25
typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
30
obj-y += accel/
26
31
obj-$(CONFIG_TCG) += tcg/tcg.o tcg/tcg-op.o tcg/tcg-op-vec.o tcg/tcg-op-gvec.o
27
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
32
obj-$(CONFIG_TCG) += tcg/tcg-common.o tcg/optimize.o
28
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
33
diff --git a/include/qemu-common.h b/include/qemu-common.h
29
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
30
--- a/accel/tcg/cputlb.c
35
--- a/include/qemu-common.h
31
+++ b/accel/tcg/cputlb.c
36
+++ b/include/qemu-common.h
32
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
37
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu);
33
mr = section->mr;
38
*/
34
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
39
bool set_preferred_target_page_bits(int bits);
35
cpu->mem_io_pc = retaddr;
40
36
- if (mr != &io_mem_notdirty && !cpu->can_do_io) {
41
+/**
37
+ if (!cpu->can_do_io) {
42
+ * finalize_target_page_bits:
38
cpu_io_recompile(cpu, retaddr);
43
+ * Commit the final value set by set_preferred_target_page_bits.
39
}
44
+ */
40
45
+void finalize_target_page_bits(void);
41
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
42
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
43
mr = section->mr;
44
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
45
- if (mr != &io_mem_notdirty && !cpu->can_do_io) {
46
+ if (!cpu->can_do_io) {
47
cpu_io_recompile(cpu, retaddr);
48
}
49
cpu->mem_io_vaddr = addr;
50
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
51
need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
52
53
/* Handle I/O access. */
54
- if (likely(tlb_addr & (TLB_MMIO | TLB_NOTDIRTY))) {
55
+ if (tlb_addr & TLB_MMIO) {
56
io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
57
op ^ (need_swap * MO_BSWAP));
58
return;
59
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
60
61
haddr = (void *)((uintptr_t)addr + entry->addend);
62
63
+ /* Handle clean RAM pages. */
64
+ if (tlb_addr & TLB_NOTDIRTY) {
65
+ NotDirtyInfo ndi;
66
+
46
+
67
+ /* We require mem_io_pc in tb_invalidate_phys_page_range. */
47
/**
68
+ env_cpu(env)->mem_io_pc = retaddr;
48
* Sends a (part of) iovec down a socket, yielding when the socket is full, or
49
* Receives data into a (part of) iovec from a socket,
50
diff --git a/exec-vary.c b/exec-vary.c
51
new file mode 100644
52
index XXXXXXX..XXXXXXX
53
--- /dev/null
54
+++ b/exec-vary.c
55
@@ -XXX,XX +XXX,XX @@
56
+/*
57
+ * Variable page size handling
58
+ *
59
+ * Copyright (c) 2003 Fabrice Bellard
60
+ *
61
+ * This library is free software; you can redistribute it and/or
62
+ * modify it under the terms of the GNU Lesser General Public
63
+ * License as published by the Free Software Foundation; either
64
+ * version 2 of the License, or (at your option) any later version.
65
+ *
66
+ * This library is distributed in the hope that it will be useful,
67
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
68
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
69
+ * Lesser General Public License for more details.
70
+ *
71
+ * You should have received a copy of the GNU Lesser General Public
72
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
73
+ */
69
+
74
+
70
+ memory_notdirty_write_prepare(&ndi, env_cpu(env), addr,
75
+#include "qemu/osdep.h"
71
+ addr + iotlbentry->addr, size);
76
+#include "qemu-common.h"
77
+#include "exec/exec-all.h"
72
+
78
+
73
+ if (unlikely(need_swap)) {
79
+#ifdef TARGET_PAGE_BITS_VARY
74
+ store_memop(haddr, val, op ^ MO_BSWAP);
80
+int target_page_bits;
75
+ } else {
81
+bool target_page_bits_decided;
76
+ store_memop(haddr, val, op);
82
+#endif
77
+ }
78
+
83
+
79
+ memory_notdirty_write_complete(&ndi);
84
+bool set_preferred_target_page_bits(int bits)
80
+ return;
85
+{
86
+ /*
87
+ * The target page size is the lowest common denominator for all
88
+ * the CPUs in the system, so we can only make it smaller, never
89
+ * larger. And we can't make it smaller once we've committed to
90
+ * a particular size.
91
+ */
92
+#ifdef TARGET_PAGE_BITS_VARY
93
+ assert(bits >= TARGET_PAGE_BITS_MIN);
94
+ if (target_page_bits == 0 || target_page_bits > bits) {
95
+ if (target_page_bits_decided) {
96
+ return false;
81
+ }
97
+ }
98
+ target_page_bits = bits;
99
+ }
100
+#endif
101
+ return true;
102
+}
82
+
103
+
83
/*
104
+void finalize_target_page_bits(void)
84
* Keep these two store_memop separate to ensure that the compiler
105
+{
85
* is able to fold the entire function to a single instruction.
106
+#ifdef TARGET_PAGE_BITS_VARY
107
+ if (target_page_bits == 0) {
108
+ target_page_bits = TARGET_PAGE_BITS_MIN;
109
+ }
110
+ target_page_bits_decided = true;
111
+#endif
112
+}
86
diff --git a/exec.c b/exec.c
113
diff --git a/exec.c b/exec.c
87
index XXXXXXX..XXXXXXX 100644
114
index XXXXXXX..XXXXXXX 100644
88
--- a/exec.c
115
--- a/exec.c
89
+++ b/exec.c
116
+++ b/exec.c
90
@@ -XXX,XX +XXX,XX @@ static MemoryRegion *system_io;
117
@@ -XXX,XX +XXX,XX @@ AddressSpace address_space_memory;
91
AddressSpace address_space_io;
92
AddressSpace address_space_memory;
93
94
-MemoryRegion io_mem_notdirty;
95
static MemoryRegion io_mem_unassigned;
118
static MemoryRegion io_mem_unassigned;
96
#endif
119
#endif
97
120
98
@@ -XXX,XX +XXX,XX @@ typedef struct subpage_t {
121
-#ifdef TARGET_PAGE_BITS_VARY
99
} subpage_t;
122
-int target_page_bits;
100
123
-bool target_page_bits_decided;
101
#define PHYS_SECTION_UNASSIGNED 0
124
-#endif
102
-#define PHYS_SECTION_NOTDIRTY 1
125
-
103
126
CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
104
static void io_mem_init(void);
127
105
static void memory_map_init(void);
128
/* current CPU in the current thread. It is only valid inside
106
@@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
129
@@ -XXX,XX +XXX,XX @@ int use_icount;
107
if (memory_region_is_ram(section->mr)) {
130
uintptr_t qemu_host_page_size;
108
/* Normal RAM. */
131
intptr_t qemu_host_page_mask;
109
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
132
110
- if (!section->readonly) {
133
-bool set_preferred_target_page_bits(int bits)
111
- iotlb |= PHYS_SECTION_NOTDIRTY;
134
-{
135
- /* The target page size is the lowest common denominator for all
136
- * the CPUs in the system, so we can only make it smaller, never
137
- * larger. And we can't make it smaller once we've committed to
138
- * a particular size.
139
- */
140
-#ifdef TARGET_PAGE_BITS_VARY
141
- assert(bits >= TARGET_PAGE_BITS_MIN);
142
- if (target_page_bits == 0 || target_page_bits > bits) {
143
- if (target_page_bits_decided) {
144
- return false;
112
- }
145
- }
113
} else {
146
- target_page_bits = bits;
114
AddressSpaceDispatch *d;
147
- }
115
148
-#endif
116
@@ -XXX,XX +XXX,XX @@ void memory_notdirty_write_complete(NotDirtyInfo *ndi)
149
- return true;
117
}
118
}
119
120
-/* Called within RCU critical section. */
121
-static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
122
- uint64_t val, unsigned size)
123
-{
124
- NotDirtyInfo ndi;
125
-
126
- memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
127
- ram_addr, size);
128
-
129
- stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val);
130
- memory_notdirty_write_complete(&ndi);
131
-}
150
-}
132
-
151
-
133
-static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
152
#if !defined(CONFIG_USER_ONLY)
134
- unsigned size, bool is_write,
153
135
- MemTxAttrs attrs)
154
-static void finalize_target_page_bits(void)
136
-{
155
-{
137
- return is_write;
156
-#ifdef TARGET_PAGE_BITS_VARY
157
- if (target_page_bits == 0) {
158
- target_page_bits = TARGET_PAGE_BITS_MIN;
159
- }
160
- target_page_bits_decided = true;
161
-#endif
138
-}
162
-}
139
-
163
-
140
-static const MemoryRegionOps notdirty_mem_ops = {
164
typedef struct PhysPageEntry PhysPageEntry;
141
- .write = notdirty_mem_write,
165
142
- .valid.accepts = notdirty_mem_accepts,
166
struct PhysPageEntry {
143
- .endianness = DEVICE_NATIVE_ENDIAN,
144
- .valid = {
145
- .min_access_size = 1,
146
- .max_access_size = 8,
147
- .unaligned = false,
148
- },
149
- .impl = {
150
- .min_access_size = 1,
151
- .max_access_size = 8,
152
- .unaligned = false,
153
- },
154
-};
155
-
156
/* Generate a debug exception if a watchpoint has been hit. */
157
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
158
MemTxAttrs attrs, int flags, uintptr_t ra)
159
@@ -XXX,XX +XXX,XX @@ static void io_mem_init(void)
160
{
161
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
162
NULL, UINT64_MAX);
163
-
164
- /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
165
- * which can be called without the iothread mutex.
166
- */
167
- memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
168
- NULL, UINT64_MAX);
169
- memory_region_clear_global_locking(&io_mem_notdirty);
170
}
171
172
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
173
@@ -XXX,XX +XXX,XX @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
174
175
n = dummy_section(&d->map, fv, &io_mem_unassigned);
176
assert(n == PHYS_SECTION_UNASSIGNED);
177
- n = dummy_section(&d->map, fv, &io_mem_notdirty);
178
- assert(n == PHYS_SECTION_NOTDIRTY);
179
180
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
181
182
diff --git a/memory.c b/memory.c
183
index XXXXXXX..XXXXXXX 100644
184
--- a/memory.c
185
+++ b/memory.c
186
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
187
tmp = mr->ops->read(mr->opaque, addr, size);
188
if (mr->subpage) {
189
trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
190
- } else if (mr == &io_mem_notdirty) {
191
- /* Accesses to code which has previously been translated into a TB show
192
- * up in the MMIO path, as accesses to the io_mem_notdirty
193
- * MemoryRegion. */
194
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
195
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
196
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
197
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
198
r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
199
if (mr->subpage) {
200
trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
201
- } else if (mr == &io_mem_notdirty) {
202
- /* Accesses to code which has previously been translated into a TB show
203
- * up in the MMIO path, as accesses to the io_mem_notdirty
204
- * MemoryRegion. */
205
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
206
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
207
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
208
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
209
210
if (mr->subpage) {
211
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
212
- } else if (mr == &io_mem_notdirty) {
213
- /* Accesses to code which has previously been translated into a TB show
214
- * up in the MMIO path, as accesses to the io_mem_notdirty
215
- * MemoryRegion. */
216
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
217
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
218
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
219
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
220
221
if (mr->subpage) {
222
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
223
- } else if (mr == &io_mem_notdirty) {
224
- /* Accesses to code which has previously been translated into a TB show
225
- * up in the MMIO path, as accesses to the io_mem_notdirty
226
- * MemoryRegion. */
227
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
228
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
229
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
230
--
167
--
231
2.17.1
168
2.17.1
232
169
233
170
diff view generated by jsdifflib
1
cputlb: Partially inline memory_region_section_get_iotlb

There is only one caller, tlb_set_page_with_attrs.  We cannot
inline the entire function because the AddressSpaceDispatch
structure is private to exec.c, and cannot easily be moved to
include/exec/memory-internal.h.

Compute is_ram and is_romd once within tlb_set_page_with_attrs.
Fold the number of tests against these predicates.  Compute
cpu_physical_memory_is_clean outside of the tlb lock region.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h |  6 +---
 accel/tcg/cputlb.c      | 68 ++++++++++++++++++++++++++---------------
 exec.c                  | 22 ++-----------
 3 files changed, 47 insertions(+), 49 deletions(-)


configure: Detect compiler support for __attribute__((alias))

Such support is present almost everywhere, except for Xcode 9.
It is added in Xcode 10, but Travis uses Xcode 9 by default,
so we should support it for a while yet.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 configure | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
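
The compiler feature being probed can be illustrated with the same test
program the configure hunk below adds (shown here only for context):

    int x = 1;
    extern const int y __attribute__((alias("x")));
    /* Reads of y return the value of x, while the "const" lets the
     * compiler assume y never changes after it has been read once. */
    int main(void) { return 0; }

If this compiles, configure sets CONFIG_ATTRIBUTE_ALIAS, which a later patch
in the series uses for the TARGET_PAGE_BITS_VARY case.
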
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
11
diff --git a/configure b/configure
19
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100755
20
--- a/include/exec/exec-all.h
13
--- a/configure
21
+++ b/include/exec/exec-all.h
14
+++ b/configure
22
@@ -XXX,XX +XXX,XX @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
15
@@ -XXX,XX +XXX,XX @@ if compile_prog "" "" ; then
23
hwaddr *xlat, hwaddr *plen,
16
vector16=yes
24
MemTxAttrs attrs, int *prot);
17
fi
25
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
18
26
- MemoryRegionSection *section,
19
+########################################
27
- target_ulong vaddr,
20
+# See if __attribute__((alias)) is supported.
28
- hwaddr paddr, hwaddr xlat,
21
+# This false for Xcode 9, but has been remedied for Xcode 10.
29
- int prot,
22
+# Unfortunately, travis uses Xcode 9 by default.
30
- target_ulong *address);
31
+ MemoryRegionSection *section);
32
#endif
33
34
/* vl.c */
35
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/accel/tcg/cputlb.c
38
+++ b/accel/tcg/cputlb.c
39
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
40
MemoryRegionSection *section;
41
unsigned int index;
42
target_ulong address;
43
- target_ulong code_address;
44
+ target_ulong write_address;
45
uintptr_t addend;
46
CPUTLBEntry *te, tn;
47
hwaddr iotlb, xlat, sz, paddr_page;
48
target_ulong vaddr_page;
49
int asidx = cpu_asidx_from_attrs(cpu, attrs);
50
int wp_flags;
51
+ bool is_ram, is_romd;
52
53
assert_cpu_is_self(cpu);
54
55
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
56
if (attrs.byte_swap) {
57
address |= TLB_BSWAP;
58
}
59
- if (!memory_region_is_ram(section->mr) &&
60
- !memory_region_is_romd(section->mr)) {
61
- /* IO memory case */
62
- address |= TLB_MMIO;
63
- addend = 0;
64
- } else {
65
+
23
+
66
+ is_ram = memory_region_is_ram(section->mr);
24
+attralias=no
67
+ is_romd = memory_region_is_romd(section->mr);
25
+cat > $TMPC << EOF
26
+int x = 1;
27
+extern const int y __attribute__((alias("x")));
28
+int main(void) { return 0; }
29
+EOF
30
+if compile_prog "" "" ; then
31
+ attralias=yes
32
+fi
68
+
33
+
69
+ if (is_ram || is_romd) {
34
########################################
70
+ /* RAM and ROMD both have associated host memory. */
35
# check if getauxval is available.
71
addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
36
72
+ } else {
37
@@ -XXX,XX +XXX,XX @@ if test "$vector16" = "yes" ; then
73
+ /* I/O does not; force the host address to NULL. */
38
echo "CONFIG_VECTOR16=y" >> $config_host_mak
74
+ addend = 0;
39
fi
75
+ }
40
41
+if test "$attralias" = "yes" ; then
42
+ echo "CONFIG_ATTRIBUTE_ALIAS=y" >> $config_host_mak
43
+fi
76
+
44
+
77
+ write_address = address;
45
if test "$getauxval" = "yes" ; then
78
+ if (is_ram) {
46
echo "CONFIG_GETAUXVAL=y" >> $config_host_mak
79
+ iotlb = memory_region_get_ram_addr(section->mr) + xlat;
47
fi
80
+ /*
81
+ * Computing is_clean is expensive; avoid all that unless
82
+ * the page is actually writable.
83
+ */
84
+ if (prot & PAGE_WRITE) {
85
+ if (section->readonly) {
86
+ write_address |= TLB_DISCARD_WRITE;
87
+ } else if (cpu_physical_memory_is_clean(iotlb)) {
88
+ write_address |= TLB_NOTDIRTY;
89
+ }
90
+ }
91
+ } else {
92
+ /* I/O or ROMD */
93
+ iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
94
+ /*
95
+ * Writes to romd devices must go through MMIO to enable write.
96
+ * Reads to romd devices go through the ram_ptr found above,
97
+ * but of course reads to I/O must go through MMIO.
98
+ */
99
+ write_address |= TLB_MMIO;
100
+ if (!is_romd) {
101
+ address = write_address;
102
+ }
103
}
104
105
- code_address = address;
106
- iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
107
- paddr_page, xlat, prot, &address);
108
wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
109
TARGET_PAGE_SIZE);
110
111
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
112
/*
113
* At this point iotlb contains a physical section number in the lower
114
* TARGET_PAGE_BITS, and either
115
- * + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
116
- * + the offset within section->mr of the page base (otherwise)
117
+ * + the ram_addr_t of the page base of the target RAM (RAM)
118
+ * + the offset within section->mr of the page base (I/O, ROMD)
119
* We subtract the vaddr_page (which is page aligned and thus won't
120
* disturb the low bits) to give an offset which can be added to the
121
* (non-page-aligned) vaddr of the eventual memory access to get
122
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
123
}
124
125
if (prot & PAGE_EXEC) {
126
- tn.addr_code = code_address;
127
+ tn.addr_code = address;
128
} else {
129
tn.addr_code = -1;
130
}
131
132
tn.addr_write = -1;
133
if (prot & PAGE_WRITE) {
134
- tn.addr_write = address;
135
- if (memory_region_is_romd(section->mr)) {
136
- /* Use the MMIO path so that the device can switch states. */
137
- tn.addr_write |= TLB_MMIO;
138
- } else if (memory_region_is_ram(section->mr)) {
139
- if (section->readonly) {
140
- tn.addr_write |= TLB_DISCARD_WRITE;
141
- } else if (cpu_physical_memory_is_clean(
142
- memory_region_get_ram_addr(section->mr) + xlat)) {
143
- tn.addr_write |= TLB_NOTDIRTY;
144
- }
145
- }
146
+ tn.addr_write = write_address;
147
if (prot & PAGE_WRITE_INV) {
148
tn.addr_write |= TLB_INVALID_MASK;
149
}
150
diff --git a/exec.c b/exec.c
151
index XXXXXXX..XXXXXXX 100644
152
--- a/exec.c
153
+++ b/exec.c
154
@@ -XXX,XX +XXX,XX @@ bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
155
156
/* Called from RCU critical section */
157
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
158
- MemoryRegionSection *section,
159
- target_ulong vaddr,
160
- hwaddr paddr, hwaddr xlat,
161
- int prot,
162
- target_ulong *address)
163
+ MemoryRegionSection *section)
164
{
165
- hwaddr iotlb;
166
-
167
- if (memory_region_is_ram(section->mr)) {
168
- /* Normal RAM. */
169
- iotlb = memory_region_get_ram_addr(section->mr) + xlat;
170
- } else {
171
- AddressSpaceDispatch *d;
172
-
173
- d = flatview_to_dispatch(section->fv);
174
- iotlb = section - d->map.sections;
175
- iotlb += xlat;
176
- }
177
-
178
- return iotlb;
179
+ AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
180
+ return section - d->map.sections;
181
}
182
#endif /* defined(CONFIG_USER_ONLY) */
183
184
--
48
--
185
2.17.1
49
2.17.1
186
50
187
51
diff view generated by jsdifflib
1
Handle bswap on ram directly in load/store_helper. This fixes a
1
Using a variable that is declared "const" for this tells the
2
bug with the previous implementation in that one cannot use the
2
compiler that it may read the value once and assume that it
3
I/O path for RAM.
3
does not change across function calls.
4
4
5
Fixes: a26fc6f5152b47f1
5
For target_page_size, this means we have only one assert per
6
function, and one read of the variable.
7
8
This reduces the size of qemu-system-aarch64 by 8k.
9
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: David Hildenbrand <david@redhat.com>
11
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
13
---
10
include/exec/cpu-all.h | 4 ++-
14
include/exec/cpu-all.h | 14 ++++++---
11
accel/tcg/cputlb.c | 72 +++++++++++++++++++++++++-----------------
15
exec-vary.c | 66 +++++++++++++++++++++++++++++++++++++-----
12
2 files changed, 46 insertions(+), 30 deletions(-)
16
2 files changed, 68 insertions(+), 12 deletions(-)
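
To illustrate the read-once property described above, here is a minimal sketch of the writable-definition-plus-const-alias pattern. The names are illustrative, not the actual QEMU symbols, and it assumes a GCC-compatible compiler with __attribute__((alias)) support:

    #include <assert.h>
    #include <stdbool.h>

    typedef struct {
        bool decided;
        int bits;
    } PageBits;

    /* Writable storage, touched only while the value is being decided. */
    static PageBits init_page;

    /* Read-only view of the same storage; because it is const, the
     * compiler may read it once and cache the value across calls. */
    extern const PageBits page __attribute__((alias("init_page")));

    void finalize_page_bits(int bits)
    {
        init_page.bits = bits;        /* write through the non-const name */
        init_page.decided = true;
    }

    int page_size(void)
    {
        assert(page.decided);         /* read through the const alias */
        return 1 << page.bits;
    }

Every reader goes through the const name; only the initialization code touches the writable one, which is what makes a single assert per function sufficient.
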
13
17
14
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
18
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu-all.h
20
--- a/include/exec/cpu-all.h
17
+++ b/include/exec/cpu-all.h
21
+++ b/include/exec/cpu-all.h
18
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
22
@@ -XXX,XX +XXX,XX @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
19
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
23
/* page related stuff */
20
/* Set if TLB entry contains a watchpoint. */
24
21
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
25
#ifdef TARGET_PAGE_BITS_VARY
22
+/* Set if TLB entry requires byte swap. */
26
-extern bool target_page_bits_decided;
23
+#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
27
-extern int target_page_bits;
24
28
-#define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \
25
/* Use this mask to check interception with an alignment mask
29
- target_page_bits; })
26
* in a TCG backend.
30
+typedef struct {
27
*/
31
+ bool decided;
28
#define TLB_FLAGS_MASK \
32
+ int bits;
29
- (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT)
33
+} TargetPageBits;
30
+ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT | TLB_BSWAP)
34
+#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
31
35
+extern const TargetPageBits target_page;
32
/**
36
+#else
33
* tlb_hit_page: return true if page aligned @addr is a hit against the
37
+extern TargetPageBits target_page;
34
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
38
+#endif
39
+#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
40
#else
41
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
42
#endif
43
diff --git a/exec-vary.c b/exec-vary.c
35
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
36
--- a/accel/tcg/cputlb.c
45
--- a/exec-vary.c
37
+++ b/accel/tcg/cputlb.c
46
+++ b/exec-vary.c
38
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
47
@@ -XXX,XX +XXX,XX @@
39
address |= TLB_INVALID_MASK;
48
49
#include "qemu/osdep.h"
50
#include "qemu-common.h"
51
+
52
+#define IN_EXEC_VARY 1
53
+
54
#include "exec/exec-all.h"
55
56
#ifdef TARGET_PAGE_BITS_VARY
57
-int target_page_bits;
58
-bool target_page_bits_decided;
59
+# ifdef CONFIG_ATTRIBUTE_ALIAS
60
+/*
61
+ * We want to declare the "target_page" variable as const, which tells
62
+ * the compiler that it can cache any value that it reads across calls.
63
+ * This avoids multiple assertions and multiple reads within any one user.
64
+ *
65
+ * This works because we finish initializing the data before we ever read
66
+ * from the "target_page" symbol.
67
+ *
68
+ * This also requires that we have a non-constant symbol by which we can
69
+ * perform the actual initialization, and which forces the data to be
70
+ * allocated within writable memory. Thus "init_target_page", and we use
71
+ * that symbol exclusively in the two functions that initialize this value.
72
+ *
73
+ * The "target_page" symbol is created as an alias of "init_target_page".
74
+ */
75
+static TargetPageBits init_target_page;
76
+
77
+/*
78
+ * Note that this is *not* a redundant decl, this is the definition of
79
+ * the "target_page" symbol. The syntax for this definition requires
80
+ * the use of the extern keyword. This seems to be a GCC bug in
81
+ * either the syntax for the alias attribute or in -Wredundant-decls.
82
+ *
83
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91765
84
+ */
85
+# pragma GCC diagnostic push
86
+# pragma GCC diagnostic ignored "-Wredundant-decls"
87
+
88
+extern const TargetPageBits target_page
89
+ __attribute__((alias("init_target_page")));
90
+
91
+# pragma GCC diagnostic pop
92
+# else
93
+/*
94
+ * When aliases are not supported then we force two different declarations,
95
+ * by way of suppressing the header declaration with IN_EXEC_VARY.
96
+ * We assume that on such an old compiler, LTO cannot be used, and so the
97
+ * compiler cannot not detect the mismatched declarations, and all is well.
98
+ */
99
+TargetPageBits target_page;
100
+# define init_target_page target_page
101
+# endif
102
#endif
103
104
bool set_preferred_target_page_bits(int bits)
105
@@ -XXX,XX +XXX,XX @@ bool set_preferred_target_page_bits(int bits)
106
*/
107
#ifdef TARGET_PAGE_BITS_VARY
108
assert(bits >= TARGET_PAGE_BITS_MIN);
109
- if (target_page_bits == 0 || target_page_bits > bits) {
110
- if (target_page_bits_decided) {
111
+ if (init_target_page.bits == 0 || init_target_page.bits > bits) {
112
+ if (init_target_page.decided) {
113
return false;
114
}
115
- target_page_bits = bits;
116
+ init_target_page.bits = bits;
40
}
117
}
41
if (attrs.byte_swap) {
118
#endif
42
- /* Force the access through the I/O slow path. */
119
return true;
43
- address |= TLB_MMIO;
120
@@ -XXX,XX +XXX,XX @@ bool set_preferred_target_page_bits(int bits)
44
+ address |= TLB_BSWAP;
121
void finalize_target_page_bits(void)
122
{
123
#ifdef TARGET_PAGE_BITS_VARY
124
- if (target_page_bits == 0) {
125
- target_page_bits = TARGET_PAGE_BITS_MIN;
126
+ if (init_target_page.bits == 0) {
127
+ init_target_page.bits = TARGET_PAGE_BITS_MIN;
45
}
128
}
46
if (!memory_region_is_ram(section->mr) &&
129
- target_page_bits_decided = true;
47
!memory_region_is_romd(section->mr)) {
130
+ init_target_page.decided = true;
48
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
49
bool locked = false;
50
MemTxResult r;
51
52
- if (iotlbentry->attrs.byte_swap) {
53
- op ^= MO_BSWAP;
54
- }
55
-
56
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
57
mr = section->mr;
58
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
59
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
60
bool locked = false;
61
MemTxResult r;
62
63
- if (iotlbentry->attrs.byte_swap) {
64
- op ^= MO_BSWAP;
65
- }
66
-
67
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
68
mr = section->mr;
69
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
70
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
71
wp_access, retaddr);
72
}
73
74
- if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) {
75
- /* I/O access */
76
+ /* Reject I/O access, or other required slow-path. */
77
+ if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP)) {
78
return NULL;
79
}
80
81
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
82
/* Handle anything that isn't just a straight memory access. */
83
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
84
CPUIOTLBEntry *iotlbentry;
85
+ bool need_swap;
86
87
/* For anything that is unaligned, recurse through full_load. */
88
if ((addr & (size - 1)) != 0) {
89
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
90
/* On watchpoint hit, this will longjmp out. */
91
cpu_check_watchpoint(env_cpu(env), addr, size,
92
iotlbentry->attrs, BP_MEM_READ, retaddr);
93
-
94
- /* The backing page may or may not require I/O. */
95
- tlb_addr &= ~TLB_WATCHPOINT;
96
- if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
97
- goto do_aligned_access;
98
- }
99
}
100
101
+ need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
102
+
131
+
103
/* Handle I/O access. */
132
+ /*
104
- return io_readx(env, iotlbentry, mmu_idx, addr,
133
+ * For the benefit of an -flto build, prevent the compiler from
105
- retaddr, access_type, op);
134
+ * hoisting a read from target_page before we finish initializing.
106
+ if (likely(tlb_addr & TLB_MMIO)) {
135
+ */
107
+ return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
136
+ barrier();
108
+ access_type, op ^ (need_swap * MO_BSWAP));
137
#endif
109
+ }
110
+
111
+ haddr = (void *)((uintptr_t)addr + entry->addend);
112
+
113
+ /*
114
+ * Keep these two load_memop separate to ensure that the compiler
115
+ * is able to fold the entire function to a single instruction.
116
+ * There is a build-time assert inside to remind you of this. ;-)
117
+ */
118
+ if (unlikely(need_swap)) {
119
+ return load_memop(haddr, op ^ MO_BSWAP);
120
+ }
121
+ return load_memop(haddr, op);
122
}
123
124
/* Handle slow unaligned access (it spans two pages or IO). */
125
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
126
return res & MAKE_64BIT_MASK(0, size * 8);
127
}
128
129
- do_aligned_access:
130
haddr = (void *)((uintptr_t)addr + entry->addend);
131
return load_memop(haddr, op);
132
}
133
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
134
/* Handle anything that isn't just a straight memory access. */
135
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
136
CPUIOTLBEntry *iotlbentry;
137
+ bool need_swap;
138
139
/* For anything that is unaligned, recurse through byte stores. */
140
if ((addr & (size - 1)) != 0) {
141
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
142
/* On watchpoint hit, this will longjmp out. */
143
cpu_check_watchpoint(env_cpu(env), addr, size,
144
iotlbentry->attrs, BP_MEM_WRITE, retaddr);
145
-
146
- /* The backing page may or may not require I/O. */
147
- tlb_addr &= ~TLB_WATCHPOINT;
148
- if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
149
- goto do_aligned_access;
150
- }
151
}
152
153
+ need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
154
+
155
/* Handle I/O access. */
156
- io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
157
+ if (likely(tlb_addr & (TLB_MMIO | TLB_NOTDIRTY))) {
158
+ io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
159
+ op ^ (need_swap * MO_BSWAP));
160
+ return;
161
+ }
162
+
163
+ haddr = (void *)((uintptr_t)addr + entry->addend);
164
+
165
+ /*
166
+ * Keep these two store_memop separate to ensure that the compiler
167
+ * is able to fold the entire function to a single instruction.
168
+ * There is a build-time assert inside to remind you of this. ;-)
169
+ */
170
+ if (unlikely(need_swap)) {
171
+ store_memop(haddr, val, op ^ MO_BSWAP);
172
+ } else {
173
+ store_memop(haddr, val, op);
174
+ }
175
return;
176
}
177
178
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
179
return;
180
}
181
182
- do_aligned_access:
183
haddr = (void *)((uintptr_t)addr + entry->addend);
184
store_memop(haddr, val, op);
185
}
138
}
186
--
139
--
187
2.17.1
140
2.17.1
188
141
189
142
1
The memory_region_tb_read tracepoint is unreachable, since notdirty
1
This reduces the size of a release build by about 10k.
2
is supposed to apply only to writes. The memory_region_tb_write
2
Noticeably, within the tlb miss helpers.
3
tracepoint is mis-named, because notdirty is not only used for TB
4
invalidation. It is also used for e.g. VGA RAM updates and migration.
5
6
Replace memory_region_tb_write with memory_notdirty_write_access,
7
and place it in memory_notdirty_write_prepare where it can catch
8
all of the instances. Add memory_notdirty_set_dirty to log when
9
we no longer intercept writes to a page.
10
3
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
13
Reviewed-by: David Hildenbrand <david@redhat.com>
6
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
8
---
16
exec.c | 3 +++
9
include/exec/cpu-all.h | 4 ++++
17
memory.c | 4 ----
10
1 file changed, 4 insertions(+)
18
trace-events | 4 ++--
19
3 files changed, 5 insertions(+), 6 deletions(-)
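
As a usage note for the two events added above: with a trace backend compiled in, they can be enabled at runtime with a pattern, for example (illustrative invocation):

    qemu-system-aarch64 -trace 'memory_notdirty_*' ...

which logs one line per intercepted write and one line when a page stops being intercepted.
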
20
11
21
diff --git a/exec.c b/exec.c
12
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
22
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
23
--- a/exec.c
14
--- a/include/exec/cpu-all.h
24
+++ b/exec.c
15
+++ b/include/exec/cpu-all.h
25
@@ -XXX,XX +XXX,XX @@ void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
16
@@ -XXX,XX +XXX,XX @@ extern const TargetPageBits target_page;
26
ndi->size = size;
17
#else
27
ndi->pages = NULL;
18
extern TargetPageBits target_page;
28
19
#endif
29
+ trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
20
+#ifdef CONFIG_DEBUG_TCG
30
+
21
#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
31
assert(tcg_enabled());
22
#else
32
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
23
+#define TARGET_PAGE_BITS target_page.bits
33
ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
24
+#endif
34
@@ -XXX,XX +XXX,XX @@ void memory_notdirty_write_complete(NotDirtyInfo *ndi)
25
+#else
35
/* we remove the notdirty callback only if the code has been
26
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
36
flushed */
27
#endif
37
if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
28
38
+ trace_memory_notdirty_set_dirty(ndi->mem_vaddr);
39
tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
40
}
41
}
42
diff --git a/memory.c b/memory.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/memory.c
45
+++ b/memory.c
46
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
47
/* Accesses to code which has previously been translated into a TB show
48
* up in the MMIO path, as accesses to the io_mem_notdirty
49
* MemoryRegion. */
50
- trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
51
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
52
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
53
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
54
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
55
/* Accesses to code which has previously been translated into a TB show
56
* up in the MMIO path, as accesses to the io_mem_notdirty
57
* MemoryRegion. */
58
- trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
59
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
60
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
61
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
62
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
63
/* Accesses to code which has previously been translated into a TB show
64
* up in the MMIO path, as accesses to the io_mem_notdirty
65
* MemoryRegion. */
66
- trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
67
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
68
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
69
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
70
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
71
/* Accesses to code which has previously been translated into a TB show
72
* up in the MMIO path, as accesses to the io_mem_notdirty
73
* MemoryRegion. */
74
- trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
75
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
76
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
77
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
78
diff --git a/trace-events b/trace-events
79
index XXXXXXX..XXXXXXX 100644
80
--- a/trace-events
81
+++ b/trace-events
82
@@ -XXX,XX +XXX,XX @@ dma_map_wait(void *dbs) "dbs=%p"
83
find_ram_offset(uint64_t size, uint64_t offset) "size: 0x%" PRIx64 " @ 0x%" PRIx64
84
find_ram_offset_loop(uint64_t size, uint64_t candidate, uint64_t offset, uint64_t next, uint64_t mingap) "trying size: 0x%" PRIx64 " @ 0x%" PRIx64 ", offset: 0x%" PRIx64" next: 0x%" PRIx64 " mingap: 0x%" PRIx64
85
ram_block_discard_range(const char *rbname, void *hva, size_t length, bool need_madvise, bool need_fallocate, int ret) "%s@%p + 0x%zx: madvise: %d fallocate: %d ret: %d"
86
+memory_notdirty_write_access(uint64_t vaddr, uint64_t ram_addr, unsigned size) "0x%" PRIx64 " ram_addr 0x%" PRIx64 " size %u"
87
+memory_notdirty_set_dirty(uint64_t vaddr) "0x%" PRIx64
88
89
# memory.c
90
memory_region_ops_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
91
memory_region_ops_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
92
memory_region_subpage_read(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
93
memory_region_subpage_write(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
94
-memory_region_tb_read(int cpu_index, uint64_t addr, uint64_t value, unsigned size) "cpu %d addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
95
-memory_region_tb_write(int cpu_index, uint64_t addr, uint64_t value, unsigned size) "cpu %d addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
96
memory_region_ram_device_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
97
memory_region_ram_device_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
98
flatview_new(void *view, void *root) "%p (root %p)"
99
--
29
--
100
2.17.1
30
2.17.1
101
31
102
32
1
It does not require going through the whole I/O path
1
There are some uint64_t uses that expect TARGET_PAGE_MASK to
2
in order to discard a write.
2
sign-extend for a 32-bit target, so this must continue to be a signed type.
3
Define based on TARGET_PAGE_BITS not TARGET_PAGE_SIZE; this
4
will make a following patch more clear.
3
5
4
Reviewed-by: David Hildenbrand <david@redhat.com>
6
This should not have a functional effect so far.
7
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
11
---
7
include/exec/cpu-all.h | 5 ++++-
12
include/exec/cpu-all.h | 2 +-
8
include/exec/cpu-common.h | 1 -
13
1 file changed, 1 insertion(+), 1 deletion(-)
9
accel/tcg/cputlb.c | 36 ++++++++++++++++++++--------------
10
exec.c | 41 +--------------------------------------
11
4 files changed, 26 insertions(+), 57 deletions(-)
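
A small worked example of the signedness point above, assuming a 32-bit target (so target_long is 32 bits) and 12-bit pages; the values are only illustrative:

    /* TARGET_PAGE_MASK as a signed 32-bit value: 0xfffff000 == -4096 */
    int32_t  mask = (int32_t)-1 << 12;
    uint64_t addr = 0x0000004512345678ULL;    /* e.g. a 64-bit hwaddr */

    /* Signed mask: converted to uint64_t it becomes 0xfffffffffffff000,
     * so the high 32 bits of addr survive the AND. */
    uint64_t ok  = addr & mask;               /* 0x0000004512345000 */

    /* An unsigned 32-bit mask would zero-extend instead and silently
     * truncate the address. */
    uint64_t bad = addr & (uint32_t)mask;     /* 0x0000000012345000 */
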
12
14
13
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
15
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/cpu-all.h
17
--- a/include/exec/cpu-all.h
16
+++ b/include/exec/cpu-all.h
18
+++ b/include/exec/cpu-all.h
17
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
19
@@ -XXX,XX +XXX,XX @@ extern TargetPageBits target_page;
18
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
19
/* Set if TLB entry requires byte swap. */
20
#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
21
+/* Set if TLB entry writes ignored. */
22
+#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))
23
24
/* Use this mask to check interception with an alignment mask
25
* in a TCG backend.
26
*/
27
#define TLB_FLAGS_MASK \
28
- (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT | TLB_BSWAP)
29
+ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
30
+ | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
31
32
/**
33
* tlb_hit_page: return true if page aligned @addr is a hit against the
34
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
35
index XXXXXXX..XXXXXXX 100644
36
--- a/include/exec/cpu-common.h
37
+++ b/include/exec/cpu-common.h
38
@@ -XXX,XX +XXX,XX @@ void qemu_flush_coalesced_mmio_buffer(void);
39
40
void cpu_flush_icache_range(hwaddr start, hwaddr len);
41
42
-extern struct MemoryRegion io_mem_rom;
43
extern struct MemoryRegion io_mem_notdirty;
44
45
typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
46
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/accel/tcg/cputlb.c
49
+++ b/accel/tcg/cputlb.c
50
@@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
51
{
52
uintptr_t addr = tlb_entry->addr_write;
53
54
- if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
55
+ if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
56
+ TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
57
addr &= TARGET_PAGE_MASK;
58
addr += tlb_entry->addend;
59
if ((addr - start) < length) {
60
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
61
address |= TLB_MMIO;
62
addend = 0;
63
} else {
64
- /* TLB_MMIO for rom/romd handled below */
65
addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
66
}
67
68
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
69
70
tn.addr_write = -1;
71
if (prot & PAGE_WRITE) {
72
- if ((memory_region_is_ram(section->mr) && section->readonly)
73
- || memory_region_is_romd(section->mr)) {
74
- /* Write access calls the I/O callback. */
75
- tn.addr_write = address | TLB_MMIO;
76
- } else if (memory_region_is_ram(section->mr)
77
- && cpu_physical_memory_is_clean(
78
- memory_region_get_ram_addr(section->mr) + xlat)) {
79
- tn.addr_write = address | TLB_NOTDIRTY;
80
- } else {
81
- tn.addr_write = address;
82
+ tn.addr_write = address;
83
+ if (memory_region_is_romd(section->mr)) {
84
+ /* Use the MMIO path so that the device can switch states. */
85
+ tn.addr_write |= TLB_MMIO;
86
+ } else if (memory_region_is_ram(section->mr)) {
87
+ if (section->readonly) {
88
+ tn.addr_write |= TLB_DISCARD_WRITE;
89
+ } else if (cpu_physical_memory_is_clean(
90
+ memory_region_get_ram_addr(section->mr) + xlat)) {
91
+ tn.addr_write |= TLB_NOTDIRTY;
92
+ }
93
}
94
if (prot & PAGE_WRITE_INV) {
95
tn.addr_write |= TLB_INVALID_MASK;
96
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
97
mr = section->mr;
98
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
99
cpu->mem_io_pc = retaddr;
100
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
101
+ if (mr != &io_mem_notdirty && !cpu->can_do_io) {
102
cpu_io_recompile(cpu, retaddr);
103
}
104
105
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
106
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
107
mr = section->mr;
108
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
109
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
110
+ if (mr != &io_mem_notdirty && !cpu->can_do_io) {
111
cpu_io_recompile(cpu, retaddr);
112
}
113
cpu->mem_io_vaddr = addr;
114
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
115
}
116
117
/* Reject I/O access, or other required slow-path. */
118
- if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP)) {
119
+ if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
120
return NULL;
121
}
122
123
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
124
return;
125
}
126
127
+ /* Ignore writes to ROM. */
128
+ if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
129
+ return;
130
+ }
131
+
132
haddr = (void *)((uintptr_t)addr + entry->addend);
133
134
/*
135
diff --git a/exec.c b/exec.c
136
index XXXXXXX..XXXXXXX 100644
137
--- a/exec.c
138
+++ b/exec.c
139
@@ -XXX,XX +XXX,XX @@ static MemoryRegion *system_io;
140
AddressSpace address_space_io;
141
AddressSpace address_space_memory;
142
143
-MemoryRegion io_mem_rom, io_mem_notdirty;
144
+MemoryRegion io_mem_notdirty;
145
static MemoryRegion io_mem_unassigned;
146
#endif
20
#endif
147
21
148
@@ -XXX,XX +XXX,XX @@ typedef struct subpage_t {
22
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
149
23
-#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
150
#define PHYS_SECTION_UNASSIGNED 0
24
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
151
#define PHYS_SECTION_NOTDIRTY 1
25
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
152
-#define PHYS_SECTION_ROM 2
26
153
27
/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
154
static void io_mem_init(void);
155
static void memory_map_init(void);
156
@@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
157
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
158
if (!section->readonly) {
159
iotlb |= PHYS_SECTION_NOTDIRTY;
160
- } else {
161
- iotlb |= PHYS_SECTION_ROM;
162
}
163
} else {
164
AddressSpaceDispatch *d;
165
@@ -XXX,XX +XXX,XX @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
166
return phys_section_add(map, &section);
167
}
168
169
-static void readonly_mem_write(void *opaque, hwaddr addr,
170
- uint64_t val, unsigned size)
171
-{
172
- /* Ignore any write to ROM. */
173
-}
174
-
175
-static bool readonly_mem_accepts(void *opaque, hwaddr addr,
176
- unsigned size, bool is_write,
177
- MemTxAttrs attrs)
178
-{
179
- return is_write;
180
-}
181
-
182
-/* This will only be used for writes, because reads are special cased
183
- * to directly access the underlying host ram.
184
- */
185
-static const MemoryRegionOps readonly_mem_ops = {
186
- .write = readonly_mem_write,
187
- .valid.accepts = readonly_mem_accepts,
188
- .endianness = DEVICE_NATIVE_ENDIAN,
189
- .valid = {
190
- .min_access_size = 1,
191
- .max_access_size = 8,
192
- .unaligned = false,
193
- },
194
- .impl = {
195
- .min_access_size = 1,
196
- .max_access_size = 8,
197
- .unaligned = false,
198
- },
199
-};
200
-
201
MemoryRegionSection *iotlb_to_section(CPUState *cpu,
202
hwaddr index, MemTxAttrs attrs)
203
{
204
@@ -XXX,XX +XXX,XX @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
205
206
static void io_mem_init(void)
207
{
208
- memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
209
- NULL, NULL, UINT64_MAX);
210
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
211
NULL, UINT64_MAX);
212
213
@@ -XXX,XX +XXX,XX @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
214
assert(n == PHYS_SECTION_UNASSIGNED);
215
n = dummy_section(&d->map, fv, &io_mem_notdirty);
216
assert(n == PHYS_SECTION_NOTDIRTY);
217
- n = dummy_section(&d->map, fv, &io_mem_rom);
218
- assert(n == PHYS_SECTION_ROM);
219
220
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
221
222
--
28
--
223
2.17.1
29
2.17.1
224
30
225
31
1
These bits do not need to vary with the actual page size
1
This eliminates a set of runtime shifts. It turns out that we
2
used by the guest.
2
require TARGET_PAGE_MASK more often than TARGET_PAGE_SIZE, so
3
redefine TARGET_PAGE_SIZE based on TARGET_PAGE_MASK instead of
4
the other way around.
3
5
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: David Hildenbrand <david@redhat.com>
7
Reviewed-by: David Hildenbrand <david@redhat.com>
6
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
8
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
10
---
9
include/exec/cpu-all.h | 16 ++++++++++------
11
include/exec/cpu-all.h | 8 ++++++--
10
1 file changed, 10 insertions(+), 6 deletions(-)
12
exec-vary.c | 1 +
13
2 files changed, 7 insertions(+), 2 deletions(-)
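
The direction swap relies only on two's-complement arithmetic; with 12-bit pages (illustrative values):

    int mask = -1 << 12;    /* TARGET_PAGE_MASK: ...fffff000, i.e. -4096 */
    int size = -mask;       /* TARGET_PAGE_SIZE: 4096 */
    /* and the old relationship still holds: ~(size - 1) == mask */

so defining TARGET_PAGE_SIZE as the negation of the cached mask costs no extra shift at run time.
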
11
14
12
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
15
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
13
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
14
--- a/include/exec/cpu-all.h
17
--- a/include/exec/cpu-all.h
15
+++ b/include/exec/cpu-all.h
18
+++ b/include/exec/cpu-all.h
16
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
19
@@ -XXX,XX +XXX,XX @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
17
20
typedef struct {
18
#if !defined(CONFIG_USER_ONLY)
21
bool decided;
19
22
int bits;
20
-/* Flags stored in the low bits of the TLB virtual address. These are
23
+ target_long mask;
21
- * defined so that fast path ram access is all zeros.
24
} TargetPageBits;
22
+/*
25
#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
23
+ * Flags stored in the low bits of the TLB virtual address.
26
extern const TargetPageBits target_page;
24
+ * These are defined so that fast path ram access is all zeros.
27
@@ -XXX,XX +XXX,XX @@ extern TargetPageBits target_page;
25
* The flags all must be between TARGET_PAGE_BITS and
28
#endif
26
* maximum address alignment bit.
29
#ifdef CONFIG_DEBUG_TCG
27
+ *
30
#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
28
+ * Use TARGET_PAGE_BITS_MIN so that these bits are constant
31
+#define TARGET_PAGE_MASK ({ assert(target_page.decided); target_page.mask; })
29
+ * when TARGET_PAGE_BITS_VARY is in effect.
32
#else
30
*/
33
#define TARGET_PAGE_BITS target_page.bits
31
/* Zero if TLB entry is valid. */
34
+#define TARGET_PAGE_MASK target_page.mask
32
-#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))
35
#endif
33
+#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
36
+#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
34
/* Set if TLB entry references a clean RAM page. The iotlb entry will
37
#else
35
contain the page physical address. */
38
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
36
-#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))
39
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
37
+#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
40
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
38
/* Set if TLB entry is an IO callback. */
41
#endif
39
-#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
42
40
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
43
-#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
41
/* Set if TLB entry contains a watchpoint. */
44
-#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
42
-#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4))
45
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
43
+#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
46
44
47
/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
45
/* Use this mask to check interception with an alignment mask
48
diff --git a/exec-vary.c b/exec-vary.c
46
* in a TCG backend.
49
index XXXXXXX..XXXXXXX 100644
50
--- a/exec-vary.c
51
+++ b/exec-vary.c
52
@@ -XXX,XX +XXX,XX @@ void finalize_target_page_bits(void)
53
if (init_target_page.bits == 0) {
54
init_target_page.bits = TARGET_PAGE_BITS_MIN;
55
}
56
+ init_target_page.mask = (target_long)-1 << init_target_page.bits;
57
init_target_page.decided = true;
58
59
/*
47
--
60
--
48
2.17.1
61
2.17.1
49
62
50
63
Deleted patch
1
This forced inlining can result in missing symbols,
2
which makes a debugging build harder to follow.
3
1
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: David Hildenbrand <david@redhat.com>
7
Reported-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
include/qemu/compiler.h | 11 +++++++++++
11
accel/tcg/cputlb.c | 4 ++--
12
2 files changed, 13 insertions(+), 2 deletions(-)
13
14
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/qemu/compiler.h
17
+++ b/include/qemu/compiler.h
18
@@ -XXX,XX +XXX,XX @@
19
# define QEMU_NONSTRING
20
#endif
21
22
+/*
23
+ * Forced inlining may be desired to encourage constant propagation
24
+ * of function parameters. However, it can also make debugging harder,
25
+ * so disable it for a non-optimizing build.
26
+ */
27
+#if defined(__OPTIMIZE__)
28
+#define QEMU_ALWAYS_INLINE __attribute__((always_inline))
29
+#else
30
+#define QEMU_ALWAYS_INLINE
31
+#endif
32
+
33
/* Implement C11 _Generic via GCC builtins. Example:
34
*
35
* QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
36
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/accel/tcg/cputlb.c
39
+++ b/accel/tcg/cputlb.c
40
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
41
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
42
TCGMemOpIdx oi, uintptr_t retaddr);
43
44
-static inline uint64_t __attribute__((always_inline))
45
+static inline uint64_t QEMU_ALWAYS_INLINE
46
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
47
uintptr_t retaddr, MemOp op, bool code_read,
48
FullLoadHelper *full_load)
49
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
50
* Store Helpers
51
*/
52
53
-static inline void __attribute__((always_inline))
54
+static inline void QEMU_ALWAYS_INLINE
55
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
56
TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
57
{
58
--
59
2.17.1
60
61
Deleted patch
1
Use this as a compile-time assert that a particular
2
code path is not reachable.
3
1
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/qemu/compiler.h | 15 +++++++++++++++
8
1 file changed, 15 insertions(+)
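
A hedged usage sketch, simplified from the way the following patch uses this in the load/store helpers: when the controlling value is a compile-time constant at every inlined call site, the dead default branch is deleted together with the call; if the compiler cannot prove that, the build fails instead of deferring to a runtime abort.

    static inline uint64_t load_bytes(const void *p, unsigned size)
    {
        switch (size) {
        case 1: return *(const uint8_t *)p;
        case 2: return *(const uint16_t *)p;
        case 4: return *(const uint32_t *)p;
        case 8: return *(const uint64_t *)p;
        default:
            /* size is constant after inlining, so this must fold away */
            qemu_build_not_reached();
        }
    }
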
9
10
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/qemu/compiler.h
13
+++ b/include/qemu/compiler.h
14
@@ -XXX,XX +XXX,XX @@
15
#define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__))
16
#define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__))
17
18
+/**
19
+ * qemu_build_not_reached()
20
+ *
21
+ * The compiler, during optimization, is expected to prove that a call
22
+ * to this function cannot be reached and remove it. If the compiler
23
+ * supports QEMU_ERROR, this will be reported at compile time; otherwise
24
+ * this will be reported at link time due to the missing symbol.
25
+ */
26
+#ifdef __OPTIMIZE__
27
+extern void QEMU_NORETURN QEMU_ERROR("code path is reachable")
28
+ qemu_build_not_reached(void);
29
+#else
30
+#define qemu_build_not_reached() g_assert_not_reached()
31
+#endif
32
+
33
#endif /* COMPILER_H */
34
--
35
2.17.1
36
37
Deleted patch
1
Increase the current runtime assert to a compile-time assert.
2
1
3
Reviewed-by: David Hildenbrand <david@redhat.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
accel/tcg/cputlb.c | 5 ++---
8
1 file changed, 2 insertions(+), 3 deletions(-)
9
10
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/cputlb.c
13
+++ b/accel/tcg/cputlb.c
14
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
15
res = ldq_le_p(haddr);
16
break;
17
default:
18
- g_assert_not_reached();
19
+ qemu_build_not_reached();
20
}
21
22
return res;
23
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
24
stq_le_p(haddr, val);
25
break;
26
default:
27
- g_assert_not_reached();
28
- break;
29
+ qemu_build_not_reached();
30
}
31
}
32
33
--
34
2.17.1
35
36
Deleted patch
1
We can use notdirty_write for the write and return a valid host
2
pointer for this case.
3
1
4
Reviewed-by: David Hildenbrand <david@redhat.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
accel/tcg/cputlb.c | 26 +++++++++++++++++---------
9
1 file changed, 17 insertions(+), 9 deletions(-)
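
For context, a sketch of how a target helper typically consumes probe_access after this change (illustrative code, not any specific target): a non-NULL result may be written as plain host memory, while NULL still means the access needs the slow path (MMIO, byte-swapped or discarded writes); clean pages are now dirtied internally before the pointer is returned.

    uintptr_t ra = GETPC();
    void *host = probe_access(env, addr, size, MMU_DATA_STORE,
                              cpu_mmu_index(env, false), ra);

    if (host) {
        memset(host, 0, size);              /* direct host access */
    } else {
        for (int i = 0; i < size; i++) {    /* slow path, byte by byte */
            cpu_stb_data_ra(env, addr + i, 0, ra);
        }
    }
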
10
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
14
+++ b/accel/tcg/cputlb.c
15
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
16
return NULL;
17
}
18
19
- /* Handle watchpoints. */
20
- if (tlb_addr & TLB_WATCHPOINT) {
21
- cpu_check_watchpoint(env_cpu(env), addr, size,
22
- env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
23
- wp_access, retaddr);
24
- }
25
+ if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
26
+ CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
27
28
- /* Reject I/O access, or other required slow-path. */
29
- if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
30
- return NULL;
31
+ /* Reject I/O access, or other required slow-path. */
32
+ if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
33
+ return NULL;
34
+ }
35
+
36
+ /* Handle watchpoints. */
37
+ if (tlb_addr & TLB_WATCHPOINT) {
38
+ cpu_check_watchpoint(env_cpu(env), addr, size,
39
+ iotlbentry->attrs, wp_access, retaddr);
40
+ }
41
+
42
+ /* Handle clean RAM pages. */
43
+ if (tlb_addr & TLB_NOTDIRTY) {
44
+ notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
45
+ }
46
}
47
48
return (void *)((uintptr_t)addr + entry->addend);
49
--
50
2.17.1
51
52
1
With the merge of notdirty handling into store_helper,
1
Using uintptr_t instead of target_ulong meant that, for 64-bit guest
2
the last user of cpu->mem_io_vaddr was removed.
2
and 32-bit host, we truncated the guest address comparator and so may
3
not hit the tlb when we should.
3
4
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Fixes: 4811e9095c0
5
Reviewed-by: David Hildenbrand <david@redhat.com>
6
Reviewed-by: David Hildenbrand <david@redhat.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
include/hw/core/cpu.h | 2 --
9
accel/tcg/cputlb.c | 2 +-
9
accel/tcg/cputlb.c | 2 --
10
1 file changed, 1 insertion(+), 1 deletion(-)
10
hw/core/cpu.c | 1 -
11
3 files changed, 5 deletions(-)
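
To make the truncation concrete, an illustrative case for a 64-bit guest on a 32-bit host, where uintptr_t is 32 bits:

    uint64_t  addr     = 0x0000000180001000ULL;  /* 64-bit guest address */
    uintptr_t narrowed = addr;                   /* 32-bit host: 0x80001000 */
    /* Any comparison done on the narrowed copy no longer agrees with the
     * full-width comparator stored in the TLB entry, so the lookup can
     * fail to hit a page that is actually resident. */

Keeping the local variables target_ulong avoids the narrowing entirely.
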
12
11
13
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/hw/core/cpu.h
16
+++ b/include/hw/core/cpu.h
17
@@ -XXX,XX +XXX,XX @@ struct qemu_work_item;
18
* @next_cpu: Next CPU sharing TB cache.
19
* @opaque: User data.
20
* @mem_io_pc: Host Program Counter at which the memory was accessed.
21
- * @mem_io_vaddr: Target virtual address at which the memory was accessed.
22
* @kvm_fd: vCPU file descriptor for KVM.
23
* @work_mutex: Lock to prevent multiple access to queued_work_*.
24
* @queued_work_first: First asynchronous work pending.
25
@@ -XXX,XX +XXX,XX @@ struct CPUState {
26
* we store some rarely used information in the CPU context.
27
*/
28
uintptr_t mem_io_pc;
29
- vaddr mem_io_vaddr;
30
/*
31
* This is only needed for the legacy cpu_unassigned_access() hook;
32
* when all targets using it have been converted to use
33
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
12
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
34
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
35
--- a/accel/tcg/cputlb.c
14
--- a/accel/tcg/cputlb.c
36
+++ b/accel/tcg/cputlb.c
15
+++ b/accel/tcg/cputlb.c
37
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
16
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
38
cpu_io_recompile(cpu, retaddr);
17
MMUAccessType access_type, int mmu_idx)
39
}
18
{
40
19
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
41
- cpu->mem_io_vaddr = addr;
20
- uintptr_t tlb_addr, page;
42
cpu->mem_io_access_type = access_type;
21
+ target_ulong tlb_addr, page;
43
22
size_t elt_ofs;
44
if (mr->global_locking && !qemu_mutex_iothread_locked()) {
23
45
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
24
switch (access_type) {
46
if (!cpu->can_do_io) {
47
cpu_io_recompile(cpu, retaddr);
48
}
49
- cpu->mem_io_vaddr = addr;
50
cpu->mem_io_pc = retaddr;
51
52
if (mr->global_locking && !qemu_mutex_iothread_locked()) {
53
diff --git a/hw/core/cpu.c b/hw/core/cpu.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/hw/core/cpu.c
56
+++ b/hw/core/cpu.c
57
@@ -XXX,XX +XXX,XX @@ static void cpu_common_reset(CPUState *cpu)
58
cpu->interrupt_request = 0;
59
cpu->halted = 0;
60
cpu->mem_io_pc = 0;
61
- cpu->mem_io_vaddr = 0;
62
cpu->icount_extra = 0;
63
atomic_set(&cpu->icount_decr_ptr->u32, 0);
64
cpu->can_do_io = 1;
65
--
25
--
66
2.17.1
26
2.17.1
67
27
68
28
1
All callers pass false to this argument. Remove it and pass the
1
From: Clement Deschamps <clement.deschamps@greensocs.com>
2
constant on to tb_invalidate_phys_page_range__locked.
3
2
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
This fixes a segmentation fault in icount mode when executing
5
Reviewed-by: David Hildenbrand <david@redhat.com>
4
from an IO region.
5
6
TB is marked as CF_NOCACHE but tb->orig_tb is not initialized
7
(equals previous value in code_gen_buffer).
8
9
The issue happens in cpu_io_recompile() when it tries to invalidate orig_tb.
10
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Signed-off-by: Clement Deschamps <clement.deschamps@greensocs.com>
13
Message-Id: <20191022140016.918371-1-clement.deschamps@greensocs.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
15
---
8
accel/tcg/translate-all.h | 3 +--
16
accel/tcg/translate-all.c | 1 +
9
accel/tcg/translate-all.c | 6 ++----
17
1 file changed, 1 insertion(+)
10
exec.c | 4 ++--
11
3 files changed, 5 insertions(+), 8 deletions(-)
12
18
13
diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/accel/tcg/translate-all.h
16
+++ b/accel/tcg/translate-all.h
17
@@ -XXX,XX +XXX,XX @@ struct page_collection *page_collection_lock(tb_page_addr_t start,
18
void page_collection_unlock(struct page_collection *set);
19
void tb_invalidate_phys_page_fast(struct page_collection *pages,
20
tb_page_addr_t start, int len);
21
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
22
- int is_cpu_write_access);
23
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
24
void tb_check_watchpoint(CPUState *cpu);
25
26
#ifdef CONFIG_USER_ONLY
27
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
19
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
28
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
29
--- a/accel/tcg/translate-all.c
21
--- a/accel/tcg/translate-all.c
30
+++ b/accel/tcg/translate-all.c
22
+++ b/accel/tcg/translate-all.c
31
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
23
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
32
*
24
tb->cs_base = cs_base;
33
* Called with mmap_lock held for user-mode emulation
25
tb->flags = flags;
34
*/
26
tb->cflags = cflags;
35
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
27
+ tb->orig_tb = NULL;
36
- int is_cpu_write_access)
28
tb->trace_vcpu_dstate = *cpu->trace_dstate;
37
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
29
tcg_ctx->tb_cflags = cflags;
38
{
30
tb_overflow:
39
struct page_collection *pages;
40
PageDesc *p;
41
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
42
return;
43
}
44
pages = page_collection_lock(start, end);
45
- tb_invalidate_phys_page_range__locked(pages, p, start, end,
46
- is_cpu_write_access);
47
+ tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
48
page_collection_unlock(pages);
49
}
50
51
diff --git a/exec.c b/exec.c
52
index XXXXXXX..XXXXXXX 100644
53
--- a/exec.c
54
+++ b/exec.c
55
@@ -XXX,XX +XXX,XX @@ const char *parse_cpu_option(const char *cpu_option)
56
void tb_invalidate_phys_addr(target_ulong addr)
57
{
58
mmap_lock();
59
- tb_invalidate_phys_page_range(addr, addr + 1, 0);
60
+ tb_invalidate_phys_page_range(addr, addr + 1);
61
mmap_unlock();
62
}
63
64
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
65
return;
66
}
67
ram_addr = memory_region_get_ram_addr(mr) + addr;
68
- tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
69
+ tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
70
rcu_read_unlock();
71
}
72
73
--
31
--
74
2.17.1
32
2.17.1
75
33
76
34
1
Fixes the previous TLB_WATCHPOINT patches because we are currently
1
Since 2ac01d6dafab, this function does only two things: assert a
2
failing to set cpu->mem_io_pc with the call to cpu_check_watchpoint.
2
lock is held, and call tcg_tb_alloc. It is used exactly once,
3
Pass down the retaddr directly because it's readily available.
3
and its user has already done the assert.
4
4
5
Fixes: 50b107c5d61
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Clement Deschamps <clement.deschamps@greensocs.com>
7
Reviewed-by: David Hildenbrand <david@redhat.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
8
---
10
accel/tcg/translate-all.h | 2 +-
9
accel/tcg/translate-all.c | 20 ++------------------
11
accel/tcg/translate-all.c | 6 +++---
10
1 file changed, 2 insertions(+), 18 deletions(-)
12
exec.c | 2 +-
13
3 files changed, 5 insertions(+), 5 deletions(-)
14
11
15
diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/translate-all.h
18
+++ b/accel/tcg/translate-all.h
19
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
20
tb_page_addr_t start, int len,
21
uintptr_t retaddr);
22
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
23
-void tb_check_watchpoint(CPUState *cpu);
24
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
25
26
#ifdef CONFIG_USER_ONLY
27
int page_unprotect(target_ulong address, uintptr_t pc);
28
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
12
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
29
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
30
--- a/accel/tcg/translate-all.c
14
--- a/accel/tcg/translate-all.c
31
+++ b/accel/tcg/translate-all.c
15
+++ b/accel/tcg/translate-all.c
32
@@ -XXX,XX +XXX,XX @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
16
@@ -XXX,XX +XXX,XX @@ void tcg_exec_init(unsigned long tb_size)
33
#endif
17
#endif
34
18
}
35
/* user-mode: call with mmap_lock held */
19
36
-void tb_check_watchpoint(CPUState *cpu)
20
-/*
37
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
21
- * Allocate a new translation block. Flush the translation buffer if
22
- * too many translation blocks or too much generated code.
23
- */
24
-static TranslationBlock *tb_alloc(target_ulong pc)
25
-{
26
- TranslationBlock *tb;
27
-
28
- assert_memory_lock();
29
-
30
- tb = tcg_tb_alloc(tcg_ctx);
31
- if (unlikely(tb == NULL)) {
32
- return NULL;
33
- }
34
- return tb;
35
-}
36
-
37
/* call with @p->lock held */
38
static inline void invalidate_page_bitmap(PageDesc *p)
38
{
39
{
39
TranslationBlock *tb;
40
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
40
41
TCGProfile *prof = &tcg_ctx->prof;
42
int64_t ti;
43
#endif
44
+
41
assert_memory_lock();
45
assert_memory_lock();
42
46
43
- tb = tcg_tb_lookup(cpu->mem_io_pc);
47
phys_pc = get_page_addr_code(env, pc);
44
+ tb = tcg_tb_lookup(retaddr);
48
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
45
if (tb) {
49
}
46
/* We can use retranslation to find the PC. */
50
47
- cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
51
buffer_overflow:
48
+ cpu_restore_state_from_tb(cpu, tb, retaddr, true);
52
- tb = tb_alloc(pc);
49
tb_phys_invalidate(tb, -1);
53
+ tb = tcg_tb_alloc(tcg_ctx);
50
} else {
54
if (unlikely(!tb)) {
51
/* The exception probably happened in a helper. The CPU state should
55
/* flush must be done */
52
diff --git a/exec.c b/exec.c
56
tb_flush(cpu);
53
index XXXXXXX..XXXXXXX 100644
54
--- a/exec.c
55
+++ b/exec.c
56
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
57
cpu->watchpoint_hit = wp;
58
59
mmap_lock();
60
- tb_check_watchpoint(cpu);
61
+ tb_check_watchpoint(cpu, ra);
62
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
63
cpu->exception_index = EXCP_DEBUG;
64
mmap_unlock();
65
--
57
--
66
2.17.1
58
2.17.1
67
59
68
60