The following changes since commit 3e08b2b9cb64bff2b73fa9128c0e49bfcde0dd40:

  Merge remote-tracking branch 'remotes/philmd-gitlab/tags/edk2-next-20200121' into staging (2020-01-21 15:29:25 +0000)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20200121

for you to fetch changes up to 75fa376cdab5e5db2c7fdd107358e16f95503ac6:

  scripts/git.orderfile: Display decodetree before C source (2020-01-21 15:26:09 -1000)

----------------------------------------------------------------
Remove another limit to NB_MMU_MODES.
Fix compilation using uclibc.
Fix defaulting of -accel parameters.
Tidy cputlb basic routines.
Adjust git.orderfile for decodetree.

----------------------------------------------------------------
Carlos Santos (1):
      util/cacheinfo: fix crash when compiling with uClibc

Philippe Mathieu-Daudé (1):
      scripts/git.orderfile: Display decodetree before C source

Richard Henderson (14):
      cputlb: Handle NB_MMU_MODES > TARGET_PAGE_BITS_MIN
      vl: Remove unused variable in configure_accelerators
      vl: Reduce scope of variables in configure_accelerators
      vl: Remove useless test in configure_accelerators
      vl: Only choose enabled accelerators in configure_accelerators
      cputlb: Merge tlb_table_flush_by_mmuidx into tlb_flush_one_mmuidx_locked
      cputlb: Make tlb_n_entries private to cputlb.c
      cputlb: Pass CPUTLBDescFast to tlb_n_entries and sizeof_tlb
      cputlb: Hoist tlb portions in tlb_mmu_resize_locked
      cputlb: Hoist tlb portions in tlb_flush_one_mmuidx_locked
      cputlb: Split out tlb_mmu_flush_locked
      cputlb: Partially merge tlb_dyn_init into tlb_init
      cputlb: Initialize tlbs as flushed
      cputlb: Hoist timestamp outside of loops over tlbs

 include/exec/cpu_ldst.h |   5 -
 accel/tcg/cputlb.c      | 287 +++++++++++++++++++++++++++++++++---------------
 util/cacheinfo.c        |  10 +-
 vl.c                    |  27 +++--
 scripts/git.orderfile   |   3 +
 5 files changed, 223 insertions(+), 109 deletions(-)

cputlb: Handle NB_MMU_MODES > TARGET_PAGE_BITS_MIN

In target/arm we will shortly have "too many" mmu_idx.
The current minimum barrier is caused by the way in which
tlb_flush_page_by_mmuidx is coded.

We can remove this limitation by allocating memory for
consumption by the worker. Let us assume this is the
unlikely case, as it will be for the majority of targets
which have so far satisfied the BUILD_BUG_ON, and only
allocate memory when necessary.
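
As an aside, the low-bits encoding used below can be sketched in
isolation. Illustrative sketch only, with example constants standing
in for QEMU's real TARGET_PAGE_* definitions:

    /*
     * Pack an mmu_idx bitmap into the sub-page bits of a page-aligned
     * address, as tlb_flush_page_by_mmuidx_async_1 does below.  This
     * only works while idxmap < TARGET_PAGE_SIZE; otherwise the
     * structure-allocating path must be taken.
     */
    #include <assert.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS 12                    /* example value */
    #define TARGET_PAGE_SIZE (UINT64_C(1) << TARGET_PAGE_BITS)
    #define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

    static uint64_t encode(uint64_t addr, uint16_t idxmap)
    {
        assert((addr & ~TARGET_PAGE_MASK) == 0);   /* page aligned */
        assert(idxmap < TARGET_PAGE_SIZE);         /* fits sub-page bits */
        return addr | idxmap;
    }

    static void decode(uint64_t val, uint64_t *addr, uint16_t *idxmap)
    {
        *addr = val & TARGET_PAGE_MASK;
        *idxmap = val & ~TARGET_PAGE_MASK;
    }
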
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 167 +++++++++++++++++++++++++++++++++++----------
1 file changed, 132 insertions(+), 35 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
}
}

-/* As we are going to hijack the bottom bits of the page address for a
- * mmuidx bit mask we need to fail to build if we can't do that
+/**
+ * tlb_flush_page_by_mmuidx_async_0:
+ * @cpu: cpu on which to flush
+ * @addr: page of virtual address to flush
+ * @idxmap: set of mmu_idx to flush
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
+ * at @addr from the tlbs indicated by @idxmap from @cpu.
*/
-QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
-
-static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
- run_on_cpu_data data)
+static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
+ target_ulong addr,
+ uint16_t idxmap)
{
CPUArchState *env = cpu->env_ptr;
- target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
- target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
- unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
int mmu_idx;

assert_cpu_is_self(cpu);

- tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
- addr, mmu_idx_bitmap);
+ tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
- if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
+ if ((idxmap >> mmu_idx) & 1) {
tlb_flush_page_locked(env, mmu_idx, addr);
}
}
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
tb_flush_jmp_cache(cpu, addr);
}

+/**
+ * tlb_flush_page_by_mmuidx_async_1:
+ * @cpu: cpu on which to flush
+ * @data: encoded addr + idxmap
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
+ * async_run_on_cpu. The idxmap parameter is encoded in the page
+ * offset of the target_ptr field. This limits the set of mmu_idx
+ * that can be passed via this method.
+ */
+static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
+ run_on_cpu_data data)
+{
+ target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
+ target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
+ uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
+
+ tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
+}
+
+typedef struct {
+ target_ulong addr;
+ uint16_t idxmap;
+} TLBFlushPageByMMUIdxData;
+
+/**
+ * tlb_flush_page_by_mmuidx_async_2:
+ * @cpu: cpu on which to flush
+ * @data: allocated addr + idxmap
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
+ * async_run_on_cpu. The addr+idxmap parameters are stored in a
+ * TLBFlushPageByMMUIdxData structure that has been allocated
+ * specifically for this helper. Free the structure when done.
+ */
+static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
+ run_on_cpu_data data)
+{
+ TLBFlushPageByMMUIdxData *d = data.host_ptr;
+
+ tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
+ g_free(d);
+}
+
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
- target_ulong addr_and_mmu_idx;
-
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

/* This should already be page aligned */
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
- addr_and_mmu_idx |= idxmap;
+ addr &= TARGET_PAGE_MASK;

- if (!qemu_cpu_is_self(cpu)) {
- async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
- RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ if (qemu_cpu_is_self(cpu)) {
+ tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
+ } else if (idxmap < TARGET_PAGE_SIZE) {
+ /*
+ * Most targets have only a few mmu_idx. In the case where
+ * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
+ * allocating memory for this operation.
+ */
+ async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
} else {
- tlb_flush_page_by_mmuidx_async_work(
- cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
+
+ /* Otherwise allocate a structure, freed by the worker. */
+ d->addr = addr;
+ d->idxmap = idxmap;
+ async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
+ RUN_ON_CPU_HOST_PTR(d));
}
}

@@ -XXX,XX +XXX,XX @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
uint16_t idxmap)
{
- const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
- target_ulong addr_and_mmu_idx;
-
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

/* This should already be page aligned */
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
- addr_and_mmu_idx |= idxmap;
+ addr &= TARGET_PAGE_MASK;

- flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
- fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ /*
+ * Allocate memory to hold addr+idxmap only when needed.
+ * See tlb_flush_page_by_mmuidx for details.
+ */
+ if (idxmap < TARGET_PAGE_SIZE) {
+ flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+ } else {
+ CPUState *dst_cpu;
+
+ /* Allocate a separate data block for each destination cpu. */
+ CPU_FOREACH(dst_cpu) {
+ if (dst_cpu != src_cpu) {
+ TLBFlushPageByMMUIdxData *d
+ = g_new(TLBFlushPageByMMUIdxData, 1);
+
+ d->addr = addr;
+ d->idxmap = idxmap;
+ async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
+ RUN_ON_CPU_HOST_PTR(d));
+ }
+ }
+ }
+
+ tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
target_ulong addr,
uint16_t idxmap)
{
- const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
- target_ulong addr_and_mmu_idx;
-
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

/* This should already be page aligned */
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
- addr_and_mmu_idx |= idxmap;
+ addr &= TARGET_PAGE_MASK;

- flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
- async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ /*
+ * Allocate memory to hold addr+idxmap only when needed.
+ * See tlb_flush_page_by_mmuidx for details.
+ */
+ if (idxmap < TARGET_PAGE_SIZE) {
+ flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+ async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+ } else {
+ CPUState *dst_cpu;
+ TLBFlushPageByMMUIdxData *d;
+
+ /* Allocate a separate data block for each destination cpu. */
+ CPU_FOREACH(dst_cpu) {
+ if (dst_cpu != src_cpu) {
+ d = g_new(TLBFlushPageByMMUIdxData, 1);
+ d->addr = addr;
+ d->idxmap = idxmap;
+ async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
+ RUN_ON_CPU_HOST_PTR(d));
+ }
+ }
+
+ d = g_new(TLBFlushPageByMMUIdxData, 1);
+ d->addr = addr;
+ d->idxmap = idxmap;
+ async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
+ RUN_ON_CPU_HOST_PTR(d));
+ }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
--
2.20.1

util/cacheinfo: fix crash when compiling with uClibc

From: Carlos Santos <casantos@redhat.com>

uClibc defines _SC_LEVEL1_ICACHE_LINESIZE and _SC_LEVEL1_DCACHE_LINESIZE
but the corresponding sysconf calls return -1, which is a valid result,
meaning that the limit is indeterminate.

Handle this situation using the fallback values instead of crashing due
to an assertion failure.
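
For reference, the behavior being worked around can be demonstrated
standalone. Illustrative sketch only; the fallback value is arbitrary:

    /*
     * Probe the L1 D-cache line size via sysconf(), treating a result
     * of 0 or -1 as "indeterminate" and keeping a fallback instead.
     */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long dsize = -1;
    #ifdef _SC_LEVEL1_DCACHE_LINESIZE
        dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
    #endif
        if (dsize <= 0) {
            dsize = 64;    /* arbitrary fallback */
        }
        printf("dcache line size: %ld\n", dsize);
        return 0;
    }
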
Signed-off-by: Carlos Santos <casantos@redhat.com>
Message-Id: <20191017123713.30192-1-casantos@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
util/cacheinfo.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/util/cacheinfo.c b/util/cacheinfo.c
index XXXXXXX..XXXXXXX 100644
--- a/util/cacheinfo.c
+++ b/util/cacheinfo.c
@@ -XXX,XX +XXX,XX @@ static void sys_cache_info(int *isize, int *dsize)
static void sys_cache_info(int *isize, int *dsize)
{
# ifdef _SC_LEVEL1_ICACHE_LINESIZE
- *isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
+ int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
+ if (tmp_isize > 0) {
+ *isize = tmp_isize;
+ }
# endif
# ifdef _SC_LEVEL1_DCACHE_LINESIZE
- *dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+ int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+ if (tmp_dsize > 0) {
+ *dsize = tmp_dsize;
+ }
# endif
}
#endif /* sys_cache_info */
--
2.20.1

vl: Remove unused variable in configure_accelerators

The accel_initialised variable no longer has any setters.

Fixes: 6f6e1698a68c
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
vl.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/vl.c b/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/vl.c
+++ b/vl.c
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
{
const char *accel;
char **accel_list, **tmp;
- bool accel_initialised = false;
bool init_failed = false;

qemu_opts_foreach(qemu_find_opts("icount"),
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)

accel_list = g_strsplit(accel, ":", 0);

- for (tmp = accel_list; !accel_initialised && tmp && *tmp; tmp++) {
+ for (tmp = accel_list; tmp && *tmp; tmp++) {
/*
* Filter invalid accelerators here, to prevent obscenities
* such as "-machine accel=tcg,,thread=single".
--
2.20.1

vl: Reduce scope of variables in configure_accelerators

The accel_list and tmp variables are only used when manufacturing
-machine accel options based on -accel.

Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
vl.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vl.c b/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/vl.c
+++ b/vl.c
@@ -XXX,XX +XXX,XX @@ static int do_configure_accelerator(void *opaque, QemuOpts *opts, Error **errp)
static void configure_accelerators(const char *progname)
{
const char *accel;
- char **accel_list, **tmp;
bool init_failed = false;

qemu_opts_foreach(qemu_find_opts("icount"),
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)

accel = qemu_opt_get(qemu_get_machine_opts(), "accel");
if (QTAILQ_EMPTY(&qemu_accel_opts.head)) {
+ char **accel_list, **tmp;
+
if (accel == NULL) {
/* Select the default accelerator */
if (!accel_find("tcg") && !accel_find("kvm")) {
--
2.20.1

vl: Remove useless test in configure_accelerators

The result of g_strsplit is never NULL.
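
For context, glib documents g_strsplit() as returning a newly
allocated, NULL-terminated vector for any non-NULL input, so only the
*tmp test is needed. Illustrative sketch only, not QEMU code:

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        /* Never NULL, even for an empty input string. */
        gchar **list = g_strsplit("tcg:kvm", ":", 0);

        for (gchar **tmp = list; *tmp; tmp++) {
            printf("accel: %s\n", *tmp);
        }
        g_strfreev(list);
        return 0;
    }
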
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
vl.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vl.c b/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/vl.c
+++ b/vl.c
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)

accel_list = g_strsplit(accel, ":", 0);

- for (tmp = accel_list; tmp && *tmp; tmp++) {
+ for (tmp = accel_list; *tmp; tmp++) {
/*
* Filter invalid accelerators here, to prevent obscenities
* such as "-machine accel=tcg,,thread=single".
--
2.20.1

vl: Only choose enabled accelerators in configure_accelerators

By choosing "tcg:kvm" when kvm is not enabled, we generate
an incorrect warning: "invalid accelerator kvm".

At the same time, use g_str_has_suffix rather than open-coding
the same operation.

Presumably the inverse is also true with --disable-tcg.
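
For comparison, the open-coded test and g_str_has_suffix() agree on
any input. Illustrative sketch only, with example program names:

    #include <glib.h>
    #include <string.h>

    /* The open-coded suffix test that the patch replaces. */
    static gboolean ends_with_kvm(const char *progname)
    {
        size_t pnlen = strlen(progname);
        return pnlen >= 3 && g_str_equal(&progname[pnlen - 3], "kvm");
    }

    int main(void)
    {
        g_assert(ends_with_kvm("qemu-kvm") ==
                 g_str_has_suffix("qemu-kvm", "kvm"));
        g_assert(ends_with_kvm("qemu-system-x86_64") ==
                 g_str_has_suffix("qemu-system-x86_64", "kvm"));
        return 0;
    }
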
Fixes: 28a0961757fc
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
vl.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/vl.c b/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/vl.c
+++ b/vl.c
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)

if (accel == NULL) {
/* Select the default accelerator */
- if (!accel_find("tcg") && !accel_find("kvm")) {
- error_report("No accelerator selected and"
- " no default accelerator available");
- exit(1);
- } else {
- int pnlen = strlen(progname);
- if (pnlen >= 3 && g_str_equal(&progname[pnlen - 3], "kvm")) {
+ bool have_tcg = accel_find("tcg");
+ bool have_kvm = accel_find("kvm");
+
+ if (have_tcg && have_kvm) {
+ if (g_str_has_suffix(progname, "kvm")) {
/* If the program name ends with "kvm", we prefer KVM */
accel = "kvm:tcg";
} else {
accel = "tcg:kvm";
}
+ } else if (have_kvm) {
+ accel = "kvm";
+ } else if (have_tcg) {
+ accel = "tcg";
+ } else {
+ error_report("No accelerator selected and"
+ " no default accelerator available");
+ exit(1);
}
}
-
accel_list = g_strsplit(accel, ":", 0);

for (tmp = accel_list; *tmp; tmp++) {
--
2.20.1

cputlb: Merge tlb_table_flush_by_mmuidx into tlb_flush_one_mmuidx_locked

There is only one caller for tlb_table_flush_by_mmuidx. Place
the result at the earlier line number, due to an expected user
in the near future.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 19 +++++++------------
1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
}
}

-static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
tlb_mmu_resize_locked(env, mmu_idx);
- memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
env_tlb(env)->d[mmu_idx].n_used_entries = 0;
+ env_tlb(env)->d[mmu_idx].large_page_addr = -1;
+ env_tlb(env)->d[mmu_idx].large_page_mask = -1;
+ env_tlb(env)->d[mmu_idx].vindex = 0;
+ memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
+ memset(env_tlb(env)->d[mmu_idx].vtable, -1,
+ sizeof(env_tlb(env)->d[0].vtable));
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
@@ -XXX,XX +XXX,XX @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
*pelide = elide;
}

-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
-{
- tlb_table_flush_by_mmuidx(env, mmu_idx);
- env_tlb(env)->d[mmu_idx].large_page_addr = -1;
- env_tlb(env)->d[mmu_idx].large_page_mask = -1;
- env_tlb(env)->d[mmu_idx].vindex = 0;
- memset(env_tlb(env)->d[mmu_idx].vtable, -1,
- sizeof(env_tlb(env)->d[0].vtable));
-}
-
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
CPUArchState *env = cpu->env_ptr;
--
2.20.1

cputlb: Make tlb_n_entries private to cputlb.c

There are no users of this function outside cputlb.c,
and its interface will change in the next patch.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu_ldst.h | 5 -----
accel/tcg/cputlb.c | 5 +++++
2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -XXX,XX +XXX,XX @@ static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
return (addr >> TARGET_PAGE_BITS) & size_mask;
}

-static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
-{
- return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
-}
-
/* Find the TLB entry corresponding to the mmu_idx + address pair. */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
target_ulong addr)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

+static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
+{
+ return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
+}
+
static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
--
2.20.1

cputlb: Pass CPUTLBDescFast to tlb_n_entries and sizeof_tlb

We do not need the entire CPUArchState to compute these values.
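
For reference, the invariant these helpers rely on can be checked
standalone. Illustrative sketch only; the entry size is an example,
not QEMU's actual CPU_TLB_ENTRY_BITS:

    #include <assert.h>
    #include <stddef.h>

    #define CPU_TLB_ENTRY_BITS 5    /* example: 32-byte entries */

    int main(void)
    {
        size_t n_entries = 256;     /* any power of two */
        /* fast->mask as stored: (n_entries - 1) << CPU_TLB_ENTRY_BITS */
        size_t mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;

        /* tlb_n_entries() recovers the entry count... */
        assert((mask >> CPU_TLB_ENTRY_BITS) + 1 == n_entries);
        /* ...and sizeof_tlb() the table size in bytes. */
        assert(mask + (1 << CPU_TLB_ENTRY_BITS)
               == n_entries * (1 << CPU_TLB_ENTRY_BITS));
        return 0;
    }
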
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

-static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
+static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
- return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
+ return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

-static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
+static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
- return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
+ return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
- size_t old_size = tlb_n_entries(env, mmu_idx);
+ size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
size_t rate;
size_t new_size = old_size;
int64_t now = get_clock_realtime();
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
env_tlb(env)->d[mmu_idx].large_page_addr = -1;
env_tlb(env)->d[mmu_idx].large_page_mask = -1;
env_tlb(env)->d[mmu_idx].vindex = 0;
- memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
+ memset(env_tlb(env)->f[mmu_idx].table, -1,
+ sizeof_tlb(&env_tlb(env)->f[mmu_idx]));
memset(env_tlb(env)->d[mmu_idx].vtable, -1,
sizeof(env_tlb(env)->d[0].vtable));
}
@@ -XXX,XX +XXX,XX @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
unsigned int i;
- unsigned int n = tlb_n_entries(env, mmu_idx);
+ unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

for (i = 0; i < n; i++) {
tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
--
2.20.1

cputlb: Hoist tlb portions in tlb_mmu_resize_locked

No functional change, but the smaller expressions make
the code easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 35 +++++++++++++++++------------------
1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)

/**
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
- * @env: CPU that owns the TLB
- * @mmu_idx: MMU index of the TLB
+ * @desc: The CPUTLBDesc portion of the TLB
+ * @fast: The CPUTLBDescFast portion of the same TLB
*
* Called with tlb_lock_held.
*
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)
* high), since otherwise we are likely to have a significant amount of
* conflict misses.
*/
-static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
- CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
- size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
+ size_t old_size = tlb_n_entries(fast);
size_t rate;
size_t new_size = old_size;
int64_t now = get_clock_realtime();
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
return;
}

- g_free(env_tlb(env)->f[mmu_idx].table);
- g_free(env_tlb(env)->d[mmu_idx].iotlb);
+ g_free(fast->table);
+ g_free(desc->iotlb);

tlb_window_reset(desc, now, 0);
/* desc->n_used_entries is cleared by the caller */
- env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
- env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
- env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
+ fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+ fast->table = g_try_new(CPUTLBEntry, new_size);
+ desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+
/*
* If the allocations fail, try smaller sizes. We just freed some
* memory, so going back to half of new_size has a good chance of working.
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
* allocations to fail though, so we progressively reduce the allocation
* size, aborting if we cannot even allocate the smallest TLB we support.
*/
- while (env_tlb(env)->f[mmu_idx].table == NULL ||
- env_tlb(env)->d[mmu_idx].iotlb == NULL) {
+ while (fast->table == NULL || desc->iotlb == NULL) {
if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
error_report("%s: %s", __func__, strerror(errno));
abort();
}
new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
- env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+ fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

- g_free(env_tlb(env)->f[mmu_idx].table);
- g_free(env_tlb(env)->d[mmu_idx].iotlb);
- env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
- env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
+ g_free(fast->table);
+ g_free(desc->iotlb);
+ fast->table = g_try_new(CPUTLBEntry, new_size);
+ desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
}
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
- tlb_mmu_resize_locked(env, mmu_idx);
+ tlb_mmu_resize_locked(&env_tlb(env)->d[mmu_idx], &env_tlb(env)->f[mmu_idx]);
env_tlb(env)->d[mmu_idx].n_used_entries = 0;
env_tlb(env)->d[mmu_idx].large_page_addr = -1;
env_tlb(env)->d[mmu_idx].large_page_mask = -1;
--
2.20.1

cputlb: Hoist tlb portions in tlb_flush_one_mmuidx_locked

No functional change, but the smaller expressions make
the code easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
- tlb_mmu_resize_locked(&env_tlb(env)->d[mmu_idx], &env_tlb(env)->f[mmu_idx]);
- env_tlb(env)->d[mmu_idx].n_used_entries = 0;
- env_tlb(env)->d[mmu_idx].large_page_addr = -1;
- env_tlb(env)->d[mmu_idx].large_page_mask = -1;
- env_tlb(env)->d[mmu_idx].vindex = 0;
- memset(env_tlb(env)->f[mmu_idx].table, -1,
- sizeof_tlb(&env_tlb(env)->f[mmu_idx]));
- memset(env_tlb(env)->d[mmu_idx].vtable, -1,
- sizeof(env_tlb(env)->d[0].vtable));
+ CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
+ CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
+
+ tlb_mmu_resize_locked(desc, fast);
+ desc->n_used_entries = 0;
+ desc->large_page_addr = -1;
+ desc->large_page_mask = -1;
+ desc->vindex = 0;
+ memset(fast->table, -1, sizeof_tlb(fast));
+ memset(desc->vtable, -1, sizeof(desc->vtable));
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
--
2.20.1

cputlb: Split out tlb_mmu_flush_locked

We will want to be able to flush a tlb without resizing.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
}
}

-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
+static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
- CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
- CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
-
- tlb_mmu_resize_locked(desc, fast);
desc->n_used_entries = 0;
desc->large_page_addr = -1;
desc->large_page_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
memset(desc->vtable, -1, sizeof(desc->vtable));
}

+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
+{
+ CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
+ CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
+
+ tlb_mmu_resize_locked(desc, fast);
+ tlb_mmu_flush_locked(desc, fast);
+}
+
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
env_tlb(env)->d[mmu_idx].n_used_entries++;
--
2.20.1

cputlb: Partially merge tlb_dyn_init into tlb_init

Merge into the only caller, but at the same time split
out tlb_mmu_init to initialize a single tlb entry.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 33 ++++++++++++++++-----------------
1 file changed, 16 insertions(+), 17 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
desc->window_max_entries = max_entries;
}

-static void tlb_dyn_init(CPUArchState *env)
-{
- int i;
-
- for (i = 0; i < NB_MMU_MODES; i++) {
- CPUTLBDesc *desc = &env_tlb(env)->d[i];
- size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
-
- tlb_window_reset(desc, get_clock_realtime(), 0);
- desc->n_used_entries = 0;
- env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
- env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
- env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
- }
-}
-
/**
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
* @desc: The CPUTLBDesc portion of the TLB
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
tlb_mmu_flush_locked(desc, fast);
}

+static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
+{
+ size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
+
+ tlb_window_reset(desc, now, 0);
+ desc->n_used_entries = 0;
+ fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
+ fast->table = g_new(CPUTLBEntry, n_entries);
+ desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+}
+
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
env_tlb(env)->d[mmu_idx].n_used_entries++;
@@ -XXX,XX +XXX,XX @@ static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
void tlb_init(CPUState *cpu)
{
CPUArchState *env = cpu->env_ptr;
+ int64_t now = get_clock_realtime();
+ int i;

qemu_spin_init(&env_tlb(env)->c.lock);

/* Ensure that cpu_reset performs a full flush. */
env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;

- tlb_dyn_init(env);
+ for (i = 0; i < NB_MMU_MODES; i++) {
+ tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
+ }
}

/* flush_all_helper: run fn across all cpus
--
2.20.1

cputlb: Initialize tlbs as flushed

There's little point in leaving these data structures half initialized,
and relying on a flush to be done during reset.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
fast->table = g_new(CPUTLBEntry, n_entries);
desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+ tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
@@ -XXX,XX +XXX,XX @@ void tlb_init(CPUState *cpu)

qemu_spin_init(&env_tlb(env)->c.lock);

- /* Ensure that cpu_reset performs a full flush. */
- env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
+ /* All tlbs are initialized flushed. */
+ env_tlb(env)->c.dirty = 0;

for (i = 0; i < NB_MMU_MODES; i++) {
tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
--
2.20.1

cputlb: Hoist timestamp outside of loops over tlbs

Do not call get_clock_realtime() in tlb_mmu_resize_locked,
but hoist it outside of any loop over a set of tlbs. There
are only two (indirect) callers, tlb_flush_by_mmuidx_async_work
and tlb_flush_page_locked, so this is not onerous.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
* high), since otherwise we are likely to have a significant amount of
* conflict misses.
*/
-static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
+ int64_t now)
{
size_t old_size = tlb_n_entries(fast);
size_t rate;
size_t new_size = old_size;
- int64_t now = get_clock_realtime();
int64_t window_len_ms = 100;
int64_t window_len_ns = window_len_ms * 1000 * 1000;
bool window_expired = now > desc->window_begin_ns + window_len_ns;
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
memset(desc->vtable, -1, sizeof(desc->vtable));
}

-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
+ int64_t now)
{
CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

- tlb_mmu_resize_locked(desc, fast);
+ tlb_mmu_resize_locked(desc, fast, now);
tlb_mmu_flush_locked(desc, fast);
}

@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
CPUArchState *env = cpu->env_ptr;
uint16_t asked = data.host_int;
uint16_t all_dirty, work, to_clean;
+ int64_t now = get_clock_realtime();

assert_cpu_is_self(cpu);

@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)

for (work = to_clean; work != 0; work &= work - 1) {
int mmu_idx = ctz32(work);
- tlb_flush_one_mmuidx_locked(env, mmu_idx);
+ tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
}

qemu_spin_unlock(&env_tlb(env)->c.lock);
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
tlb_debug("forcing full flush midx %d ("
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
midx, lp_addr, lp_mask);
- tlb_flush_one_mmuidx_locked(env, midx);
+ tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
} else {
if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
tlb_n_used_entries_dec(env, midx);
--
2.20.1

scripts/git.orderfile: Display decodetree before C source

From: Philippe Mathieu-Daudé <philmd@redhat.com>

To avoid scrolling each instruction when reviewing tcg
helpers written for the decodetree script, display the
.decode files (similar to header declarations) before
the C source (implementation of previous declarations).
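
(For anyone trying this locally: git applies such an order file via
"git diff -O scripts/git.orderfile", or persistently through the
diff.orderFile configuration key.)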
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20191230082856.30556-1-philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
scripts/git.orderfile | 3 +++
1 file changed, 3 insertions(+)

diff --git a/scripts/git.orderfile b/scripts/git.orderfile
index XXXXXXX..XXXXXXX 100644
--- a/scripts/git.orderfile
+++ b/scripts/git.orderfile
@@ -XXX,XX +XXX,XX @@ qga/*.json
# headers
*.h

+# decoding tree specification
+*.decode
+
# code
*.c
--
2.20.1