The following changes since commit 79b677d658d3d35e1e776826ac4abb28cdce69b8:

  Merge tag 'net-pull-request' of https://github.com/jasowang/qemu into staging (2023-02-21 11:28:31 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230221

for you to fetch changes up to dbd672c87f19949bb62bfb1fb3a97b9729fd7560:

  sysemu/os-win32: fix setjmp/longjmp on windows-arm64 (2023-02-21 13:45:48 -1000)

----------------------------------------------------------------
tcg: Allow first half of insn in ram, and second half in mmio
linux-user/sparc: SIGILL for unknown trap vectors
linux-user/microblaze: SIGILL for privileged insns
linux-user: Fix deadlock while exiting due to signal
target/microblaze: Add gdbstub xml
util: Adjust cacheflush for windows-arm64
include/sysemu/os-win32: Adjust setjmp/longjmp for windows-arm64

----------------------------------------------------------------
Ilya Leoshkevich (3):
      linux-user: Always exit from exclusive state in fork_end()
      cpus: Make {start,end}_exclusive() recursive
      linux-user/microblaze: Handle privileged exception

Pierrick Bouvier (2):
      util/cacheflush: fix cache on windows-arm64
      sysemu/os-win32: fix setjmp/longjmp on windows-arm64

Richard Henderson (3):
      accel/tcg: Allow the second page of an instruction to be MMIO
      linux-user/sparc: Raise SIGILL for all unhandled software traps
      target/microblaze: Add gdbstub xml

 include/hw/core/cpu.h                       |  4 +-
 include/sysemu/os-win32.h                   | 28 ++++++++++--
 target/microblaze/cpu.h                     |  2 +
 accel/tcg/translator.c                      | 12 +++++-
 cpus-common.c                               | 12 +++++-
 linux-user/main.c                           | 10 +++--
 linux-user/microblaze/cpu_loop.c            | 10 ++++-
 linux-user/sparc/cpu_loop.c                 |  8 ++++
 linux-user/syscall.c                        |  1 +
 target/microblaze/cpu.c                     |  7 ++-
 target/microblaze/gdbstub.c                 | 51 ++++++++++++++++------
 util/cacheflush.c                           | 14 ++++--
 configs/targets/microblaze-linux-user.mak   |  1 +
 configs/targets/microblaze-softmmu.mak      |  1 +
 configs/targets/microblazeel-linux-user.mak |  1 +
 configs/targets/microblazeel-softmmu.mak    |  1 +
 gdb-xml/microblaze-core.xml                 | 67 +++++++++++++++++++++++++++++
 gdb-xml/microblaze-stack-protect.xml        | 12 ++++++
 meson.build                                 | 21 +++++++++
 19 files changed, 229 insertions(+), 34 deletions(-)
 create mode 100644 gdb-xml/microblaze-core.xml
 create mode 100644 gdb-xml/microblaze-stack-protect.xml
If an instruction straddles a page boundary, and the first page
was ram, but the second page was MMIO, we would abort.  Handle
this as if both pages are MMIO, by setting the ram_addr_t for
the first page to -1.

Reported-by: Sid Manning <sidneym@quicinc.com>
Reported-by: Jørgen Hansen <Jorgen.Hansen@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/translator.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -XXX,XX +XXX,XX @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
     if (host == NULL) {
         tb_page_addr_t phys_page =
             get_page_addr_code_hostp(env, base, &db->host_addr[1]);
-        /* We cannot handle MMIO as second page. */
-        assert(phys_page != -1);
+
+        /*
+         * If the second page is MMIO, treat as if the first page
+         * was MMIO as well, so that we do not cache the TB.
+         */
+        if (unlikely(phys_page == -1)) {
+            tb_set_page_addr0(tb, -1);
+            return NULL;
+        }
+
         tb_set_page_addr1(tb, phys_page);
 #ifdef CONFIG_USER_ONLY
         page_protect(end);
--
2.34.1
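The straddle condition the patch handles is easy to state in isolation. A minimal, self-contained sketch of the test (PAGE_BITS and insn_straddles_page are illustrative names, not QEMU API; assumes 4 KiB target pages):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_BITS 12                                  /* assumed 4 KiB pages */
#define PAGE_MASK (~(((uint64_t)1 << PAGE_BITS) - 1))

/* An instruction straddles a page boundary when its first and last
   bytes fall on different pages. */
static bool insn_straddles_page(uint64_t pc, unsigned len)
{
    return (pc & PAGE_MASK) != ((pc + len - 1) & PAGE_MASK);
}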
The Linux kernel's trap tables vector all unassigned trap
numbers to BAD_TRAP, which then raises SIGILL.

Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reported-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/sparc/cpu_loop.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/linux-user/sparc/cpu_loop.c b/linux-user/sparc/cpu_loop.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/sparc/cpu_loop.c
+++ b/linux-user/sparc/cpu_loop.c
@@ -XXX,XX +XXX,XX @@ void cpu_loop (CPUSPARCState *env)
             cpu_exec_step_atomic(cs);
             break;
         default:
+            /*
+             * Most software trap numbers vector to BAD_TRAP.
+             * Handle anything not explicitly matched above.
+             */
+            if (trapnr >= TT_TRAP && trapnr <= TT_TRAP + 0x7f) {
+                force_sig_fault(TARGET_SIGILL, ILL_ILLTRP, env->pc);
+                break;
+            }
             fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
             cpu_dump_state(cs, stderr, 0);
             exit(EXIT_FAILURE);
--
2.34.1
From: Ilya Leoshkevich <iii@linux.ibm.com>

fork()ed processes currently start with
current_cpu->in_exclusive_context set, which is, strictly speaking, not
correct, but does not cause problems (even assertion failures).

With one of the next patches, the code begins to rely on this value, so
fix it by always calling end_exclusive() in fork_end().

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Message-Id: <20230214140829.45392-2-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/main.c    | 10 ++++++----
 linux-user/syscall.c |  1 +
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/linux-user/main.c b/linux-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -XXX,XX +XXX,XX @@ void fork_end(int child)
         }
         qemu_init_cpu_list();
         gdbserver_fork(thread_cpu);
-        /* qemu_init_cpu_list() takes care of reinitializing the
-         * exclusive state, so we don't need to end_exclusive() here.
-         */
     } else {
         cpu_list_unlock();
-        end_exclusive();
     }
+    /*
+     * qemu_init_cpu_list() reinitialized the child exclusive state, but we
+     * also need to keep current_cpu consistent, so call end_exclusive() for
+     * both child and parent.
+     */
+    end_exclusive();
 }
 
 __thread CPUState *thread_cpu;
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
         cpu_clone_regs_parent(env, flags);
         fork_end(0);
     }
+    g_assert(!cpu_in_exclusive_context(cpu));
     return ret;
 }
--
2.34.1
From: Ilya Leoshkevich <iii@linux.ibm.com>

Currently, dying to one of the core_dump_signal()s deadlocks, because
dump_core_and_abort() calls start_exclusive() two times: first via
stop_all_tasks(), and then via preexit_cleanup() ->
qemu_plugin_user_exit().

There are a number of ways to solve this: resume after dumping core;
check cpu_in_exclusive_context() in qemu_plugin_user_exit(); or make
{start,end}_exclusive() recursive.  Pick the last option, since it's
the most straightforward one.

Fixes: da91c1920242 ("linux-user: Clean up when exiting due to a signal")
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Message-Id: <20230214140829.45392-3-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h |  4 ++--
 cpus-common.c         | 12 ++++++++++--
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct CPUState {
     bool unplug;
     bool crash_occurred;
     bool exit_request;
-    bool in_exclusive_context;
+    int exclusive_context_count;
     uint32_t cflags_next_tb;
     /* updates protected by BQL */
     uint32_t interrupt_request;
@@ -XXX,XX +XXX,XX @@ void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data
  */
 static inline bool cpu_in_exclusive_context(const CPUState *cpu)
 {
-    return cpu->in_exclusive_context;
+    return cpu->exclusive_context_count;
 }
 
 /**
diff --git a/cpus-common.c b/cpus-common.c
index XXXXXXX..XXXXXXX 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -XXX,XX +XXX,XX @@ void start_exclusive(void)
     CPUState *other_cpu;
     int running_cpus;
 
+    if (current_cpu->exclusive_context_count) {
+        current_cpu->exclusive_context_count++;
+        return;
+    }
+
     qemu_mutex_lock(&qemu_cpu_list_lock);
     exclusive_idle();
 
@@ -XXX,XX +XXX,XX @@ void start_exclusive(void)
      */
     qemu_mutex_unlock(&qemu_cpu_list_lock);
 
-    current_cpu->in_exclusive_context = true;
+    current_cpu->exclusive_context_count = 1;
 }
 
 /* Finish an exclusive operation. */
 void end_exclusive(void)
 {
-    current_cpu->in_exclusive_context = false;
+    current_cpu->exclusive_context_count--;
+    if (current_cpu->exclusive_context_count) {
+        return;
+    }
 
     qemu_mutex_lock(&qemu_cpu_list_lock);
     qatomic_set(&pending_cpus, 0);
--
2.34.1
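The counter-based approach used above is the standard way to make a non-reentrant exclusive section recursive: keep a per-thread nesting depth and only acquire or release the underlying lock at the outermost level. A minimal sketch of the same idea outside QEMU (enter_exclusive/exit_exclusive and big_lock are hypothetical names, not the QEMU API):

#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int exclusive_depth;   /* per-thread nesting counter */

static void enter_exclusive(void)
{
    if (exclusive_depth++ > 0) {
        return;                 /* already held by this thread: just nest */
    }
    pthread_mutex_lock(&big_lock);
}

static void exit_exclusive(void)
{
    if (--exclusive_depth > 0) {
        return;                 /* still nested: keep holding the lock */
    }
    pthread_mutex_unlock(&big_lock);
}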
From: Ilya Leoshkevich <iii@linux.ibm.com>

Follow what the kernel's full_exception() is doing.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Message-Id: <20230214140829.45392-4-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/microblaze/cpu_loop.c | 10 ++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/linux-user/microblaze/cpu_loop.c b/linux-user/microblaze/cpu_loop.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/microblaze/cpu_loop.c
+++ b/linux-user/microblaze/cpu_loop.c
@@ -XXX,XX +XXX,XX @@
 
 void cpu_loop(CPUMBState *env)
 {
+    int trapnr, ret, si_code, sig;
     CPUState *cs = env_cpu(env);
-    int trapnr, ret, si_code;
 
     while (1) {
         cpu_exec_start(cs);
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUMBState *env)
             env->iflags &= ~(IMM_FLAG | D_FLAG);
             switch (env->esr & 31) {
             case ESR_EC_DIVZERO:
+                sig = TARGET_SIGFPE;
                 si_code = TARGET_FPE_INTDIV;
                 break;
             case ESR_EC_FPU:
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUMBState *env)
                  * if there's no recognized bit set.  Possibly this
                  * implies that si_code is 0, but follow the structure.
                  */
+                sig = TARGET_SIGFPE;
                 si_code = env->fsr;
                 if (si_code & FSR_IO) {
                     si_code = TARGET_FPE_FLTINV;
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUMBState *env)
                     si_code = TARGET_FPE_FLTRES;
                 }
                 break;
+            case ESR_EC_PRIVINSN:
+                sig = SIGILL;
+                si_code = ILL_PRVOPC;
+                break;
             default:
                 fprintf(stderr, "Unhandled hw-exception: 0x%x\n",
                         env->esr & ESR_EC_MASK);
                 cpu_dump_state(cs, stderr, 0);
                 exit(EXIT_FAILURE);
             }
-            force_sig_fault(TARGET_SIGFPE, si_code, env->pc);
+            force_sig_fault(sig, si_code, env->pc);
             break;
 
         case EXCP_DEBUG:
--
2.34.1
Mirroring the upstream gdb xml files, the two stack boundary
registers are separated out.

Reviewed-by: Edgar E. Iglesias <edgar@zeroasic.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/microblaze/cpu.h                     |  2 +
 target/microblaze/cpu.c                     |  7 ++-
 target/microblaze/gdbstub.c                 | 51 +++++++++++-----
 configs/targets/microblaze-linux-user.mak   |  1 +
 configs/targets/microblaze-softmmu.mak      |  1 +
 configs/targets/microblazeel-linux-user.mak |  1 +
 configs/targets/microblazeel-softmmu.mak    |  1 +
 gdb-xml/microblaze-core.xml                 | 67 +++++++++++++++++++++
 gdb-xml/microblaze-stack-protect.xml        | 12 ++++
 9 files changed, 128 insertions(+), 15 deletions(-)
 create mode 100644 gdb-xml/microblaze-core.xml
 create mode 100644 gdb-xml/microblaze-stack-protect.xml

diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -XXX,XX +XXX,XX @@ hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);
 int mb_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int mb_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+int mb_cpu_gdb_read_stack_protect(CPUArchState *cpu, GByteArray *buf, int reg);
+int mb_cpu_gdb_write_stack_protect(CPUArchState *cpu, uint8_t *buf, int reg);
 
 static inline uint32_t mb_cpu_read_msr(const CPUMBState *env)
 {
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/module.h"
 #include "hw/qdev-properties.h"
 #include "exec/exec-all.h"
+#include "exec/gdbstub.h"
 #include "fpu/softfloat-helpers.h"
 
 static const struct {
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_initfn(Object *obj)
     CPUMBState *env = &cpu->env;
 
     cpu_set_cpustate_pointers(cpu);
+    gdb_register_coprocessor(CPU(cpu), mb_cpu_gdb_read_stack_protect,
+                             mb_cpu_gdb_write_stack_protect, 2,
+                             "microblaze-stack-protect.xml", 0);
 
     set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
 
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
     cc->sysemu_ops = &mb_sysemu_ops;
 #endif
     device_class_set_props(dc, mb_properties);
-    cc->gdb_num_core_regs = 32 + 27;
+    cc->gdb_num_core_regs = 32 + 25;
+    cc->gdb_core_xml_file = "microblaze-core.xml";
 
     cc->disas_set_info = mb_disas_set_info;
     cc->tcg_ops = &mb_tcg_ops;
diff --git a/target/microblaze/gdbstub.c b/target/microblaze/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/gdbstub.c
+++ b/target/microblaze/gdbstub.c
@@ -XXX,XX +XXX,XX @@ enum {
     GDB_PVR0  = 32 + 6,
     GDB_PVR11 = 32 + 17,
     GDB_EDR   = 32 + 18,
-    GDB_SLR   = 32 + 25,
-    GDB_SHR   = 32 + 26,
+};
+
+enum {
+    GDB_SP_SHL,
+    GDB_SP_SHR,
 };
 
 int mb_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
@@ -XXX,XX +XXX,XX @@ int mb_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
     case GDB_EDR:
         val = env->edr;
         break;
-    case GDB_SLR:
-        val = env->slr;
-        break;
-    case GDB_SHR:
-        val = env->shr;
-        break;
     default:
         /* Other SRegs aren't modeled, so report a value of 0 */
         val = 0;
@@ -XXX,XX +XXX,XX @@ int mb_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
     return gdb_get_reg32(mem_buf, val);
 }
 
+int mb_cpu_gdb_read_stack_protect(CPUMBState *env, GByteArray *mem_buf, int n)
+{
+    uint32_t val;
+
+    switch (n) {
+    case GDB_SP_SHL:
+        val = env->slr;
+        break;
+    case GDB_SP_SHR:
+        val = env->shr;
+        break;
+    default:
+        return 0;
+    }
+    return gdb_get_reg32(mem_buf, val);
+}
+
 int mb_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
 {
     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
@@ -XXX,XX +XXX,XX @@ int mb_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
     case GDB_EDR:
         env->edr = tmp;
         break;
-    case GDB_SLR:
-        env->slr = tmp;
-        break;
-    case GDB_SHR:
-        env->shr = tmp;
-        break;
+    }
+    return 4;
+}
+
+int mb_cpu_gdb_write_stack_protect(CPUMBState *env, uint8_t *mem_buf, int n)
+{
+    switch (n) {
+    case GDB_SP_SHL:
+        env->slr = ldl_p(mem_buf);
+        break;
+    case GDB_SP_SHR:
+        env->shr = ldl_p(mem_buf);
+        break;
+    default:
+        return 0;
     }
     return 4;
 }
diff --git a/configs/targets/microblaze-linux-user.mak b/configs/targets/microblaze-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/microblaze-linux-user.mak
+++ b/configs/targets/microblaze-linux-user.mak
@@ -XXX,XX +XXX,XX @@ TARGET_SYSTBL_ABI=common
 TARGET_SYSTBL=syscall.tbl
 TARGET_BIG_ENDIAN=y
 TARGET_HAS_BFLT=y
+TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml
diff --git a/configs/targets/microblaze-softmmu.mak b/configs/targets/microblaze-softmmu.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/microblaze-softmmu.mak
+++ b/configs/targets/microblaze-softmmu.mak
@@ -XXX,XX +XXX,XX @@ TARGET_ARCH=microblaze
 TARGET_BIG_ENDIAN=y
 TARGET_SUPPORTS_MTTCG=y
 TARGET_NEED_FDT=y
+TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml
diff --git a/configs/targets/microblazeel-linux-user.mak b/configs/targets/microblazeel-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/microblazeel-linux-user.mak
+++ b/configs/targets/microblazeel-linux-user.mak
@@ -XXX,XX +XXX,XX @@ TARGET_ARCH=microblaze
 TARGET_SYSTBL_ABI=common
 TARGET_SYSTBL=syscall.tbl
 TARGET_HAS_BFLT=y
+TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml
diff --git a/configs/targets/microblazeel-softmmu.mak b/configs/targets/microblazeel-softmmu.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/microblazeel-softmmu.mak
+++ b/configs/targets/microblazeel-softmmu.mak
@@ -XXX,XX +XXX,XX @@
 TARGET_ARCH=microblaze
 TARGET_SUPPORTS_MTTCG=y
 TARGET_NEED_FDT=y
+TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml
diff --git a/gdb-xml/microblaze-core.xml b/gdb-xml/microblaze-core.xml
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/gdb-xml/microblaze-core.xml
@@ -XXX,XX +XXX,XX @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2008 Free Software Foundation, Inc.
+
+     Copying and distribution of this file, with or without modification,
+     are permitted in any medium without royalty provided the copyright
+     notice and this notice are preserved.  -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.microblaze.core">
+  <reg name="r0" bitsize="32" regnum="0"/>
+  <reg name="r1" bitsize="32" type="data_ptr"/>
+  <reg name="r2" bitsize="32"/>
+  <reg name="r3" bitsize="32"/>
+  <reg name="r4" bitsize="32"/>
+  <reg name="r5" bitsize="32"/>
+  <reg name="r6" bitsize="32"/>
+  <reg name="r7" bitsize="32"/>
+  <reg name="r8" bitsize="32"/>
+  <reg name="r9" bitsize="32"/>
+  <reg name="r10" bitsize="32"/>
+  <reg name="r11" bitsize="32"/>
+  <reg name="r12" bitsize="32"/>
+  <reg name="r13" bitsize="32"/>
+  <reg name="r14" bitsize="32"/>
+  <reg name="r15" bitsize="32"/>
+  <reg name="r16" bitsize="32"/>
+  <reg name="r17" bitsize="32"/>
+  <reg name="r18" bitsize="32"/>
+  <reg name="r19" bitsize="32"/>
+  <reg name="r20" bitsize="32"/>
+  <reg name="r21" bitsize="32"/>
+  <reg name="r22" bitsize="32"/>
+  <reg name="r23" bitsize="32"/>
+  <reg name="r24" bitsize="32"/>
+  <reg name="r25" bitsize="32"/>
+  <reg name="r26" bitsize="32"/>
+  <reg name="r27" bitsize="32"/>
+  <reg name="r28" bitsize="32"/>
+  <reg name="r29" bitsize="32"/>
+  <reg name="r30" bitsize="32"/>
+  <reg name="r31" bitsize="32"/>
+  <reg name="rpc" bitsize="32" type="code_ptr"/>
+  <reg name="rmsr" bitsize="32"/>
+  <reg name="rear" bitsize="32"/>
+  <reg name="resr" bitsize="32"/>
+  <reg name="rfsr" bitsize="32"/>
+  <reg name="rbtr" bitsize="32"/>
+  <reg name="rpvr0" bitsize="32"/>
+  <reg name="rpvr1" bitsize="32"/>
+  <reg name="rpvr2" bitsize="32"/>
+  <reg name="rpvr3" bitsize="32"/>
+  <reg name="rpvr4" bitsize="32"/>
+  <reg name="rpvr5" bitsize="32"/>
+  <reg name="rpvr6" bitsize="32"/>
+  <reg name="rpvr7" bitsize="32"/>
+  <reg name="rpvr8" bitsize="32"/>
+  <reg name="rpvr9" bitsize="32"/>
+  <reg name="rpvr10" bitsize="32"/>
+  <reg name="rpvr11" bitsize="32"/>
+  <reg name="redr" bitsize="32"/>
+  <reg name="rpid" bitsize="32"/>
+  <reg name="rzpr" bitsize="32"/>
+  <reg name="rtlbx" bitsize="32"/>
+  <reg name="rtlbsx" bitsize="32"/>
+  <reg name="rtlblo" bitsize="32"/>
+  <reg name="rtlbhi" bitsize="32"/>
+</feature>
diff --git a/gdb-xml/microblaze-stack-protect.xml b/gdb-xml/microblaze-stack-protect.xml
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/gdb-xml/microblaze-stack-protect.xml
@@ -XXX,XX +XXX,XX @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2008 Free Software Foundation, Inc.
+
+     Copying and distribution of this file, with or without modification,
+     are permitted in any medium without royalty provided the copyright
+     notice and this notice are preserved.  -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.microblaze.stack-protect">
+  <reg name="rslr" bitsize="32"/>
+  <reg name="rshr" bitsize="32"/>
+</feature>
--
2.34.1
From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

ctr_el0 access is privileged on this platform and fails as an illegal
instruction.

Windows does not offer a way to flush data cache from userspace, and
only FlushInstructionCache is available in Windows API.

The generic implementation of flush_idcache_range uses
__builtin___clear_cache, which already uses the FlushInstructionCache
function, so we rely on that.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230221153006.20300-2-pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 util/cacheflush.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/util/cacheflush.c b/util/cacheflush.c
index XXXXXXX..XXXXXXX 100644
--- a/util/cacheflush.c
+++ b/util/cacheflush.c
@@ -XXX,XX +XXX,XX @@ static void sys_cache_info(int *isize, int *dsize)
 static bool have_coherent_icache;
 #endif
 
-#if defined(__aarch64__) && !defined(CONFIG_DARWIN)
-/* Apple does not expose CTR_EL0, so we must use system interfaces. */
+#if defined(__aarch64__) && !defined(CONFIG_DARWIN) && !defined(CONFIG_WIN32)
+/*
+ * Apple does not expose CTR_EL0, so we must use system interfaces.
+ * Windows neither, but we use a generic implementation of flush_idcache_range
+ * in this case.
+ */
 static uint64_t save_ctr_el0;
 static void arch_cache_info(int *isize, int *dsize)
 {
@@ -XXX,XX +XXX,XX @@ static void __attribute__((constructor)) init_cache_info(void)
 
 /* Caches are coherent and do not require flushing; symbol inline. */
CPUArchState *env = cpu->env_ptr;
44
-#elif defined(__aarch64__)
61
+ int64_t now = get_clock_realtime();
45
+#elif defined(__aarch64__) && !defined(CONFIG_WIN32)
62
+ int i;
46
+/*
63
47
+ * For Windows, we use generic implementation of flush_idcache_range, that
64
qemu_spin_init(&env_tlb(env)->c.lock);
48
+ * performs a call to FlushInstructionCache, through __builtin___clear_cache.
65
49
+ */
66
/* Ensure that cpu_reset performs a full flush. */
50
67
env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
51
#ifdef CONFIG_DARWIN
68
52
/* Apple does not expose CTR_EL0, so we must use system interfaces. */
69
- tlb_dyn_init(env);
70
+ for (i = 0; i < NB_MMU_MODES; i++) {
71
+ tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
72
+ }
73
}
74
75
/* flush_all_helper: run fn across all cpus
76
--
53
--
77
2.20.1
54
2.34.1
78
79
diff view generated by jsdifflib
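The cacheflush commit message leans on the "generic implementation of
flush_idcache_range" without quoting it. Roughly, that generic path is a
thin wrapper over the __builtin___clear_cache builtin (a real GCC/Clang
builtin), which mingw lowers to the FlushInstructionCache API on
windows-arm64. A sketch from memory of what such a wrapper looks like,
assuming QEMU's split rx/rw JIT mapping convention; the exact code in
util/cacheflush.c may differ:

#include <stddef.h>
#include <stdint.h>

static void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    if (rw != rx) {
        /* The code was written through the rw alias; clear that view too. */
        __builtin___clear_cache((char *)rw, (char *)rw + len);
    }
    /* Make the executable view coherent before jumping into it. */
    __builtin___clear_cache((char *)rx, (char *)rx + len);
}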
We will want to be able to flush a tlb without resizing.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
     }
 }

-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
+static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
 {
-    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
-    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
-
-    tlb_mmu_resize_locked(desc, fast);
     desc->n_used_entries = 0;
     desc->large_page_addr = -1;
     desc->large_page_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
     memset(desc->vtable, -1, sizeof(desc->vtable));
 }

+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
+{
+    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
+    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
+
+    tlb_mmu_resize_locked(desc, fast);
+    tlb_mmu_flush_locked(desc, fast);
+}
+
 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
 {
     env_tlb(env)->d[mmu_idx].n_used_entries++;
--
2.20.1

From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

The Windows implementation of setjmp/longjmp lives in
C:/WINDOWS/system32/ucrtbase.dll. Alas, on arm64, it seems to *always*
perform stack unwinding, which crashes from generated code.

By using the alternative implementation built into mingw, we avoid
stack unwinding, which fixes the crash when calling longjmp.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Acked-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230221153006.20300-3-pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/sysemu/os-win32.h | 28 ++++++++++++++++++++++++----
 meson.build               | 21 +++++++++++++++++++++
 2 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/os-win32.h
+++ b/include/sysemu/os-win32.h
@@ -XXX,XX +XXX,XX @@ typedef struct sockaddr_un {
 extern "C" {
 #endif

-#if defined(_WIN64)
-/* On w64, setjmp is implemented by _setjmp which needs a second parameter.
+#if defined(__aarch64__)
+/*
+ * On windows-arm64, setjmp is available in only one variant, and longjmp
+ * always does stack unwinding, which crashes with generated code. Thus,
+ * we use another implementation of setjmp (not the Windows one), coming
+ * from mingw, which never performs stack unwinding.
+ */
+#undef setjmp
+#undef longjmp
+/*
+ * These functions are not declared in setjmp.h because __aarch64__ defines
+ * setjmp to _setjmpex instead. However, they are still defined in
+ * libmingwex.a, which gets linked automatically.
+ */
+extern int __mingw_setjmp(jmp_buf);
+extern void __attribute__((noreturn)) __mingw_longjmp(jmp_buf, int);
+#define setjmp(env) __mingw_setjmp(env)
+#define longjmp(env, val) __mingw_longjmp(env, val)
+#elif defined(_WIN64)
+/*
+ * On windows-x64, setjmp is implemented by _setjmp, which needs a second
+ * parameter.
  * If this parameter is NULL, longjmp does no stack unwinding.
  * That is what we need for QEMU. Passing the value of register rsp (default)
- * lets longjmp try a stack unwinding which will crash with generated code. */
+ * lets longjmp try a stack unwinding which will crash with generated code.
+ */
 # undef setjmp
 # define setjmp(env) _setjmp(env, NULL)
-#endif
+#endif /* __aarch64__ */
 /* QEMU uses sigsetjmp()/siglongjmp() as the portable way to specify
  * "longjmp and don't touch the signal masks". Since we know that the
  * savemask parameter will always be zero we can safely define these
diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ if targetos == 'windows'
   }''', name: '_lock_file and _unlock_file'))
 endif

+if targetos == 'windows'
+  mingw_has_setjmp_longjmp = cc.links('''
+    #include <setjmp.h>
+    int main(void) {
+      /*
+       * These functions are not available in the setjmp header, but may
+       * be available at link time, from libmingwex.a.
+       */
+      extern int __mingw_setjmp(jmp_buf);
+      extern void __attribute__((noreturn)) __mingw_longjmp(jmp_buf, int);
+      jmp_buf env;
+      __mingw_setjmp(env);
+      __mingw_longjmp(env, 0);
+    }
+  ''', name: 'mingw setjmp and longjmp')
+
+  if cpu == 'aarch64' and not mingw_has_setjmp_longjmp
+    error('mingw must provide setjmp/longjmp for windows-arm64')
+  endif
+endif
+
 ########################
 # Target configuration #
 ########################
--
2.34.1
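Why the non-unwinding variant matters: QEMU longjmps out of TCG-generated
code back to the outer execution loop, and generated frames carry no unwind
tables for ucrtbase's unwinder to walk. A minimal, portable illustration of
that control flow; the "generated code" is faked with an ordinary C call,
so this runs anywhere and is not the QEMU cpu_exec loop itself:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf exec_env;

static void pretend_generated_code(void)
{
    /* A real translation block would have no unwind info for this frame. */
    longjmp(exec_env, 1);
}

int main(void)
{
    if (setjmp(exec_env) == 0) {
        pretend_generated_code();  /* never returns normally */
    } else {
        printf("returned to the execution loop\n");
    }
    return 0;
}

The meson probe in the patch complements this: cc.links() verifies at
configure time that libmingwex.a actually exports __mingw_setjmp and
__mingw_longjmp, failing the build early on arm64 toolchains that lack them.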