Pulling together some cleanups, fixes, and preparatory tci stuff.
Most of this has been reviewed, but not all.

Those lacking review:

01-tcg-aarch64-Fix-constant-subtraction-in-tcg_out_adds.patch
02-tcg-aarch64-Fix-I3617_CMLE0.patch
03-tcg-aarch64-Fix-generation-of-scalar-vector-operatio.patch
04-tcg-tci-Use-exec-cpu_ldst.h-interfaces.patch
06-tcg-Manage-splitwx-in-tc_ptr_to_region_tree-by-hand.patch
23-accel-tcg-rename-tb_lookup__cpu_state-and-hoist-stat.patch
24-accel-tcg-move-CF_CLUSTER-calculation-to-curr_cflags.patch
25-accel-tcg-drop-the-use-of-CF_HASH_MASK-and-rename-pa.patch
26-include-exec-lightly-re-arrange-TranslationBlock.patch
27-accel-tcg-Precompute-curr_cflags-into-cpu-tcg_cflags.patch

Alex, the last patch is a re-write and extension of one that
you did review.

r~

Alex Bennée (4):
      accel/tcg: rename tb_lookup__cpu_state and hoist state extraction
      accel/tcg: move CF_CLUSTER calculation to curr_cflags
      accel/tcg: drop the use of CF_HASH_MASK and rename params
      include/exec: lightly re-arrange TranslationBlock

Richard Henderson (23):
      tcg/aarch64: Fix constant subtraction in tcg_out_addsub2
      tcg/aarch64: Fix I3617_CMLE0
      tcg/aarch64: Fix generation of "scalar" vector operations
      tcg/tci: Use exec/cpu_ldst.h interfaces
      tcg: Split out tcg_raise_tb_overflow
      tcg: Manage splitwx in tc_ptr_to_region_tree by hand
      tcg/tci: Merge identical cases in generation (arithmetic opcodes)
      tcg/tci: Merge identical cases in generation (exchange opcodes)
      tcg/tci: Merge identical cases in generation (deposit opcode)
      tcg/tci: Merge identical cases in generation (conditional opcodes)
      tcg/tci: Merge identical cases in generation (load/store opcodes)
      tcg/tci: Remove tci_read_r8
      tcg/tci: Remove tci_read_r8s
      tcg/tci: Remove tci_read_r16
      tcg/tci: Remove tci_read_r16s
      tcg/tci: Remove tci_read_r32
      tcg/tci: Remove tci_read_r32s
      tcg/tci: Reduce use of tci_read_r64
      tcg/tci: Merge basic arithmetic operations
      tcg/tci: Merge extension operations
      tcg/tci: Merge bswap operations
      tcg/tci: Merge mov, not and neg operations
      accel/tcg: Precompute curr_cflags into cpu->tcg_cflags

 accel/tcg/tcg-accel-ops.h       |   1 +
 include/exec/exec-all.h         |  19 +-
 include/exec/tb-lookup.h        |  26 +-
 include/hw/core/cpu.h           |   2 +
 accel/tcg/cpu-exec.c            |  34 ++-
 accel/tcg/tcg-accel-ops-mttcg.c |   3 +-
 accel/tcg/tcg-accel-ops-rr.c    |   2 +-
 accel/tcg/tcg-accel-ops.c       |   8 +
 accel/tcg/tcg-runtime.c         |   6 +-
 accel/tcg/translate-all.c       |  18 +-
 linux-user/main.c               |   1 +
 linux-user/sh4/signal.c         |   8 +-
 linux-user/syscall.c            |  18 +-
 softmmu/physmem.c               |   2 +-
 tcg/tcg.c                       |  29 +-
 tcg/tci.c                       | 526 ++++++++++----------------------
 tcg/aarch64/tcg-target.c.inc    | 229 +++++++++++---
 tcg/tci/tcg-target.c.inc        | 204 +++++--------
 18 files changed, 526 insertions(+), 610 deletions(-)

-- 
2.25.1

An hppa guest executing

0x000000000000e05c:  ldil L%10000,r4
0x000000000000e060:  ldo 0(r4),r4
0x000000000000e064:  sub r3,r4,sp

produces

 ---- 000000000000e064 000000000000e068
 sub2_i32 tmp0,tmp4,r3,$0x1,$0x10000,$0x0

after folding and constant propagation. Then we hit

tcg-target.c.inc:640: tcg_out_insn_3401: Assertion `aimm <= 0xfff' failed.

because aimm is in fact -16, but unsigned.

The ((bl < 0) ^ sub) condition which negates bl is incorrect and will
always lead to this abort. If the constant is positive, sub will make
it negative; if the constant is negative, sub will keep it negative.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
     }
 }
 
-static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
-                                   TCGReg rh, TCGReg al, TCGReg ah,
-                                   tcg_target_long bl, tcg_target_long bh,
-                                   bool const_bl, bool const_bh, bool sub)
+static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
+                            TCGReg rh, TCGReg al, TCGReg ah,
+                            tcg_target_long bl, tcg_target_long bh,
+                            bool const_bl, bool const_bh, bool sub)
 {
     TCGReg orig_rl = rl;
     AArch64Insn insn;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
     }
 
     if (const_bl) {
-        insn = I3401_ADDSI;
-        if ((bl < 0) ^ sub) {
-            insn = I3401_SUBSI;
+        if (bl < 0) {
             bl = -bl;
+            insn = sub ? I3401_ADDSI : I3401_SUBSI;
+        } else {
+            insn = sub ? I3401_SUBSI : I3401_ADDSI;
         }
+
         if (unlikely(al == TCG_REG_XZR)) {
             /* ??? We want to allow al to be zero for the benefit of
                negation via subtraction.  However, that leaves open the
-- 
2.25.1

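To see how the old test misfires, here is a minimal sketch (not part of
the patch; values taken from the trace above, types simplified) of the
removed selection logic with sub = true and bl = 0x10000:

    /* Sketch of the old, incorrect opcode selection. */
    tcg_target_long bl = 0x10000;   /* positive constant from sub2_i32 */
    bool sub = true;                /* the guest operation subtracts */
    AArch64Insn insn = I3401_ADDSI;
    if ((bl < 0) ^ sub) {           /* false ^ true: taken */
        insn = I3401_SUBSI;
        bl = -bl;                   /* bl is now -0x10000 */
    }
    /* tcg_out_insn_3401() then splits off the low twelve bits, leaving
       aimm = -16, and the (unsigned) aimm <= 0xfff assertion fires. */

With the fix, sub = true and a positive bl select I3401_SUBSI and leave
bl positive, so the immediate encodes normally.
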
Fix a typo in the encoding of the cmle (zero) instruction.

Fixes: 14e4c1e2355 ("tcg/aarch64: Add vector operations")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
     I3617_CMEQ0     = 0x0e209800,
     I3617_CMLT0     = 0x0e20a800,
     I3617_CMGE0     = 0x2e208800,
-    I3617_CMLE0     = 0x2e20a800,
+    I3617_CMLE0     = 0x2e209800,
     I3617_NOT       = 0x2e205800,
     I3617_ABS       = 0x0e20b800,
     I3617_NEG       = 0x2e20b800,
-- 
2.25.1

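For context, the I3617 values are base opcodes that the emitter ORs with
the Q, size and register fields, which is why the wrong constant
assembled silently as a different comparison. A rough sketch of such an
emitter (modeled on tcg_out_insn_3612 in the next patch; the exact field
layout here is an assumption, not copied from the source):

    static void tcg_out_insn_3617(TCGContext *s, AArch64Insn insn, bool q,
                                  unsigned size, TCGReg rd, TCGReg rn)
    {
        /* base opcode | Q | element size | source | destination */
        tcg_out32(s, insn | q << 30 | (size << 22)
                  | (rn & 0x1f) << 5 | (rd & 0x1f));
    }
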
For some vector operations, "1D" is not a valid type, and there
are separate instructions for the 64-bit scalar operation.

Tested-by: Stefan Weil <sw@weilnetz.de>
Buglink: https://bugs.launchpad.net/qemu/+bug/1916112
Fixes: 14e4c1e2355 ("tcg/aarch64: Add vector operations")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 211 ++++++++++++++++++++++++++++++-----
 1 file changed, 181 insertions(+), 30 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
     I3606_BIC       = 0x2f001400,
     I3606_ORR       = 0x0f001400,
 
+    /* AdvSIMD scalar shift by immediate */
+    I3609_SSHR      = 0x5f000400,
+    I3609_SSRA      = 0x5f001400,
+    I3609_SHL       = 0x5f005400,
+    I3609_USHR      = 0x7f000400,
+    I3609_USRA      = 0x7f001400,
+    I3609_SLI       = 0x7f005400,
+
+    /* AdvSIMD scalar three same */
+    I3611_SQADD     = 0x5e200c00,
+    I3611_SQSUB     = 0x5e202c00,
+    I3611_CMGT      = 0x5e203400,
+    I3611_CMGE      = 0x5e203c00,
+    I3611_SSHL      = 0x5e204400,
+    I3611_ADD       = 0x5e208400,
+    I3611_CMTST     = 0x5e208c00,
+    I3611_UQADD     = 0x7e200c00,
+    I3611_UQSUB     = 0x7e202c00,
+    I3611_CMHI      = 0x7e203400,
+    I3611_CMHS      = 0x7e203c00,
+    I3611_USHL      = 0x7e204400,
+    I3611_SUB       = 0x7e208400,
+    I3611_CMEQ      = 0x7e208c00,
+
+    /* AdvSIMD scalar two-reg misc */
+    I3612_CMGT0     = 0x5e208800,
+    I3612_CMEQ0     = 0x5e209800,
+    I3612_CMLT0     = 0x5e20a800,
+    I3612_ABS       = 0x5e20b800,
+    I3612_CMGE0     = 0x7e208800,
+    I3612_CMLE0     = 0x7e209800,
+    I3612_NEG       = 0x7e20b800,
+
     /* AdvSIMD shift by immediate */
     I3614_SSHR      = 0x0f000400,
     I3614_SSRA      = 0x0f001400,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q,
               | (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5);
 }
 
+static void tcg_out_insn_3609(TCGContext *s, AArch64Insn insn,
+                              TCGReg rd, TCGReg rn, unsigned immhb)
+{
+    tcg_out32(s, insn | immhb << 16 | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
+static void tcg_out_insn_3611(TCGContext *s, AArch64Insn insn,
+                              unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
+{
+    tcg_out32(s, insn | (size << 22) | (rm & 0x1f) << 16
+              | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
+static void tcg_out_insn_3612(TCGContext *s, AArch64Insn insn,
+                              unsigned size, TCGReg rd, TCGReg rn)
+{
+    tcg_out32(s, insn | (size << 22) | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
 static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q,
                               TCGReg rd, TCGReg rn, unsigned immhb)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                            unsigned vecl, unsigned vece,
                            const TCGArg *args, const int *const_args)
 {
-    static const AArch64Insn cmp_insn[16] = {
+    static const AArch64Insn cmp_vec_insn[16] = {
         [TCG_COND_EQ] = I3616_CMEQ,
         [TCG_COND_GT] = I3616_CMGT,
         [TCG_COND_GE] = I3616_CMGE,
         [TCG_COND_GTU] = I3616_CMHI,
         [TCG_COND_GEU] = I3616_CMHS,
     };
-    static const AArch64Insn cmp0_insn[16] = {
+    static const AArch64Insn cmp_scalar_insn[16] = {
+        [TCG_COND_EQ] = I3611_CMEQ,
+        [TCG_COND_GT] = I3611_CMGT,
+        [TCG_COND_GE] = I3611_CMGE,
+        [TCG_COND_GTU] = I3611_CMHI,
+        [TCG_COND_GEU] = I3611_CMHS,
+    };
+    static const AArch64Insn cmp0_vec_insn[16] = {
         [TCG_COND_EQ] = I3617_CMEQ0,
         [TCG_COND_GT] = I3617_CMGT0,
         [TCG_COND_GE] = I3617_CMGE0,
         [TCG_COND_LT] = I3617_CMLT0,
         [TCG_COND_LE] = I3617_CMLE0,
     };
+    static const AArch64Insn cmp0_scalar_insn[16] = {
+        [TCG_COND_EQ] = I3612_CMEQ0,
+        [TCG_COND_GT] = I3612_CMGT0,
+        [TCG_COND_GE] = I3612_CMGE0,
+        [TCG_COND_LT] = I3612_CMLT0,
+        [TCG_COND_LE] = I3612_CMLE0,
+    };
 
     TCGType type = vecl + TCG_TYPE_V64;
     unsigned is_q = vecl;
+    bool is_scalar = !is_q && vece == MO_64;
     TCGArg a0, a1, a2, a3;
     int cmode, imm8;
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
         break;
     case INDEX_op_add_vec:
-        tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
+        if (is_scalar) {
+            tcg_out_insn(s, 3611, ADD, vece, a0, a1, a2);
+        } else {
+            tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
+        }
         break;
     case INDEX_op_sub_vec:
-        tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
+        if (is_scalar) {
+            tcg_out_insn(s, 3611, SUB, vece, a0, a1, a2);
+        } else {
+            tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
+        }
         break;
     case INDEX_op_mul_vec:
         tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2);
         break;
     case INDEX_op_neg_vec:
-        tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
+        if (is_scalar) {
+            tcg_out_insn(s, 3612, NEG, vece, a0, a1);
+        } else {
+            tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
+        }
         break;
     case INDEX_op_abs_vec:
-        tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
+        if (is_scalar) {
+            tcg_out_insn(s, 3612, ABS, vece, a0, a1);
+        } else {
+            tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
+        }
         break;
     case INDEX_op_and_vec:
         if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2);
         break;
     case INDEX_op_ssadd_vec:
-        tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
+        if (is_scalar) {
+            tcg_out_insn(s, 3611, SQADD, vece, a0, a1, a2);
+        } else {
+            tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
+        }
         break;
     case INDEX_op_sssub_vec:
-        tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
+        if (is_scalar) {
+            tcg_out_insn(s, 3611, SQSUB, vece, a0, a1, a2);
+        } else {
+            tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
+        }
         break;
     case INDEX_op_usadd_vec:
-        tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
+        if (is_scalar) {
+            tcg_out_insn(s, 3611, UQADD, vece, a0, a1, a2);
+        } else {
+            tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
+        }
         break;
     case INDEX_op_ussub_vec:
-        tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
+        if (is_scalar) {
+            tcg_out_insn(s, 3611, UQSUB, vece, a0, a1, a2);
+        } else {
+            tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
+        }
         break;
     case INDEX_op_smax_vec:
         tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1);
         break;
     case INDEX_op_shli_vec:
-        tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
+        if (is_scalar) {
+            tcg_out_insn(s, 3609, SHL, a0, a1, a2 + (8 << vece));
+        } else {
+            tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
+        }
         break;
     case INDEX_op_shri_vec:
-        tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
+        if (is_scalar) {
+            tcg_out_insn(s, 3609, USHR, a0, a1, (16 << vece) - a2);
+        } else {
+            tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
+        }
         break;
     case INDEX_op_sari_vec:
-        tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
+        if (is_scalar) {
+            tcg_out_insn(s, 3609, SSHR, a0, a1, (16 << vece) - a2);
+        } else {
+            tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
+        }
         break;
     case INDEX_op_aa64_sli_vec:
-        tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
+        if (is_scalar) {
+            tcg_out_insn(s, 3609, SLI, a0, a2, args[3] + (8 << vece));
+        } else {
+            tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
+        }
         break;
     case INDEX_op_shlv_vec:
-        tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
+        if (is_scalar) {
+            tcg_out_insn(s, 3611, USHL, vece, a0, a1, a2);
+        } else {
+            tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
+        }
         break;
     case INDEX_op_aa64_sshl_vec:
-        tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
+        if (is_scalar) {
+            tcg_out_insn(s, 3611, SSHL, vece, a0, a1, a2);
+        } else {
+            tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
+        }
         break;
     case INDEX_op_cmp_vec:
         {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
 
             if (cond == TCG_COND_NE) {
                 if (const_args[2]) {
-                    tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
+                    if (is_scalar) {
+                        tcg_out_insn(s, 3611, CMTST, vece, a0, a1, a1);
+                    } else {
+                        tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
+                    }
                 } else {
-                    tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
+                    if (is_scalar) {
+                        tcg_out_insn(s, 3611, CMEQ, vece, a0, a1, a2);
+                    } else {
+                        tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
+                    }
                     tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0);
                 }
             } else {
                 if (const_args[2]) {
-                    insn = cmp0_insn[cond];
-                    if (insn) {
-                        tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
-                        break;
+                    if (is_scalar) {
+                        insn = cmp0_scalar_insn[cond];
+                        if (insn) {
+                            tcg_out_insn_3612(s, insn, vece, a0, a1);
+                            break;
+                        }
+                    } else {
+                        insn = cmp0_vec_insn[cond];
+                        if (insn) {
+                            tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
+                            break;
+                        }
                     }
                     tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
                     a2 = TCG_VEC_TMP;
                 }
-                insn = cmp_insn[cond];
-                if (insn == 0) {
-                    TCGArg t;
-                    t = a1, a1 = a2, a2 = t;
-                    cond = tcg_swap_cond(cond);
-                    insn = cmp_insn[cond];
-                    tcg_debug_assert(insn != 0);
+                if (is_scalar) {
+                    insn = cmp_scalar_insn[cond];
+                    if (insn == 0) {
+                        TCGArg t;
+                        t = a1, a1 = a2, a2 = t;
+                        cond = tcg_swap_cond(cond);
+                        insn = cmp_scalar_insn[cond];
+                        tcg_debug_assert(insn != 0);
+                    }
+                    tcg_out_insn_3611(s, insn, vece, a0, a1, a2);
+                } else {
+                    insn = cmp_vec_insn[cond];
+                    if (insn == 0) {
+                        TCGArg t;
+                        t = a1, a1 = a2, a2 = t;
+                        cond = tcg_swap_cond(cond);
+                        insn = cmp_vec_insn[cond];
+                        tcg_debug_assert(insn != 0);
+                    }
+                    tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
                 }
-                tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
             }
         }
         break;
-- 
2.25.1

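As a concrete illustration of the "1D" restriction (the mnemonics below
are illustrative; the ARM ARM is authoritative): a 64-bit single-lane
saturating add has no vector form, only a scalar one:

    /* sqadd v0.1d, v1.1d, v2.1d  -- invalid: 1D is not an allowed
                                     vector arrangement here
       sqadd d0, d1, d2           -- valid: AdvSIMD scalar three-same */

which is why the is_scalar path (!is_q && vece == MO_64) routes through
the new I3609/I3611/I3612 tables instead of the vector encodings.
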
Use the provided cpu_ldst.h interfaces. This fixes the build vs
the unconverted uses of g2h(), adds missed memory trace events,
and correctly recognizes when a SIGSEGV belongs to the guest via
set_helper_retaddr().

Fixes: 3e8f1628e864
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 73 +++++++++++++++++++++----------------------------------
 1 file changed, 28 insertions(+), 45 deletions(-)

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
     return result;
 }
 
-#ifdef CONFIG_SOFTMMU
-# define qemu_ld_ub \
-    helper_ret_ldub_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_leuw \
-    helper_le_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_leul \
-    helper_le_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_leq \
-    helper_le_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_beuw \
-    helper_be_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_beul \
-    helper_be_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_beq \
-    helper_be_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_st_b(X) \
-    helper_ret_stb_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_lew(X) \
-    helper_le_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_lel(X) \
-    helper_le_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_leq(X) \
-    helper_le_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_bew(X) \
-    helper_be_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_bel(X) \
-    helper_be_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_beq(X) \
-    helper_be_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-#else
-# define qemu_ld_ub      ldub_p(g2h(taddr))
-# define qemu_ld_leuw    lduw_le_p(g2h(taddr))
-# define qemu_ld_leul    (uint32_t)ldl_le_p(g2h(taddr))
-# define qemu_ld_leq     ldq_le_p(g2h(taddr))
-# define qemu_ld_beuw    lduw_be_p(g2h(taddr))
-# define qemu_ld_beul    (uint32_t)ldl_be_p(g2h(taddr))
-# define qemu_ld_beq     ldq_be_p(g2h(taddr))
-# define qemu_st_b(X)    stb_p(g2h(taddr), X)
-# define qemu_st_lew(X)  stw_le_p(g2h(taddr), X)
-# define qemu_st_lel(X)  stl_le_p(g2h(taddr), X)
-# define qemu_st_leq(X)  stq_le_p(g2h(taddr), X)
-# define qemu_st_bew(X)  stw_be_p(g2h(taddr), X)
-# define qemu_st_bel(X)  stl_be_p(g2h(taddr), X)
-# define qemu_st_beq(X)  stq_be_p(g2h(taddr), X)
-#endif
+#define qemu_ld_ub \
+    cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_leuw \
+    cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_leul \
+    cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_leq \
+    cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_beuw \
+    cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_beul \
+    cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_beq \
+    cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_b(X) \
+    cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_lew(X) \
+    cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_lel(X) \
+    cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_leq(X) \
+    cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_bew(X) \
+    cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_bel(X) \
+    cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_beq(X) \
+    cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
 
 #if TCG_TARGET_REG_BITS == 64
 # define CASE_32_64(x) \
-- 
2.25.1

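As background, a brief sketch of the oi plumbing these macros rely on
(helper names as found in the tcg headers of this era; treat the details
as illustrative rather than quoted):

    /* oi packs the MemOp and the mmu index into one operand:
     *     TCGMemOpIdx oi = make_memop_idx(memop, mmu_idx);
     * so each accessor recovers the index with get_mmuidx(oi), and
     * (uintptr_t)tb_ptr is passed as the return address.  In user-only
     * builds that return address is what lets the SIGSEGV machinery
     * attribute a fault to guest code via set_helper_retaddr(). */
    unsigned mmu_idx = get_mmuidx(oi);
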
Allow other places in tcg to restart with a smaller tb.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void set_jmp_reset_offset(TCGContext *s, int which)
     s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
 }
 
+/* Signal overflow, starting over with fewer guest insns. */
+static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s)
+{
+    siglongjmp(s->jmp_trans, -2);
+}
+
 #define C_PFX1(P, A)                    P##A
 #define C_PFX2(P, A, B)                 P##A##_##B
 #define C_PFX3(P, A, B, C)              P##A##_##B##_##C
@@ -XXX,XX +XXX,XX @@ static TCGTemp *tcg_temp_alloc(TCGContext *s)
     int n = s->nb_temps++;
 
     if (n >= TCG_MAX_TEMPS) {
-        /* Signal overflow, starting over with fewer guest insns. */
-        siglongjmp(s->jmp_trans, -2);
+        tcg_raise_tb_overflow(s);
     }
     return memset(&s->temps[n], 0, sizeof(TCGTemp));
 }
-- 
2.25.1

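The point of the split is reuse: any translation-time limit check can
now bail out the same way. A hypothetical caller, purely to show the
shape (the limit name here is invented, not from the tree):

    /* Hypothetical: another allocator hitting a hard cap mid-translation. */
    if (s->nb_labels >= MAX_TB_LABELS) {   /* MAX_TB_LABELS is made up */
        tcg_raise_tb_overflow(s);          /* longjmp out; retry smaller */
    }
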
The use in tcg_tb_lookup is given a random pc that comes from the pc
of a signal handler. Do not assert that the pointer is already within
the code gen buffer at all, much less the writable mirror of it.

Fixes: db0c51a3803
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_region_trees_init(void)
     }
 }
 
-static struct tcg_region_tree *tc_ptr_to_region_tree(const void *cp)
+static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
 {
-    void *p = tcg_splitwx_to_rw(cp);
     size_t region_idx;
 
+    /*
+     * Like tcg_splitwx_to_rw, with no assert.  The pc may come from
+     * a signal handler over which the caller has no control.
+     */
+    if (!in_code_gen_buffer(p)) {
+        p -= tcg_splitwx_diff;
+        if (!in_code_gen_buffer(p)) {
+            return NULL;
+        }
+    }
+
     if (p < region.start_aligned) {
         region_idx = 0;
     } else {
@@ -XXX,XX +XXX,XX @@ void tcg_tb_insert(TranslationBlock *tb)
 {
     struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
 
+    g_assert(rt != NULL);
     qemu_mutex_lock(&rt->lock);
     g_tree_insert(rt->tree, &tb->tc, tb);
     qemu_mutex_unlock(&rt->lock);
@@ -XXX,XX +XXX,XX @@ void tcg_tb_remove(TranslationBlock *tb)
 {
     struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
 
+    g_assert(rt != NULL);
     qemu_mutex_lock(&rt->lock);
     g_tree_remove(rt->tree, &tb->tc);
     qemu_mutex_unlock(&rt->lock);
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
     TranslationBlock *tb;
     struct tb_tc s = { .ptr = (void *)tc_ptr };
 
+    if (rt == NULL) {
+        return NULL;
+    }
+
     qemu_mutex_lock(&rt->lock);
     tb = g_tree_lookup(rt->tree, &s);
     qemu_mutex_unlock(&rt->lock);
-- 
2.25.1

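For reference, a sketch of the split-W^X layout the fallback assumes
(a sketch, not the qemu source; the offset direction follows the
tcg_splitwx_to_rw conversion being replaced above):

    /* With --enable-split-wx the code buffer is mapped twice, the two
     * views separated by a fixed offset:
     *     rx = rw + tcg_splitwx_diff   (executable view; a signal pc)
     *     rw = rx - tcg_splitwx_diff   (writable view; what tcg writes)
     * in_code_gen_buffer() tests the writable view, hence the single
     * `p -= tcg_splitwx_diff` retry before giving up with NULL. */
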
1 | There's little point in leaving these data structures half initialized, | 1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge |
---|---|---|---|
2 | and relying on a flush to be done during reset. | 2 | cases that are identical between 32-bit and 64-bit hosts. |
3 | 3 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | 5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 1/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-2-f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 11 | --- |
8 | accel/tcg/cputlb.c | 5 +++-- | 12 | tcg/tci/tcg-target.c.inc | 85 +++++++++++++++++----------------------- |
9 | 1 file changed, 3 insertions(+), 2 deletions(-) | 13 | 1 file changed, 37 insertions(+), 48 deletions(-) |
10 | 14 | ||
11 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc |
12 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/accel/tcg/cputlb.c | 17 | --- a/tcg/tci/tcg-target.c.inc |
14 | +++ b/accel/tcg/cputlb.c | 18 | +++ b/tcg/tci/tcg-target.c.inc |
15 | @@ -XXX,XX +XXX,XX @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) | 19 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg) |
16 | fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; | 20 | old_code_ptr[1] = s->code_ptr - old_code_ptr; |
17 | fast->table = g_new(CPUTLBEntry, n_entries); | ||
18 | desc->iotlb = g_new(CPUIOTLBEntry, n_entries); | ||
19 | + tlb_mmu_flush_locked(desc, fast); | ||
20 | } | 21 | } |
21 | 22 | ||
22 | static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) | 23 | +#if TCG_TARGET_REG_BITS == 64 |
23 | @@ -XXX,XX +XXX,XX @@ void tlb_init(CPUState *cpu) | 24 | +# define CASE_32_64(x) \ |
24 | 25 | + case glue(glue(INDEX_op_, x), _i64): \ | |
25 | qemu_spin_init(&env_tlb(env)->c.lock); | 26 | + case glue(glue(INDEX_op_, x), _i32): |
26 | 27 | +# define CASE_64(x) \ | |
27 | - /* Ensure that cpu_reset performs a full flush. */ | 28 | + case glue(glue(INDEX_op_, x), _i64): |
28 | - env_tlb(env)->c.dirty = ALL_MMUIDX_BITS; | 29 | +#else |
29 | + /* All tlbs are initialized flushed. */ | 30 | +# define CASE_32_64(x) \ |
30 | + env_tlb(env)->c.dirty = 0; | 31 | + case glue(glue(INDEX_op_, x), _i32): |
31 | 32 | +# define CASE_64(x) | |
32 | for (i = 0; i < NB_MMU_MODES; i++) { | 33 | +#endif |
33 | tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now); | 34 | + |
35 | static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
36 | const int *const_args) | ||
37 | { | ||
38 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
39 | case INDEX_op_exit_tb: | ||
40 | tcg_out64(s, args[0]); | ||
41 | break; | ||
42 | + | ||
43 | case INDEX_op_goto_tb: | ||
44 | if (s->tb_jmp_insn_offset) { | ||
45 | /* Direct jump method. */ | ||
46 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
47 | tcg_debug_assert(args[2] == (int32_t)args[2]); | ||
48 | tcg_out32(s, args[2]); | ||
49 | break; | ||
50 | - case INDEX_op_add_i32: | ||
51 | - case INDEX_op_sub_i32: | ||
52 | - case INDEX_op_mul_i32: | ||
53 | - case INDEX_op_and_i32: | ||
54 | - case INDEX_op_andc_i32: /* Optional (TCG_TARGET_HAS_andc_i32). */ | ||
55 | - case INDEX_op_eqv_i32: /* Optional (TCG_TARGET_HAS_eqv_i32). */ | ||
56 | - case INDEX_op_nand_i32: /* Optional (TCG_TARGET_HAS_nand_i32). */ | ||
57 | - case INDEX_op_nor_i32: /* Optional (TCG_TARGET_HAS_nor_i32). */ | ||
58 | - case INDEX_op_or_i32: | ||
59 | - case INDEX_op_orc_i32: /* Optional (TCG_TARGET_HAS_orc_i32). */ | ||
60 | - case INDEX_op_xor_i32: | ||
61 | - case INDEX_op_shl_i32: | ||
62 | - case INDEX_op_shr_i32: | ||
63 | - case INDEX_op_sar_i32: | ||
64 | - case INDEX_op_rotl_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */ | ||
65 | - case INDEX_op_rotr_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */ | ||
66 | + | ||
67 | + CASE_32_64(add) | ||
68 | + CASE_32_64(sub) | ||
69 | + CASE_32_64(mul) | ||
70 | + CASE_32_64(and) | ||
71 | + CASE_32_64(or) | ||
72 | + CASE_32_64(xor) | ||
73 | + CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */ | ||
74 | + CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */ | ||
75 | + CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */ | ||
76 | + CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */ | ||
77 | + CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */ | ||
78 | + CASE_32_64(shl) | ||
79 | + CASE_32_64(shr) | ||
80 | + CASE_32_64(sar) | ||
81 | + CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */ | ||
82 | + CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */ | ||
83 | + CASE_32_64(div) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
84 | + CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
85 | + CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
86 | + CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
87 | tcg_out_r(s, args[0]); | ||
88 | tcg_out_r(s, args[1]); | ||
89 | tcg_out_r(s, args[2]); | ||
90 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
91 | break; | ||
92 | |||
93 | #if TCG_TARGET_REG_BITS == 64 | ||
94 | - case INDEX_op_add_i64: | ||
95 | - case INDEX_op_sub_i64: | ||
96 | - case INDEX_op_mul_i64: | ||
97 | - case INDEX_op_and_i64: | ||
98 | - case INDEX_op_andc_i64: /* Optional (TCG_TARGET_HAS_andc_i64). */ | ||
99 | - case INDEX_op_eqv_i64: /* Optional (TCG_TARGET_HAS_eqv_i64). */ | ||
100 | - case INDEX_op_nand_i64: /* Optional (TCG_TARGET_HAS_nand_i64). */ | ||
101 | - case INDEX_op_nor_i64: /* Optional (TCG_TARGET_HAS_nor_i64). */ | ||
102 | - case INDEX_op_or_i64: | ||
103 | - case INDEX_op_orc_i64: /* Optional (TCG_TARGET_HAS_orc_i64). */ | ||
104 | - case INDEX_op_xor_i64: | ||
105 | - case INDEX_op_shl_i64: | ||
106 | - case INDEX_op_shr_i64: | ||
107 | - case INDEX_op_sar_i64: | ||
108 | - case INDEX_op_rotl_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */ | ||
109 | - case INDEX_op_rotr_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */ | ||
110 | - case INDEX_op_div_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
111 | - case INDEX_op_divu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
112 | - case INDEX_op_rem_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
113 | - case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
114 | - tcg_out_r(s, args[0]); | ||
115 | - tcg_out_r(s, args[1]); | ||
116 | - tcg_out_r(s, args[2]); | ||
117 | - break; | ||
118 | case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */ | ||
119 | tcg_out_r(s, args[0]); | ||
120 | tcg_out_r(s, args[1]); | ||
121 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
122 | tcg_out_r(s, args[0]); | ||
123 | tcg_out_r(s, args[1]); | ||
124 | break; | ||
125 | - case INDEX_op_div_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
126 | - case INDEX_op_divu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
127 | - case INDEX_op_rem_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
128 | - case INDEX_op_remu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
129 | - tcg_out_r(s, args[0]); | ||
130 | - tcg_out_r(s, args[1]); | ||
131 | - tcg_out_r(s, args[2]); | ||
132 | - break; | ||
133 | + | ||
134 | #if TCG_TARGET_REG_BITS == 32 | ||
135 | case INDEX_op_add2_i32: | ||
136 | case INDEX_op_sub2_i32: | ||
137 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
138 | } | ||
139 | tcg_out_i(s, *args++); | ||
140 | break; | ||
141 | + | ||
142 | case INDEX_op_mb: | ||
143 | break; | ||
144 | + | ||
145 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | ||
146 | case INDEX_op_mov_i64: | ||
147 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | ||
34 | -- | 148 | -- |
35 | 2.20.1 | 149 | 2.25.1 |
36 | 150 | ||
New patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
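For readers landing on this patch first: CASE_32_64 and CASE_64 are
defined earlier in the series. A sketch of their assumed shape, using
QEMU's glue() token-pasting helper:

    #if TCG_TARGET_REG_BITS == 64
    # define CASE_32_64(x) \
            case glue(glue(INDEX_op_, x), _i64): \
            case glue(glue(INDEX_op_, x), _i32):
    # define CASE_64(x) \
            case glue(glue(INDEX_op_, x), _i64):
    #else
    # define CASE_32_64(x) \
            case glue(glue(INDEX_op_, x), _i32):
    # define CASE_64(x)
    #endif

So CASE_32_64(neg) emits the case labels for INDEX_op_neg_i32 and, on
a 64-bit host, INDEX_op_neg_i64, while CASE_64(x) expands to nothing
on 32-bit hosts.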
1 | 3 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 2/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-3-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 35 ++++++++++++++--------------------- | ||
13 | 1 file changed, 14 insertions(+), 21 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | tcg_out8(s, args[2]); /* condition */ | ||
21 | tci_out_label(s, arg_label(args[3])); | ||
22 | break; | ||
23 | - case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */ | ||
24 | - case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */ | ||
25 | - case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */ | ||
26 | - case INDEX_op_not_i64: /* Optional (TCG_TARGET_HAS_not_i64). */ | ||
27 | - case INDEX_op_neg_i64: /* Optional (TCG_TARGET_HAS_neg_i64). */ | ||
28 | - case INDEX_op_ext8s_i64: /* Optional (TCG_TARGET_HAS_ext8s_i64). */ | ||
29 | - case INDEX_op_ext8u_i64: /* Optional (TCG_TARGET_HAS_ext8u_i64). */ | ||
30 | - case INDEX_op_ext16s_i64: /* Optional (TCG_TARGET_HAS_ext16s_i64). */ | ||
31 | - case INDEX_op_ext16u_i64: /* Optional (TCG_TARGET_HAS_ext16u_i64). */ | ||
32 | - case INDEX_op_ext32s_i64: /* Optional (TCG_TARGET_HAS_ext32s_i64). */ | ||
33 | - case INDEX_op_ext32u_i64: /* Optional (TCG_TARGET_HAS_ext32u_i64). */ | ||
34 | - case INDEX_op_ext_i32_i64: | ||
35 | - case INDEX_op_extu_i32_i64: | ||
36 | #endif /* TCG_TARGET_REG_BITS == 64 */ | ||
37 | - case INDEX_op_neg_i32: /* Optional (TCG_TARGET_HAS_neg_i32). */ | ||
38 | - case INDEX_op_not_i32: /* Optional (TCG_TARGET_HAS_not_i32). */ | ||
39 | - case INDEX_op_ext8s_i32: /* Optional (TCG_TARGET_HAS_ext8s_i32). */ | ||
40 | - case INDEX_op_ext16s_i32: /* Optional (TCG_TARGET_HAS_ext16s_i32). */ | ||
41 | - case INDEX_op_ext8u_i32: /* Optional (TCG_TARGET_HAS_ext8u_i32). */ | ||
42 | - case INDEX_op_ext16u_i32: /* Optional (TCG_TARGET_HAS_ext16u_i32). */ | ||
43 | - case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */ | ||
44 | - case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */ | ||
45 | + | ||
46 | + CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */ | ||
47 | + CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */ | ||
48 | + CASE_32_64(ext8s) /* Optional (TCG_TARGET_HAS_ext8s_*). */ | ||
49 | + CASE_32_64(ext8u) /* Optional (TCG_TARGET_HAS_ext8u_*). */ | ||
50 | + CASE_32_64(ext16s) /* Optional (TCG_TARGET_HAS_ext16s_*). */ | ||
51 | + CASE_32_64(ext16u) /* Optional (TCG_TARGET_HAS_ext16u_*). */ | ||
52 | + CASE_64(ext32s) /* Optional (TCG_TARGET_HAS_ext32s_i64). */ | ||
53 | + CASE_64(ext32u) /* Optional (TCG_TARGET_HAS_ext32u_i64). */ | ||
54 | + CASE_64(ext_i32) | ||
55 | + CASE_64(extu_i32) | ||
56 | + CASE_32_64(bswap16) /* Optional (TCG_TARGET_HAS_bswap16_*). */ | ||
57 | + CASE_32_64(bswap32) /* Optional (TCG_TARGET_HAS_bswap32_*). */ | ||
58 | + CASE_64(bswap64) /* Optional (TCG_TARGET_HAS_bswap64_i64). */ | ||
59 | tcg_out_r(s, args[0]); | ||
60 | tcg_out_r(s, args[1]); | ||
61 | break; | ||
62 | -- | ||
63 | 2.25.1 | ||
64 | |||
New patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
1 | 3 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 3/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-4-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 12 ++---------- | ||
13 | 1 file changed, 2 insertions(+), 10 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | tcg_out_r(s, args[1]); | ||
21 | tcg_out_r(s, args[2]); | ||
22 | break; | ||
23 | - case INDEX_op_deposit_i32: /* Optional (TCG_TARGET_HAS_deposit_i32). */ | ||
24 | + | ||
25 | + CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */ | ||
26 | tcg_out_r(s, args[0]); | ||
27 | tcg_out_r(s, args[1]); | ||
28 | tcg_out_r(s, args[2]); | ||
29 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
30 | break; | ||
31 | |||
32 | #if TCG_TARGET_REG_BITS == 64 | ||
33 | - case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */ | ||
34 | - tcg_out_r(s, args[0]); | ||
35 | - tcg_out_r(s, args[1]); | ||
36 | - tcg_out_r(s, args[2]); | ||
37 | - tcg_debug_assert(args[3] <= UINT8_MAX); | ||
38 | - tcg_out8(s, args[3]); | ||
39 | - tcg_debug_assert(args[4] <= UINT8_MAX); | ||
40 | - tcg_out8(s, args[4]); | ||
41 | - break; | ||
42 | case INDEX_op_brcond_i64: | ||
43 | tcg_out_r(s, args[0]); | ||
44 | tcg_out_r(s, args[1]); | ||
45 | -- | ||
46 | 2.25.1 | ||
47 | |||
New patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
1 | 3 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 4/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-5-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 23 ++++++----------------- | ||
13 | 1 file changed, 6 insertions(+), 17 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | } | ||
21 | set_jmp_reset_offset(s, args[0]); | ||
22 | break; | ||
23 | + | ||
24 | case INDEX_op_br: | ||
25 | tci_out_label(s, arg_label(args[0])); | ||
26 | break; | ||
27 | - case INDEX_op_setcond_i32: | ||
28 | + | ||
29 | + CASE_32_64(setcond) | ||
30 | tcg_out_r(s, args[0]); | ||
31 | tcg_out_r(s, args[1]); | ||
32 | tcg_out_r(s, args[2]); | ||
33 | tcg_out8(s, args[3]); /* condition */ | ||
34 | break; | ||
35 | + | ||
36 | #if TCG_TARGET_REG_BITS == 32 | ||
37 | case INDEX_op_setcond2_i32: | ||
38 | /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */ | ||
39 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
40 | tcg_out_r(s, args[4]); | ||
41 | tcg_out8(s, args[5]); /* condition */ | ||
42 | break; | ||
43 | -#elif TCG_TARGET_REG_BITS == 64 | ||
44 | - case INDEX_op_setcond_i64: | ||
45 | - tcg_out_r(s, args[0]); | ||
46 | - tcg_out_r(s, args[1]); | ||
47 | - tcg_out_r(s, args[2]); | ||
48 | - tcg_out8(s, args[3]); /* condition */ | ||
49 | - break; | ||
50 | #endif | ||
51 | case INDEX_op_ld8u_i32: | ||
52 | case INDEX_op_ld8s_i32: | ||
53 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
54 | tcg_out8(s, args[4]); | ||
55 | break; | ||
56 | |||
57 | -#if TCG_TARGET_REG_BITS == 64 | ||
58 | - case INDEX_op_brcond_i64: | ||
59 | + CASE_32_64(brcond) | ||
60 | tcg_out_r(s, args[0]); | ||
61 | tcg_out_r(s, args[1]); | ||
62 | tcg_out8(s, args[2]); /* condition */ | ||
63 | tci_out_label(s, arg_label(args[3])); | ||
64 | break; | ||
65 | -#endif /* TCG_TARGET_REG_BITS == 64 */ | ||
66 | |||
67 | CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */ | ||
68 | CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */ | ||
69 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
70 | tcg_out_r(s, args[3]); | ||
71 | break; | ||
72 | #endif | ||
73 | - case INDEX_op_brcond_i32: | ||
74 | - tcg_out_r(s, args[0]); | ||
75 | - tcg_out_r(s, args[1]); | ||
76 | - tcg_out8(s, args[2]); /* condition */ | ||
77 | - tci_out_label(s, arg_label(args[3])); | ||
78 | - break; | ||
79 | + | ||
80 | case INDEX_op_qemu_ld_i32: | ||
81 | tcg_out_r(s, *args++); | ||
82 | tcg_out_r(s, *args++); | ||
83 | -- | ||
84 | 2.25.1 | ||
85 | |||
New patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
1 | 3 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 5/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-6-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 49 ++++++++++++---------------------------- | ||
13 | 1 file changed, 14 insertions(+), 35 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | tcg_out8(s, args[5]); /* condition */ | ||
21 | break; | ||
22 | #endif | ||
23 | - case INDEX_op_ld8u_i32: | ||
24 | - case INDEX_op_ld8s_i32: | ||
25 | - case INDEX_op_ld16u_i32: | ||
26 | - case INDEX_op_ld16s_i32: | ||
27 | + | ||
28 | + CASE_32_64(ld8u) | ||
29 | + CASE_32_64(ld8s) | ||
30 | + CASE_32_64(ld16u) | ||
31 | + CASE_32_64(ld16s) | ||
32 | case INDEX_op_ld_i32: | ||
33 | - case INDEX_op_st8_i32: | ||
34 | - case INDEX_op_st16_i32: | ||
35 | + CASE_64(ld32u) | ||
36 | + CASE_64(ld32s) | ||
37 | + CASE_64(ld) | ||
38 | + CASE_32_64(st8) | ||
39 | + CASE_32_64(st16) | ||
40 | case INDEX_op_st_i32: | ||
41 | - case INDEX_op_ld8u_i64: | ||
42 | - case INDEX_op_ld8s_i64: | ||
43 | - case INDEX_op_ld16u_i64: | ||
44 | - case INDEX_op_ld16s_i64: | ||
45 | - case INDEX_op_ld32u_i64: | ||
46 | - case INDEX_op_ld32s_i64: | ||
47 | - case INDEX_op_ld_i64: | ||
48 | - case INDEX_op_st8_i64: | ||
49 | - case INDEX_op_st16_i64: | ||
50 | - case INDEX_op_st32_i64: | ||
51 | - case INDEX_op_st_i64: | ||
52 | + CASE_64(st32) | ||
53 | + CASE_64(st) | ||
54 | stack_bounds_check(args[1], args[2]); | ||
55 | tcg_out_r(s, args[0]); | ||
56 | tcg_out_r(s, args[1]); | ||
57 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
58 | #endif | ||
59 | |||
60 | case INDEX_op_qemu_ld_i32: | ||
61 | - tcg_out_r(s, *args++); | ||
62 | - tcg_out_r(s, *args++); | ||
63 | - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { | ||
64 | - tcg_out_r(s, *args++); | ||
65 | - } | ||
66 | - tcg_out_i(s, *args++); | ||
67 | - break; | ||
68 | - case INDEX_op_qemu_ld_i64: | ||
69 | - tcg_out_r(s, *args++); | ||
70 | - if (TCG_TARGET_REG_BITS == 32) { | ||
71 | - tcg_out_r(s, *args++); | ||
72 | - } | ||
73 | - tcg_out_r(s, *args++); | ||
74 | - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { | ||
75 | - tcg_out_r(s, *args++); | ||
76 | - } | ||
77 | - tcg_out_i(s, *args++); | ||
78 | - break; | ||
79 | case INDEX_op_qemu_st_i32: | ||
80 | tcg_out_r(s, *args++); | ||
81 | tcg_out_r(s, *args++); | ||
82 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
83 | } | ||
84 | tcg_out_i(s, *args++); | ||
85 | break; | ||
86 | + | ||
87 | + case INDEX_op_qemu_ld_i64: | ||
88 | case INDEX_op_qemu_st_i64: | ||
89 | tcg_out_r(s, *args++); | ||
90 | if (TCG_TARGET_REG_BITS == 32) { | ||
91 | -- | ||
92 | 2.25.1 | ||
93 | |||
1 | Do not call get_clock_realtime() in tlb_mmu_resize_locked, | 1 | Use explicit casts for ext8u opcodes, and allow truncation |
---|---|---|---|
2 | but hoist outside of any loop over a set of tlbs. There are | 2 | to happen with the store for st8 opcodes.
3 | only two (indirect) callers, tlb_flush_by_mmuidx_async_work | ||
4 | and tlb_flush_page_locked, so not onerous. | ||
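The st8/ext8u truncation mentioned above is safe because a store
through a uint8_t pointer already discards the high bits. A minimal
standalone sketch, with uint64_t standing in for tcg_target_ulong and
made-up values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t t0 = 0x1234;   /* full-width register contents */
        uint8_t mem;

        mem = t0;               /* st8: the store itself truncates */
        assert(mem == 0x34);

        t0 = (uint8_t)t0;       /* ext8u: explicit cast on write-back */
        assert(t0 == 0x34);
        return 0;
    }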
5 | 3 | ||
6 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
7 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | ||
8 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 6 | --- |
11 | accel/tcg/cputlb.c | 14 ++++++++------ | 7 | tcg/tci.c | 23 +++++------------------ |
12 | 1 file changed, 8 insertions(+), 6 deletions(-) | 8 | 1 file changed, 5 insertions(+), 18 deletions(-) |
13 | 9 | ||
14 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 10 | diff --git a/tcg/tci.c b/tcg/tci.c |
15 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/accel/tcg/cputlb.c | 12 | --- a/tcg/tci.c |
17 | +++ b/accel/tcg/cputlb.c | 13 | +++ b/tcg/tci.c |
18 | @@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, | 14 | @@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) |
19 | * high), since otherwise we are likely to have a significant amount of | 15 | } |
20 | * conflict misses. | 16 | #endif |
21 | */ | 17 | |
22 | -static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) | 18 | -static uint8_t tci_read_reg8(const tcg_target_ulong *regs, TCGReg index) |
23 | +static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast, | 19 | -{ |
24 | + int64_t now) | 20 | - return (uint8_t)tci_read_reg(regs, index); |
21 | -} | ||
22 | - | ||
23 | static uint16_t tci_read_reg16(const tcg_target_ulong *regs, TCGReg index) | ||
25 | { | 24 | { |
26 | size_t old_size = tlb_n_entries(fast); | 25 | return (uint16_t)tci_read_reg(regs, index); |
27 | size_t rate; | 26 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) |
28 | size_t new_size = old_size; | 27 | return value; |
29 | - int64_t now = get_clock_realtime(); | ||
30 | int64_t window_len_ms = 100; | ||
31 | int64_t window_len_ns = window_len_ms * 1000 * 1000; | ||
32 | bool window_expired = now > desc->window_begin_ns + window_len_ns; | ||
33 | @@ -XXX,XX +XXX,XX @@ static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) | ||
34 | memset(desc->vtable, -1, sizeof(desc->vtable)); | ||
35 | } | 28 | } |
36 | 29 | ||
37 | -static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx) | 30 | -/* Read indexed register (8 bit) from bytecode. */ |
38 | +static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx, | 31 | -static uint8_t tci_read_r8(const tcg_target_ulong *regs, const uint8_t **tb_ptr) |
39 | + int64_t now) | 32 | -{ |
40 | { | 33 | - uint8_t value = tci_read_reg8(regs, **tb_ptr); |
41 | CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; | 34 | - *tb_ptr += 1; |
42 | CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx]; | 35 | - return value; |
43 | 36 | -} | |
44 | - tlb_mmu_resize_locked(desc, fast); | 37 | - |
45 | + tlb_mmu_resize_locked(desc, fast, now); | 38 | #if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 |
46 | tlb_mmu_flush_locked(desc, fast); | 39 | /* Read indexed register (8 bit signed) from bytecode. */ |
47 | } | 40 | static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr) |
48 | 41 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | |
49 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) | 42 | tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2)); |
50 | CPUArchState *env = cpu->env_ptr; | 43 | break; |
51 | uint16_t asked = data.host_int; | 44 | CASE_32_64(st8) |
52 | uint16_t all_dirty, work, to_clean; | 45 | - t0 = tci_read_r8(regs, &tb_ptr); |
53 | + int64_t now = get_clock_realtime(); | 46 | + t0 = tci_read_r(regs, &tb_ptr); |
54 | 47 | t1 = tci_read_r(regs, &tb_ptr); | |
55 | assert_cpu_is_self(cpu); | 48 | t2 = tci_read_s32(&tb_ptr); |
56 | 49 | *(uint8_t *)(t1 + t2) = t0; | |
57 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) | 50 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, |
58 | 51 | #if TCG_TARGET_HAS_ext8u_i32 | |
59 | for (work = to_clean; work != 0; work &= work - 1) { | 52 | case INDEX_op_ext8u_i32: |
60 | int mmu_idx = ctz32(work); | 53 | t0 = *tb_ptr++; |
61 | - tlb_flush_one_mmuidx_locked(env, mmu_idx); | 54 | - t1 = tci_read_r8(regs, &tb_ptr); |
62 | + tlb_flush_one_mmuidx_locked(env, mmu_idx, now); | 55 | - tci_write_reg(regs, t0, t1); |
63 | } | 56 | + t1 = tci_read_r(regs, &tb_ptr); |
64 | 57 | + tci_write_reg(regs, t0, (uint8_t)t1); | |
65 | qemu_spin_unlock(&env_tlb(env)->c.lock); | 58 | break; |
66 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx, | 59 | #endif |
67 | tlb_debug("forcing full flush midx %d (" | 60 | #if TCG_TARGET_HAS_ext16u_i32 |
68 | TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", | 61 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, |
69 | midx, lp_addr, lp_mask); | 62 | #if TCG_TARGET_HAS_ext8u_i64 |
70 | - tlb_flush_one_mmuidx_locked(env, midx); | 63 | case INDEX_op_ext8u_i64: |
71 | + tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); | 64 | t0 = *tb_ptr++; |
72 | } else { | 65 | - t1 = tci_read_r8(regs, &tb_ptr); |
73 | if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) { | 66 | - tci_write_reg(regs, t0, t1); |
74 | tlb_n_used_entries_dec(env, midx); | 67 | + t1 = tci_read_r(regs, &tb_ptr); |
68 | + tci_write_reg(regs, t0, (uint8_t)t1); | ||
69 | break; | ||
70 | #endif | ||
71 | #if TCG_TARGET_HAS_ext8s_i64 | ||
75 | -- | 72 | -- |
76 | 2.20.1 | 73 | 2.25.1 |
77 | 74 | ||
1 | No functional change, but the smaller expressions make | 1 | Use explicit casts for ext8s opcodes. |
---|---|---|---|
2 | the code easier to read. | ||
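The ext8s change leans on C's conversion rules: casting the register
value to int8_t and assigning it back to the wider unsigned register
type sign-extends. A standalone sketch (uint64_t in place of
tcg_target_ulong, on a two's-complement host):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t t1 = 0x80;     /* sign bit of the low byte is set */

        t1 = (int8_t)t1;        /* as in tci_write_reg(regs, t0, (int8_t)t1) */
        assert(t1 == 0xffffffffffffff80ull);
        return 0;
    }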
3 | 2 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
5 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 5 | --- |
9 | accel/tcg/cputlb.c | 19 ++++++++++--------- | 6 | tcg/tci.c | 25 ++++--------------------- |
10 | 1 file changed, 10 insertions(+), 9 deletions(-) | 7 | 1 file changed, 4 insertions(+), 21 deletions(-) |
11 | 8 | ||
12 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 9 | diff --git a/tcg/tci.c b/tcg/tci.c |
13 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/accel/tcg/cputlb.c | 11 | --- a/tcg/tci.c |
15 | +++ b/accel/tcg/cputlb.c | 12 | +++ b/tcg/tci.c |
16 | @@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) | 13 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) |
17 | 14 | return regs[index]; | |
18 | static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx) | 15 | } |
16 | |||
17 | -#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 | ||
18 | -static int8_t tci_read_reg8s(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (int8_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | -#endif | ||
23 | - | ||
24 | #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
25 | static int16_t tci_read_reg16s(const tcg_target_ulong *regs, TCGReg index) | ||
19 | { | 26 | { |
20 | - tlb_mmu_resize_locked(&env_tlb(env)->d[mmu_idx], &env_tlb(env)->f[mmu_idx]); | 27 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) |
21 | - env_tlb(env)->d[mmu_idx].n_used_entries = 0; | 28 | return value; |
22 | - env_tlb(env)->d[mmu_idx].large_page_addr = -1; | ||
23 | - env_tlb(env)->d[mmu_idx].large_page_mask = -1; | ||
24 | - env_tlb(env)->d[mmu_idx].vindex = 0; | ||
25 | - memset(env_tlb(env)->f[mmu_idx].table, -1, | ||
26 | - sizeof_tlb(&env_tlb(env)->f[mmu_idx])); | ||
27 | - memset(env_tlb(env)->d[mmu_idx].vtable, -1, | ||
28 | - sizeof(env_tlb(env)->d[0].vtable)); | ||
29 | + CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; | ||
30 | + CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx]; | ||
31 | + | ||
32 | + tlb_mmu_resize_locked(desc, fast); | ||
33 | + desc->n_used_entries = 0; | ||
34 | + desc->large_page_addr = -1; | ||
35 | + desc->large_page_mask = -1; | ||
36 | + desc->vindex = 0; | ||
37 | + memset(fast->table, -1, sizeof_tlb(fast)); | ||
38 | + memset(desc->vtable, -1, sizeof(desc->vtable)); | ||
39 | } | 29 | } |
40 | 30 | ||
41 | static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) | 31 | -#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 |
32 | -/* Read indexed register (8 bit signed) from bytecode. */ | ||
33 | -static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
34 | -{ | ||
35 | - int8_t value = tci_read_reg8s(regs, **tb_ptr); | ||
36 | - *tb_ptr += 1; | ||
37 | - return value; | ||
38 | -} | ||
39 | -#endif | ||
40 | - | ||
41 | /* Read indexed register (16 bit) from bytecode. */ | ||
42 | static uint16_t tci_read_r16(const tcg_target_ulong *regs, | ||
43 | const uint8_t **tb_ptr) | ||
44 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
45 | #if TCG_TARGET_HAS_ext8s_i32 | ||
46 | case INDEX_op_ext8s_i32: | ||
47 | t0 = *tb_ptr++; | ||
48 | - t1 = tci_read_r8s(regs, &tb_ptr); | ||
49 | - tci_write_reg(regs, t0, t1); | ||
50 | + t1 = tci_read_r(regs, &tb_ptr); | ||
51 | + tci_write_reg(regs, t0, (int8_t)t1); | ||
52 | break; | ||
53 | #endif | ||
54 | #if TCG_TARGET_HAS_ext16s_i32 | ||
55 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
56 | #if TCG_TARGET_HAS_ext8s_i64 | ||
57 | case INDEX_op_ext8s_i64: | ||
58 | t0 = *tb_ptr++; | ||
59 | - t1 = tci_read_r8s(regs, &tb_ptr); | ||
60 | - tci_write_reg(regs, t0, t1); | ||
61 | + t1 = tci_read_r(regs, &tb_ptr); | ||
62 | + tci_write_reg(regs, t0, (int8_t)t1); | ||
63 | break; | ||
64 | #endif | ||
65 | #if TCG_TARGET_HAS_ext16s_i64 | ||
42 | -- | 66 | -- |
43 | 2.20.1 | 67 | 2.25.1 |
44 | 68 | ||
1 | Merge into the only caller, but at the same time split | 1 | Use explicit casts for ext16u opcodes, and allow truncation |
---|---|---|---|
2 | out tlb_mmu_init to initialize a single tlb entry. | 2 | to happen with the store for st16 opcodes, and with the call |
3 | for bswap16 opcodes. | ||
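Truncation "with the call" works because bswap16() takes a uint16_t,
so the ordinary argument conversion drops the high bits. A standalone
sketch with a stand-in helper (the real one lives in qemu/bswap.h; the
uint16_t signature is the assumption here):

    #include <assert.h>
    #include <stdint.h>

    static uint16_t bswap16(uint16_t x)  /* stand-in for the QEMU helper */
    {
        return (uint16_t)((x >> 8) | (x << 8));
    }

    int main(void)
    {
        uint64_t t1 = 0xdead1234;        /* full register contents */

        assert(bswap16(t1) == 0x3412);   /* call truncates t1 to 0x1234 */
        return 0;
    }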
3 | 4 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
5 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 7 | --- |
9 | accel/tcg/cputlb.c | 33 ++++++++++++++++----------------- | 8 | tcg/tci.c | 28 +++++++--------------------- |
10 | 1 file changed, 16 insertions(+), 17 deletions(-) | 9 | 1 file changed, 7 insertions(+), 21 deletions(-) |
11 | 10 | ||
12 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 11 | diff --git a/tcg/tci.c b/tcg/tci.c |
13 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/accel/tcg/cputlb.c | 13 | --- a/tcg/tci.c |
15 | +++ b/accel/tcg/cputlb.c | 14 | +++ b/tcg/tci.c |
16 | @@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, | 15 | @@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) |
17 | desc->window_max_entries = max_entries; | ||
18 | } | 16 | } |
19 | 17 | #endif | |
20 | -static void tlb_dyn_init(CPUArchState *env) | 18 | |
19 | -static uint16_t tci_read_reg16(const tcg_target_ulong *regs, TCGReg index) | ||
21 | -{ | 20 | -{ |
22 | - int i; | 21 | - return (uint16_t)tci_read_reg(regs, index); |
23 | - | ||
24 | - for (i = 0; i < NB_MMU_MODES; i++) { | ||
25 | - CPUTLBDesc *desc = &env_tlb(env)->d[i]; | ||
26 | - size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; | ||
27 | - | ||
28 | - tlb_window_reset(desc, get_clock_realtime(), 0); | ||
29 | - desc->n_used_entries = 0; | ||
30 | - env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; | ||
31 | - env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries); | ||
32 | - env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries); | ||
33 | - } | ||
34 | -} | 22 | -} |
35 | - | 23 | - |
36 | /** | 24 | static uint32_t tci_read_reg32(const tcg_target_ulong *regs, TCGReg index) |
37 | * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary | 25 | { |
38 | * @desc: The CPUTLBDesc portion of the TLB | 26 | return (uint32_t)tci_read_reg(regs, index); |
39 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx) | 27 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) |
40 | tlb_mmu_flush_locked(desc, fast); | 28 | return value; |
41 | } | 29 | } |
42 | 30 | ||
43 | +static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) | 31 | -/* Read indexed register (16 bit) from bytecode. */ |
44 | +{ | 32 | -static uint16_t tci_read_r16(const tcg_target_ulong *regs, |
45 | + size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; | 33 | - const uint8_t **tb_ptr) |
46 | + | 34 | -{ |
47 | + tlb_window_reset(desc, now, 0); | 35 | - uint16_t value = tci_read_reg16(regs, **tb_ptr); |
48 | + desc->n_used_entries = 0; | 36 | - *tb_ptr += 1; |
49 | + fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; | 37 | - return value; |
50 | + fast->table = g_new(CPUTLBEntry, n_entries); | 38 | -} |
51 | + desc->iotlb = g_new(CPUIOTLBEntry, n_entries); | 39 | - |
52 | +} | 40 | #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 |
53 | + | 41 | /* Read indexed register (16 bit signed) from bytecode. */ |
54 | static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) | 42 | static int16_t tci_read_r16s(const tcg_target_ulong *regs, |
55 | { | 43 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, |
56 | env_tlb(env)->d[mmu_idx].n_used_entries++; | 44 | *(uint8_t *)(t1 + t2) = t0; |
57 | @@ -XXX,XX +XXX,XX @@ static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx) | 45 | break; |
58 | void tlb_init(CPUState *cpu) | 46 | CASE_32_64(st16) |
59 | { | 47 | - t0 = tci_read_r16(regs, &tb_ptr); |
60 | CPUArchState *env = cpu->env_ptr; | 48 | + t0 = tci_read_r(regs, &tb_ptr); |
61 | + int64_t now = get_clock_realtime(); | 49 | t1 = tci_read_r(regs, &tb_ptr); |
62 | + int i; | 50 | t2 = tci_read_s32(&tb_ptr); |
63 | 51 | *(uint16_t *)(t1 + t2) = t0; | |
64 | qemu_spin_init(&env_tlb(env)->c.lock); | 52 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, |
65 | 53 | #if TCG_TARGET_HAS_ext16u_i32 | |
66 | /* Ensure that cpu_reset performs a full flush. */ | 54 | case INDEX_op_ext16u_i32: |
67 | env_tlb(env)->c.dirty = ALL_MMUIDX_BITS; | 55 | t0 = *tb_ptr++; |
68 | 56 | - t1 = tci_read_r16(regs, &tb_ptr); | |
69 | - tlb_dyn_init(env); | 57 | - tci_write_reg(regs, t0, t1); |
70 | + for (i = 0; i < NB_MMU_MODES; i++) { | 58 | + t1 = tci_read_r(regs, &tb_ptr); |
71 | + tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now); | 59 | + tci_write_reg(regs, t0, (uint16_t)t1); |
72 | + } | 60 | break; |
73 | } | 61 | #endif |
74 | 62 | #if TCG_TARGET_HAS_bswap16_i32 | |
75 | /* flush_all_helper: run fn across all cpus | 63 | case INDEX_op_bswap16_i32: |
64 | t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
66 | + t1 = tci_read_r(regs, &tb_ptr); | ||
67 | tci_write_reg(regs, t0, bswap16(t1)); | ||
68 | break; | ||
69 | #endif | ||
70 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
71 | #if TCG_TARGET_HAS_ext16u_i64 | ||
72 | case INDEX_op_ext16u_i64: | ||
73 | t0 = *tb_ptr++; | ||
74 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
75 | - tci_write_reg(regs, t0, t1); | ||
76 | + t1 = tci_read_r(regs, &tb_ptr); | ||
77 | + tci_write_reg(regs, t0, (uint16_t)t1); | ||
78 | break; | ||
79 | #endif | ||
80 | #if TCG_TARGET_HAS_ext32s_i64 | ||
81 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
82 | #if TCG_TARGET_HAS_bswap16_i64 | ||
83 | case INDEX_op_bswap16_i64: | ||
84 | t0 = *tb_ptr++; | ||
85 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
86 | + t1 = tci_read_r(regs, &tb_ptr); | ||
87 | tci_write_reg(regs, t0, bswap16(t1)); | ||
88 | break; | ||
89 | #endif | ||
76 | -- | 90 | -- |
77 | 2.20.1 | 91 | 2.25.1 |
78 | 92 | ||
1 | There is only one caller for tlb_table_flush_by_mmuidx. Place | 1 | Use explicit casts for ext16s opcodes. |
---|---|---|---|
2 | the result at the earlier line number, due to an expected user | ||
3 | in the near future. | ||
4 | 2 | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
6 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 5 | --- |
9 | accel/tcg/cputlb.c | 19 +++++++------------ | 6 | tcg/tci.c | 26 ++++---------------------- |
10 | 1 file changed, 7 insertions(+), 12 deletions(-) | 7 | 1 file changed, 4 insertions(+), 22 deletions(-) |
11 | 8 | ||
12 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 9 | diff --git a/tcg/tci.c b/tcg/tci.c |
13 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/accel/tcg/cputlb.c | 11 | --- a/tcg/tci.c |
15 | +++ b/accel/tcg/cputlb.c | 12 | +++ b/tcg/tci.c |
16 | @@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx) | 13 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) |
17 | } | 14 | return regs[index]; |
18 | } | 15 | } |
19 | 16 | ||
20 | -static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx) | 17 | -#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 |
21 | +static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx) | 18 | -static int16_t tci_read_reg16s(const tcg_target_ulong *regs, TCGReg index) |
19 | -{ | ||
20 | - return (int16_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | -#endif | ||
23 | - | ||
24 | #if TCG_TARGET_REG_BITS == 64 | ||
25 | static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
22 | { | 26 | { |
23 | tlb_mmu_resize_locked(env, mmu_idx); | 27 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) |
24 | - memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx)); | 28 | return value; |
25 | env_tlb(env)->d[mmu_idx].n_used_entries = 0; | ||
26 | + env_tlb(env)->d[mmu_idx].large_page_addr = -1; | ||
27 | + env_tlb(env)->d[mmu_idx].large_page_mask = -1; | ||
28 | + env_tlb(env)->d[mmu_idx].vindex = 0; | ||
29 | + memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx)); | ||
30 | + memset(env_tlb(env)->d[mmu_idx].vtable, -1, | ||
31 | + sizeof(env_tlb(env)->d[0].vtable)); | ||
32 | } | 29 | } |
33 | 30 | ||
34 | static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) | 31 | -#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 |
35 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) | 32 | -/* Read indexed register (16 bit signed) from bytecode. */ |
36 | *pelide = elide; | 33 | -static int16_t tci_read_r16s(const tcg_target_ulong *regs, |
37 | } | 34 | - const uint8_t **tb_ptr) |
38 | |||
39 | -static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx) | ||
40 | -{ | 35 | -{ |
41 | - tlb_table_flush_by_mmuidx(env, mmu_idx); | 36 | - int16_t value = tci_read_reg16s(regs, **tb_ptr); |
42 | - env_tlb(env)->d[mmu_idx].large_page_addr = -1; | 37 | - *tb_ptr += 1; |
43 | - env_tlb(env)->d[mmu_idx].large_page_mask = -1; | 38 | - return value; |
44 | - env_tlb(env)->d[mmu_idx].vindex = 0; | ||
45 | - memset(env_tlb(env)->d[mmu_idx].vtable, -1, | ||
46 | - sizeof(env_tlb(env)->d[0].vtable)); | ||
47 | -} | 39 | -} |
40 | -#endif | ||
48 | - | 41 | - |
49 | static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) | 42 | /* Read indexed register (32 bit) from bytecode. */ |
50 | { | 43 | static uint32_t tci_read_r32(const tcg_target_ulong *regs, |
51 | CPUArchState *env = cpu->env_ptr; | 44 | const uint8_t **tb_ptr) |
45 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
46 | #if TCG_TARGET_HAS_ext16s_i32 | ||
47 | case INDEX_op_ext16s_i32: | ||
48 | t0 = *tb_ptr++; | ||
49 | - t1 = tci_read_r16s(regs, &tb_ptr); | ||
50 | - tci_write_reg(regs, t0, t1); | ||
51 | + t1 = tci_read_r(regs, &tb_ptr); | ||
52 | + tci_write_reg(regs, t0, (int16_t)t1); | ||
53 | break; | ||
54 | #endif | ||
55 | #if TCG_TARGET_HAS_ext8u_i32 | ||
56 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
57 | #if TCG_TARGET_HAS_ext16s_i64 | ||
58 | case INDEX_op_ext16s_i64: | ||
59 | t0 = *tb_ptr++; | ||
60 | - t1 = tci_read_r16s(regs, &tb_ptr); | ||
61 | - tci_write_reg(regs, t0, t1); | ||
62 | + t1 = tci_read_r(regs, &tb_ptr); | ||
63 | + tci_write_reg(regs, t0, (int16_t)t1); | ||
64 | break; | ||
65 | #endif | ||
66 | #if TCG_TARGET_HAS_ext16u_i64 | ||
52 | -- | 67 | -- |
53 | 2.20.1 | 68 | 2.25.1 |
54 | 69 | ||
New patch | |||
---|---|---|---|
1 | 1 | Use explicit casts for ext32u opcodes, and allow truncation | |
2 | to happen for other users. | ||
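The dividing line in this patch: for add, sub, mul and the bitwise
ops, the low 32 bits of a full-width result equal the 32-bit result,
so the operands can be read untruncated; division, remainder and right
shifts do see the high bits, hence the explicit (uint32_t)/(int32_t)
casts. A standalone check of both claims:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t t1 = 0x1ffffffffull, t2 = 2;

        /* Addition: low 32 bits of the wide sum match the 32-bit sum. */
        assert((uint32_t)(t1 + t2) == (uint32_t)t1 + (uint32_t)t2);

        /* Right shift: truncating before vs. after differs, so the
           operand must be cast first, as the interpreter now does. */
        assert(((uint32_t)t1 >> 1) == 0x7fffffff);
        assert((uint32_t)(t1 >> 1) == 0xffffffff);
        return 0;
    }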
3 | |||
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | tcg/tci.c | 122 ++++++++++++++++++++++++------------------------------ | ||
8 | 1 file changed, 54 insertions(+), 68 deletions(-) | ||
9 | |||
10 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/tcg/tci.c | ||
13 | +++ b/tcg/tci.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
15 | } | ||
16 | #endif | ||
17 | |||
18 | -static uint32_t tci_read_reg32(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (uint32_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | - | ||
23 | #if TCG_TARGET_REG_BITS == 64 | ||
24 | static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index) | ||
25 | { | ||
26 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
27 | return value; | ||
28 | } | ||
29 | |||
30 | -/* Read indexed register (32 bit) from bytecode. */ | ||
31 | -static uint32_t tci_read_r32(const tcg_target_ulong *regs, | ||
32 | - const uint8_t **tb_ptr) | ||
33 | -{ | ||
34 | - uint32_t value = tci_read_reg32(regs, **tb_ptr); | ||
35 | - *tb_ptr += 1; | ||
36 | - return value; | ||
37 | -} | ||
38 | - | ||
39 | #if TCG_TARGET_REG_BITS == 32 | ||
40 | /* Read two indexed registers (2 * 32 bit) from bytecode. */ | ||
41 | static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
42 | const uint8_t **tb_ptr) | ||
43 | { | ||
44 | - uint32_t low = tci_read_r32(regs, tb_ptr); | ||
45 | - return tci_uint64(tci_read_r32(regs, tb_ptr), low); | ||
46 | + uint32_t low = tci_read_r(regs, tb_ptr); | ||
47 | + return tci_uint64(tci_read_r(regs, tb_ptr), low); | ||
48 | } | ||
49 | #elif TCG_TARGET_REG_BITS == 64 | ||
50 | /* Read indexed register (32 bit signed) from bytecode. */ | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | continue; | ||
53 | case INDEX_op_setcond_i32: | ||
54 | t0 = *tb_ptr++; | ||
55 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
56 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
57 | + t1 = tci_read_r(regs, &tb_ptr); | ||
58 | + t2 = tci_read_r(regs, &tb_ptr); | ||
59 | condition = *tb_ptr++; | ||
60 | tci_write_reg(regs, t0, tci_compare32(t1, t2, condition)); | ||
61 | break; | ||
62 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
63 | #endif | ||
64 | case INDEX_op_mov_i32: | ||
65 | t0 = *tb_ptr++; | ||
66 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
67 | + t1 = tci_read_r(regs, &tb_ptr); | ||
68 | tci_write_reg(regs, t0, t1); | ||
69 | break; | ||
70 | case INDEX_op_tci_movi_i32: | ||
71 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
72 | break; | ||
73 | case INDEX_op_st_i32: | ||
74 | CASE_64(st32) | ||
75 | - t0 = tci_read_r32(regs, &tb_ptr); | ||
76 | + t0 = tci_read_r(regs, &tb_ptr); | ||
77 | t1 = tci_read_r(regs, &tb_ptr); | ||
78 | t2 = tci_read_s32(&tb_ptr); | ||
79 | *(uint32_t *)(t1 + t2) = t0; | ||
80 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
81 | |||
82 | case INDEX_op_add_i32: | ||
83 | t0 = *tb_ptr++; | ||
84 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
85 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
86 | + t1 = tci_read_r(regs, &tb_ptr); | ||
87 | + t2 = tci_read_r(regs, &tb_ptr); | ||
88 | tci_write_reg(regs, t0, t1 + t2); | ||
89 | break; | ||
90 | case INDEX_op_sub_i32: | ||
91 | t0 = *tb_ptr++; | ||
92 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
93 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
94 | + t1 = tci_read_r(regs, &tb_ptr); | ||
95 | + t2 = tci_read_r(regs, &tb_ptr); | ||
96 | tci_write_reg(regs, t0, t1 - t2); | ||
97 | break; | ||
98 | case INDEX_op_mul_i32: | ||
99 | t0 = *tb_ptr++; | ||
100 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
101 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
102 | + t1 = tci_read_r(regs, &tb_ptr); | ||
103 | + t2 = tci_read_r(regs, &tb_ptr); | ||
104 | tci_write_reg(regs, t0, t1 * t2); | ||
105 | break; | ||
106 | case INDEX_op_div_i32: | ||
107 | t0 = *tb_ptr++; | ||
108 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
109 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
110 | + t1 = tci_read_r(regs, &tb_ptr); | ||
111 | + t2 = tci_read_r(regs, &tb_ptr); | ||
112 | tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2); | ||
113 | break; | ||
114 | case INDEX_op_divu_i32: | ||
115 | t0 = *tb_ptr++; | ||
116 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
117 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
118 | - tci_write_reg(regs, t0, t1 / t2); | ||
119 | + t1 = tci_read_r(regs, &tb_ptr); | ||
120 | + t2 = tci_read_r(regs, &tb_ptr); | ||
121 | + tci_write_reg(regs, t0, (uint32_t)t1 / (uint32_t)t2); | ||
122 | break; | ||
123 | case INDEX_op_rem_i32: | ||
124 | t0 = *tb_ptr++; | ||
125 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
126 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
127 | + t1 = tci_read_r(regs, &tb_ptr); | ||
128 | + t2 = tci_read_r(regs, &tb_ptr); | ||
129 | tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2); | ||
130 | break; | ||
131 | case INDEX_op_remu_i32: | ||
132 | t0 = *tb_ptr++; | ||
133 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
134 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
135 | - tci_write_reg(regs, t0, t1 % t2); | ||
136 | + t1 = tci_read_r(regs, &tb_ptr); | ||
137 | + t2 = tci_read_r(regs, &tb_ptr); | ||
138 | + tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2); | ||
139 | break; | ||
140 | case INDEX_op_and_i32: | ||
141 | t0 = *tb_ptr++; | ||
142 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
143 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
144 | + t1 = tci_read_r(regs, &tb_ptr); | ||
145 | + t2 = tci_read_r(regs, &tb_ptr); | ||
146 | tci_write_reg(regs, t0, t1 & t2); | ||
147 | break; | ||
148 | case INDEX_op_or_i32: | ||
149 | t0 = *tb_ptr++; | ||
150 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
151 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
152 | + t1 = tci_read_r(regs, &tb_ptr); | ||
153 | + t2 = tci_read_r(regs, &tb_ptr); | ||
154 | tci_write_reg(regs, t0, t1 | t2); | ||
155 | break; | ||
156 | case INDEX_op_xor_i32: | ||
157 | t0 = *tb_ptr++; | ||
158 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
159 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
160 | + t1 = tci_read_r(regs, &tb_ptr); | ||
161 | + t2 = tci_read_r(regs, &tb_ptr); | ||
162 | tci_write_reg(regs, t0, t1 ^ t2); | ||
163 | break; | ||
164 | |||
165 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
166 | |||
167 | case INDEX_op_shl_i32: | ||
168 | t0 = *tb_ptr++; | ||
169 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
170 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
171 | - tci_write_reg(regs, t0, t1 << (t2 & 31)); | ||
172 | + t1 = tci_read_r(regs, &tb_ptr); | ||
173 | + t2 = tci_read_r(regs, &tb_ptr); | ||
174 | + tci_write_reg(regs, t0, (uint32_t)t1 << (t2 & 31)); | ||
175 | break; | ||
176 | case INDEX_op_shr_i32: | ||
177 | t0 = *tb_ptr++; | ||
178 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
179 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
180 | - tci_write_reg(regs, t0, t1 >> (t2 & 31)); | ||
181 | + t1 = tci_read_r(regs, &tb_ptr); | ||
182 | + t2 = tci_read_r(regs, &tb_ptr); | ||
183 | + tci_write_reg(regs, t0, (uint32_t)t1 >> (t2 & 31)); | ||
184 | break; | ||
185 | case INDEX_op_sar_i32: | ||
186 | t0 = *tb_ptr++; | ||
187 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
188 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
189 | - tci_write_reg(regs, t0, ((int32_t)t1 >> (t2 & 31))); | ||
190 | + t1 = tci_read_r(regs, &tb_ptr); | ||
191 | + t2 = tci_read_r(regs, &tb_ptr); | ||
192 | + tci_write_reg(regs, t0, (int32_t)t1 >> (t2 & 31)); | ||
193 | break; | ||
194 | #if TCG_TARGET_HAS_rot_i32 | ||
195 | case INDEX_op_rotl_i32: | ||
196 | t0 = *tb_ptr++; | ||
197 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
198 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
199 | + t1 = tci_read_r(regs, &tb_ptr); | ||
200 | + t2 = tci_read_r(regs, &tb_ptr); | ||
201 | tci_write_reg(regs, t0, rol32(t1, t2 & 31)); | ||
202 | break; | ||
203 | case INDEX_op_rotr_i32: | ||
204 | t0 = *tb_ptr++; | ||
205 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
206 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
207 | + t1 = tci_read_r(regs, &tb_ptr); | ||
208 | + t2 = tci_read_r(regs, &tb_ptr); | ||
209 | tci_write_reg(regs, t0, ror32(t1, t2 & 31)); | ||
210 | break; | ||
211 | #endif | ||
212 | #if TCG_TARGET_HAS_deposit_i32 | ||
213 | case INDEX_op_deposit_i32: | ||
214 | t0 = *tb_ptr++; | ||
215 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
216 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
217 | + t1 = tci_read_r(regs, &tb_ptr); | ||
218 | + t2 = tci_read_r(regs, &tb_ptr); | ||
219 | tmp16 = *tb_ptr++; | ||
220 | tmp8 = *tb_ptr++; | ||
221 | tmp32 = (((1 << tmp8) - 1) << tmp16); | ||
222 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
223 | break; | ||
224 | #endif | ||
225 | case INDEX_op_brcond_i32: | ||
226 | - t0 = tci_read_r32(regs, &tb_ptr); | ||
227 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
228 | + t0 = tci_read_r(regs, &tb_ptr); | ||
229 | + t1 = tci_read_r(regs, &tb_ptr); | ||
230 | condition = *tb_ptr++; | ||
231 | label = tci_read_label(&tb_ptr); | ||
232 | if (tci_compare32(t0, t1, condition)) { | ||
233 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
234 | case INDEX_op_mulu2_i32: | ||
235 | t0 = *tb_ptr++; | ||
236 | t1 = *tb_ptr++; | ||
237 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
238 | - tmp64 = tci_read_r32(regs, &tb_ptr); | ||
239 | - tci_write_reg64(regs, t1, t0, t2 * tmp64); | ||
240 | + t2 = tci_read_r(regs, &tb_ptr); | ||
241 | + tmp64 = (uint32_t)tci_read_r(regs, &tb_ptr); | ||
242 | + tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64); | ||
243 | break; | ||
244 | #endif /* TCG_TARGET_REG_BITS == 32 */ | ||
245 | #if TCG_TARGET_HAS_ext8s_i32 | ||
246 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
247 | #if TCG_TARGET_HAS_bswap32_i32 | ||
248 | case INDEX_op_bswap32_i32: | ||
249 | t0 = *tb_ptr++; | ||
250 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
251 | + t1 = tci_read_r(regs, &tb_ptr); | ||
252 | tci_write_reg(regs, t0, bswap32(t1)); | ||
253 | break; | ||
254 | #endif | ||
255 | #if TCG_TARGET_HAS_not_i32 | ||
256 | case INDEX_op_not_i32: | ||
257 | t0 = *tb_ptr++; | ||
258 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
259 | + t1 = tci_read_r(regs, &tb_ptr); | ||
260 | tci_write_reg(regs, t0, ~t1); | ||
261 | break; | ||
262 | #endif | ||
263 | #if TCG_TARGET_HAS_neg_i32 | ||
264 | case INDEX_op_neg_i32: | ||
265 | t0 = *tb_ptr++; | ||
266 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
267 | + t1 = tci_read_r(regs, &tb_ptr); | ||
268 | tci_write_reg(regs, t0, -t1); | ||
269 | break; | ||
270 | #endif | ||
271 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
272 | #endif | ||
273 | case INDEX_op_extu_i32_i64: | ||
274 | t0 = *tb_ptr++; | ||
275 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
276 | - tci_write_reg(regs, t0, t1); | ||
277 | + t1 = tci_read_r(regs, &tb_ptr); | ||
278 | + tci_write_reg(regs, t0, (uint32_t)t1); | ||
279 | break; | ||
280 | #if TCG_TARGET_HAS_bswap16_i64 | ||
281 | case INDEX_op_bswap16_i64: | ||
282 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
283 | #if TCG_TARGET_HAS_bswap32_i64 | ||
284 | case INDEX_op_bswap32_i64: | ||
285 | t0 = *tb_ptr++; | ||
286 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
287 | + t1 = tci_read_r(regs, &tb_ptr); | ||
288 | tci_write_reg(regs, t0, bswap32(t1)); | ||
289 | break; | ||
290 | #endif | ||
291 | -- | ||
292 | 2.25.1 | ||
293 | |||
1 | There are no users of this function outside cputlb.c, | 1 | Use explicit casts for ext32s opcodes. |
---|---|---|---|
2 | and its interface will change in the next patch. | ||
3 | 2 | ||
4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
5 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 5 | --- |
9 | include/exec/cpu_ldst.h | 5 ----- | 6 | tcg/tci.c | 20 ++------------------ |
10 | accel/tcg/cputlb.c | 5 +++++ | 7 | 1 file changed, 2 insertions(+), 18 deletions(-) |
11 | 2 files changed, 5 insertions(+), 5 deletions(-) | ||
12 | 8 | ||
13 | diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h | 9 | diff --git a/tcg/tci.c b/tcg/tci.c |
14 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/include/exec/cpu_ldst.h | 11 | --- a/tcg/tci.c |
16 | +++ b/include/exec/cpu_ldst.h | 12 | +++ b/tcg/tci.c |
17 | @@ -XXX,XX +XXX,XX @@ static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx, | 13 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) |
18 | return (addr >> TARGET_PAGE_BITS) & size_mask; | 14 | return regs[index]; |
19 | } | 15 | } |
20 | 16 | ||
21 | -static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx) | 17 | -#if TCG_TARGET_REG_BITS == 64 |
18 | -static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
22 | -{ | 19 | -{ |
23 | - return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1; | 20 | - return (int32_t)tci_read_reg(regs, index); |
21 | -} | ||
22 | -#endif | ||
23 | - | ||
24 | #if TCG_TARGET_REG_BITS == 64 | ||
25 | static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index) | ||
26 | { | ||
27 | @@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
28 | return tci_uint64(tci_read_r(regs, tb_ptr), low); | ||
29 | } | ||
30 | #elif TCG_TARGET_REG_BITS == 64 | ||
31 | -/* Read indexed register (32 bit signed) from bytecode. */ | ||
32 | -static int32_t tci_read_r32s(const tcg_target_ulong *regs, | ||
33 | - const uint8_t **tb_ptr) | ||
34 | -{ | ||
35 | - int32_t value = tci_read_reg32s(regs, **tb_ptr); | ||
36 | - *tb_ptr += 1; | ||
37 | - return value; | ||
24 | -} | 38 | -} |
25 | - | 39 | - |
26 | /* Find the TLB entry corresponding to the mmu_idx + address pair. */ | 40 | /* Read indexed register (64 bit) from bytecode. */ |
27 | static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, | 41 | static uint64_t tci_read_r64(const tcg_target_ulong *regs, |
28 | target_ulong addr) | 42 | const uint8_t **tb_ptr) |
29 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 43 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, |
30 | index XXXXXXX..XXXXXXX 100644 | 44 | #endif |
31 | --- a/accel/tcg/cputlb.c | 45 | case INDEX_op_ext_i32_i64: |
32 | +++ b/accel/tcg/cputlb.c | 46 | t0 = *tb_ptr++; |
33 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); | 47 | - t1 = tci_read_r32s(regs, &tb_ptr); |
34 | QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); | 48 | - tci_write_reg(regs, t0, t1); |
35 | #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) | 49 | + t1 = tci_read_r(regs, &tb_ptr); |
36 | 50 | + tci_write_reg(regs, t0, (int32_t)t1); | |
37 | +static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx) | 51 | break; |
38 | +{ | 52 | #if TCG_TARGET_HAS_ext32u_i64 |
39 | + return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1; | 53 | case INDEX_op_ext32u_i64: |
40 | +} | ||
41 | + | ||
42 | static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx) | ||
43 | { | ||
44 | return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS); | ||
45 | -- | 54 | -- |
46 | 2.20.1 | 55 | 2.25.1 |
47 | 56 | ||
New patch | |||
---|---|---|---|
1 | 1 In all cases restricted to 64-bit hosts, tci_read_r is
2 | identical. We retain the 64-bit symbol for the single | ||
3 | case of INDEX_op_qemu_st_i64. | ||
4 | |||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | tcg/tci.c | 93 +++++++++++++++++++++++++------------------------------ | ||
9 | 1 file changed, 42 insertions(+), 51 deletions(-) | ||
10 | |||
11 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/tcg/tci.c | ||
14 | +++ b/tcg/tci.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) | ||
16 | return regs[index]; | ||
17 | } | ||
18 | |||
19 | -#if TCG_TARGET_REG_BITS == 64 | ||
20 | -static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index) | ||
21 | -{ | ||
22 | - return tci_read_reg(regs, index); | ||
23 | -} | ||
24 | -#endif | ||
25 | - | ||
26 | static void | ||
27 | tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value) | ||
28 | { | ||
29 | @@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
30 | static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
31 | const uint8_t **tb_ptr) | ||
32 | { | ||
33 | - uint64_t value = tci_read_reg64(regs, **tb_ptr); | ||
34 | - *tb_ptr += 1; | ||
35 | - return value; | ||
36 | + return tci_read_r(regs, tb_ptr); | ||
37 | } | ||
38 | #endif | ||
39 | |||
40 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
41 | #elif TCG_TARGET_REG_BITS == 64 | ||
42 | case INDEX_op_setcond_i64: | ||
43 | t0 = *tb_ptr++; | ||
44 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
45 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
46 | + t1 = tci_read_r(regs, &tb_ptr); | ||
47 | + t2 = tci_read_r(regs, &tb_ptr); | ||
48 | condition = *tb_ptr++; | ||
49 | tci_write_reg(regs, t0, tci_compare64(t1, t2, condition)); | ||
50 | break; | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | #if TCG_TARGET_REG_BITS == 64 | ||
53 | case INDEX_op_mov_i64: | ||
54 | t0 = *tb_ptr++; | ||
55 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
56 | + t1 = tci_read_r(regs, &tb_ptr); | ||
57 | tci_write_reg(regs, t0, t1); | ||
58 | break; | ||
59 | case INDEX_op_tci_movi_i64: | ||
60 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
61 | tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2)); | ||
62 | break; | ||
63 | case INDEX_op_st_i64: | ||
64 | - t0 = tci_read_r64(regs, &tb_ptr); | ||
65 | + t0 = tci_read_r(regs, &tb_ptr); | ||
66 | t1 = tci_read_r(regs, &tb_ptr); | ||
67 | t2 = tci_read_s32(&tb_ptr); | ||
68 | *(uint64_t *)(t1 + t2) = t0; | ||
69 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
70 | |||
71 | case INDEX_op_add_i64: | ||
72 | t0 = *tb_ptr++; | ||
73 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
74 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
75 | + t1 = tci_read_r(regs, &tb_ptr); | ||
76 | + t2 = tci_read_r(regs, &tb_ptr); | ||
77 | tci_write_reg(regs, t0, t1 + t2); | ||
78 | break; | ||
79 | case INDEX_op_sub_i64: | ||
80 | t0 = *tb_ptr++; | ||
81 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
82 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
83 | + t1 = tci_read_r(regs, &tb_ptr); | ||
84 | + t2 = tci_read_r(regs, &tb_ptr); | ||
85 | tci_write_reg(regs, t0, t1 - t2); | ||
86 | break; | ||
87 | case INDEX_op_mul_i64: | ||
88 | t0 = *tb_ptr++; | ||
89 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
90 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
91 | + t1 = tci_read_r(regs, &tb_ptr); | ||
92 | + t2 = tci_read_r(regs, &tb_ptr); | ||
93 | tci_write_reg(regs, t0, t1 * t2); | ||
94 | break; | ||
95 | case INDEX_op_div_i64: | ||
96 | t0 = *tb_ptr++; | ||
97 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
98 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
99 | + t1 = tci_read_r(regs, &tb_ptr); | ||
100 | + t2 = tci_read_r(regs, &tb_ptr); | ||
101 | tci_write_reg(regs, t0, (int64_t)t1 / (int64_t)t2); | ||
102 | break; | ||
103 | case INDEX_op_divu_i64: | ||
104 | t0 = *tb_ptr++; | ||
105 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
106 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
107 | + t1 = tci_read_r(regs, &tb_ptr); | ||
108 | + t2 = tci_read_r(regs, &tb_ptr); | ||
109 | tci_write_reg(regs, t0, (uint64_t)t1 / (uint64_t)t2); | ||
110 | break; | ||
111 | case INDEX_op_rem_i64: | ||
112 | t0 = *tb_ptr++; | ||
113 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
114 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
115 | + t1 = tci_read_r(regs, &tb_ptr); | ||
116 | + t2 = tci_read_r(regs, &tb_ptr); | ||
117 | tci_write_reg(regs, t0, (int64_t)t1 % (int64_t)t2); | ||
118 | break; | ||
119 | case INDEX_op_remu_i64: | ||
120 | t0 = *tb_ptr++; | ||
121 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
122 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
123 | + t1 = tci_read_r(regs, &tb_ptr); | ||
124 | + t2 = tci_read_r(regs, &tb_ptr); | ||
125 | tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2); | ||
126 | break; | ||
127 | case INDEX_op_and_i64: | ||
128 | t0 = *tb_ptr++; | ||
129 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
130 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
131 | + t1 = tci_read_r(regs, &tb_ptr); | ||
132 | + t2 = tci_read_r(regs, &tb_ptr); | ||
133 | tci_write_reg(regs, t0, t1 & t2); | ||
134 | break; | ||
135 | case INDEX_op_or_i64: | ||
136 | t0 = *tb_ptr++; | ||
137 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
138 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
139 | + t1 = tci_read_r(regs, &tb_ptr); | ||
140 | + t2 = tci_read_r(regs, &tb_ptr); | ||
141 | tci_write_reg(regs, t0, t1 | t2); | ||
142 | break; | ||
143 | case INDEX_op_xor_i64: | ||
144 | t0 = *tb_ptr++; | ||
145 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
146 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
147 | + t1 = tci_read_r(regs, &tb_ptr); | ||
148 | + t2 = tci_read_r(regs, &tb_ptr); | ||
149 | tci_write_reg(regs, t0, t1 ^ t2); | ||
150 | break; | ||
151 | |||
152 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
153 | |||
154 | case INDEX_op_shl_i64: | ||
155 | t0 = *tb_ptr++; | ||
156 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
157 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
158 | + t1 = tci_read_r(regs, &tb_ptr); | ||
159 | + t2 = tci_read_r(regs, &tb_ptr); | ||
160 | tci_write_reg(regs, t0, t1 << (t2 & 63)); | ||
161 | break; | ||
162 | case INDEX_op_shr_i64: | ||
163 | t0 = *tb_ptr++; | ||
164 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
165 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
166 | + t1 = tci_read_r(regs, &tb_ptr); | ||
167 | + t2 = tci_read_r(regs, &tb_ptr); | ||
168 | tci_write_reg(regs, t0, t1 >> (t2 & 63)); | ||
169 | break; | ||
170 | case INDEX_op_sar_i64: | ||
171 | t0 = *tb_ptr++; | ||
172 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
173 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
174 | + t1 = tci_read_r(regs, &tb_ptr); | ||
175 | + t2 = tci_read_r(regs, &tb_ptr); | ||
176 | tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63))); | ||
177 | break; | ||
178 | #if TCG_TARGET_HAS_rot_i64 | ||
179 | case INDEX_op_rotl_i64: | ||
180 | t0 = *tb_ptr++; | ||
181 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
182 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
183 | + t1 = tci_read_r(regs, &tb_ptr); | ||
184 | + t2 = tci_read_r(regs, &tb_ptr); | ||
185 | tci_write_reg(regs, t0, rol64(t1, t2 & 63)); | ||
186 | break; | ||
187 | case INDEX_op_rotr_i64: | ||
188 | t0 = *tb_ptr++; | ||
189 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
190 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
191 | + t1 = tci_read_r(regs, &tb_ptr); | ||
192 | + t2 = tci_read_r(regs, &tb_ptr); | ||
193 | tci_write_reg(regs, t0, ror64(t1, t2 & 63)); | ||
194 | break; | ||
195 | #endif | ||
196 | #if TCG_TARGET_HAS_deposit_i64 | ||
197 | case INDEX_op_deposit_i64: | ||
198 | t0 = *tb_ptr++; | ||
199 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
200 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
201 | + t1 = tci_read_r(regs, &tb_ptr); | ||
202 | + t2 = tci_read_r(regs, &tb_ptr); | ||
203 | tmp16 = *tb_ptr++; | ||
204 | tmp8 = *tb_ptr++; | ||
205 | tmp64 = (((1ULL << tmp8) - 1) << tmp16); | ||
206 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
207 | break; | ||
208 | #endif | ||
209 | case INDEX_op_brcond_i64: | ||
210 | - t0 = tci_read_r64(regs, &tb_ptr); | ||
211 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
212 | + t0 = tci_read_r(regs, &tb_ptr); | ||
213 | + t1 = tci_read_r(regs, &tb_ptr); | ||
214 | condition = *tb_ptr++; | ||
215 | label = tci_read_label(&tb_ptr); | ||
216 | if (tci_compare64(t0, t1, condition)) { | ||
217 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
218 | #if TCG_TARGET_HAS_bswap64_i64 | ||
219 | case INDEX_op_bswap64_i64: | ||
220 | t0 = *tb_ptr++; | ||
221 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
222 | + t1 = tci_read_r(regs, &tb_ptr); | ||
223 | tci_write_reg(regs, t0, bswap64(t1)); | ||
224 | break; | ||
225 | #endif | ||
226 | #if TCG_TARGET_HAS_not_i64 | ||
227 | case INDEX_op_not_i64: | ||
228 | t0 = *tb_ptr++; | ||
229 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
230 | + t1 = tci_read_r(regs, &tb_ptr); | ||
231 | tci_write_reg(regs, t0, ~t1); | ||
232 | break; | ||
233 | #endif | ||
234 | #if TCG_TARGET_HAS_neg_i64 | ||
235 | case INDEX_op_neg_i64: | ||
236 | t0 = *tb_ptr++; | ||
237 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
238 | + t1 = tci_read_r(regs, &tb_ptr); | ||
239 | tci_write_reg(regs, t0, -t1); | ||
240 | break; | ||
241 | #endif | ||
242 | -- | ||
243 | 2.25.1 | ||
244 | |||
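An aside for readers new to TCI: every register operand in the TCI bytecode is a single index byte, so on a 64-bit host tci_read_r and the old tci_read_r64 performed exactly the same load, which is all this patch exploits. A minimal standalone model of the operand decode, with the types simplified (the real regs array, opcode stream and typedefs live in tcg/tci.c):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t tcg_target_ulong;    /* assumption: 64-bit host */

    /* Fetch one register operand: read an index byte, advance the stream. */
    static tcg_target_ulong tci_read_r(const tcg_target_ulong *regs,
                                       const uint8_t **tb_ptr)
    {
        tcg_target_ulong value = regs[**tb_ptr];
        *tb_ptr += 1;
        return value;
    }

    int main(void)
    {
        tcg_target_ulong regs[4] = { 0, 11, 22, 33 };
        const uint8_t code[2] = { 2, 3 };   /* two register operands */
        const uint8_t *tb_ptr = code;
        tcg_target_ulong t1 = tci_read_r(regs, &tb_ptr);
        tcg_target_ulong t2 = tci_read_r(regs, &tb_ptr);
        printf("%lu %lu\n", (unsigned long)t1, (unsigned long)t2); /* 22 33 */
        return 0;
    }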
New patch | |||
---|---|---|---|
1 | This includes add, sub, mul, and, or, xor. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 83 +++++++++++++++++-------------------------------------- | ||
7 | 1 file changed, 25 insertions(+), 58 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
14 | *(uint32_t *)(t1 + t2) = t0; | ||
15 | break; | ||
16 | |||
17 | - /* Arithmetic operations (32 bit). */ | ||
18 | + /* Arithmetic operations (mixed 32/64 bit). */ | ||
19 | |||
20 | - case INDEX_op_add_i32: | ||
21 | + CASE_32_64(add) | ||
22 | t0 = *tb_ptr++; | ||
23 | t1 = tci_read_r(regs, &tb_ptr); | ||
24 | t2 = tci_read_r(regs, &tb_ptr); | ||
25 | tci_write_reg(regs, t0, t1 + t2); | ||
26 | break; | ||
27 | - case INDEX_op_sub_i32: | ||
28 | + CASE_32_64(sub) | ||
29 | t0 = *tb_ptr++; | ||
30 | t1 = tci_read_r(regs, &tb_ptr); | ||
31 | t2 = tci_read_r(regs, &tb_ptr); | ||
32 | tci_write_reg(regs, t0, t1 - t2); | ||
33 | break; | ||
34 | - case INDEX_op_mul_i32: | ||
35 | + CASE_32_64(mul) | ||
36 | t0 = *tb_ptr++; | ||
37 | t1 = tci_read_r(regs, &tb_ptr); | ||
38 | t2 = tci_read_r(regs, &tb_ptr); | ||
39 | tci_write_reg(regs, t0, t1 * t2); | ||
40 | break; | ||
41 | + CASE_32_64(and) | ||
42 | + t0 = *tb_ptr++; | ||
43 | + t1 = tci_read_r(regs, &tb_ptr); | ||
44 | + t2 = tci_read_r(regs, &tb_ptr); | ||
45 | + tci_write_reg(regs, t0, t1 & t2); | ||
46 | + break; | ||
47 | + CASE_32_64(or) | ||
48 | + t0 = *tb_ptr++; | ||
49 | + t1 = tci_read_r(regs, &tb_ptr); | ||
50 | + t2 = tci_read_r(regs, &tb_ptr); | ||
51 | + tci_write_reg(regs, t0, t1 | t2); | ||
52 | + break; | ||
53 | + CASE_32_64(xor) | ||
54 | + t0 = *tb_ptr++; | ||
55 | + t1 = tci_read_r(regs, &tb_ptr); | ||
56 | + t2 = tci_read_r(regs, &tb_ptr); | ||
57 | + tci_write_reg(regs, t0, t1 ^ t2); | ||
58 | + break; | ||
59 | + | ||
60 | + /* Arithmetic operations (32 bit). */ | ||
61 | + | ||
62 | case INDEX_op_div_i32: | ||
63 | t0 = *tb_ptr++; | ||
64 | t1 = tci_read_r(regs, &tb_ptr); | ||
65 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
66 | t2 = tci_read_r(regs, &tb_ptr); | ||
67 | tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2); | ||
68 | break; | ||
69 | - case INDEX_op_and_i32: | ||
70 | - t0 = *tb_ptr++; | ||
71 | - t1 = tci_read_r(regs, &tb_ptr); | ||
72 | - t2 = tci_read_r(regs, &tb_ptr); | ||
73 | - tci_write_reg(regs, t0, t1 & t2); | ||
74 | - break; | ||
75 | - case INDEX_op_or_i32: | ||
76 | - t0 = *tb_ptr++; | ||
77 | - t1 = tci_read_r(regs, &tb_ptr); | ||
78 | - t2 = tci_read_r(regs, &tb_ptr); | ||
79 | - tci_write_reg(regs, t0, t1 | t2); | ||
80 | - break; | ||
81 | - case INDEX_op_xor_i32: | ||
82 | - t0 = *tb_ptr++; | ||
83 | - t1 = tci_read_r(regs, &tb_ptr); | ||
84 | - t2 = tci_read_r(regs, &tb_ptr); | ||
85 | - tci_write_reg(regs, t0, t1 ^ t2); | ||
86 | - break; | ||
87 | |||
88 | /* Shift/rotate operations (32 bit). */ | ||
89 | |||
90 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
91 | |||
92 | /* Arithmetic operations (64 bit). */ | ||
93 | |||
94 | - case INDEX_op_add_i64: | ||
95 | - t0 = *tb_ptr++; | ||
96 | - t1 = tci_read_r(regs, &tb_ptr); | ||
97 | - t2 = tci_read_r(regs, &tb_ptr); | ||
98 | - tci_write_reg(regs, t0, t1 + t2); | ||
99 | - break; | ||
100 | - case INDEX_op_sub_i64: | ||
101 | - t0 = *tb_ptr++; | ||
102 | - t1 = tci_read_r(regs, &tb_ptr); | ||
103 | - t2 = tci_read_r(regs, &tb_ptr); | ||
104 | - tci_write_reg(regs, t0, t1 - t2); | ||
105 | - break; | ||
106 | - case INDEX_op_mul_i64: | ||
107 | - t0 = *tb_ptr++; | ||
108 | - t1 = tci_read_r(regs, &tb_ptr); | ||
109 | - t2 = tci_read_r(regs, &tb_ptr); | ||
110 | - tci_write_reg(regs, t0, t1 * t2); | ||
111 | - break; | ||
112 | case INDEX_op_div_i64: | ||
113 | t0 = *tb_ptr++; | ||
114 | t1 = tci_read_r(regs, &tb_ptr); | ||
115 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
116 | t2 = tci_read_r(regs, &tb_ptr); | ||
117 | tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2); | ||
118 | break; | ||
119 | - case INDEX_op_and_i64: | ||
120 | - t0 = *tb_ptr++; | ||
121 | - t1 = tci_read_r(regs, &tb_ptr); | ||
122 | - t2 = tci_read_r(regs, &tb_ptr); | ||
123 | - tci_write_reg(regs, t0, t1 & t2); | ||
124 | - break; | ||
125 | - case INDEX_op_or_i64: | ||
126 | - t0 = *tb_ptr++; | ||
127 | - t1 = tci_read_r(regs, &tb_ptr); | ||
128 | - t2 = tci_read_r(regs, &tb_ptr); | ||
129 | - tci_write_reg(regs, t0, t1 | t2); | ||
130 | - break; | ||
131 | - case INDEX_op_xor_i64: | ||
132 | - t0 = *tb_ptr++; | ||
133 | - t1 = tci_read_r(regs, &tb_ptr); | ||
134 | - t2 = tci_read_r(regs, &tb_ptr); | ||
135 | - tci_write_reg(regs, t0, t1 ^ t2); | ||
136 | - break; | ||
137 | |||
138 | /* Shift/rotate operations (64 bit). */ | ||
139 | |||
140 | -- | ||
141 | 2.25.1 | ||
142 | |||
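A note on CASE_32_64(), which first shows up in this hunk: its definition was added to tcg/tci.c earlier in the series and is outside the hunks shown here. Reconstructed from the final tree (treat this as a sketch, not a quote), it token-pastes the opcode names so that one case body serves both widths on a 64-bit host:

    /* token-pasting helpers, as in include/qemu/compiler.h */
    #define xglue(x, y) x ## y
    #define glue(x, y) xglue(x, y)

    #if TCG_TARGET_REG_BITS == 64
    # define CASE_32_64(x) \
            case glue(glue(INDEX_op_, x), _i64): \
            case glue(glue(INDEX_op_, x), _i32):
    #else
    # define CASE_32_64(x) \
            case glue(glue(INDEX_op_, x), _i32):
    #endif

The add/sub/mul/and/or/xor bodies can be shared because tci_write_reg() stores a full tcg_target_ulong and these operators behave identically at either width; div/rem remain split since they need width-specific casts.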
New patch | |||
---|---|---|---|
1 | This includes ext8s, ext8u, ext16s, ext16u. | ||
1 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 44 ++++++++------------------------------------ | ||
7 | 1 file changed, 8 insertions(+), 36 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
14 | tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64); | ||
15 | break; | ||
16 | #endif /* TCG_TARGET_REG_BITS == 32 */ | ||
17 | -#if TCG_TARGET_HAS_ext8s_i32 | ||
18 | - case INDEX_op_ext8s_i32: | ||
19 | +#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 | ||
20 | + CASE_32_64(ext8s) | ||
21 | t0 = *tb_ptr++; | ||
22 | t1 = tci_read_r(regs, &tb_ptr); | ||
23 | tci_write_reg(regs, t0, (int8_t)t1); | ||
24 | break; | ||
25 | #endif | ||
26 | -#if TCG_TARGET_HAS_ext16s_i32 | ||
27 | - case INDEX_op_ext16s_i32: | ||
28 | +#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
29 | + CASE_32_64(ext16s) | ||
30 | t0 = *tb_ptr++; | ||
31 | t1 = tci_read_r(regs, &tb_ptr); | ||
32 | tci_write_reg(regs, t0, (int16_t)t1); | ||
33 | break; | ||
34 | #endif | ||
35 | -#if TCG_TARGET_HAS_ext8u_i32 | ||
36 | - case INDEX_op_ext8u_i32: | ||
37 | +#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64 | ||
38 | + CASE_32_64(ext8u) | ||
39 | t0 = *tb_ptr++; | ||
40 | t1 = tci_read_r(regs, &tb_ptr); | ||
41 | tci_write_reg(regs, t0, (uint8_t)t1); | ||
42 | break; | ||
43 | #endif | ||
44 | -#if TCG_TARGET_HAS_ext16u_i32 | ||
45 | - case INDEX_op_ext16u_i32: | ||
46 | +#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64 | ||
47 | + CASE_32_64(ext16u) | ||
48 | t0 = *tb_ptr++; | ||
49 | t1 = tci_read_r(regs, &tb_ptr); | ||
50 | tci_write_reg(regs, t0, (uint16_t)t1); | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | continue; | ||
53 | } | ||
54 | break; | ||
55 | -#if TCG_TARGET_HAS_ext8u_i64 | ||
56 | - case INDEX_op_ext8u_i64: | ||
57 | - t0 = *tb_ptr++; | ||
58 | - t1 = tci_read_r(regs, &tb_ptr); | ||
59 | - tci_write_reg(regs, t0, (uint8_t)t1); | ||
60 | - break; | ||
61 | -#endif | ||
62 | -#if TCG_TARGET_HAS_ext8s_i64 | ||
63 | - case INDEX_op_ext8s_i64: | ||
64 | - t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r(regs, &tb_ptr); | ||
66 | - tci_write_reg(regs, t0, (int8_t)t1); | ||
67 | - break; | ||
68 | -#endif | ||
69 | -#if TCG_TARGET_HAS_ext16s_i64 | ||
70 | - case INDEX_op_ext16s_i64: | ||
71 | - t0 = *tb_ptr++; | ||
72 | - t1 = tci_read_r(regs, &tb_ptr); | ||
73 | - tci_write_reg(regs, t0, (int16_t)t1); | ||
74 | - break; | ||
75 | -#endif | ||
76 | -#if TCG_TARGET_HAS_ext16u_i64 | ||
77 | - case INDEX_op_ext16u_i64: | ||
78 | - t0 = *tb_ptr++; | ||
79 | - t1 = tci_read_r(regs, &tb_ptr); | ||
80 | - tci_write_reg(regs, t0, (uint16_t)t1); | ||
81 | - break; | ||
82 | -#endif | ||
83 | #if TCG_TARGET_HAS_ext32s_i64 | ||
84 | case INDEX_op_ext32s_i64: | ||
85 | #endif | ||
86 | -- | ||
87 | 2.25.1 | ||
88 | |||
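Why one body can serve ext8s_i32 and ext8s_i64 alike: tci_write_reg() takes a full tcg_target_ulong, and the intermediate C cast alone picks sign- versus zero-extension, independent of the opcode width. A quick standalone check of the cast semantics the merged cases rely on (assumes a 64-bit host):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t t1 = 0x1280;   /* low byte has bit 7 set */

        /* ext8s: narrowing to int8_t keeps the low byte (on mainstream
         * ABIs), then widening back sign-extends */
        printf("%016llx\n", (unsigned long long)(uint64_t)(int8_t)t1);
        /* prints ffffffffffffff80 */

        /* ext8u: narrowing to uint8_t, then widening back zero-extends */
        printf("%016llx\n", (unsigned long long)(uint64_t)(uint8_t)t1);
        /* prints 0000000000000080 */
        return 0;
    }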
1 | From: Philippe Mathieu-Daudé <philmd@redhat.com> | 1 | This includes bswap16 and bswap32. |
---|---|---|---|
2 | 2 | ||
3 | To avoid scrolling through each instruction when reviewing tcg | 3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
4 | helpers written for the decodetree script, display the | ||
5 | .decode files (similar to header declarations) before | ||
6 | the C source (implementation of previous declarations). | ||
7 | |||
8 | Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
9 | Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> | ||
10 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
11 | Message-Id: <20191230082856.30556-1-philmd@redhat.com> | ||
12 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
13 | --- | 5 | --- |
14 | scripts/git.orderfile | 3 +++ | 6 | tcg/tci.c | 22 ++++------------------ |
15 | 1 file changed, 3 insertions(+) | 7 | 1 file changed, 4 insertions(+), 18 deletions(-) |
16 | 8 | ||
17 | diff --git a/scripts/git.orderfile b/scripts/git.orderfile | 9 | diff --git a/tcg/tci.c b/tcg/tci.c |
18 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/scripts/git.orderfile | 11 | --- a/tcg/tci.c |
20 | +++ b/scripts/git.orderfile | 12 | +++ b/tcg/tci.c |
21 | @@ -XXX,XX +XXX,XX @@ qga/*.json | 13 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, |
22 | # headers | 14 | tci_write_reg(regs, t0, (uint16_t)t1); |
23 | *.h | 15 | break; |
24 | 16 | #endif | |
25 | +# decoding tree specification | 17 | -#if TCG_TARGET_HAS_bswap16_i32 |
26 | +*.decode | 18 | - case INDEX_op_bswap16_i32: |
27 | + | 19 | +#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 |
28 | # code | 20 | + CASE_32_64(bswap16) |
29 | *.c | 21 | t0 = *tb_ptr++; |
22 | t1 = tci_read_r(regs, &tb_ptr); | ||
23 | tci_write_reg(regs, t0, bswap16(t1)); | ||
24 | break; | ||
25 | #endif | ||
26 | -#if TCG_TARGET_HAS_bswap32_i32 | ||
27 | - case INDEX_op_bswap32_i32: | ||
28 | +#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64 | ||
29 | + CASE_32_64(bswap32) | ||
30 | t0 = *tb_ptr++; | ||
31 | t1 = tci_read_r(regs, &tb_ptr); | ||
32 | tci_write_reg(regs, t0, bswap32(t1)); | ||
33 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
34 | t1 = tci_read_r(regs, &tb_ptr); | ||
35 | tci_write_reg(regs, t0, (uint32_t)t1); | ||
36 | break; | ||
37 | -#if TCG_TARGET_HAS_bswap16_i64 | ||
38 | - case INDEX_op_bswap16_i64: | ||
39 | - t0 = *tb_ptr++; | ||
40 | - t1 = tci_read_r(regs, &tb_ptr); | ||
41 | - tci_write_reg(regs, t0, bswap16(t1)); | ||
42 | - break; | ||
43 | -#endif | ||
44 | -#if TCG_TARGET_HAS_bswap32_i64 | ||
45 | - case INDEX_op_bswap32_i64: | ||
46 | - t0 = *tb_ptr++; | ||
47 | - t1 = tci_read_r(regs, &tb_ptr); | ||
48 | - tci_write_reg(regs, t0, bswap32(t1)); | ||
49 | - break; | ||
50 | -#endif | ||
51 | #if TCG_TARGET_HAS_bswap64_i64 | ||
52 | case INDEX_op_bswap64_i64: | ||
53 | t0 = *tb_ptr++; | ||
30 | -- | 54 | -- |
31 | 2.20.1 | 55 | 2.25.1 |
32 | 56 | ||
1 | The accel_list and tmp variables are only used when manufacturing | 1 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
---|---|---|---|
2 | -machine accel options based on -accel. | ||
3 | |||
4 | Acked-by: Paolo Bonzini <pbonzini@redhat.com> | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
6 | Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com> | ||
7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | --- | 3 | --- |
9 | vl.c | 3 ++- | 4 | tcg/tci.c | 29 +++++------------------------ |
10 | 1 file changed, 2 insertions(+), 1 deletion(-) | 5 | 1 file changed, 5 insertions(+), 24 deletions(-) |
11 | 6 | ||
12 | diff --git a/vl.c b/vl.c | 7 | diff --git a/tcg/tci.c b/tcg/tci.c |
13 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/vl.c | 9 | --- a/tcg/tci.c |
15 | +++ b/vl.c | 10 | +++ b/tcg/tci.c |
16 | @@ -XXX,XX +XXX,XX @@ static int do_configure_accelerator(void *opaque, QemuOpts *opts, Error **errp) | 11 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, |
17 | static void configure_accelerators(const char *progname) | 12 | tci_write_reg(regs, t0, tci_compare64(t1, t2, condition)); |
18 | { | 13 | break; |
19 | const char *accel; | 14 | #endif |
20 | - char **accel_list, **tmp; | 15 | - case INDEX_op_mov_i32: |
21 | bool init_failed = false; | 16 | + CASE_32_64(mov) |
22 | 17 | t0 = *tb_ptr++; | |
23 | qemu_opts_foreach(qemu_find_opts("icount"), | 18 | t1 = tci_read_r(regs, &tb_ptr); |
24 | @@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname) | 19 | tci_write_reg(regs, t0, t1); |
25 | 20 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | |
26 | accel = qemu_opt_get(qemu_get_machine_opts(), "accel"); | 21 | tci_write_reg(regs, t0, bswap32(t1)); |
27 | if (QTAILQ_EMPTY(&qemu_accel_opts.head)) { | 22 | break; |
28 | + char **accel_list, **tmp; | 23 | #endif |
29 | + | 24 | -#if TCG_TARGET_HAS_not_i32 |
30 | if (accel == NULL) { | 25 | - case INDEX_op_not_i32: |
31 | /* Select the default accelerator */ | 26 | +#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64 |
32 | if (!accel_find("tcg") && !accel_find("kvm")) { | 27 | + CASE_32_64(not) |
28 | t0 = *tb_ptr++; | ||
29 | t1 = tci_read_r(regs, &tb_ptr); | ||
30 | tci_write_reg(regs, t0, ~t1); | ||
31 | break; | ||
32 | #endif | ||
33 | -#if TCG_TARGET_HAS_neg_i32 | ||
34 | - case INDEX_op_neg_i32: | ||
35 | +#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64 | ||
36 | + CASE_32_64(neg) | ||
37 | t0 = *tb_ptr++; | ||
38 | t1 = tci_read_r(regs, &tb_ptr); | ||
39 | tci_write_reg(regs, t0, -t1); | ||
40 | break; | ||
41 | #endif | ||
42 | #if TCG_TARGET_REG_BITS == 64 | ||
43 | - case INDEX_op_mov_i64: | ||
44 | - t0 = *tb_ptr++; | ||
45 | - t1 = tci_read_r(regs, &tb_ptr); | ||
46 | - tci_write_reg(regs, t0, t1); | ||
47 | - break; | ||
48 | case INDEX_op_tci_movi_i64: | ||
49 | t0 = *tb_ptr++; | ||
50 | t1 = tci_read_i64(&tb_ptr); | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | tci_write_reg(regs, t0, bswap64(t1)); | ||
53 | break; | ||
54 | #endif | ||
55 | -#if TCG_TARGET_HAS_not_i64 | ||
56 | - case INDEX_op_not_i64: | ||
57 | - t0 = *tb_ptr++; | ||
58 | - t1 = tci_read_r(regs, &tb_ptr); | ||
59 | - tci_write_reg(regs, t0, ~t1); | ||
60 | - break; | ||
61 | -#endif | ||
62 | -#if TCG_TARGET_HAS_neg_i64 | ||
63 | - case INDEX_op_neg_i64: | ||
64 | - t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r(regs, &tb_ptr); | ||
66 | - tci_write_reg(regs, t0, -t1); | ||
67 | - break; | ||
68 | -#endif | ||
69 | #endif /* TCG_TARGET_REG_BITS == 64 */ | ||
70 | |||
71 | /* QEMU specific operations. */ | ||
33 | -- | 72 | -- |
34 | 2.20.1 | 73 | 2.25.1 |
35 | 74 | ||
1 | The accel_initialised variable no longer has any setters. | 1 | From: Alex Bennée <alex.bennee@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Fixes: 6f6e1698a68c | 3 | Having a function return both a valid TB and some system state |
4 | Acked-by: Paolo Bonzini <pbonzini@redhat.com> | 4 | seems excessive. It will make the subsequent refactoring easier if we |
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 5 | look up the current state where we are. |
6 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 6 | |
7 | Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com> | 7 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> |
8 | Message-Id: <20210224165811.11567-2-alex.bennee@linaro.org> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | --- | 10 | --- |
10 | vl.c | 3 +-- | 11 | include/exec/tb-lookup.h | 18 ++++++++---------- |
11 | 1 file changed, 1 insertion(+), 2 deletions(-) | 12 | accel/tcg/cpu-exec.c | 10 ++++++++-- |
13 | accel/tcg/tcg-runtime.c | 4 +++- | ||
14 | 3 files changed, 19 insertions(+), 13 deletions(-) | ||
12 | 15 | ||
13 | diff --git a/vl.c b/vl.c | 16 | diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h |
14 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/vl.c | 18 | --- a/include/exec/tb-lookup.h |
16 | +++ b/vl.c | 19 | +++ b/include/exec/tb-lookup.h |
17 | @@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname) | 20 | @@ -XXX,XX +XXX,XX @@ |
21 | #include "exec/tb-hash.h" | ||
22 | |||
23 | /* Might cause an exception, so have a longjmp destination ready */ | ||
24 | -static inline TranslationBlock * | ||
25 | -tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base, | ||
26 | - uint32_t *flags, uint32_t cf_mask) | ||
27 | +static inline TranslationBlock * tb_lookup(CPUState *cpu, | ||
28 | + target_ulong pc, target_ulong cs_base, | ||
29 | + uint32_t flags, uint32_t cf_mask) | ||
18 | { | 30 | { |
19 | const char *accel; | 31 | - CPUArchState *env = (CPUArchState *)cpu->env_ptr; |
20 | char **accel_list, **tmp; | 32 | TranslationBlock *tb; |
21 | - bool accel_initialised = false; | 33 | uint32_t hash; |
22 | bool init_failed = false; | 34 | |
23 | 35 | - cpu_get_tb_cpu_state(env, pc, cs_base, flags); | |
24 | qemu_opts_foreach(qemu_find_opts("icount"), | 36 | - hash = tb_jmp_cache_hash_func(*pc); |
25 | @@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname) | 37 | + hash = tb_jmp_cache_hash_func(pc); |
26 | 38 | tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); | |
27 | accel_list = g_strsplit(accel, ":", 0); | 39 | |
28 | 40 | cf_mask &= ~CF_CLUSTER_MASK; | |
29 | - for (tmp = accel_list; !accel_initialised && tmp && *tmp; tmp++) { | 41 | cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT; |
30 | + for (tmp = accel_list; tmp && *tmp; tmp++) { | 42 | |
31 | /* | 43 | if (likely(tb && |
32 | * Filter invalid accelerators here, to prevent obscenities | 44 | - tb->pc == *pc && |
33 | * such as "-machine accel=tcg,,thread=single". | 45 | - tb->cs_base == *cs_base && |
46 | - tb->flags == *flags && | ||
47 | + tb->pc == pc && | ||
48 | + tb->cs_base == cs_base && | ||
49 | + tb->flags == flags && | ||
50 | tb->trace_vcpu_dstate == *cpu->trace_dstate && | ||
51 | (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) { | ||
52 | return tb; | ||
53 | } | ||
54 | - tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask); | ||
55 | + tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
56 | if (tb == NULL) { | ||
57 | return NULL; | ||
58 | } | ||
59 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/accel/tcg/cpu-exec.c | ||
62 | +++ b/accel/tcg/cpu-exec.c | ||
63 | @@ -XXX,XX +XXX,XX @@ static void cpu_exec_exit(CPUState *cpu) | ||
64 | |||
65 | void cpu_exec_step_atomic(CPUState *cpu) | ||
66 | { | ||
67 | + CPUArchState *env = (CPUArchState *)cpu->env_ptr; | ||
68 | TranslationBlock *tb; | ||
69 | target_ulong cs_base, pc; | ||
70 | uint32_t flags; | ||
71 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) | ||
72 | g_assert(!cpu->running); | ||
73 | cpu->running = true; | ||
74 | |||
75 | - tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask); | ||
76 | + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
77 | + tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
78 | + | ||
79 | if (tb == NULL) { | ||
80 | mmap_lock(); | ||
81 | tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); | ||
82 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_find(CPUState *cpu, | ||
83 | TranslationBlock *last_tb, | ||
84 | int tb_exit, uint32_t cf_mask) | ||
85 | { | ||
86 | + CPUArchState *env = (CPUArchState *)cpu->env_ptr; | ||
87 | TranslationBlock *tb; | ||
88 | target_ulong cs_base, pc; | ||
89 | uint32_t flags; | ||
90 | |||
91 | - tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask); | ||
92 | + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
93 | + | ||
94 | + tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
95 | if (tb == NULL) { | ||
96 | mmap_lock(); | ||
97 | tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask); | ||
98 | diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/accel/tcg/tcg-runtime.c | ||
101 | +++ b/accel/tcg/tcg-runtime.c | ||
102 | @@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) | ||
103 | target_ulong cs_base, pc; | ||
104 | uint32_t flags; | ||
105 | |||
106 | - tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags()); | ||
107 | + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
108 | + | ||
109 | + tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags()); | ||
110 | if (tb == NULL) { | ||
111 | return tcg_code_gen_epilogue; | ||
112 | } | ||
34 | -- | 113 | -- |
35 | 2.20.1 | 114 | 2.25.1 |
36 | 115 | ||
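The caller-side change, condensed from the hunks above: rather than tb_lookup__cpu_state() filling in pc/cs_base/flags through out-parameters as a side effect of the lookup, each caller now extracts the state itself and passes it by value:

    /* before */
    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);

    /* after */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask);

Separating the state extraction from the lookup is what lets the following patches change how the cflags argument is computed without touching every call site again.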
1 | We do not need the entire CPUArchState to compute these values. | 1 | From: Alex Bennée <alex.bennee@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 3 | There is nothing special about this compile flag; we can just |
4 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | 4 | compute it with curr_cflags(), which we should be using when |
5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 5 | building a new set. |
6 | |||
7 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> | ||
8 | Message-Id: <20210224165811.11567-3-alex.bennee@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 10 | --- |
8 | accel/tcg/cputlb.c | 15 ++++++++------- | 11 | include/exec/exec-all.h | 8 +++++--- |
9 | 1 file changed, 8 insertions(+), 7 deletions(-) | 12 | include/exec/tb-lookup.h | 3 --- |
13 | accel/tcg/cpu-exec.c | 9 ++++----- | ||
14 | accel/tcg/tcg-runtime.c | 2 +- | ||
15 | accel/tcg/translate-all.c | 6 +++--- | ||
16 | softmmu/physmem.c | 2 +- | ||
17 | 6 files changed, 14 insertions(+), 16 deletions(-) | ||
10 | 18 | ||
11 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 19 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h |
12 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/accel/tcg/cputlb.c | 21 | --- a/include/exec/exec-all.h |
14 | +++ b/accel/tcg/cputlb.c | 22 | +++ b/include/exec/exec-all.h |
15 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); | 23 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t tb_cflags(const TranslationBlock *tb) |
16 | QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); | 24 | } |
17 | #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) | 25 | |
18 | 26 | /* current cflags for hashing/comparison */ | |
19 | -static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx) | 27 | -static inline uint32_t curr_cflags(void) |
20 | +static inline size_t tlb_n_entries(CPUTLBDescFast *fast) | 28 | +static inline uint32_t curr_cflags(CPUState *cpu) |
21 | { | 29 | { |
22 | - return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1; | 30 | - return (parallel_cpus ? CF_PARALLEL : 0) |
23 | + return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; | 31 | - | (icount_enabled() ? CF_USE_ICOUNT : 0); |
32 | + uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cpu->cluster_index); | ||
33 | + cflags |= parallel_cpus ? CF_PARALLEL : 0; | ||
34 | + cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; | ||
35 | + return cflags; | ||
24 | } | 36 | } |
25 | 37 | ||
26 | -static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx) | 38 | /* TranslationBlock invalidate API */ |
27 | +static inline size_t sizeof_tlb(CPUTLBDescFast *fast) | 39 | diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h |
28 | { | 40 | index XXXXXXX..XXXXXXX 100644 |
29 | - return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS); | 41 | --- a/include/exec/tb-lookup.h |
30 | + return fast->mask + (1 << CPU_TLB_ENTRY_BITS); | 42 | +++ b/include/exec/tb-lookup.h |
31 | } | 43 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock * tb_lookup(CPUState *cpu, |
32 | 44 | hash = tb_jmp_cache_hash_func(pc); | |
33 | static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, | 45 | tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); |
34 | @@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env) | 46 | |
35 | static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx) | 47 | - cf_mask &= ~CF_CLUSTER_MASK; |
36 | { | 48 | - cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT; |
37 | CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; | 49 | - |
38 | - size_t old_size = tlb_n_entries(env, mmu_idx); | 50 | if (likely(tb && |
39 | + size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); | 51 | tb->pc == pc && |
40 | size_t rate; | 52 | tb->cs_base == cs_base && |
41 | size_t new_size = old_size; | 53 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c |
42 | int64_t now = get_clock_realtime(); | 54 | index XXXXXXX..XXXXXXX 100644 |
43 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx) | 55 | --- a/accel/tcg/cpu-exec.c |
44 | env_tlb(env)->d[mmu_idx].large_page_addr = -1; | 56 | +++ b/accel/tcg/cpu-exec.c |
45 | env_tlb(env)->d[mmu_idx].large_page_mask = -1; | 57 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) |
46 | env_tlb(env)->d[mmu_idx].vindex = 0; | 58 | TranslationBlock *tb; |
47 | - memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx)); | 59 | target_ulong cs_base, pc; |
48 | + memset(env_tlb(env)->f[mmu_idx].table, -1, | 60 | uint32_t flags; |
49 | + sizeof_tlb(&env_tlb(env)->f[mmu_idx])); | 61 | - uint32_t cflags = 1; |
50 | memset(env_tlb(env)->d[mmu_idx].vtable, -1, | 62 | - uint32_t cf_mask = cflags & CF_HASH_MASK; |
51 | sizeof(env_tlb(env)->d[0].vtable)); | 63 | + uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1; |
52 | } | 64 | int tb_exit; |
53 | @@ -XXX,XX +XXX,XX @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) | 65 | |
54 | qemu_spin_lock(&env_tlb(env)->c.lock); | 66 | if (sigsetjmp(cpu->jmp_env, 0) == 0) { |
55 | for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { | 67 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) |
56 | unsigned int i; | 68 | cpu->running = true; |
57 | - unsigned int n = tlb_n_entries(env, mmu_idx); | 69 | |
58 | + unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); | 70 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); |
59 | 71 | - tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask); | |
60 | for (i = 0; i < n; i++) { | 72 | + tb = tb_lookup(cpu, pc, cs_base, flags, cflags); |
61 | tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i], | 73 | |
74 | if (tb == NULL) { | ||
75 | mmap_lock(); | ||
76 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) | ||
77 | if (replay_has_exception() | ||
78 | && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) { | ||
79 | /* Execute just one insn to trigger exception pending in the log */ | ||
80 | - cpu->cflags_next_tb = (curr_cflags() & ~CF_USE_ICOUNT) | 1; | ||
81 | + cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1; | ||
82 | } | ||
83 | #endif | ||
84 | return false; | ||
85 | @@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu) | ||
86 | have CF_INVALID set, -1 is a convenient invalid value that | ||
87 | does not require tcg headers for cpu_common_reset. */ | ||
88 | if (cflags == -1) { | ||
89 | - cflags = curr_cflags(); | ||
90 | + cflags = curr_cflags(cpu); | ||
91 | } else { | ||
92 | cpu->cflags_next_tb = -1; | ||
93 | } | ||
94 | diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c | ||
95 | index XXXXXXX..XXXXXXX 100644 | ||
96 | --- a/accel/tcg/tcg-runtime.c | ||
97 | +++ b/accel/tcg/tcg-runtime.c | ||
98 | @@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) | ||
99 | |||
100 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
101 | |||
102 | - tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags()); | ||
103 | + tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu)); | ||
104 | if (tb == NULL) { | ||
105 | return tcg_code_gen_epilogue; | ||
106 | } | ||
107 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | ||
108 | index XXXXXXX..XXXXXXX 100644 | ||
109 | --- a/accel/tcg/translate-all.c | ||
110 | +++ b/accel/tcg/translate-all.c | ||
111 | @@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, | ||
112 | if (current_tb_modified) { | ||
113 | page_collection_unlock(pages); | ||
114 | /* Force execution of one insn next time. */ | ||
115 | - cpu->cflags_next_tb = 1 | curr_cflags(); | ||
116 | + cpu->cflags_next_tb = 1 | curr_cflags(cpu); | ||
117 | mmap_unlock(); | ||
118 | cpu_loop_exit_noexc(cpu); | ||
119 | } | ||
120 | @@ -XXX,XX +XXX,XX @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc) | ||
121 | #ifdef TARGET_HAS_PRECISE_SMC | ||
122 | if (current_tb_modified) { | ||
123 | /* Force execution of one insn next time. */ | ||
124 | - cpu->cflags_next_tb = 1 | curr_cflags(); | ||
125 | + cpu->cflags_next_tb = 1 | curr_cflags(cpu); | ||
126 | return true; | ||
127 | } | ||
128 | #endif | ||
129 | @@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) | ||
130 | * operations only (which execute after completion) so we don't | ||
131 | * double instrument the instruction. | ||
132 | */ | ||
133 | - cpu->cflags_next_tb = curr_cflags() | CF_MEMI_ONLY | CF_LAST_IO | n; | ||
134 | + cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n; | ||
135 | |||
136 | qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, | ||
137 | "cpu_io_recompile: rewound execution of TB to " | ||
138 | diff --git a/softmmu/physmem.c b/softmmu/physmem.c | ||
139 | index XXXXXXX..XXXXXXX 100644 | ||
140 | --- a/softmmu/physmem.c | ||
141 | +++ b/softmmu/physmem.c | ||
142 | @@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
143 | cpu_loop_exit_restore(cpu, ra); | ||
144 | } else { | ||
145 | /* Force execution of one insn next time. */ | ||
146 | - cpu->cflags_next_tb = 1 | curr_cflags(); | ||
147 | + cpu->cflags_next_tb = 1 | curr_cflags(cpu); | ||
148 | mmap_unlock(); | ||
149 | if (ra) { | ||
150 | cpu_restore_state(cpu, ra, true); | ||
62 | -- | 151 | -- |
63 | 2.20.1 | 152 | 2.25.1 |
64 | 153 | ||
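To see what the new deposit32() call in curr_cflags() produces: CF_CLUSTER_SHIFT is 24 (the define is visible in the exec-all.h context of the next patch), so the cluster index lands in the top byte of cflags before the PARALLEL and ICOUNT bits are OR'ed in. A standalone sketch; deposit32() is modelled locally here for illustration, the real inline being in include/qemu/bitops.h:

    #include <stdint.h>
    #include <stdio.h>

    #define CF_CLUSTER_SHIFT 24    /* from include/exec/exec-all.h */

    /* local stand-in for QEMU's deposit32(value, start, length, fieldval) */
    static uint32_t deposit32(uint32_t value, int start, int length,
                              uint32_t fieldval)
    {
        uint32_t mask = (~0U >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        /* cluster index 3 -> cflags 0x03000000; prints 03000000 */
        printf("%08x\n", (unsigned)deposit32(0, CF_CLUSTER_SHIFT, 8, 3));
        return 0;
    }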
New patch | |||
---|---|---|---|
1 | 1 | From: Alex Bennée <alex.bennee@linaro.org> | |
2 | |||
3 | We don't really deal in cf_mask most of the time. The one time it's | ||
4 | relevant is when we want to remove an invalidated TB from the QHT | ||
5 | lookup. Everywhere else we should be looking up things without | ||
6 | CF_INVALID set. | ||
7 | |||
8 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> | ||
9 | Message-Id: <20210224165811.11567-4-alex.bennee@linaro.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | include/exec/exec-all.h | 4 +--- | ||
13 | include/exec/tb-lookup.h | 9 ++++++--- | ||
14 | accel/tcg/cpu-exec.c | 16 ++++++++-------- | ||
15 | accel/tcg/tcg-runtime.c | 2 +- | ||
16 | accel/tcg/translate-all.c | 8 +++++--- | ||
17 | 5 files changed, 21 insertions(+), 18 deletions(-) | ||
18 | |||
19 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/exec/exec-all.h | ||
22 | +++ b/include/exec/exec-all.h | ||
23 | @@ -XXX,XX +XXX,XX @@ struct TranslationBlock { | ||
24 | #define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */ | ||
25 | #define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */ | ||
26 | #define CF_CLUSTER_SHIFT 24 | ||
27 | -/* cflags' mask for hashing/comparison, basically ignore CF_INVALID */ | ||
28 | -#define CF_HASH_MASK (~CF_INVALID) | ||
29 | |||
30 | /* Per-vCPU dynamic tracing state used to generate this TB */ | ||
31 | uint32_t trace_vcpu_dstate; | ||
32 | @@ -XXX,XX +XXX,XX @@ void tb_flush(CPUState *cpu); | ||
33 | void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); | ||
34 | TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||
35 | target_ulong cs_base, uint32_t flags, | ||
36 | - uint32_t cf_mask); | ||
37 | + uint32_t cflags); | ||
38 | void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); | ||
39 | |||
40 | /* GETPC is the true target of the return instruction that we'll execute. */ | ||
41 | diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h | ||
42 | index XXXXXXX..XXXXXXX 100644 | ||
43 | --- a/include/exec/tb-lookup.h | ||
44 | +++ b/include/exec/tb-lookup.h | ||
45 | @@ -XXX,XX +XXX,XX @@ | ||
46 | /* Might cause an exception, so have a longjmp destination ready */ | ||
47 | static inline TranslationBlock * tb_lookup(CPUState *cpu, | ||
48 | target_ulong pc, target_ulong cs_base, | ||
49 | - uint32_t flags, uint32_t cf_mask) | ||
50 | + uint32_t flags, uint32_t cflags) | ||
51 | { | ||
52 | TranslationBlock *tb; | ||
53 | uint32_t hash; | ||
54 | |||
55 | + /* we should never be trying to look up an INVALID tb */ | ||
56 | + tcg_debug_assert(!(cflags & CF_INVALID)); | ||
57 | + | ||
58 | hash = tb_jmp_cache_hash_func(pc); | ||
59 | tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); | ||
60 | |||
61 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock * tb_lookup(CPUState *cpu, | ||
62 | tb->cs_base == cs_base && | ||
63 | tb->flags == flags && | ||
64 | tb->trace_vcpu_dstate == *cpu->trace_dstate && | ||
65 | - (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) { | ||
66 | + tb_cflags(tb) == cflags)) { | ||
67 | return tb; | ||
68 | } | ||
69 | - tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
70 | + tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); | ||
71 | if (tb == NULL) { | ||
72 | return NULL; | ||
73 | } | ||
74 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
75 | index XXXXXXX..XXXXXXX 100644 | ||
76 | --- a/accel/tcg/cpu-exec.c | ||
77 | +++ b/accel/tcg/cpu-exec.c | ||
78 | @@ -XXX,XX +XXX,XX @@ struct tb_desc { | ||
79 | CPUArchState *env; | ||
80 | tb_page_addr_t phys_page1; | ||
81 | uint32_t flags; | ||
82 | - uint32_t cf_mask; | ||
83 | + uint32_t cflags; | ||
84 | uint32_t trace_vcpu_dstate; | ||
85 | }; | ||
86 | |||
87 | @@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||
88 | tb->cs_base == desc->cs_base && | ||
89 | tb->flags == desc->flags && | ||
90 | tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && | ||
91 | - (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) { | ||
92 | + tb_cflags(tb) == desc->cflags) { | ||
93 | /* check next page if needed */ | ||
94 | if (tb->page_addr[1] == -1) { | ||
95 | return true; | ||
96 | @@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||
97 | |||
98 | TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||
99 | target_ulong cs_base, uint32_t flags, | ||
100 | - uint32_t cf_mask) | ||
101 | + uint32_t cflags) | ||
102 | { | ||
103 | tb_page_addr_t phys_pc; | ||
104 | struct tb_desc desc; | ||
105 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||
106 | desc.env = (CPUArchState *)cpu->env_ptr; | ||
107 | desc.cs_base = cs_base; | ||
108 | desc.flags = flags; | ||
109 | - desc.cf_mask = cf_mask; | ||
110 | + desc.cflags = cflags; | ||
111 | desc.trace_vcpu_dstate = *cpu->trace_dstate; | ||
112 | desc.pc = pc; | ||
113 | phys_pc = get_page_addr_code(desc.env, pc); | ||
114 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||
115 | return NULL; | ||
116 | } | ||
117 | desc.phys_page1 = phys_pc & TARGET_PAGE_MASK; | ||
118 | - h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate); | ||
119 | + h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate); | ||
120 | return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); | ||
121 | } | ||
122 | |||
123 | @@ -XXX,XX +XXX,XX @@ static inline void tb_add_jump(TranslationBlock *tb, int n, | ||
124 | |||
125 | static inline TranslationBlock *tb_find(CPUState *cpu, | ||
126 | TranslationBlock *last_tb, | ||
127 | - int tb_exit, uint32_t cf_mask) | ||
128 | + int tb_exit, uint32_t cflags) | ||
129 | { | ||
130 | CPUArchState *env = (CPUArchState *)cpu->env_ptr; | ||
131 | TranslationBlock *tb; | ||
132 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_find(CPUState *cpu, | ||
133 | |||
134 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
135 | |||
136 | - tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
137 | + tb = tb_lookup(cpu, pc, cs_base, flags, cflags); | ||
138 | if (tb == NULL) { | ||
139 | mmap_lock(); | ||
140 | - tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask); | ||
141 | + tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); | ||
142 | mmap_unlock(); | ||
143 | /* We add the TB in the virtual pc hash table for the fast lookup */ | ||
144 | qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); | ||
145 | diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c | ||
146 | index XXXXXXX..XXXXXXX 100644 | ||
147 | --- a/accel/tcg/tcg-runtime.c | ||
148 | +++ b/accel/tcg/tcg-runtime.c | ||
149 | @@ -XXX,XX +XXX,XX @@ | ||
150 | #include "exec/helper-proto.h" | ||
151 | #include "exec/cpu_ldst.h" | ||
152 | #include "exec/exec-all.h" | ||
153 | -#include "exec/tb-lookup.h" | ||
154 | #include "disas/disas.h" | ||
155 | #include "exec/log.h" | ||
156 | #include "tcg/tcg.h" | ||
157 | +#include "exec/tb-lookup.h" | ||
158 | |||
159 | /* 32-bit helpers */ | ||
160 | |||
161 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | ||
162 | index XXXXXXX..XXXXXXX 100644 | ||
163 | --- a/accel/tcg/translate-all.c | ||
164 | +++ b/accel/tcg/translate-all.c | ||
165 | @@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp) | ||
166 | return a->pc == b->pc && | ||
167 | a->cs_base == b->cs_base && | ||
168 | a->flags == b->flags && | ||
169 | - (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) && | ||
170 | + (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && | ||
171 | a->trace_vcpu_dstate == b->trace_vcpu_dstate && | ||
172 | a->page_addr[0] == b->page_addr[0] && | ||
173 | a->page_addr[1] == b->page_addr[1]; | ||
174 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
175 | PageDesc *p; | ||
176 | uint32_t h; | ||
177 | tb_page_addr_t phys_pc; | ||
178 | + uint32_t orig_cflags = tb_cflags(tb); | ||
179 | |||
180 | assert_memory_lock(); | ||
181 | |||
182 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
183 | |||
184 | /* remove the TB from the hash list */ | ||
185 | phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); | ||
186 | - h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK, | ||
187 | + h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags, | ||
188 | tb->trace_vcpu_dstate); | ||
189 | if (!qht_remove(&tb_ctx.htable, tb, h)) { | ||
190 | return; | ||
191 | @@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | ||
192 | uint32_t h; | ||
193 | |||
194 | assert_memory_lock(); | ||
195 | + tcg_debug_assert(!(tb->cflags & CF_INVALID)); | ||
196 | |||
197 | /* | ||
198 | * Add the TB to the page list, acquiring first the pages's locks. | ||
199 | @@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | ||
200 | } | ||
201 | |||
202 | /* add in the hash table */ | ||
203 | - h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK, | ||
204 | + h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags, | ||
205 | tb->trace_vcpu_dstate); | ||
206 | qht_insert(&tb_ctx.htable, tb, h, &existing_tb); | ||
207 | |||
208 | -- | ||
209 | 2.25.1 | ||
210 | |||
1 | From: Carlos Santos <casantos@redhat.com> | 1 | From: Alex Bennée <alex.bennee@linaro.org> |
---|---|---|---|
2 | 2 | ||
3 | uClibc defines _SC_LEVEL1_ICACHE_LINESIZE and _SC_LEVEL1_DCACHE_LINESIZE | 3 | Let's make sure all the flags we compare when looking up blocks are |
4 | but the corresponding sysconf calls return -1, which is a valid result, | 4 | together in the same place. |
5 | meaning that the limit is indeterminate. | ||
6 | 5 | ||
7 | Handle this situation using the fallback values instead of crashing due | 6 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> |
8 | to an assertion failure. | 7 | Message-Id: <20210224165811.11567-5-alex.bennee@linaro.org> |
9 | |||
10 | Signed-off-by: Carlos Santos <casantos@redhat.com> | ||
11 | Message-Id: <20191017123713.30192-1-casantos@redhat.com> | ||
12 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
13 | --- | 9 | --- |
14 | util/cacheinfo.c | 10 ++++++++-- | 10 | include/exec/exec-all.h | 8 +++++--- |
15 | 1 file changed, 8 insertions(+), 2 deletions(-) | 11 | 1 file changed, 5 insertions(+), 3 deletions(-) |
16 | 12 | ||
17 | diff --git a/util/cacheinfo.c b/util/cacheinfo.c | 13 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h |
18 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/util/cacheinfo.c | 15 | --- a/include/exec/exec-all.h |
20 | +++ b/util/cacheinfo.c | 16 | +++ b/include/exec/exec-all.h |
21 | @@ -XXX,XX +XXX,XX @@ static void sys_cache_info(int *isize, int *dsize) | 17 | @@ -XXX,XX +XXX,XX @@ struct TranslationBlock { |
22 | static void sys_cache_info(int *isize, int *dsize) | 18 | target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */ |
23 | { | 19 | target_ulong cs_base; /* CS base for this block */ |
24 | # ifdef _SC_LEVEL1_ICACHE_LINESIZE | 20 | uint32_t flags; /* flags defining in which context the code was generated */ |
25 | - *isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE); | 21 | - uint16_t size; /* size of target code for this block (1 <= |
26 | + int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE); | 22 | - size <= TARGET_PAGE_SIZE) */ |
27 | + if (tmp_isize > 0) { | 23 | - uint16_t icount; |
28 | + *isize = tmp_isize; | 24 | uint32_t cflags; /* compile flags */ |
29 | + } | 25 | #define CF_COUNT_MASK 0x00007fff |
30 | # endif | 26 | #define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */ |
31 | # ifdef _SC_LEVEL1_DCACHE_LINESIZE | 27 | @@ -XXX,XX +XXX,XX @@ struct TranslationBlock { |
32 | - *dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE); | 28 | /* Per-vCPU dynamic tracing state used to generate this TB */ |
33 | + int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE); | 29 | uint32_t trace_vcpu_dstate; |
34 | + if (tmp_dsize > 0) { | 30 | |
35 | + *dsize = tmp_dsize; | 31 | + /* Above fields used for comparing */ |
36 | + } | 32 | + uint16_t size; /* size of target code for this block (1 <= |
37 | # endif | 33 | + size <= TARGET_PAGE_SIZE) */ |
38 | } | 34 | + uint16_t icount; |
39 | #endif /* sys_cache_info */ | 35 | + |
36 | struct tb_tc tc; | ||
37 | |||
38 | /* first and second physical page containing code. The lower bit | ||
40 | -- | 39 | -- |
41 | 2.20.1 | 40 | 2.25.1 |
42 | 41 | ||
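Condensed from the hunks above, the resulting field order: everything the TB lookup comparison reads now sits contiguously at the head of TranslationBlock. An abridged sketch (the CF_* defines between cflags and the tracing state are elided; the typedef is an assumption for a 64-bit guest):

    #include <stdint.h>
    typedef uint64_t target_ulong;      /* assumption: 64-bit guest */

    struct TranslationBlock {
        target_ulong pc;                /* simulated PC for this block */
        target_ulong cs_base;           /* CS base for this block */
        uint32_t flags;                 /* context the code was generated in */
        uint32_t cflags;                /* compile flags */
        uint32_t trace_vcpu_dstate;     /* per-vCPU dynamic tracing state */
        /* Above fields used for comparing */
        uint16_t size;                  /* target code size, <= TARGET_PAGE_SIZE */
        uint16_t icount;
        /* ... code pointers, page links and jump slots follow ... */
    };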
New patch | |||
---|---|---|---|
1 | 1 | The primary motivation is to remove a dozen insns along | |
2 | the fast-path in tb_lookup. As a byproduct, this allows | ||
3 | us to completely remove parallel_cpus. | ||
4 | |||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | accel/tcg/tcg-accel-ops.h | 1 + | ||
8 | include/exec/exec-all.h | 7 +------ | ||
9 | include/hw/core/cpu.h | 2 ++ | ||
10 | accel/tcg/cpu-exec.c | 3 --- | ||
11 | accel/tcg/tcg-accel-ops-mttcg.c | 3 +-- | ||
12 | accel/tcg/tcg-accel-ops-rr.c | 2 +- | ||
13 | accel/tcg/tcg-accel-ops.c | 8 ++++++++ | ||
14 | accel/tcg/translate-all.c | 4 ---- | ||
15 | linux-user/main.c | 1 + | ||
16 | linux-user/sh4/signal.c | 8 +++++--- | ||
17 | linux-user/syscall.c | 18 ++++++++++-------- | ||
18 | 11 files changed, 30 insertions(+), 27 deletions(-) | ||
19 | |||
20 | diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/accel/tcg/tcg-accel-ops.h | ||
23 | +++ b/accel/tcg/tcg-accel-ops.h | ||
24 | @@ -XXX,XX +XXX,XX @@ | ||
25 | void tcg_cpus_destroy(CPUState *cpu); | ||
26 | int tcg_cpus_exec(CPUState *cpu); | ||
27 | void tcg_handle_interrupt(CPUState *cpu, int mask); | ||
28 | +void tcg_cpu_init_cflags(CPUState *cpu, bool parallel); | ||
29 | |||
30 | #endif /* TCG_CPUS_H */ | ||
31 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/include/exec/exec-all.h | ||
34 | +++ b/include/exec/exec-all.h | ||
35 | @@ -XXX,XX +XXX,XX @@ struct TranslationBlock { | ||
36 | uintptr_t jmp_dest[2]; | ||
37 | }; | ||
38 | |||
39 | -extern bool parallel_cpus; | ||
40 | - | ||
41 | /* Hide the qatomic_read to make code a little easier on the eyes */ | ||
42 | static inline uint32_t tb_cflags(const TranslationBlock *tb) | ||
43 | { | ||
44 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t tb_cflags(const TranslationBlock *tb) | ||
45 | /* current cflags for hashing/comparison */ | ||
46 | static inline uint32_t curr_cflags(CPUState *cpu) | ||
47 | { | ||
48 | - uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cpu->cluster_index); | ||
49 | - cflags |= parallel_cpus ? CF_PARALLEL : 0; | ||
50 | - cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; | ||
51 | - return cflags; | ||
52 | + return cpu->tcg_cflags; | ||
53 | } | ||
54 | |||
55 | /* TranslationBlock invalidate API */ | ||
56 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | ||
57 | index XXXXXXX..XXXXXXX 100644 | ||
58 | --- a/include/hw/core/cpu.h | ||
59 | +++ b/include/hw/core/cpu.h | ||
60 | @@ -XXX,XX +XXX,XX @@ struct qemu_work_item; | ||
61 | * to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will | ||
62 | * be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER | ||
63 | * QOM parent. | ||
64 | + * @tcg_cflags: Pre-computed cflags for this cpu. | ||
65 | * @nr_cores: Number of cores within this CPU package. | ||
66 | * @nr_threads: Number of threads within this CPU. | ||
67 | * @running: #true if CPU is currently running (lockless). | ||
68 | @@ -XXX,XX +XXX,XX @@ struct CPUState { | ||
69 | /* TODO Move common fields from CPUArchState here. */ | ||
70 | int cpu_index; | ||
71 | int cluster_index; | ||
72 | + uint32_t tcg_cflags; | ||
73 | uint32_t halted; | ||
74 | uint32_t can_do_io; | ||
75 | int32_t exception_index; | ||
76 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
77 | index XXXXXXX..XXXXXXX 100644 | ||
78 | --- a/accel/tcg/cpu-exec.c | ||
79 | +++ b/accel/tcg/cpu-exec.c | ||
80 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) | ||
81 | mmap_unlock(); | ||
82 | } | ||
83 | |||
84 | - /* Since we got here, we know that parallel_cpus must be true. */ | ||
85 | - parallel_cpus = false; | ||
86 | cpu_exec_enter(cpu); | ||
87 | /* execute the generated code */ | ||
88 | trace_exec_tb(tb, pc); | ||
89 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) | ||
90 | * the execution. | ||
91 | */ | ||
92 | g_assert(cpu_in_exclusive_context(cpu)); | ||
93 | - parallel_cpus = true; | ||
94 | cpu->running = false; | ||
95 | end_exclusive(); | ||
96 | } | ||
97 | diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c | ||
98 | index XXXXXXX..XXXXXXX 100644 | ||
99 | --- a/accel/tcg/tcg-accel-ops-mttcg.c | ||
100 | +++ b/accel/tcg/tcg-accel-ops-mttcg.c | ||
101 | @@ -XXX,XX +XXX,XX @@ void mttcg_start_vcpu_thread(CPUState *cpu) | ||
102 | char thread_name[VCPU_THREAD_NAME_SIZE]; | ||
103 | |||
104 | g_assert(tcg_enabled()); | ||
105 | - | ||
106 | - parallel_cpus = (current_machine->smp.max_cpus > 1); | ||
107 | + tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1); | ||
108 | |||
109 | cpu->thread = g_malloc0(sizeof(QemuThread)); | ||
110 | cpu->halt_cond = g_malloc0(sizeof(QemuCond)); | ||
111 | diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c | ||
112 | index XXXXXXX..XXXXXXX 100644 | ||
113 | --- a/accel/tcg/tcg-accel-ops-rr.c | ||
114 | +++ b/accel/tcg/tcg-accel-ops-rr.c | ||
115 | @@ -XXX,XX +XXX,XX @@ void rr_start_vcpu_thread(CPUState *cpu) | ||
116 | static QemuThread *single_tcg_cpu_thread; | ||
117 | |||
118 | g_assert(tcg_enabled()); | ||
119 | - parallel_cpus = false; | ||
120 | + tcg_cpu_init_cflags(cpu, false); | ||
121 | |||
122 | if (!single_tcg_cpu_thread) { | ||
123 | cpu->thread = g_malloc0(sizeof(QemuThread)); | ||
124 | diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c | ||
125 | index XXXXXXX..XXXXXXX 100644 | ||
126 | --- a/accel/tcg/tcg-accel-ops.c | ||
127 | +++ b/accel/tcg/tcg-accel-ops.c | ||
128 | @@ -XXX,XX +XXX,XX @@ | ||
129 | |||
130 | /* common functionality among all TCG variants */ | ||
131 | |||
132 | +void tcg_cpu_init_cflags(CPUState *cpu, bool parallel) | ||
133 | +{ | ||
134 | + uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT; | ||
135 | + cflags |= parallel ? CF_PARALLEL : 0; | ||
136 | + cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; | ||
137 | + cpu->tcg_cflags = cflags; | ||
138 | +} | ||
139 | + | ||
140 | void tcg_cpus_destroy(CPUState *cpu) | ||
141 | { | ||
142 | cpu_thread_signal_destroyed(cpu); | ||
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static void *l1_map[V_L1_MAX_SIZE];
 TCGContext tcg_init_ctx;
 __thread TCGContext *tcg_ctx;
 TBContext tb_ctx;
-bool parallel_cpus;

 static void page_table_config_init(void)
 {
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
         cflags = (cflags & ~CF_COUNT_MASK) | 1;
     }

-    cflags &= ~CF_CLUSTER_MASK;
-    cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
-
     max_insns = cflags & CF_COUNT_MASK;
     if (max_insns == 0) {
         max_insns = CF_COUNT_MASK;
diff --git a/linux-user/main.c b/linux-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env)
     /* Reset non arch specific state */
     cpu_reset(new_cpu);

+    new_cpu->tcg_cflags = cpu->tcg_cflags;
     memcpy(new_env, env, sizeof(CPUArchState));

     /* Clone all break/watchpoints.
diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/sh4/signal.c
+++ b/linux-user/sh4/signal.c
@@ -XXX,XX +XXX,XX @@ static abi_ulong get_sigframe(struct target_sigaction *ka,
     return (sp - frame_size) & -8ul;
 }

-/* Notice when we're in the middle of a gUSA region and reset.
-   Note that this will only occur for !parallel_cpus, as we will
-   translate such sequences differently in a parallel context. */
+/*
+ * Notice when we're in the middle of a gUSA region and reset.
+ * Note that this will only occur when #CF_PARALLEL is unset, as we
+ * will translate such sequences differently in a parallel context.
+ */
 static void unwind_gusa(CPUSH4State *regs)
 {
     /* If the stack pointer is sufficiently negative, and we haven't
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
         /* Grab a mutex so that thread setup appears atomic. */
         pthread_mutex_lock(&clone_lock);

+        /*
+         * If this is our first additional thread, we need to ensure we
+         * generate code for parallel execution and flush old translations.
+         * Do this now so that the copy gets CF_PARALLEL too.
+         */
+        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
+            cpu->tcg_cflags |= CF_PARALLEL;
+            tb_flush(cpu);
+        }
+
         /* we create a new CPU instance. */
         new_env = cpu_copy(env);
         /* Init regs that differ from the parent. */
@@ -XXX,XX +XXX,XX @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
         cpu->random_seed = qemu_guest_random_seed_thread_part1();

-        /* If this is our first additional thread, we need to ensure we
-         * generate code for parallel execution and flush old translations.
-         */
-        if (!parallel_cpus) {
-            parallel_cpus = true;
-            tb_flush(cpu);
-        }
-
         ret = pthread_create(&info.thread, &attr, clone_func, &info);
         /* TODO: Free new CPU state if thread creation failed. */
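
[Editor's note: the ordering in the hunk above is the point of the
change: CF_PARALLEL is set and stale translations flushed *before*
cpu_copy(), so the child vCPU inherits the flag through the
new_cpu->tcg_cflags assignment added in linux-user/main.c. A toy,
self-contained C model of that invariant; every name below is
hypothetical, not QEMU code:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustrative bit value; mirrors CF_PARALLEL's role, not its ABI. */
    #define TOY_CF_PARALLEL 0x00080000u

    struct toy_cpu {
        uint32_t tcg_cflags;
    };

    /* Models cpu_copy(): the child is a byte copy of the parent, so it
     * sees whatever tcg_cflags the parent had at copy time. */
    static struct toy_cpu toy_cpu_copy(const struct toy_cpu *parent)
    {
        struct toy_cpu child;
        memcpy(&child, parent, sizeof(child));
        return child;
    }

    int main(void)
    {
        struct toy_cpu parent = { .tcg_cflags = 0 };

        /* First additional thread: flip to parallel before copying,
         * mirroring the order do_fork() now uses (tb_flush would sit
         * between these two steps in QEMU). */
        parent.tcg_cflags |= TOY_CF_PARALLEL;
        struct toy_cpu child = toy_cpu_copy(&parent);

        assert(child.tcg_cflags & TOY_CF_PARALLEL);
        return 0;
    }
]
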
--
2.25.1