The following changes since commit 91e92cad67caca3bc4b8e920ddb5c8ca64aac9e1:

  Merge remote-tracking branch 'remotes/cohuck-gitlab/tags/s390x-20210305' into staging (2021-03-05 19:04:47 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20210306

for you to fetch changes up to 6cc9d67c6f682cf04eea2d6e64a252b63a7eccdf:

  accel/tcg: Precompute curr_cflags into cpu->tcg_cflags (2021-03-06 11:53:57 -0800)

----------------------------------------------------------------
TCI build fix and cleanup
Streamline tb_lookup
Fixes for tcg/aarch64

----------------------------------------------------------------
Alex Bennée (4):
      accel/tcg: rename tb_lookup__cpu_state and hoist state extraction
      accel/tcg: move CF_CLUSTER calculation to curr_cflags
      accel/tcg: drop the use of CF_HASH_MASK and rename params
      include/exec: lightly re-arrange TranslationBlock

Richard Henderson (23):
      tcg/aarch64: Fix constant subtraction in tcg_out_addsub2
      tcg/aarch64: Fix I3617_CMLE0
      tcg/aarch64: Fix generation of "scalar" vector operations
      tcg/tci: Use exec/cpu_ldst.h interfaces
      tcg: Split out tcg_raise_tb_overflow
      tcg: Manage splitwx in tc_ptr_to_region_tree by hand
      tcg/tci: Merge identical cases in generation (arithmetic opcodes)
      tcg/tci: Merge identical cases in generation (exchange opcodes)
      tcg/tci: Merge identical cases in generation (deposit opcode)
      tcg/tci: Merge identical cases in generation (conditional opcodes)
      tcg/tci: Merge identical cases in generation (load/store opcodes)
      tcg/tci: Remove tci_read_r8
      tcg/tci: Remove tci_read_r8s
      tcg/tci: Remove tci_read_r16
      tcg/tci: Remove tci_read_r16s
      tcg/tci: Remove tci_read_r32
      tcg/tci: Remove tci_read_r32s
      tcg/tci: Reduce use of tci_read_r64
      tcg/tci: Merge basic arithmetic operations
      tcg/tci: Merge extension operations
      tcg/tci: Merge bswap operations
      tcg/tci: Merge mov, not and neg operations
      accel/tcg: Precompute curr_cflags into cpu->tcg_cflags

 accel/tcg/tcg-accel-ops.h       |   1 +
 include/exec/exec-all.h         |  22 +-
 include/exec/tb-lookup.h        |  26 +-
 include/hw/core/cpu.h           |   2 +
 accel/tcg/cpu-exec.c            |  34 +--
 accel/tcg/tcg-accel-ops-mttcg.c |   3 +-
 accel/tcg/tcg-accel-ops-rr.c    |   2 +-
 accel/tcg/tcg-accel-ops.c       |   8 +
 accel/tcg/tcg-runtime.c         |   6 +-
 accel/tcg/translate-all.c       |  18 +-
 linux-user/main.c               |   1 +
 linux-user/sh4/signal.c         |   8 +-
 linux-user/syscall.c            |  18 +-
 softmmu/physmem.c               |   2 +-
 tcg/tcg.c                       |  29 ++-
 tcg/tci.c                       | 526 ++++++++++++----------------------------
 tcg/aarch64/tcg-target.c.inc    | 229 ++++++++++++++---
 tcg/tci/tcg-target.c.inc        | 204 ++++++----------
 18 files changed, 529 insertions(+), 610 deletions(-)

V2 fixes a build problem that affected win32.

r~

The following changes since commit 187f35512106501fe9a11057f4d8705431e0026d:

  Merge remote-tracking branch 'remotes/stsquad/tags/pull-testing-next-251019-3' into staging (2019-10-26 10:13:48 +0100)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20191028

for you to fetch changes up to fe9b676fb3160496b4b2bf0c57d33be724bf04c3:

  translate-all: Remove tb_alloc (2019-10-28 10:35:23 +0100)

----------------------------------------------------------------
Improvements for TARGET_PAGE_BITS_VARY
Fix for TCI ld16u_i64.
Fix for segv on icount execute from i/o memory.
Two misc cleanups.

----------------------------------------------------------------
Alex Bennée (1):
      cputlb: ensure _cmmu helper functions follow the naming standard

Clement Deschamps (1):
      translate-all: fix uninitialized tb->orig_tb

Richard Henderson (8):
      exec: Split out variable page size support to exec-vary.c
      configure: Detect compiler support for __attribute__((alias))
      exec: Use const alias for TARGET_PAGE_BITS_VARY
      exec: Restrict TARGET_PAGE_BITS_VARY assert to CONFIG_DEBUG_TCG
      exec: Promote TARGET_PAGE_MASK to target_long
      exec: Cache TARGET_PAGE_MASK for TARGET_PAGE_BITS_VARY
      cputlb: Fix tlb_vaddr_to_host
      translate-all: Remove tb_alloc

Stefan Weil (1):
      tci: Add implementation for INDEX_op_ld16u_i64

Wei Yang (1):
      cpu: use ROUND_UP() to define xxx_PAGE_ALIGN

 Makefile.target                  |   2 +-
 include/exec/cpu-all.h           |  33 ++++++++----
 include/exec/cpu_ldst_template.h |   4 +-
 include/qemu-common.h            |   6 +++
 tcg/tcg.h                        |  20 +++++---
 accel/tcg/cputlb.c               |  26 ++++++++--
 accel/tcg/translate-all.c        |  21 ++------
 exec-vary.c                      | 108 +++++++++++++++++++++++++++++++++++++++
 exec.c                           |  34 ------------
 target/cris/translate_v10.inc.c  |   3 +-
 tcg/tci.c                        |  15 ++++++
 configure                        |  19 +++++++
 12 files changed, 214 insertions(+), 77 deletions(-)
 create mode 100644 exec-vary.c
An hppa guest executing

  0x000000000000e05c:  ldil L%10000,r4
  0x000000000000e060:  ldo 0(r4),r4
  0x000000000000e064:  sub r3,r4,sp

produces

  ---- 000000000000e064 000000000000e068
  sub2_i32 tmp0,tmp4,r3,$0x1,$0x10000,$0x0

after folding and constant propagation. Then we hit

  tcg-target.c.inc:640: tcg_out_insn_3401: Assertion `aimm <= 0xfff' failed.

because aimm is in fact -16, but unsigned.

The ((bl < 0) ^ sub) condition which negates bl is incorrect and will
always lead to this abort. If the constant is positive, sub will make
it negative; if the constant is negative, sub will keep it negative.
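To make the failure mode concrete, here is a minimal, self-contained sketch of the old selection logic; the "ADDSI"/"SUBSI" strings are stand-ins for the real instruction encodings, and the values come from the hppa example above:

```c
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    bool sub = true;                        /* the guest insn is a subtract */
    long inputs[] = { 0x10000, -0x10000 };  /* either sign of the constant */

    for (int i = 0; i < 2; i++) {
        long bl = inputs[i];
        const char *insn = "ADDSI";
        if ((bl < 0) ^ sub) {               /* the old, buggy condition */
            insn = "SUBSI";
            bl = -bl;
        }
        /* tcg_out_insn_3401 requires a non-negative immediate */
        printf("bl=%ld -> %s %ld (%s)\n", inputs[i], insn, bl,
               bl < 0 ? "would assert" : "ok");
    }
    return 0;
}
```

Both iterations end with a negative immediate, matching the analysis above; the fix below keys the negation on the sign of bl alone and swaps the opcode instead.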
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
}
}

-static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
- TCGReg rh, TCGReg al, TCGReg ah,
- tcg_target_long bl, tcg_target_long bh,
- bool const_bl, bool const_bh, bool sub)
+static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
+ TCGReg rh, TCGReg al, TCGReg ah,
+ tcg_target_long bl, tcg_target_long bh,
+ bool const_bl, bool const_bh, bool sub)
{
TCGReg orig_rl = rl;
AArch64Insn insn;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
}

if (const_bl) {
- insn = I3401_ADDSI;
- if ((bl < 0) ^ sub) {
- insn = I3401_SUBSI;
+ if (bl < 0) {
bl = -bl;
+ insn = sub ? I3401_ADDSI : I3401_SUBSI;
+ } else {
+ insn = sub ? I3401_SUBSI : I3401_ADDSI;
}
+
if (unlikely(al == TCG_REG_XZR)) {
/* ??? We want to allow al to be zero for the benefit of
negation via subtraction. However, that leaves open the
--
2.25.1
This includes add, sub, mul, and, or, xor.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 83 +++++++++++++++++--------------------------------------
 1 file changed, 25 insertions(+), 58 deletions(-)

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
*(uint32_t *)(t1 + t2) = t0;
break;

- /* Arithmetic operations (32 bit). */
+ /* Arithmetic operations (mixed 32/64 bit). */

- case INDEX_op_add_i32:
+ CASE_32_64(add)
t0 = *tb_ptr++;
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 + t2);
break;
- case INDEX_op_sub_i32:
+ CASE_32_64(sub)
t0 = *tb_ptr++;
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 - t2);
break;
- case INDEX_op_mul_i32:
+ CASE_32_64(mul)
t0 = *tb_ptr++;
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 * t2);
break;
+ CASE_32_64(and)
+ t0 = *tb_ptr++;
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, t1 & t2);
+ break;
+ CASE_32_64(or)
+ t0 = *tb_ptr++;
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, t1 | t2);
+ break;
+ CASE_32_64(xor)
+ t0 = *tb_ptr++;
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, t1 ^ t2);
+ break;
+
+ /* Arithmetic operations (32 bit). */
+
case INDEX_op_div_i32:
t0 = *tb_ptr++;
t1 = tci_read_r(regs, &tb_ptr);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2);
break;
- case INDEX_op_and_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 & t2);
- break;
- case INDEX_op_or_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 | t2);
- break;
- case INDEX_op_xor_i32:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 ^ t2);
- break;

/* Shift/rotate operations (32 bit). */

@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

/* Arithmetic operations (64 bit). */

- case INDEX_op_add_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 + t2);
- break;
- case INDEX_op_sub_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 - t2);
- break;
- case INDEX_op_mul_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 * t2);
- break;
case INDEX_op_div_i64:
t0 = *tb_ptr++;
t1 = tci_read_r(regs, &tb_ptr);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2);
break;
- case INDEX_op_and_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 & t2);
- break;
- case INDEX_op_or_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 | t2);
- break;
- case INDEX_op_xor_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r(regs, &tb_ptr);
- t2 = tci_read_r(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 ^ t2);
- break;

/* Shift/rotate operations (64 bit). */

--
2.25.1

From: Stefan Weil <sw@weilnetz.de>

This fixes "make check-tcg" on a Debian x86_64 host.

Signed-off-by: Stefan Weil <sw@weilnetz.de>
Tested-by: Thomas Huth <thuth@redhat.com>
Message-Id: <20190410194838.10123-1-sw@weilnetz.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ static void tci_write_reg8(tcg_target_ulong *regs, TCGReg index, uint8_t value)
tci_write_reg(regs, index, value);
}

+static void
+tci_write_reg16(tcg_target_ulong *regs, TCGReg index, uint16_t value)
+{
+ tci_write_reg(regs, index, value);
+}
+
static void
tci_write_reg32(tcg_target_ulong *regs, TCGReg index, uint32_t value)
{
@@ -XXX,XX +XXX,XX @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
break;
case INDEX_op_ld8s_i32:
+ TODO();
+ break;
case INDEX_op_ld16u_i32:
TODO();
break;
@@ -XXX,XX +XXX,XX @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
break;
case INDEX_op_ld8s_i64:
+ TODO();
+ break;
case INDEX_op_ld16u_i64:
+ t0 = *tb_ptr++;
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_s32(&tb_ptr);
+ tci_write_reg16(regs, t0, *(uint16_t *)(t1 + t2));
+ break;
case INDEX_op_ld16s_i64:
TODO();
break;
--
2.17.1
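A note on the CASE_32_64() helper used in the tci.c arithmetic patch above: its definition is introduced elsewhere in the series and is not shown in this excerpt, but it is essentially a two-way case label along these lines (a sketch, not the verbatim definition):

```c
/* Expand one interpreter body for both the 32-bit and 64-bit opcode.
 * glue() is QEMU's token-pasting helper from qemu/compiler.h. */
#define CASE_32_64(x)                        \
    case glue(glue(INDEX_op_, x), _i64):     \
    case glue(glue(INDEX_op_, x), _i32):
```

So CASE_32_64(add) covers both INDEX_op_add_i32 and INDEX_op_add_i64 with the single register-width body shown in the diff, which is what lets the separate 64-bit cases be deleted.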
For some vector operations, "1D" is not a valid type, and there
are separate instructions for the 64-bit scalar operation.

Tested-by: Stefan Weil <sw@weilnetz.de>
Buglink: https://bugs.launchpad.net/qemu/+bug/1916112
Fixes: 14e4c1e2355 ("tcg/aarch64: Add vector operations")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 211 ++++++++++++++++++++++++++++++-----
 1 file changed, 181 insertions(+), 30 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
I3606_BIC = 0x2f001400,
I3606_ORR = 0x0f001400,

+ /* AdvSIMD scalar shift by immediate */
+ I3609_SSHR = 0x5f000400,
+ I3609_SSRA = 0x5f001400,
+ I3609_SHL = 0x5f005400,
+ I3609_USHR = 0x7f000400,
+ I3609_USRA = 0x7f001400,
+ I3609_SLI = 0x7f005400,
+
+ /* AdvSIMD scalar three same */
+ I3611_SQADD = 0x5e200c00,
+ I3611_SQSUB = 0x5e202c00,
+ I3611_CMGT = 0x5e203400,
+ I3611_CMGE = 0x5e203c00,
+ I3611_SSHL = 0x5e204400,
+ I3611_ADD = 0x5e208400,
+ I3611_CMTST = 0x5e208c00,
+ I3611_UQADD = 0x7e200c00,
+ I3611_UQSUB = 0x7e202c00,
+ I3611_CMHI = 0x7e203400,
+ I3611_CMHS = 0x7e203c00,
+ I3611_USHL = 0x7e204400,
+ I3611_SUB = 0x7e208400,
+ I3611_CMEQ = 0x7e208c00,
+
+ /* AdvSIMD scalar two-reg misc */
+ I3612_CMGT0 = 0x5e208800,
+ I3612_CMEQ0 = 0x5e209800,
+ I3612_CMLT0 = 0x5e20a800,
+ I3612_ABS = 0x5e20b800,
+ I3612_CMGE0 = 0x7e208800,
+ I3612_CMLE0 = 0x7e209800,
+ I3612_NEG = 0x7e20b800,
+
/* AdvSIMD shift by immediate */
I3614_SSHR = 0x0f000400,
I3614_SSRA = 0x0f001400,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q,
| (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5);
}

+static void tcg_out_insn_3609(TCGContext *s, AArch64Insn insn,
+ TCGReg rd, TCGReg rn, unsigned immhb)
+{
+ tcg_out32(s, insn | immhb << 16 | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
+static void tcg_out_insn_3611(TCGContext *s, AArch64Insn insn,
+ unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
+{
+ tcg_out32(s, insn | (size << 22) | (rm & 0x1f) << 16
+ | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
+static void tcg_out_insn_3612(TCGContext *s, AArch64Insn insn,
+ unsigned size, TCGReg rd, TCGReg rn)
+{
+ tcg_out32(s, insn | (size << 22) | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q,
TCGReg rd, TCGReg rn, unsigned immhb)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
const TCGArg *args, const int *const_args)
{
- static const AArch64Insn cmp_insn[16] = {
+ static const AArch64Insn cmp_vec_insn[16] = {
[TCG_COND_EQ] = I3616_CMEQ,
[TCG_COND_GT] = I3616_CMGT,
[TCG_COND_GE] = I3616_CMGE,
[TCG_COND_GTU] = I3616_CMHI,
[TCG_COND_GEU] = I3616_CMHS,
};
- static const AArch64Insn cmp0_insn[16] = {
+ static const AArch64Insn cmp_scalar_insn[16] = {
+ [TCG_COND_EQ] = I3611_CMEQ,
+ [TCG_COND_GT] = I3611_CMGT,
+ [TCG_COND_GE] = I3611_CMGE,
+ [TCG_COND_GTU] = I3611_CMHI,
+ [TCG_COND_GEU] = I3611_CMHS,
+ };
+ static const AArch64Insn cmp0_vec_insn[16] = {
[TCG_COND_EQ] = I3617_CMEQ0,
[TCG_COND_GT] = I3617_CMGT0,
[TCG_COND_GE] = I3617_CMGE0,
[TCG_COND_LT] = I3617_CMLT0,
[TCG_COND_LE] = I3617_CMLE0,
};
+ static const AArch64Insn cmp0_scalar_insn[16] = {
+ [TCG_COND_EQ] = I3612_CMEQ0,
+ [TCG_COND_GT] = I3612_CMGT0,
+ [TCG_COND_GE] = I3612_CMGE0,
+ [TCG_COND_LT] = I3612_CMLT0,
+ [TCG_COND_LE] = I3612_CMLE0,
+ };

TCGType type = vecl + TCG_TYPE_V64;
unsigned is_q = vecl;
+ bool is_scalar = !is_q && vece == MO_64;
TCGArg a0, a1, a2, a3;
int cmode, imm8;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
break;
case INDEX_op_add_vec:
- tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, ADD, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_sub_vec:
- tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SUB, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_mul_vec:
tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2);
break;
case INDEX_op_neg_vec:
- tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
+ if (is_scalar) {
+ tcg_out_insn(s, 3612, NEG, vece, a0, a1);
+ } else {
+ tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
+ }
break;
case INDEX_op_abs_vec:
- tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
+ if (is_scalar) {
+ tcg_out_insn(s, 3612, ABS, vece, a0, a1);
+ } else {
+ tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
+ }
break;
case INDEX_op_and_vec:
if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2);
break;
case INDEX_op_ssadd_vec:
- tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SQADD, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_sssub_vec:
- tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SQSUB, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_usadd_vec:
- tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, UQADD, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_ussub_vec:
- tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, UQSUB, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_smax_vec:
tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1);
break;
case INDEX_op_shli_vec:
- tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, SHL, a0, a1, a2 + (8 << vece));
+ } else {
+ tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
+ }
break;
case INDEX_op_shri_vec:
- tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, USHR, a0, a1, (16 << vece) - a2);
+ } else {
+ tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
+ }
break;
case INDEX_op_sari_vec:
- tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, SSHR, a0, a1, (16 << vece) - a2);
+ } else {
+ tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
+ }
break;
case INDEX_op_aa64_sli_vec:
- tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, SLI, a0, a2, args[3] + (8 << vece));
+ } else {
+ tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
+ }
break;
case INDEX_op_shlv_vec:
- tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, USHL, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_aa64_sshl_vec:
- tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SSHL, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_cmp_vec:
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,

if (cond == TCG_COND_NE) {
if (const_args[2]) {
- tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, CMTST, vece, a0, a1, a1);
+ } else {
+ tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
+ }
} else {
- tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, CMEQ, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
+ }
tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0);
}
} else {
if (const_args[2]) {
- insn = cmp0_insn[cond];
- if (insn) {
- tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
- break;
+ if (is_scalar) {
+ insn = cmp0_scalar_insn[cond];
+ if (insn) {
+ tcg_out_insn_3612(s, insn, vece, a0, a1);
+ break;
+ }
+ } else {
+ insn = cmp0_vec_insn[cond];
+ if (insn) {
+ tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
+ break;
+ }
}
tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
a2 = TCG_VEC_TMP;
}
- insn = cmp_insn[cond];
- if (insn == 0) {
- TCGArg t;
- t = a1, a1 = a2, a2 = t;
- cond = tcg_swap_cond(cond);
- insn = cmp_insn[cond];
- tcg_debug_assert(insn != 0);
+ if (is_scalar) {
+ insn = cmp_scalar_insn[cond];
+ if (insn == 0) {
+ TCGArg t;
+ t = a1, a1 = a2, a2 = t;
+ cond = tcg_swap_cond(cond);
+ insn = cmp_scalar_insn[cond];
+ tcg_debug_assert(insn != 0);
+ }
+ tcg_out_insn_3611(s, insn, vece, a0, a1, a2);
+ } else {
+ insn = cmp_vec_insn[cond];
+ if (insn == 0) {
+ TCGArg t;
+ t = a1, a1 = a2, a2 = t;
+ cond = tcg_swap_cond(cond);
+ insn = cmp_vec_insn[cond];
+ tcg_debug_assert(insn != 0);
+ }
+ tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
}
- tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
}
}
break;
--
2.25.1

From: Alex Bennée <alex.bennee@linaro.org>

We document this in docs/devel/load-stores.rst so let's follow it. The
32 bit and 64 bit access functions have historically not included the
sign so we leave those as is. We also introduce some signed helpers
which are used for loading immediate values in the translator.

Fixes: 282dffc8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20191021150910.23216-1-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst_template.h | 4 ++--
 tcg/tcg.h | 20 ++++++++++++++------
 accel/tcg/cputlb.c | 24 +++++++++++++++++++++---
 target/cris/translate_v10.inc.c | 3 +--
 4 files changed, 38 insertions(+), 13 deletions(-)

diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_template.h
+++ b/include/exec/cpu_ldst_template.h
@@ -XXX,XX +XXX,XX @@
#ifdef SOFTMMU_CODE_ACCESS
#define ADDR_READ addr_code
#define MMUSUFFIX _cmmu
-#define URETSUFFIX SUFFIX
-#define SRETSUFFIX SUFFIX
+#define URETSUFFIX USUFFIX
+#define SRETSUFFIX glue(s, SUFFIX)
#else
#define ADDR_READ addr_read
#define MMUSUFFIX _mmu
diff --git a/tcg/tcg.h b/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr);

-uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
-uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
-uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
# define helper_ret_stw_mmu helper_be_stw_mmu
# define helper_ret_stl_mmu helper_be_stl_mmu
# define helper_ret_stq_mmu helper_be_stq_mmu
-# define helper_ret_ldw_cmmu helper_be_ldw_cmmu
+# define helper_ret_lduw_cmmu helper_be_lduw_cmmu
+# define helper_ret_ldsw_cmmu helper_be_ldsw_cmmu
# define helper_ret_ldl_cmmu helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu helper_be_ldq_cmmu
#else
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
# define helper_ret_stw_mmu helper_le_stw_mmu
# define helper_ret_stl_mmu helper_le_stl_mmu
# define helper_ret_stq_mmu helper_le_stq_mmu
-# define helper_ret_ldw_cmmu helper_le_ldw_cmmu
+# define helper_ret_lduw_cmmu helper_le_lduw_cmmu
+# define helper_ret_ldsw_cmmu helper_le_ldsw_cmmu
# define helper_ret_ldl_cmmu helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu helper_le_ldq_cmmu
#endif
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
}

-uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return full_ldub_cmmu(env, addr, oi, retaddr);
}

+int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int8_t) full_ldub_cmmu(env, addr, oi, retaddr);
+}
+
static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
@@ -XXX,XX +XXX,XX @@ static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
full_le_lduw_cmmu);
}

-uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return full_le_lduw_cmmu(env, addr, oi, retaddr);
}

+int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t) full_le_lduw_cmmu(env, addr, oi, retaddr);
+}
+
static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
@@ -XXX,XX +XXX,XX @@ static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
full_be_lduw_cmmu);
}

-uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
+uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return full_be_lduw_cmmu(env, addr, oi, retaddr);
}

+int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t) full_be_lduw_cmmu(env, addr, oi, retaddr);
+}
+
static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
diff --git a/target/cris/translate_v10.inc.c b/target/cris/translate_v10.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate_v10.inc.c
+++ b/target/cris/translate_v10.inc.c
@@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
case CRISV10_IND_BCC_M:

cris_cc_mask(dc, 0);
- imm = cpu_ldsw_code(env, dc->pc + 2);
- simm = (int16_t)imm;
+ simm = cpu_ldsw_code(env, dc->pc + 2);
simm += 4;

LOG_DIS("bcc_m: b%s %x\n", cc_name(dc->cond), dc->pc + simm);
--
2.17.1
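As a quick decoder for the helper names that the cputlb patch above regularizes (paraphrased; the naming document referenced in the commit message is the authoritative grammar):

```c
/*
 * helper_{end}_ld{sign}{size}_cmmu
 *
 *   end:   le = little-endian, be = big-endian,
 *          ret = whichever matches the target (see the #ifdef remaps)
 *   sign:  u = zero-extended result, s = sign-extended result
 *   size:  b = 8-bit, w = 16-bit, l = 32-bit, q = 64-bit
 *   _cmmu: code (translator) access through the MMU, vs _mmu for data
 *
 * e.g. helper_be_ldsw_cmmu loads a big-endian 16-bit value for the
 * translator and sign-extends it, which is why its new body is just
 * (int16_t) full_be_lduw_cmmu(...).
 */
```

This also explains the cris hunk: cpu_ldsw_code() already returns a sign-extended value, so the intermediate (int16_t) cast becomes redundant.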
From: Alex Bennée <alex.bennee@linaro.org>

Let's make sure all the flags we compare when looking up blocks are
together in the same place.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20210224165811.11567-5-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
target_ulong cs_base; /* CS base for this block */
uint32_t flags; /* flags defining in which context the code was generated */
- uint16_t size; /* size of target code for this block (1 <=
- size <= TARGET_PAGE_SIZE) */
- uint16_t icount;
uint32_t cflags; /* compile flags */
#define CF_COUNT_MASK 0x00007fff
#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
/* Per-vCPU dynamic tracing state used to generate this TB */
uint32_t trace_vcpu_dstate;

+ /*
+ * Above fields used for comparing
+ */
+
+ /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
+ uint16_t size;
+ uint16_t icount;
+
struct tb_tc tc;

/* first and second physical page containing code. The lower bit
--
2.25.1

From: Wei Yang <richardw.yang@linux.intel.com>

Use ROUND_UP() to define these macros, which is a little easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Message-Id: <20191013021145.16011-2-richardw.yang@linux.intel.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ extern int target_page_bits;

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
-#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
+#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)

/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
 * when intptr_t is 32-bit and we are aligning a long long.
@@ -XXX,XX +XXX,XX @@ extern int target_page_bits;
extern uintptr_t qemu_host_page_size;
extern intptr_t qemu_host_page_mask;

-#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
-#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
- qemu_real_host_page_mask)
+#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
+#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)

/* same as PROT_xxx */
#define PAGE_READ 0x0001
--
2.17.1
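The equivalence is easy to sanity-check. A minimal sketch, assuming a power-of-two alignment (which page sizes always are) and using a stand-in definition of ROUND_UP:

```c
#include <assert.h>
#include <stdint.h>

/* Stand-in for QEMU's ROUND_UP; valid for power-of-two alignments. */
#define ROUND_UP(n, d) (((n) + (d) - 1) & -(d))

int main(void)
{
    uintptr_t size = 4096;

    assert(ROUND_UP((uintptr_t)1, size) == 4096);
    assert(ROUND_UP((uintptr_t)4096, size) == 4096);

    /* Same result as the old open-coded mask form. */
    uintptr_t mask = ~(size - 1);
    assert(ROUND_UP((uintptr_t)4097, size) == ((4097 + size - 1) & mask));
    return 0;
}
```

For an unsigned power-of-two d, -(d) is exactly the mask ~(d - 1), so the macro and the old expression compute the same value; the macro just states the intent directly.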
1 | Use explicit casts for ext32u opcodes, and allow truncation | 1 | The next patch will play a trick with "const" that will |
---|---|---|---|
2 | to happen for other users. | 2 | confuse the compiler about the uses of target_page_bits |
3 | within exec.c. Moving everything to a new file prevents | ||
4 | this confusion. | ||
3 | 5 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 6 | No functional change so far. |
7 | |||
8 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
9 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
10 | Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 12 | --- |
7 | tcg/tci.c | 122 ++++++++++++++++++++++++------------------------------ | 13 | Makefile.target | 2 +- |
8 | 1 file changed, 54 insertions(+), 68 deletions(-) | 14 | include/qemu-common.h | 6 +++++ |
15 | exec-vary.c | 57 +++++++++++++++++++++++++++++++++++++++++++ | ||
16 | exec.c | 34 -------------------------- | ||
17 | 4 files changed, 64 insertions(+), 35 deletions(-) | ||
18 | create mode 100644 exec-vary.c | ||
9 | 19 | ||
10 | diff --git a/tcg/tci.c b/tcg/tci.c | 20 | diff --git a/Makefile.target b/Makefile.target |
11 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/tcg/tci.c | 22 | --- a/Makefile.target |
13 | +++ b/tcg/tci.c | 23 | +++ b/Makefile.target |
14 | @@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | 24 | @@ -XXX,XX +XXX,XX @@ obj-y += trace/ |
15 | } | 25 | |
26 | ######################################################### | ||
27 | # cpu emulator library | ||
28 | -obj-y += exec.o | ||
29 | +obj-y += exec.o exec-vary.o | ||
30 | obj-y += accel/ | ||
31 | obj-$(CONFIG_TCG) += tcg/tcg.o tcg/tcg-op.o tcg/tcg-op-vec.o tcg/tcg-op-gvec.o | ||
32 | obj-$(CONFIG_TCG) += tcg/tcg-common.o tcg/optimize.o | ||
33 | diff --git a/include/qemu-common.h b/include/qemu-common.h | ||
34 | index XXXXXXX..XXXXXXX 100644 | ||
35 | --- a/include/qemu-common.h | ||
36 | +++ b/include/qemu-common.h | ||
37 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu); | ||
38 | */ | ||
39 | bool set_preferred_target_page_bits(int bits); | ||
40 | |||
41 | +/** | ||
42 | + * finalize_target_page_bits: | ||
43 | + * Commit the final value set by set_preferred_target_page_bits. | ||
44 | + */ | ||
45 | +void finalize_target_page_bits(void); | ||
46 | + | ||
47 | /** | ||
48 | * Sends a (part of) iovec down a socket, yielding when the socket is full, or | ||
49 | * Receives data into a (part of) iovec from a socket, | ||
50 | diff --git a/exec-vary.c b/exec-vary.c | ||
51 | new file mode 100644 | ||
52 | index XXXXXXX..XXXXXXX | ||
53 | --- /dev/null | ||
54 | +++ b/exec-vary.c | ||
55 | @@ -XXX,XX +XXX,XX @@ | ||
56 | +/* | ||
57 | + * Variable page size handling | ||
58 | + * | ||
59 | + * Copyright (c) 2003 Fabrice Bellard | ||
60 | + * | ||
61 | + * This library is free software; you can redistribute it and/or | ||
62 | + * modify it under the terms of the GNU Lesser General Public | ||
63 | + * License as published by the Free Software Foundation; either | ||
64 | + * version 2 of the License, or (at your option) any later version. | ||
65 | + * | ||
66 | + * This library is distributed in the hope that it will be useful, | ||
67 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
68 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
69 | + * Lesser General Public License for more details. | ||
70 | + * | ||
71 | + * You should have received a copy of the GNU Lesser General Public | ||
72 | + * License along with this library; if not, see <http://www.gnu.org/licenses/>. | ||
73 | + */ | ||
74 | + | ||
75 | +#include "qemu/osdep.h" | ||
76 | +#include "qemu-common.h" | ||
77 | +#include "exec/exec-all.h" | ||
78 | + | ||
79 | +#ifdef TARGET_PAGE_BITS_VARY | ||
80 | +int target_page_bits; | ||
81 | +bool target_page_bits_decided; | ||
82 | +#endif | ||
83 | + | ||
84 | +bool set_preferred_target_page_bits(int bits) | ||
85 | +{ | ||
86 | + /* | ||
87 | + * The target page size is the lowest common denominator for all | ||
88 | + * the CPUs in the system, so we can only make it smaller, never | ||
89 | + * larger. And we can't make it smaller once we've committed to | ||
90 | + * a particular size. | ||
91 | + */ | ||
92 | +#ifdef TARGET_PAGE_BITS_VARY | ||
93 | + assert(bits >= TARGET_PAGE_BITS_MIN); | ||
94 | + if (target_page_bits == 0 || target_page_bits > bits) { | ||
95 | + if (target_page_bits_decided) { | ||
96 | + return false; | ||
97 | + } | ||
98 | + target_page_bits = bits; | ||
99 | + } | ||
100 | +#endif | ||
101 | + return true; | ||
102 | +} | ||
103 | + | ||
104 | +void finalize_target_page_bits(void) | ||
105 | +{ | ||
106 | +#ifdef TARGET_PAGE_BITS_VARY | ||
107 | + if (target_page_bits == 0) { | ||
108 | + target_page_bits = TARGET_PAGE_BITS_MIN; | ||
109 | + } | ||
110 | + target_page_bits_decided = true; | ||
111 | +#endif | ||
112 | +} | ||
113 | diff --git a/exec.c b/exec.c | ||
114 | index XXXXXXX..XXXXXXX 100644 | ||
115 | --- a/exec.c | ||
116 | +++ b/exec.c | ||
117 | @@ -XXX,XX +XXX,XX @@ AddressSpace address_space_memory; | ||
118 | static MemoryRegion io_mem_unassigned; | ||
16 | #endif | 119 | #endif |
17 | 120 | ||
18 | -static uint32_t tci_read_reg32(const tcg_target_ulong *regs, TCGReg index) | 121 | -#ifdef TARGET_PAGE_BITS_VARY |
122 | -int target_page_bits; | ||
123 | -bool target_page_bits_decided; | ||
124 | -#endif | ||
125 | - | ||
126 | CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus); | ||
127 | |||
128 | /* current CPU in the current thread. It is only valid inside | ||
129 | @@ -XXX,XX +XXX,XX @@ int use_icount; | ||
130 | uintptr_t qemu_host_page_size; | ||
131 | intptr_t qemu_host_page_mask; | ||
132 | |||
133 | -bool set_preferred_target_page_bits(int bits) | ||
19 | -{ | 134 | -{ |
20 | - return (uint32_t)tci_read_reg(regs, index); | 135 | - /* The target page size is the lowest common denominator for all |
136 | - * the CPUs in the system, so we can only make it smaller, never | ||
137 | - * larger. And we can't make it smaller once we've committed to | ||
138 | - * a particular size. | ||
139 | - */ | ||
140 | -#ifdef TARGET_PAGE_BITS_VARY | ||
141 | - assert(bits >= TARGET_PAGE_BITS_MIN); | ||
142 | - if (target_page_bits == 0 || target_page_bits > bits) { | ||
143 | - if (target_page_bits_decided) { | ||
144 | - return false; | ||
145 | - } | ||
146 | - target_page_bits = bits; | ||
147 | - } | ||
148 | -#endif | ||
149 | - return true; | ||
21 | -} | 150 | -} |
22 | - | 151 | - |
23 | #if TCG_TARGET_REG_BITS == 64 | 152 | #if !defined(CONFIG_USER_ONLY) |
24 | static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index) | 153 | |
25 | { | 154 | -static void finalize_target_page_bits(void) |
26 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
27 | return value; | ||
28 | } | ||
29 | |||
30 | -/* Read indexed register (32 bit) from bytecode. */ | ||
31 | -static uint32_t tci_read_r32(const tcg_target_ulong *regs, | ||
32 | - const uint8_t **tb_ptr) | ||
33 | -{ | 155 | -{ |
34 | - uint32_t value = tci_read_reg32(regs, **tb_ptr); | 156 | -#ifdef TARGET_PAGE_BITS_VARY |
35 | - *tb_ptr += 1; | 157 | - if (target_page_bits == 0) { |
36 | - return value; | 158 | - target_page_bits = TARGET_PAGE_BITS_MIN; |
159 | - } | ||
160 | - target_page_bits_decided = true; | ||
161 | -#endif | ||
37 | -} | 162 | -} |
38 | - | 163 | - |
39 | #if TCG_TARGET_REG_BITS == 32 | 164 | typedef struct PhysPageEntry PhysPageEntry; |
40 | /* Read two indexed registers (2 * 32 bit) from bytecode. */ | 165 | |
41 | static uint64_t tci_read_r64(const tcg_target_ulong *regs, | 166 | struct PhysPageEntry { |
42 | const uint8_t **tb_ptr) | ||
43 | { | ||
44 | - uint32_t low = tci_read_r32(regs, tb_ptr); | ||
45 | - return tci_uint64(tci_read_r32(regs, tb_ptr), low); | ||
46 | + uint32_t low = tci_read_r(regs, tb_ptr); | ||
47 | + return tci_uint64(tci_read_r(regs, tb_ptr), low); | ||
48 | } | ||
49 | #elif TCG_TARGET_REG_BITS == 64 | ||
50 | /* Read indexed register (32 bit signed) from bytecode. */ | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | continue; | ||
53 | case INDEX_op_setcond_i32: | ||
54 | t0 = *tb_ptr++; | ||
55 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
56 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
57 | + t1 = tci_read_r(regs, &tb_ptr); | ||
58 | + t2 = tci_read_r(regs, &tb_ptr); | ||
59 | condition = *tb_ptr++; | ||
60 | tci_write_reg(regs, t0, tci_compare32(t1, t2, condition)); | ||
61 | break; | ||
62 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
63 | #endif | ||
64 | case INDEX_op_mov_i32: | ||
65 | t0 = *tb_ptr++; | ||
66 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
67 | + t1 = tci_read_r(regs, &tb_ptr); | ||
68 | tci_write_reg(regs, t0, t1); | ||
69 | break; | ||
70 | case INDEX_op_tci_movi_i32: | ||
71 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
72 | break; | ||
73 | case INDEX_op_st_i32: | ||
74 | CASE_64(st32) | ||
75 | - t0 = tci_read_r32(regs, &tb_ptr); | ||
76 | + t0 = tci_read_r(regs, &tb_ptr); | ||
77 | t1 = tci_read_r(regs, &tb_ptr); | ||
78 | t2 = tci_read_s32(&tb_ptr); | ||
79 | *(uint32_t *)(t1 + t2) = t0; | ||
80 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
81 | |||
82 | case INDEX_op_add_i32: | ||
83 | t0 = *tb_ptr++; | ||
84 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
85 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
86 | + t1 = tci_read_r(regs, &tb_ptr); | ||
87 | + t2 = tci_read_r(regs, &tb_ptr); | ||
88 | tci_write_reg(regs, t0, t1 + t2); | ||
89 | break; | ||
90 | case INDEX_op_sub_i32: | ||
91 | t0 = *tb_ptr++; | ||
92 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
93 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
94 | + t1 = tci_read_r(regs, &tb_ptr); | ||
95 | + t2 = tci_read_r(regs, &tb_ptr); | ||
96 | tci_write_reg(regs, t0, t1 - t2); | ||
97 | break; | ||
98 | case INDEX_op_mul_i32: | ||
99 | t0 = *tb_ptr++; | ||
100 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
101 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
102 | + t1 = tci_read_r(regs, &tb_ptr); | ||
103 | + t2 = tci_read_r(regs, &tb_ptr); | ||
104 | tci_write_reg(regs, t0, t1 * t2); | ||
105 | break; | ||
106 | case INDEX_op_div_i32: | ||
107 | t0 = *tb_ptr++; | ||
108 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
109 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
110 | + t1 = tci_read_r(regs, &tb_ptr); | ||
111 | + t2 = tci_read_r(regs, &tb_ptr); | ||
112 | tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2); | ||
113 | break; | ||
114 | case INDEX_op_divu_i32: | ||
115 | t0 = *tb_ptr++; | ||
116 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
117 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
118 | - tci_write_reg(regs, t0, t1 / t2); | ||
119 | + t1 = tci_read_r(regs, &tb_ptr); | ||
120 | + t2 = tci_read_r(regs, &tb_ptr); | ||
121 | + tci_write_reg(regs, t0, (uint32_t)t1 / (uint32_t)t2); | ||
122 | break; | ||
123 | case INDEX_op_rem_i32: | ||
124 | t0 = *tb_ptr++; | ||
125 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
126 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
127 | + t1 = tci_read_r(regs, &tb_ptr); | ||
128 | + t2 = tci_read_r(regs, &tb_ptr); | ||
129 | tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2); | ||
130 | break; | ||
131 | case INDEX_op_remu_i32: | ||
132 | t0 = *tb_ptr++; | ||
133 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
134 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
135 | - tci_write_reg(regs, t0, t1 % t2); | ||
136 | + t1 = tci_read_r(regs, &tb_ptr); | ||
137 | + t2 = tci_read_r(regs, &tb_ptr); | ||
138 | + tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2); | ||
139 | break; | ||
140 | case INDEX_op_and_i32: | ||
141 | t0 = *tb_ptr++; | ||
142 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
143 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
144 | + t1 = tci_read_r(regs, &tb_ptr); | ||
145 | + t2 = tci_read_r(regs, &tb_ptr); | ||
146 | tci_write_reg(regs, t0, t1 & t2); | ||
147 | break; | ||
148 | case INDEX_op_or_i32: | ||
149 | t0 = *tb_ptr++; | ||
150 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
151 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
152 | + t1 = tci_read_r(regs, &tb_ptr); | ||
153 | + t2 = tci_read_r(regs, &tb_ptr); | ||
154 | tci_write_reg(regs, t0, t1 | t2); | ||
155 | break; | ||
156 | case INDEX_op_xor_i32: | ||
157 | t0 = *tb_ptr++; | ||
158 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
159 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
160 | + t1 = tci_read_r(regs, &tb_ptr); | ||
161 | + t2 = tci_read_r(regs, &tb_ptr); | ||
162 | tci_write_reg(regs, t0, t1 ^ t2); | ||
163 | break; | ||
164 | |||
165 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
166 | |||
167 | case INDEX_op_shl_i32: | ||
168 | t0 = *tb_ptr++; | ||
169 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
170 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
171 | - tci_write_reg(regs, t0, t1 << (t2 & 31)); | ||
172 | + t1 = tci_read_r(regs, &tb_ptr); | ||
173 | + t2 = tci_read_r(regs, &tb_ptr); | ||
174 | + tci_write_reg(regs, t0, (uint32_t)t1 << (t2 & 31)); | ||
175 | break; | ||
176 | case INDEX_op_shr_i32: | ||
177 | t0 = *tb_ptr++; | ||
178 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
179 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
180 | - tci_write_reg(regs, t0, t1 >> (t2 & 31)); | ||
181 | + t1 = tci_read_r(regs, &tb_ptr); | ||
182 | + t2 = tci_read_r(regs, &tb_ptr); | ||
183 | + tci_write_reg(regs, t0, (uint32_t)t1 >> (t2 & 31)); | ||
184 | break; | ||
185 | case INDEX_op_sar_i32: | ||
186 | t0 = *tb_ptr++; | ||
187 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
188 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
189 | - tci_write_reg(regs, t0, ((int32_t)t1 >> (t2 & 31))); | ||
190 | + t1 = tci_read_r(regs, &tb_ptr); | ||
191 | + t2 = tci_read_r(regs, &tb_ptr); | ||
192 | + tci_write_reg(regs, t0, (int32_t)t1 >> (t2 & 31)); | ||
193 | break; | ||
194 | #if TCG_TARGET_HAS_rot_i32 | ||
195 | case INDEX_op_rotl_i32: | ||
196 | t0 = *tb_ptr++; | ||
197 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
198 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
199 | + t1 = tci_read_r(regs, &tb_ptr); | ||
200 | + t2 = tci_read_r(regs, &tb_ptr); | ||
201 | tci_write_reg(regs, t0, rol32(t1, t2 & 31)); | ||
202 | break; | ||
203 | case INDEX_op_rotr_i32: | ||
204 | t0 = *tb_ptr++; | ||
205 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
206 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
207 | + t1 = tci_read_r(regs, &tb_ptr); | ||
208 | + t2 = tci_read_r(regs, &tb_ptr); | ||
209 | tci_write_reg(regs, t0, ror32(t1, t2 & 31)); | ||
210 | break; | ||
211 | #endif | ||
212 | #if TCG_TARGET_HAS_deposit_i32 | ||
213 | case INDEX_op_deposit_i32: | ||
214 | t0 = *tb_ptr++; | ||
215 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
216 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
217 | + t1 = tci_read_r(regs, &tb_ptr); | ||
218 | + t2 = tci_read_r(regs, &tb_ptr); | ||
219 | tmp16 = *tb_ptr++; | ||
220 | tmp8 = *tb_ptr++; | ||
221 | tmp32 = (((1 << tmp8) - 1) << tmp16); | ||
222 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
223 | break; | ||
224 | #endif | ||
225 | case INDEX_op_brcond_i32: | ||
226 | - t0 = tci_read_r32(regs, &tb_ptr); | ||
227 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
228 | + t0 = tci_read_r(regs, &tb_ptr); | ||
229 | + t1 = tci_read_r(regs, &tb_ptr); | ||
230 | condition = *tb_ptr++; | ||
231 | label = tci_read_label(&tb_ptr); | ||
232 | if (tci_compare32(t0, t1, condition)) { | ||
233 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
234 | case INDEX_op_mulu2_i32: | ||
235 | t0 = *tb_ptr++; | ||
236 | t1 = *tb_ptr++; | ||
237 | - t2 = tci_read_r32(regs, &tb_ptr); | ||
238 | - tmp64 = tci_read_r32(regs, &tb_ptr); | ||
239 | - tci_write_reg64(regs, t1, t0, t2 * tmp64); | ||
240 | + t2 = tci_read_r(regs, &tb_ptr); | ||
241 | + tmp64 = (uint32_t)tci_read_r(regs, &tb_ptr); | ||
242 | + tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64); | ||
243 | break; | ||
244 | #endif /* TCG_TARGET_REG_BITS == 32 */ | ||
245 | #if TCG_TARGET_HAS_ext8s_i32 | ||
246 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
247 | #if TCG_TARGET_HAS_bswap32_i32 | ||
248 | case INDEX_op_bswap32_i32: | ||
249 | t0 = *tb_ptr++; | ||
250 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
251 | + t1 = tci_read_r(regs, &tb_ptr); | ||
252 | tci_write_reg(regs, t0, bswap32(t1)); | ||
253 | break; | ||
254 | #endif | ||
255 | #if TCG_TARGET_HAS_not_i32 | ||
256 | case INDEX_op_not_i32: | ||
257 | t0 = *tb_ptr++; | ||
258 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
259 | + t1 = tci_read_r(regs, &tb_ptr); | ||
260 | tci_write_reg(regs, t0, ~t1); | ||
261 | break; | ||
262 | #endif | ||
263 | #if TCG_TARGET_HAS_neg_i32 | ||
264 | case INDEX_op_neg_i32: | ||
265 | t0 = *tb_ptr++; | ||
266 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
267 | + t1 = tci_read_r(regs, &tb_ptr); | ||
268 | tci_write_reg(regs, t0, -t1); | ||
269 | break; | ||
270 | #endif | ||
271 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
272 | #endif | ||
273 | case INDEX_op_extu_i32_i64: | ||
274 | t0 = *tb_ptr++; | ||
275 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
276 | - tci_write_reg(regs, t0, t1); | ||
277 | + t1 = tci_read_r(regs, &tb_ptr); | ||
278 | + tci_write_reg(regs, t0, (uint32_t)t1); | ||
279 | break; | ||
280 | #if TCG_TARGET_HAS_bswap16_i64 | ||
281 | case INDEX_op_bswap16_i64: | ||
282 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
283 | #if TCG_TARGET_HAS_bswap32_i64 | ||
284 | case INDEX_op_bswap32_i64: | ||
285 | t0 = *tb_ptr++; | ||
286 | - t1 = tci_read_r32(regs, &tb_ptr); | ||
287 | + t1 = tci_read_r(regs, &tb_ptr); | ||
288 | tci_write_reg(regs, t0, bswap32(t1)); | ||
289 | break; | ||
290 | #endif | ||
291 | -- | 167 | -- |
292 | 2.25.1 | 168 | 2.17.1 |
293 | 169 | ||
294 | 170 | diff view generated by jsdifflib |
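
As a stand-alone illustration of the conversion in the patch above (not QEMU code — the register file and the do_rotl_i32 helper are invented for the example), the interpreter can keep every value at full host register width and truncate at the point of use, which is what lets a single generic tci_read_r replace the width-specific readers such as tci_read_r32:

    #include <stdint.h>

    typedef uintptr_t tcg_target_ulong;     /* host register width */

    static uint32_t rol32(uint32_t w, unsigned n)
    {
        return (w << (n & 31)) | (w >> (-n & 31));
    }

    /*
     * rotl_i32 in the style above: read the registers at full width,
     * then truncate to 32 bits at the point of use, instead of going
     * through a width-specific reader.
     */
    static void do_rotl_i32(tcg_target_ulong *regs, int r0, int r1, int r2)
    {
        uint32_t t1 = (uint32_t)regs[r1];   /* explicit truncation */
        uint32_t t2 = (uint32_t)regs[r2];
        regs[r0] = rol32(t1, t2 & 31);
    }

This is also why casts such as (uint32_t)t1 appear in the mulu2_i32 and extu_i32_i64 cases above: the truncation that tci_read_r32 used to perform now happens explicitly at each use.
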
1 | The primary motivation is to remove a dozen insns along | 1 | Such support is present almost everywhere, except for Xcode 9. |
---|---|---|---|
2 | the fast-path in tb_lookup. As a byproduct, this allows | 2 | It is added in Xcode 10, but Travis uses Xcode 9 by default, |
3 | us to completely remove parallel_cpus. | 3 | so we should support it for a while yet. |
4 | 4 | ||
5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | 5 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> |
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 7 | --- |
8 | accel/tcg/tcg-accel-ops.h | 1 + | 8 | configure | 19 +++++++++++++++++++ |
9 | include/exec/exec-all.h | 7 +------ | 9 | 1 file changed, 19 insertions(+) |
10 | include/hw/core/cpu.h | 2 ++ | ||
11 | accel/tcg/cpu-exec.c | 3 --- | ||
12 | accel/tcg/tcg-accel-ops-mttcg.c | 3 +-- | ||
13 | accel/tcg/tcg-accel-ops-rr.c | 2 +- | ||
14 | accel/tcg/tcg-accel-ops.c | 8 ++++++++ | ||
15 | accel/tcg/translate-all.c | 4 ---- | ||
16 | linux-user/main.c | 1 + | ||
17 | linux-user/sh4/signal.c | 8 +++++--- | ||
18 | linux-user/syscall.c | 18 ++++++++++-------- | ||
19 | 11 files changed, 30 insertions(+), 27 deletions(-) | ||
20 | 10 | ||
21 | diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h | 11 | diff --git a/configure b/configure |
22 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100755 |
23 | --- a/accel/tcg/tcg-accel-ops.h | 13 | --- a/configure |
24 | +++ b/accel/tcg/tcg-accel-ops.h | 14 | +++ b/configure |
25 | @@ -XXX,XX +XXX,XX @@ | 15 | @@ -XXX,XX +XXX,XX @@ if compile_prog "" "" ; then |
26 | void tcg_cpus_destroy(CPUState *cpu); | 16 | vector16=yes |
27 | int tcg_cpus_exec(CPUState *cpu); | 17 | fi |
28 | void tcg_handle_interrupt(CPUState *cpu, int mask); | 18 | |
29 | +void tcg_cpu_init_cflags(CPUState *cpu, bool parallel); | 19 | +######################################## |
30 | 20 | +# See if __attribute__((alias)) is supported. | |
31 | #endif /* TCG_CPUS_H */ | 21 | +# This is false for Xcode 9, but has been remedied for Xcode 10. |
32 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | 22 | +# Unfortunately, Travis uses Xcode 9 by default. |
33 | index XXXXXXX..XXXXXXX 100644 | ||
34 | --- a/include/exec/exec-all.h | ||
35 | +++ b/include/exec/exec-all.h | ||
36 | @@ -XXX,XX +XXX,XX @@ struct TranslationBlock { | ||
37 | uintptr_t jmp_dest[2]; | ||
38 | }; | ||
39 | |||
40 | -extern bool parallel_cpus; | ||
41 | - | ||
42 | /* Hide the qatomic_read to make code a little easier on the eyes */ | ||
43 | static inline uint32_t tb_cflags(const TranslationBlock *tb) | ||
44 | { | ||
45 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t tb_cflags(const TranslationBlock *tb) | ||
46 | /* current cflags for hashing/comparison */ | ||
47 | static inline uint32_t curr_cflags(CPUState *cpu) | ||
48 | { | ||
49 | - uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cpu->cluster_index); | ||
50 | - cflags |= parallel_cpus ? CF_PARALLEL : 0; | ||
51 | - cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; | ||
52 | - return cflags; | ||
53 | + return cpu->tcg_cflags; | ||
54 | } | ||
55 | |||
56 | /* TranslationBlock invalidate API */ | ||
57 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | ||
58 | index XXXXXXX..XXXXXXX 100644 | ||
59 | --- a/include/hw/core/cpu.h | ||
60 | +++ b/include/hw/core/cpu.h | ||
61 | @@ -XXX,XX +XXX,XX @@ struct qemu_work_item; | ||
62 | * to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will | ||
63 | * be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER | ||
64 | * QOM parent. | ||
65 | + * @tcg_cflags: Pre-computed cflags for this cpu. | ||
66 | * @nr_cores: Number of cores within this CPU package. | ||
67 | * @nr_threads: Number of threads within this CPU. | ||
68 | * @running: #true if CPU is currently running (lockless). | ||
69 | @@ -XXX,XX +XXX,XX @@ struct CPUState { | ||
70 | /* TODO Move common fields from CPUArchState here. */ | ||
71 | int cpu_index; | ||
72 | int cluster_index; | ||
73 | + uint32_t tcg_cflags; | ||
74 | uint32_t halted; | ||
75 | uint32_t can_do_io; | ||
76 | int32_t exception_index; | ||
77 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
78 | index XXXXXXX..XXXXXXX 100644 | ||
79 | --- a/accel/tcg/cpu-exec.c | ||
80 | +++ b/accel/tcg/cpu-exec.c | ||
81 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) | ||
82 | mmap_unlock(); | ||
83 | } | ||
84 | |||
85 | - /* Since we got here, we know that parallel_cpus must be true. */ | ||
86 | - parallel_cpus = false; | ||
87 | cpu_exec_enter(cpu); | ||
88 | /* execute the generated code */ | ||
89 | trace_exec_tb(tb, pc); | ||
90 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) | ||
91 | * the execution. | ||
92 | */ | ||
93 | g_assert(cpu_in_exclusive_context(cpu)); | ||
94 | - parallel_cpus = true; | ||
95 | cpu->running = false; | ||
96 | end_exclusive(); | ||
97 | } | ||
98 | diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/accel/tcg/tcg-accel-ops-mttcg.c | ||
101 | +++ b/accel/tcg/tcg-accel-ops-mttcg.c | ||
102 | @@ -XXX,XX +XXX,XX @@ void mttcg_start_vcpu_thread(CPUState *cpu) | ||
103 | char thread_name[VCPU_THREAD_NAME_SIZE]; | ||
104 | |||
105 | g_assert(tcg_enabled()); | ||
106 | - | ||
107 | - parallel_cpus = (current_machine->smp.max_cpus > 1); | ||
108 | + tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1); | ||
109 | |||
110 | cpu->thread = g_malloc0(sizeof(QemuThread)); | ||
111 | cpu->halt_cond = g_malloc0(sizeof(QemuCond)); | ||
112 | diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c | ||
113 | index XXXXXXX..XXXXXXX 100644 | ||
114 | --- a/accel/tcg/tcg-accel-ops-rr.c | ||
115 | +++ b/accel/tcg/tcg-accel-ops-rr.c | ||
116 | @@ -XXX,XX +XXX,XX @@ void rr_start_vcpu_thread(CPUState *cpu) | ||
117 | static QemuThread *single_tcg_cpu_thread; | ||
118 | |||
119 | g_assert(tcg_enabled()); | ||
120 | - parallel_cpus = false; | ||
121 | + tcg_cpu_init_cflags(cpu, false); | ||
122 | |||
123 | if (!single_tcg_cpu_thread) { | ||
124 | cpu->thread = g_malloc0(sizeof(QemuThread)); | ||
125 | diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c | ||
126 | index XXXXXXX..XXXXXXX 100644 | ||
127 | --- a/accel/tcg/tcg-accel-ops.c | ||
128 | +++ b/accel/tcg/tcg-accel-ops.c | ||
129 | @@ -XXX,XX +XXX,XX @@ | ||
130 | |||
131 | /* common functionality among all TCG variants */ | ||
132 | |||
133 | +void tcg_cpu_init_cflags(CPUState *cpu, bool parallel) | ||
134 | +{ | ||
135 | + uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT; | ||
136 | + cflags |= parallel ? CF_PARALLEL : 0; | ||
137 | + cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; | ||
138 | + cpu->tcg_cflags = cflags; | ||
139 | +} | ||
140 | + | 23 | + |
141 | void tcg_cpus_destroy(CPUState *cpu) | 24 | +attralias=no |
142 | { | 25 | +cat > $TMPC << EOF |
143 | cpu_thread_signal_destroyed(cpu); | 26 | +int x = 1; |
144 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | 27 | +extern const int y __attribute__((alias("x"))); |
145 | index XXXXXXX..XXXXXXX 100644 | 28 | +int main(void) { return 0; } |
146 | --- a/accel/tcg/translate-all.c | 29 | +EOF |
147 | +++ b/accel/tcg/translate-all.c | 30 | +if compile_prog "" "" ; then |
148 | @@ -XXX,XX +XXX,XX @@ static void *l1_map[V_L1_MAX_SIZE]; | 31 | + attralias=yes |
149 | TCGContext tcg_init_ctx; | 32 | +fi |
150 | __thread TCGContext *tcg_ctx; | ||
151 | TBContext tb_ctx; | ||
152 | -bool parallel_cpus; | ||
153 | |||
154 | static void page_table_config_init(void) | ||
155 | { | ||
156 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
157 | cflags = (cflags & ~CF_COUNT_MASK) | 1; | ||
158 | } | ||
159 | |||
160 | - cflags &= ~CF_CLUSTER_MASK; | ||
161 | - cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT; | ||
162 | - | ||
163 | max_insns = cflags & CF_COUNT_MASK; | ||
164 | if (max_insns == 0) { | ||
165 | max_insns = CF_COUNT_MASK; | ||
166 | diff --git a/linux-user/main.c b/linux-user/main.c | ||
167 | index XXXXXXX..XXXXXXX 100644 | ||
168 | --- a/linux-user/main.c | ||
169 | +++ b/linux-user/main.c | ||
170 | @@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env) | ||
171 | /* Reset non arch specific state */ | ||
172 | cpu_reset(new_cpu); | ||
173 | |||
174 | + new_cpu->tcg_cflags = cpu->tcg_cflags; | ||
175 | memcpy(new_env, env, sizeof(CPUArchState)); | ||
176 | |||
177 | /* Clone all break/watchpoints. | ||
178 | diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c | ||
179 | index XXXXXXX..XXXXXXX 100644 | ||
180 | --- a/linux-user/sh4/signal.c | ||
181 | +++ b/linux-user/sh4/signal.c | ||
182 | @@ -XXX,XX +XXX,XX @@ static abi_ulong get_sigframe(struct target_sigaction *ka, | ||
183 | return (sp - frame_size) & -8ul; | ||
184 | } | ||
185 | |||
186 | -/* Notice when we're in the middle of a gUSA region and reset. | ||
187 | - Note that this will only occur for !parallel_cpus, as we will | ||
188 | - translate such sequences differently in a parallel context. */ | ||
189 | +/* | ||
190 | + * Notice when we're in the middle of a gUSA region and reset. | ||
191 | + * Note that this will only occur when #CF_PARALLEL is unset, as we | ||
192 | + * will translate such sequences differently in a parallel context. | ||
193 | + */ | ||
194 | static void unwind_gusa(CPUSH4State *regs) | ||
195 | { | ||
196 | /* If the stack pointer is sufficiently negative, and we haven't | ||
197 | diff --git a/linux-user/syscall.c b/linux-user/syscall.c | ||
198 | index XXXXXXX..XXXXXXX 100644 | ||
199 | --- a/linux-user/syscall.c | ||
200 | +++ b/linux-user/syscall.c | ||
201 | @@ -XXX,XX +XXX,XX @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, | ||
202 | /* Grab a mutex so that thread setup appears atomic. */ | ||
203 | pthread_mutex_lock(&clone_lock); | ||
204 | |||
205 | + /* | ||
206 | + * If this is our first additional thread, we need to ensure we | ||
207 | + * generate code for parallel execution and flush old translations. | ||
208 | + * Do this now so that the copy gets CF_PARALLEL too. | ||
209 | + */ | ||
210 | + if (!(cpu->tcg_cflags & CF_PARALLEL)) { | ||
211 | + cpu->tcg_cflags |= CF_PARALLEL; | ||
212 | + tb_flush(cpu); | ||
213 | + } | ||
214 | + | 33 | + |
215 | /* we create a new CPU instance. */ | 34 | ######################################## |
216 | new_env = cpu_copy(env); | 35 | # check if getauxval is available. |
217 | /* Init regs that differ from the parent. */ | 36 | |
218 | @@ -XXX,XX +XXX,XX @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, | 37 | @@ -XXX,XX +XXX,XX @@ if test "$vector16" = "yes" ; then |
219 | sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); | 38 | echo "CONFIG_VECTOR16=y" >> $config_host_mak |
220 | cpu->random_seed = qemu_guest_random_seed_thread_part1(); | 39 | fi |
221 | 40 | ||
222 | - /* If this is our first additional thread, we need to ensure we | 41 | +if test "$attralias" = "yes" ; then |
223 | - * generate code for parallel execution and flush old translations. | 42 | + echo "CONFIG_ATTRIBUTE_ALIAS=y" >> $config_host_mak |
224 | - */ | 43 | +fi |
225 | - if (!parallel_cpus) { | 44 | + |
226 | - parallel_cpus = true; | 45 | if test "$getauxval" = "yes" ; then |
227 | - tb_flush(cpu); | 46 | echo "CONFIG_GETAUXVAL=y" >> $config_host_mak |
228 | - } | 47 | fi |
229 | - | ||
230 | ret = pthread_create(&info.thread, &attr, clone_func, &info); | ||
231 | /* TODO: Free new CPU state if thread creation failed. */ | ||
232 | |||
233 | -- | 48 | -- |
234 | 2.25.1 | 49 | 2.17.1 |
235 | 50 | ||
236 | 51 | diff view generated by jsdifflib |
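
The caching idea behind the cflags patch above can be shown in isolation. The sketch below is hypothetical (the CF_* bit values and the struct are invented here, not taken from the tree): the flags word is derived once at the rare reconfiguration points, so the per-lookup helper collapses to a single load:

    #include <stdbool.h>
    #include <stdint.h>

    #define CF_PARALLEL   (1u << 0)          /* illustrative bit values */
    #define CF_USE_ICOUNT (1u << 1)

    struct cpu {
        uint32_t tcg_cflags;                 /* cached, read on the fast path */
    };

    /* slow path: run once when the configuration changes */
    static void init_cflags(struct cpu *cpu, bool parallel, bool icount)
    {
        uint32_t cflags = 0;
        cflags |= parallel ? CF_PARALLEL : 0;
        cflags |= icount ? CF_USE_ICOUNT : 0;
        cpu->tcg_cflags = cflags;
    }

    /* fast path: a single load instead of recomputing the bits */
    static uint32_t curr_cflags(struct cpu *cpu)
    {
        return cpu->tcg_cflags;
    }

In the patch itself the recomputation points are tcg_cpu_init_cflags() and the first-thread case in do_fork(), which is also why cpu_copy() must now copy the field to the child.
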
1 | Allow other places in tcg to restart with a smaller tb. | 1 | Using a variable that is declared "const" for this tells the |
---|---|---|---|
2 | compiler that it may read the value once and assume that it | ||
3 | does not change across function calls. | ||
2 | 4 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 5 | For target_page_size, this means we have only one assert per |
6 | function, and one read of the variable. | ||
7 | |||
8 | This reduces the size of qemu-system-aarch64 by 8k. | ||
9 | |||
10 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
11 | Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 12 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | --- | 13 | --- |
6 | tcg/tcg.c | 9 +++++++-- | 14 | include/exec/cpu-all.h | 14 ++++++--- |
7 | 1 file changed, 7 insertions(+), 2 deletions(-) | 15 | exec-vary.c | 66 +++++++++++++++++++++++++++++++++++++----- |
16 | 2 files changed, 68 insertions(+), 12 deletions(-) | ||
8 | 17 | ||
9 | diff --git a/tcg/tcg.c b/tcg/tcg.c | 18 | diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h |
10 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
11 | --- a/tcg/tcg.c | 20 | --- a/include/exec/cpu-all.h |
12 | +++ b/tcg/tcg.c | 21 | +++ b/include/exec/cpu-all.h |
13 | @@ -XXX,XX +XXX,XX @@ static void set_jmp_reset_offset(TCGContext *s, int which) | 22 | @@ -XXX,XX +XXX,XX @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val |
14 | s->tb_jmp_reset_offset[which] = tcg_current_code_size(s); | 23 | /* page related stuff */ |
15 | } | 24 | |
16 | 25 | #ifdef TARGET_PAGE_BITS_VARY | |
17 | +/* Signal overflow, starting over with fewer guest insns. */ | 26 | -extern bool target_page_bits_decided; |
18 | +static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s) | 27 | -extern int target_page_bits; |
19 | +{ | 28 | -#define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \ |
20 | + siglongjmp(s->jmp_trans, -2); | 29 | - target_page_bits; }) |
21 | +} | 30 | +typedef struct { |
31 | + bool decided; | ||
32 | + int bits; | ||
33 | +} TargetPageBits; | ||
34 | +#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY) | ||
35 | +extern const TargetPageBits target_page; | ||
36 | +#else | ||
37 | +extern TargetPageBits target_page; | ||
38 | +#endif | ||
39 | +#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; }) | ||
40 | #else | ||
41 | #define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS | ||
42 | #endif | ||
43 | diff --git a/exec-vary.c b/exec-vary.c | ||
44 | index XXXXXXX..XXXXXXX 100644 | ||
45 | --- a/exec-vary.c | ||
46 | +++ b/exec-vary.c | ||
47 | @@ -XXX,XX +XXX,XX @@ | ||
48 | |||
49 | #include "qemu/osdep.h" | ||
50 | #include "qemu-common.h" | ||
22 | + | 51 | + |
23 | #define C_PFX1(P, A) P##A | 52 | +#define IN_EXEC_VARY 1 |
24 | #define C_PFX2(P, A, B) P##A##_##B | 53 | + |
25 | #define C_PFX3(P, A, B, C) P##A##_##B##_##C | 54 | #include "exec/exec-all.h" |
26 | @@ -XXX,XX +XXX,XX @@ static TCGTemp *tcg_temp_alloc(TCGContext *s) | 55 | |
27 | int n = s->nb_temps++; | 56 | #ifdef TARGET_PAGE_BITS_VARY |
28 | 57 | -int target_page_bits; | |
29 | if (n >= TCG_MAX_TEMPS) { | 58 | -bool target_page_bits_decided; |
30 | - /* Signal overflow, starting over with fewer guest insns. */ | 59 | +# ifdef CONFIG_ATTRIBUTE_ALIAS |
31 | - siglongjmp(s->jmp_trans, -2); | 60 | +/* |
32 | + tcg_raise_tb_overflow(s); | 61 | + * We want to declare the "target_page" variable as const, which tells |
62 | + * the compiler that it can cache any value that it reads across calls. | ||
63 | + * This avoids multiple assertions and multiple reads within any one user. | ||
64 | + * | ||
65 | + * This works because we finish initializing the data before we ever read | ||
66 | + * from the "target_page" symbol. | ||
67 | + * | ||
68 | + * This also requires that we have a non-constant symbol by which we can | ||
69 | + * perform the actual initialization, and which forces the data to be | ||
70 | + * allocated within writable memory. Thus "init_target_page", and we use | ||
71 | + * that symbol exclusively in the two functions that initialize this value. | ||
72 | + * | ||
73 | + * The "target_page" symbol is created as an alias of "init_target_page". | ||
74 | + */ | ||
75 | +static TargetPageBits init_target_page; | ||
76 | + | ||
77 | +/* | ||
78 | + * Note that this is *not* a redundant decl; this is the definition of | ||
79 | + * the "target_page" symbol. The syntax for this definition requires | ||
80 | + * the use of the extern keyword. This seems to be a GCC bug in | ||
81 | + * either the syntax for the alias attribute or in -Wredundant-decls. | ||
82 | + * | ||
83 | + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91765 | ||
84 | + */ | ||
85 | +# pragma GCC diagnostic push | ||
86 | +# pragma GCC diagnostic ignored "-Wredundant-decls" | ||
87 | + | ||
88 | +extern const TargetPageBits target_page | ||
89 | + __attribute__((alias("init_target_page"))); | ||
90 | + | ||
91 | +# pragma GCC diagnostic pop | ||
92 | +# else | ||
93 | +/* | ||
94 | + * When aliases are not supported then we force two different declarations, | ||
95 | + * by way of suppressing the header declaration with IN_EXEC_VARY. | ||
96 | + * We assume that on such an old compiler, LTO cannot be used, and so the | ||
97 | + * compiler cannot detect the mismatched declarations, and all is well. | ||
98 | + */ | ||
99 | +TargetPageBits target_page; | ||
100 | +# define init_target_page target_page | ||
101 | +# endif | ||
102 | #endif | ||
103 | |||
104 | bool set_preferred_target_page_bits(int bits) | ||
105 | @@ -XXX,XX +XXX,XX @@ bool set_preferred_target_page_bits(int bits) | ||
106 | */ | ||
107 | #ifdef TARGET_PAGE_BITS_VARY | ||
108 | assert(bits >= TARGET_PAGE_BITS_MIN); | ||
109 | - if (target_page_bits == 0 || target_page_bits > bits) { | ||
110 | - if (target_page_bits_decided) { | ||
111 | + if (init_target_page.bits == 0 || init_target_page.bits > bits) { | ||
112 | + if (init_target_page.decided) { | ||
113 | return false; | ||
114 | } | ||
115 | - target_page_bits = bits; | ||
116 | + init_target_page.bits = bits; | ||
33 | } | 117 | } |
34 | return memset(&s->temps[n], 0, sizeof(TCGTemp)); | 118 | #endif |
119 | return true; | ||
120 | @@ -XXX,XX +XXX,XX @@ bool set_preferred_target_page_bits(int bits) | ||
121 | void finalize_target_page_bits(void) | ||
122 | { | ||
123 | #ifdef TARGET_PAGE_BITS_VARY | ||
124 | - if (target_page_bits == 0) { | ||
125 | - target_page_bits = TARGET_PAGE_BITS_MIN; | ||
126 | + if (init_target_page.bits == 0) { | ||
127 | + init_target_page.bits = TARGET_PAGE_BITS_MIN; | ||
128 | } | ||
129 | - target_page_bits_decided = true; | ||
130 | + init_target_page.decided = true; | ||
131 | + | ||
132 | + /* | ||
133 | + * For the benefit of an -flto build, prevent the compiler from | ||
134 | + * hoisting a read from target_page before we finish initializing. | ||
135 | + */ | ||
136 | + barrier(); | ||
137 | #endif | ||
35 | } | 138 | } |
36 | -- | 139 | -- |
37 | 2.25.1 | 140 | 2.17.1 |
38 | 141 | ||
39 | 142 | diff view generated by jsdifflib |
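
For readers unfamiliar with the alias attribute used above, a minimal stand-alone demonstration follows. It assumes GCC or Clang (the attribute is not ISO C, which is exactly why configure has to probe for it), and the symbol names here are made up, not QEMU symbols:

    #include <stdio.h>

    static int init_value;          /* writable, touched only during setup */
    extern const int ro_value __attribute__((alias("init_value")));

    int main(void)
    {
        init_value = 4096;          /* initialize via the writable name */
        printf("%d\n", ro_value);   /* everyone else reads the const alias */
        return 0;
    }

A compiler without alias support (such as Xcode 9's) rejects the ro_value declaration, which is the failure the new configure probe detects before CONFIG_ATTRIBUTE_ALIAS is recorded.
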
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | 1 | This reduces the size of a release build by about 10k. |
---|---|---|---|
2 | cases that are identical between 32-bit and 64-bit hosts. | 2 | Noticeably, within the tlb miss helpers. |
3 | 3 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> |
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> |
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | 6 | Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> |
7 | [PMD: Split patch as 5/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-6-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 8 | --- |
12 | tcg/tci/tcg-target.c.inc | 49 ++++++++++++---------------------------- | 9 | include/exec/cpu-all.h | 4 ++++ |
13 | 1 file changed, 14 insertions(+), 35 deletions(-) | 10 | 1 file changed, 4 insertions(+) |
14 | 11 | ||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | 12 | diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h |
16 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/tcg/tci/tcg-target.c.inc | 14 | --- a/include/exec/cpu-all.h |
18 | +++ b/tcg/tci/tcg-target.c.inc | 15 | +++ b/include/exec/cpu-all.h |
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | 16 | @@ -XXX,XX +XXX,XX @@ extern const TargetPageBits target_page; |
20 | tcg_out8(s, args[5]); /* condition */ | 17 | #else |
21 | break; | 18 | extern TargetPageBits target_page; |
22 | #endif | 19 | #endif |
23 | - case INDEX_op_ld8u_i32: | 20 | +#ifdef CONFIG_DEBUG_TCG |
24 | - case INDEX_op_ld8s_i32: | 21 | #define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; }) |
25 | - case INDEX_op_ld16u_i32: | 22 | #else |
26 | - case INDEX_op_ld16s_i32: | 23 | +#define TARGET_PAGE_BITS target_page.bits |
27 | + | 24 | +#endif |
28 | + CASE_32_64(ld8u) | 25 | +#else |
29 | + CASE_32_64(ld8s) | 26 | #define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS |
30 | + CASE_32_64(ld16u) | ||
31 | + CASE_32_64(ld16s) | ||
32 | case INDEX_op_ld_i32: | ||
33 | - case INDEX_op_st8_i32: | ||
34 | - case INDEX_op_st16_i32: | ||
35 | + CASE_64(ld32u) | ||
36 | + CASE_64(ld32s) | ||
37 | + CASE_64(ld) | ||
38 | + CASE_32_64(st8) | ||
39 | + CASE_32_64(st16) | ||
40 | case INDEX_op_st_i32: | ||
41 | - case INDEX_op_ld8u_i64: | ||
42 | - case INDEX_op_ld8s_i64: | ||
43 | - case INDEX_op_ld16u_i64: | ||
44 | - case INDEX_op_ld16s_i64: | ||
45 | - case INDEX_op_ld32u_i64: | ||
46 | - case INDEX_op_ld32s_i64: | ||
47 | - case INDEX_op_ld_i64: | ||
48 | - case INDEX_op_st8_i64: | ||
49 | - case INDEX_op_st16_i64: | ||
50 | - case INDEX_op_st32_i64: | ||
51 | - case INDEX_op_st_i64: | ||
52 | + CASE_64(st32) | ||
53 | + CASE_64(st) | ||
54 | stack_bounds_check(args[1], args[2]); | ||
55 | tcg_out_r(s, args[0]); | ||
56 | tcg_out_r(s, args[1]); | ||
57 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
58 | #endif | 27 | #endif |
59 | 28 | ||
60 | case INDEX_op_qemu_ld_i32: | ||
61 | - tcg_out_r(s, *args++); | ||
62 | - tcg_out_r(s, *args++); | ||
63 | - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { | ||
64 | - tcg_out_r(s, *args++); | ||
65 | - } | ||
66 | - tcg_out_i(s, *args++); | ||
67 | - break; | ||
68 | - case INDEX_op_qemu_ld_i64: | ||
69 | - tcg_out_r(s, *args++); | ||
70 | - if (TCG_TARGET_REG_BITS == 32) { | ||
71 | - tcg_out_r(s, *args++); | ||
72 | - } | ||
73 | - tcg_out_r(s, *args++); | ||
74 | - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { | ||
75 | - tcg_out_r(s, *args++); | ||
76 | - } | ||
77 | - tcg_out_i(s, *args++); | ||
78 | - break; | ||
79 | case INDEX_op_qemu_st_i32: | ||
80 | tcg_out_r(s, *args++); | ||
81 | tcg_out_r(s, *args++); | ||
82 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
83 | } | ||
84 | tcg_out_i(s, *args++); | ||
85 | break; | ||
86 | + | ||
87 | + case INDEX_op_qemu_ld_i64: | ||
88 | case INDEX_op_qemu_st_i64: | ||
89 | tcg_out_r(s, *args++); | ||
90 | if (TCG_TARGET_REG_BITS == 32) { | ||
91 | -- | 29 | -- |
92 | 2.25.1 | 30 | 2.17.1 |
93 | 31 | ||
94 | 32 | diff view generated by jsdifflib |
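
The CASE_32_64/CASE_64 macros used above rely only on standard token pasting; a self-contained sketch (with invented opcode names, not the QEMU enum) shows the effect:

    /*
     * Token pasting turns one macro argument into both the 32-bit and
     * the 64-bit case label, so identical handling is written once.
     */
    #include <stdio.h>

    #define xglue(a, b) a##b
    #define glue(a, b) xglue(a, b)   /* expand arguments before pasting */

    enum { INDEX_op_ld8u_i32, INDEX_op_ld8u_i64, INDEX_op_st8_i32 };

    #define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):

    static const char *describe(int op)
    {
        switch (op) {
        CASE_32_64(ld8u)             /* expands to both ld8u case labels */
            return "8-bit zero-extending load";
        default:
            return "other";
        }
    }

    int main(void)
    {
        printf("%s\n", describe(INDEX_op_ld8u_i64));
        return 0;
    }

On a 32-bit host, CASE_64 expands to nothing, so the 64-bit-only labels simply disappear instead of needing an #ifdef around each case.
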
1 | In all cases restricted to 64-bit hosts, tci_read_r is | 1 | There are some uint64_t uses that expect TARGET_PAGE_MASK to |
---|---|---|---|
2 | identical. We retain the 64-bit symbol for the single | 2 | extend for a 32-bit target, so this must continue to be a signed type. |
3 | case of INDEX_op_qemu_st_i64. | 3 | Define based on TARGET_PAGE_BITS not TARGET_PAGE_SIZE; this |
4 | will make a following patch more clear. | ||
4 | 5 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 6 | This should not have a functional effect so far. |
7 | |||
8 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> | ||
9 | Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 11 | --- |
8 | tcg/tci.c | 93 +++++++++++++++++++++++++------------------------------ | 12 | include/exec/cpu-all.h | 2 +- |
9 | 1 file changed, 42 insertions(+), 51 deletions(-) | 13 | 1 file changed, 1 insertion(+), 1 deletion(-) |
10 | 14 | ||
11 | diff --git a/tcg/tci.c b/tcg/tci.c | 15 | diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h |
12 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tcg/tci.c | 17 | --- a/include/exec/cpu-all.h |
14 | +++ b/tcg/tci.c | 18 | +++ b/include/exec/cpu-all.h |
15 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) | 19 | @@ -XXX,XX +XXX,XX @@ extern TargetPageBits target_page; |
16 | return regs[index]; | ||
17 | } | ||
18 | |||
19 | -#if TCG_TARGET_REG_BITS == 64 | ||
20 | -static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index) | ||
21 | -{ | ||
22 | - return tci_read_reg(regs, index); | ||
23 | -} | ||
24 | -#endif | ||
25 | - | ||
26 | static void | ||
27 | tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value) | ||
28 | { | ||
29 | @@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
30 | static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
31 | const uint8_t **tb_ptr) | ||
32 | { | ||
33 | - uint64_t value = tci_read_reg64(regs, **tb_ptr); | ||
34 | - *tb_ptr += 1; | ||
35 | - return value; | ||
36 | + return tci_read_r(regs, tb_ptr); | ||
37 | } | ||
38 | #endif | 20 | #endif |
39 | 21 | ||
40 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | 22 | #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) |
41 | #elif TCG_TARGET_REG_BITS == 64 | 23 | -#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1) |
42 | case INDEX_op_setcond_i64: | 24 | +#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS) |
43 | t0 = *tb_ptr++; | 25 | #define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE) |
44 | - t1 = tci_read_r64(regs, &tb_ptr); | 26 | |
45 | - t2 = tci_read_r64(regs, &tb_ptr); | 27 | /* Using intptr_t ensures that qemu_*_page_mask is sign-extended even |
46 | + t1 = tci_read_r(regs, &tb_ptr); | ||
47 | + t2 = tci_read_r(regs, &tb_ptr); | ||
48 | condition = *tb_ptr++; | ||
49 | tci_write_reg(regs, t0, tci_compare64(t1, t2, condition)); | ||
50 | break; | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | #if TCG_TARGET_REG_BITS == 64 | ||
53 | case INDEX_op_mov_i64: | ||
54 | t0 = *tb_ptr++; | ||
55 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
56 | + t1 = tci_read_r(regs, &tb_ptr); | ||
57 | tci_write_reg(regs, t0, t1); | ||
58 | break; | ||
59 | case INDEX_op_tci_movi_i64: | ||
60 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
61 | tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2)); | ||
62 | break; | ||
63 | case INDEX_op_st_i64: | ||
64 | - t0 = tci_read_r64(regs, &tb_ptr); | ||
65 | + t0 = tci_read_r(regs, &tb_ptr); | ||
66 | t1 = tci_read_r(regs, &tb_ptr); | ||
67 | t2 = tci_read_s32(&tb_ptr); | ||
68 | *(uint64_t *)(t1 + t2) = t0; | ||
69 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
70 | |||
71 | case INDEX_op_add_i64: | ||
72 | t0 = *tb_ptr++; | ||
73 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
74 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
75 | + t1 = tci_read_r(regs, &tb_ptr); | ||
76 | + t2 = tci_read_r(regs, &tb_ptr); | ||
77 | tci_write_reg(regs, t0, t1 + t2); | ||
78 | break; | ||
79 | case INDEX_op_sub_i64: | ||
80 | t0 = *tb_ptr++; | ||
81 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
82 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
83 | + t1 = tci_read_r(regs, &tb_ptr); | ||
84 | + t2 = tci_read_r(regs, &tb_ptr); | ||
85 | tci_write_reg(regs, t0, t1 - t2); | ||
86 | break; | ||
87 | case INDEX_op_mul_i64: | ||
88 | t0 = *tb_ptr++; | ||
89 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
90 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
91 | + t1 = tci_read_r(regs, &tb_ptr); | ||
92 | + t2 = tci_read_r(regs, &tb_ptr); | ||
93 | tci_write_reg(regs, t0, t1 * t2); | ||
94 | break; | ||
95 | case INDEX_op_div_i64: | ||
96 | t0 = *tb_ptr++; | ||
97 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
98 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
99 | + t1 = tci_read_r(regs, &tb_ptr); | ||
100 | + t2 = tci_read_r(regs, &tb_ptr); | ||
101 | tci_write_reg(regs, t0, (int64_t)t1 / (int64_t)t2); | ||
102 | break; | ||
103 | case INDEX_op_divu_i64: | ||
104 | t0 = *tb_ptr++; | ||
105 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
106 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
107 | + t1 = tci_read_r(regs, &tb_ptr); | ||
108 | + t2 = tci_read_r(regs, &tb_ptr); | ||
109 | tci_write_reg(regs, t0, (uint64_t)t1 / (uint64_t)t2); | ||
110 | break; | ||
111 | case INDEX_op_rem_i64: | ||
112 | t0 = *tb_ptr++; | ||
113 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
114 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
115 | + t1 = tci_read_r(regs, &tb_ptr); | ||
116 | + t2 = tci_read_r(regs, &tb_ptr); | ||
117 | tci_write_reg(regs, t0, (int64_t)t1 % (int64_t)t2); | ||
118 | break; | ||
119 | case INDEX_op_remu_i64: | ||
120 | t0 = *tb_ptr++; | ||
121 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
122 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
123 | + t1 = tci_read_r(regs, &tb_ptr); | ||
124 | + t2 = tci_read_r(regs, &tb_ptr); | ||
125 | tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2); | ||
126 | break; | ||
127 | case INDEX_op_and_i64: | ||
128 | t0 = *tb_ptr++; | ||
129 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
130 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
131 | + t1 = tci_read_r(regs, &tb_ptr); | ||
132 | + t2 = tci_read_r(regs, &tb_ptr); | ||
133 | tci_write_reg(regs, t0, t1 & t2); | ||
134 | break; | ||
135 | case INDEX_op_or_i64: | ||
136 | t0 = *tb_ptr++; | ||
137 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
138 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
139 | + t1 = tci_read_r(regs, &tb_ptr); | ||
140 | + t2 = tci_read_r(regs, &tb_ptr); | ||
141 | tci_write_reg(regs, t0, t1 | t2); | ||
142 | break; | ||
143 | case INDEX_op_xor_i64: | ||
144 | t0 = *tb_ptr++; | ||
145 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
146 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
147 | + t1 = tci_read_r(regs, &tb_ptr); | ||
148 | + t2 = tci_read_r(regs, &tb_ptr); | ||
149 | tci_write_reg(regs, t0, t1 ^ t2); | ||
150 | break; | ||
151 | |||
152 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
153 | |||
154 | case INDEX_op_shl_i64: | ||
155 | t0 = *tb_ptr++; | ||
156 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
157 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
158 | + t1 = tci_read_r(regs, &tb_ptr); | ||
159 | + t2 = tci_read_r(regs, &tb_ptr); | ||
160 | tci_write_reg(regs, t0, t1 << (t2 & 63)); | ||
161 | break; | ||
162 | case INDEX_op_shr_i64: | ||
163 | t0 = *tb_ptr++; | ||
164 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
165 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
166 | + t1 = tci_read_r(regs, &tb_ptr); | ||
167 | + t2 = tci_read_r(regs, &tb_ptr); | ||
168 | tci_write_reg(regs, t0, t1 >> (t2 & 63)); | ||
169 | break; | ||
170 | case INDEX_op_sar_i64: | ||
171 | t0 = *tb_ptr++; | ||
172 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
173 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
174 | + t1 = tci_read_r(regs, &tb_ptr); | ||
175 | + t2 = tci_read_r(regs, &tb_ptr); | ||
176 | tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63))); | ||
177 | break; | ||
178 | #if TCG_TARGET_HAS_rot_i64 | ||
179 | case INDEX_op_rotl_i64: | ||
180 | t0 = *tb_ptr++; | ||
181 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
182 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
183 | + t1 = tci_read_r(regs, &tb_ptr); | ||
184 | + t2 = tci_read_r(regs, &tb_ptr); | ||
185 | tci_write_reg(regs, t0, rol64(t1, t2 & 63)); | ||
186 | break; | ||
187 | case INDEX_op_rotr_i64: | ||
188 | t0 = *tb_ptr++; | ||
189 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
190 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
191 | + t1 = tci_read_r(regs, &tb_ptr); | ||
192 | + t2 = tci_read_r(regs, &tb_ptr); | ||
193 | tci_write_reg(regs, t0, ror64(t1, t2 & 63)); | ||
194 | break; | ||
195 | #endif | ||
196 | #if TCG_TARGET_HAS_deposit_i64 | ||
197 | case INDEX_op_deposit_i64: | ||
198 | t0 = *tb_ptr++; | ||
199 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
200 | - t2 = tci_read_r64(regs, &tb_ptr); | ||
201 | + t1 = tci_read_r(regs, &tb_ptr); | ||
202 | + t2 = tci_read_r(regs, &tb_ptr); | ||
203 | tmp16 = *tb_ptr++; | ||
204 | tmp8 = *tb_ptr++; | ||
205 | tmp64 = (((1ULL << tmp8) - 1) << tmp16); | ||
206 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
207 | break; | ||
208 | #endif | ||
209 | case INDEX_op_brcond_i64: | ||
210 | - t0 = tci_read_r64(regs, &tb_ptr); | ||
211 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
212 | + t0 = tci_read_r(regs, &tb_ptr); | ||
213 | + t1 = tci_read_r(regs, &tb_ptr); | ||
214 | condition = *tb_ptr++; | ||
215 | label = tci_read_label(&tb_ptr); | ||
216 | if (tci_compare64(t0, t1, condition)) { | ||
217 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
218 | #if TCG_TARGET_HAS_bswap64_i64 | ||
219 | case INDEX_op_bswap64_i64: | ||
220 | t0 = *tb_ptr++; | ||
221 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
222 | + t1 = tci_read_r(regs, &tb_ptr); | ||
223 | tci_write_reg(regs, t0, bswap64(t1)); | ||
224 | break; | ||
225 | #endif | ||
226 | #if TCG_TARGET_HAS_not_i64 | ||
227 | case INDEX_op_not_i64: | ||
228 | t0 = *tb_ptr++; | ||
229 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
230 | + t1 = tci_read_r(regs, &tb_ptr); | ||
231 | tci_write_reg(regs, t0, ~t1); | ||
232 | break; | ||
233 | #endif | ||
234 | #if TCG_TARGET_HAS_neg_i64 | ||
235 | case INDEX_op_neg_i64: | ||
236 | t0 = *tb_ptr++; | ||
237 | - t1 = tci_read_r64(regs, &tb_ptr); | ||
238 | + t1 = tci_read_r(regs, &tb_ptr); | ||
239 | tci_write_reg(regs, t0, -t1); | ||
240 | break; | ||
241 | #endif | ||
242 | -- | 28 | -- |
243 | 2.25.1 | 29 | 2.17.1 |
244 | 30 | ||
245 | 31 | diff view generated by jsdifflib |
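
Why the mask must stay a signed type is easy to demonstrate outside QEMU. In this sketch the fixed widths stand in for a 32-bit target_long being widened to a 64-bit value (variable names are illustrative; the patch keeps TARGET_PAGE_MASK defined as ((target_long)-1 << TARGET_PAGE_BITS) for exactly this reason):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const int bits = 12;                      /* 4 KiB pages */
        int32_t smask = (int32_t)(~0u << bits);   /* signed mask */
        uint32_t umask = ~((1u << bits) - 1);     /* unsigned equivalent */

        /* Sign extension keeps the high address bits set... */
        printf("%016" PRIx64 "\n", (uint64_t)(int64_t)smask);
        /* ...while zero extension silently clears them. */
        printf("%016" PRIx64 "\n", (uint64_t)umask);
        return 0;
    }

The first line prints fffffffffffff000, the second 00000000fffff000 — the uint64_t uses mentioned in the commit message need the former.
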
1 | From: Alex Bennée <alex.bennee@linaro.org> | 1 | This eliminates a set of runtime shifts. It turns out that we |
---|---|---|---|
2 | require TARGET_PAGE_MASK more often than TARGET_PAGE_SIZE, so | ||
3 | redefine TARGET_PAGE_SIZE based on TARGET_PAGE_MASK instead of | ||
4 | the other way around. | ||
2 | 5 | ||
3 | Having a function return either and valid TB and some system state | 6 | Reviewed-by: Alex Bennée <alex.bennee@linaro.org> |
4 | seems excessive. It will make the subsequent re-factoring easier if we | 7 | Reviewed-by: David Hildenbrand <david@redhat.com> |
5 | lookup the current state where we are. | 8 | Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> |
6 | |||
7 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> | ||
8 | Message-Id: <20210224165811.11567-2-alex.bennee@linaro.org> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 10 | --- |
11 | include/exec/tb-lookup.h | 18 ++++++++---------- | 11 | include/exec/cpu-all.h | 8 ++++++-- |
12 | accel/tcg/cpu-exec.c | 10 ++++++++-- | 12 | exec-vary.c | 1 + |
13 | accel/tcg/tcg-runtime.c | 4 +++- | 13 | 2 files changed, 7 insertions(+), 2 deletions(-) |
14 | 3 files changed, 19 insertions(+), 13 deletions(-) | ||
15 | 14 | ||
16 | diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h | 15 | diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h |
17 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/include/exec/tb-lookup.h | 17 | --- a/include/exec/cpu-all.h |
19 | +++ b/include/exec/tb-lookup.h | 18 | +++ b/include/exec/cpu-all.h |
20 | @@ -XXX,XX +XXX,XX @@ | 19 | @@ -XXX,XX +XXX,XX @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val |
21 | #include "exec/tb-hash.h" | 20 | typedef struct { |
22 | 21 | bool decided; | |
23 | /* Might cause an exception, so have a longjmp destination ready */ | 22 | int bits; |
24 | -static inline TranslationBlock * | 23 | + target_long mask; |
25 | -tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base, | 24 | } TargetPageBits; |
26 | - uint32_t *flags, uint32_t cf_mask) | 25 | #if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY) |
27 | +static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | 26 | extern const TargetPageBits target_page; |
28 | + target_ulong cs_base, | 27 | @@ -XXX,XX +XXX,XX @@ extern TargetPageBits target_page; |
29 | + uint32_t flags, uint32_t cf_mask) | 28 | #endif |
30 | { | 29 | #ifdef CONFIG_DEBUG_TCG |
31 | - CPUArchState *env = (CPUArchState *)cpu->env_ptr; | 30 | #define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; }) |
32 | TranslationBlock *tb; | 31 | +#define TARGET_PAGE_MASK ({ assert(target_page.decided); target_page.mask; }) |
33 | uint32_t hash; | 32 | #else |
34 | 33 | #define TARGET_PAGE_BITS target_page.bits | |
35 | - cpu_get_tb_cpu_state(env, pc, cs_base, flags); | 34 | +#define TARGET_PAGE_MASK target_page.mask |
36 | - hash = tb_jmp_cache_hash_func(*pc); | 35 | #endif |
37 | + hash = tb_jmp_cache_hash_func(pc); | 36 | +#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK) |
38 | tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); | 37 | #else |
39 | 38 | #define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS | |
40 | cf_mask &= ~CF_CLUSTER_MASK; | 39 | +#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) |
41 | cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT; | 40 | +#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS) |
42 | 41 | #endif | |
43 | if (likely(tb && | 42 | |
44 | - tb->pc == *pc && | 43 | -#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) |
45 | - tb->cs_base == *cs_base && | 44 | -#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS) |
46 | - tb->flags == *flags && | 45 | #define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE) |
47 | + tb->pc == pc && | 46 | |
48 | + tb->cs_base == cs_base && | 47 | /* Using intptr_t ensures that qemu_*_page_mask is sign-extended even |
49 | + tb->flags == flags && | 48 | diff --git a/exec-vary.c b/exec-vary.c |
50 | tb->trace_vcpu_dstate == *cpu->trace_dstate && | 49 | index XXXXXXX..XXXXXXX 100644 |
51 | (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) { | 50 | --- a/exec-vary.c |
52 | return tb; | 51 | +++ b/exec-vary.c |
52 | @@ -XXX,XX +XXX,XX @@ void finalize_target_page_bits(void) | ||
53 | if (init_target_page.bits == 0) { | ||
54 | init_target_page.bits = TARGET_PAGE_BITS_MIN; | ||
53 | } | 55 | } |
54 | - tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask); | 56 | + init_target_page.mask = (target_long)-1 << init_target_page.bits; |
55 | + tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask); | 57 | init_target_page.decided = true; |
56 | if (tb == NULL) { | 58 | |
57 | return NULL; | 59 | /* |
58 | } | ||
59 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/accel/tcg/cpu-exec.c | ||
62 | +++ b/accel/tcg/cpu-exec.c | ||
63 | @@ -XXX,XX +XXX,XX @@ static void cpu_exec_exit(CPUState *cpu) | ||
64 | |||
65 | void cpu_exec_step_atomic(CPUState *cpu) | ||
66 | { | ||
67 | + CPUArchState *env = (CPUArchState *)cpu->env_ptr; | ||
68 | TranslationBlock *tb; | ||
69 | target_ulong cs_base, pc; | ||
70 | uint32_t flags; | ||
71 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) | ||
72 | g_assert(!cpu->running); | ||
73 | cpu->running = true; | ||
74 | |||
75 | - tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask); | ||
76 | + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
77 | + tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
78 | + | ||
79 | if (tb == NULL) { | ||
80 | mmap_lock(); | ||
81 | tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); | ||
82 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_find(CPUState *cpu, | ||
83 | TranslationBlock *last_tb, | ||
84 | int tb_exit, uint32_t cf_mask) | ||
85 | { | ||
86 | + CPUArchState *env = (CPUArchState *)cpu->env_ptr; | ||
87 | TranslationBlock *tb; | ||
88 | target_ulong cs_base, pc; | ||
89 | uint32_t flags; | ||
90 | |||
91 | - tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask); | ||
92 | + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
93 | + | ||
94 | + tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
95 | if (tb == NULL) { | ||
96 | mmap_lock(); | ||
97 | tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask); | ||
98 | diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/accel/tcg/tcg-runtime.c | ||
101 | +++ b/accel/tcg/tcg-runtime.c | ||
102 | @@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) | ||
103 | target_ulong cs_base, pc; | ||
104 | uint32_t flags; | ||
105 | |||
106 | - tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags()); | ||
107 | + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
108 | + | ||
109 | + tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags()); | ||
110 | if (tb == NULL) { | ||
111 | return tcg_code_gen_epilogue; | ||
112 | } | ||
113 | -- | 60 | -- |
114 | 2.25.1 | 61 | 2.17.1 |
115 | 62 | ||
116 | 63 | diff view generated by jsdifflib |
1 | Fix a typo in the encoding of the cmle (zero) instruction. | 1 | Using uintptr_t instead of target_ulong meant that, for 64-bit guest |
---|---|---|---|
2 | and 32-bit host, we truncated the guest address comparator and so may | ||
3 | not hit the tlb when we should. | ||
2 | 4 | ||
3 | Fixes: 14e4c1e2355 ("tcg/aarch64: Add vector operations") | 5 | Fixes: 4811e9095c0 |
6 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | --- | 8 | --- |
6 | tcg/aarch64/tcg-target.c.inc | 2 +- | 9 | accel/tcg/cputlb.c | 2 +- |
7 | 1 file changed, 1 insertion(+), 1 deletion(-) | 10 | 1 file changed, 1 insertion(+), 1 deletion(-) |
8 | 11 | ||
9 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | 12 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c |
10 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
11 | --- a/tcg/aarch64/tcg-target.c.inc | 14 | --- a/accel/tcg/cputlb.c |
12 | +++ b/tcg/aarch64/tcg-target.c.inc | 15 | +++ b/accel/tcg/cputlb.c |
13 | @@ -XXX,XX +XXX,XX @@ typedef enum { | 16 | @@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, |
14 | I3617_CMEQ0 = 0x0e209800, | 17 | MMUAccessType access_type, int mmu_idx) |
15 | I3617_CMLT0 = 0x0e20a800, | 18 | { |
16 | I3617_CMGE0 = 0x2e208800, | 19 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); |
17 | - I3617_CMLE0 = 0x2e20a800, | 20 | - uintptr_t tlb_addr, page; |
18 | + I3617_CMLE0 = 0x2e209800, | 21 | + target_ulong tlb_addr, page; |
19 | I3617_NOT = 0x2e205800, | 22 | size_t elt_ofs; |
20 | I3617_ABS = 0x0e20b800, | 23 | |
21 | I3617_NEG = 0x2e20b800, | 24 | switch (access_type) { |
22 | -- | 25 | -- |
23 | 2.25.1 | 26 | 2.17.1 |
24 | 27 | ||
25 | 28 | diff view generated by jsdifflib |
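
The truncation fixed in the cputlb.c patch above can be reproduced in a few lines. This sketch (illustrative names, not QEMU code) shows how a 32-bit comparator makes two distinct 64-bit guest pages compare equal, which is why the locals were switched to target_ulong:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t target_ulong;            /* 64-bit guest */

    int main(void)
    {
        target_ulong tlb_addr = 0x100000000ULL;   /* TLB entry comparator */
        target_ulong addr     = 0x000000000ULL;   /* access to another page */

        /* what a 32-bit host's uintptr_t would have held */
        uint32_t bad_cmp = (uint32_t)tlb_addr;

        printf("truncated:  %s\n",
               bad_cmp == (uint32_t)addr ? "false hit" : "miss");
        printf("full width: %s\n",
               tlb_addr == addr ? "hit" : "miss");
        return 0;
    }

With the truncated comparator the two addresses collide; at full width the lookup correctly misses.
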
Deleted patch | |||
---|---|---|---|
1 | Use the provided cpu_ldst.h interfaces. This fixes the build vs | ||
2 | the unconverted uses of g2h(), adds missed memory trace events, | ||
3 | and correctly recognizes when a SIGSEGV belongs to the guest via | ||
4 | set_helper_retaddr(). | ||
5 | 1 | ||
6 | Fixes: 3e8f1628e864 | ||
7 | Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | --- | ||
10 | tcg/tci.c | 73 +++++++++++++++++++++---------------------------------- | ||
11 | 1 file changed, 28 insertions(+), 45 deletions(-) | ||
12 | |||
13 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/tcg/tci.c | ||
16 | +++ b/tcg/tci.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) | ||
18 | return result; | ||
19 | } | ||
20 | |||
21 | -#ifdef CONFIG_SOFTMMU | ||
22 | -# define qemu_ld_ub \ | ||
23 | - helper_ret_ldub_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
24 | -# define qemu_ld_leuw \ | ||
25 | - helper_le_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
26 | -# define qemu_ld_leul \ | ||
27 | - helper_le_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
28 | -# define qemu_ld_leq \ | ||
29 | - helper_le_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
30 | -# define qemu_ld_beuw \ | ||
31 | - helper_be_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
32 | -# define qemu_ld_beul \ | ||
33 | - helper_be_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
34 | -# define qemu_ld_beq \ | ||
35 | - helper_be_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr) | ||
36 | -# define qemu_st_b(X) \ | ||
37 | - helper_ret_stb_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
38 | -# define qemu_st_lew(X) \ | ||
39 | - helper_le_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
40 | -# define qemu_st_lel(X) \ | ||
41 | - helper_le_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
42 | -# define qemu_st_leq(X) \ | ||
43 | - helper_le_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
44 | -# define qemu_st_bew(X) \ | ||
45 | - helper_be_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
46 | -# define qemu_st_bel(X) \ | ||
47 | - helper_be_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
48 | -# define qemu_st_beq(X) \ | ||
49 | - helper_be_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr) | ||
50 | -#else | ||
51 | -# define qemu_ld_ub ldub_p(g2h(taddr)) | ||
52 | -# define qemu_ld_leuw lduw_le_p(g2h(taddr)) | ||
53 | -# define qemu_ld_leul (uint32_t)ldl_le_p(g2h(taddr)) | ||
54 | -# define qemu_ld_leq ldq_le_p(g2h(taddr)) | ||
55 | -# define qemu_ld_beuw lduw_be_p(g2h(taddr)) | ||
56 | -# define qemu_ld_beul (uint32_t)ldl_be_p(g2h(taddr)) | ||
57 | -# define qemu_ld_beq ldq_be_p(g2h(taddr)) | ||
58 | -# define qemu_st_b(X) stb_p(g2h(taddr), X) | ||
59 | -# define qemu_st_lew(X) stw_le_p(g2h(taddr), X) | ||
60 | -# define qemu_st_lel(X) stl_le_p(g2h(taddr), X) | ||
61 | -# define qemu_st_leq(X) stq_le_p(g2h(taddr), X) | ||
62 | -# define qemu_st_bew(X) stw_be_p(g2h(taddr), X) | ||
63 | -# define qemu_st_bel(X) stl_be_p(g2h(taddr), X) | ||
64 | -# define qemu_st_beq(X) stq_be_p(g2h(taddr), X) | ||
65 | -#endif | ||
66 | +#define qemu_ld_ub \ | ||
67 | + cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
68 | +#define qemu_ld_leuw \ | ||
69 | + cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
70 | +#define qemu_ld_leul \ | ||
71 | + cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
72 | +#define qemu_ld_leq \ | ||
73 | + cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
74 | +#define qemu_ld_beuw \ | ||
75 | + cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
76 | +#define qemu_ld_beul \ | ||
77 | + cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
78 | +#define qemu_ld_beq \ | ||
79 | + cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
80 | +#define qemu_st_b(X) \ | ||
81 | + cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
82 | +#define qemu_st_lew(X) \ | ||
83 | + cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
84 | +#define qemu_st_lel(X) \ | ||
85 | + cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
86 | +#define qemu_st_leq(X) \ | ||
87 | + cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
88 | +#define qemu_st_bew(X) \ | ||
89 | + cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
90 | +#define qemu_st_bel(X) \ | ||
91 | + cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
92 | +#define qemu_st_beq(X) \ | ||
93 | + cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr) | ||
94 | |||
95 | #if TCG_TARGET_REG_BITS == 64 | ||
96 | # define CASE_32_64(x) \ | ||
97 | -- | ||
98 | 2.25.1 | ||
99 | |||
100 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | The use in tcg_tb_lookup is given a random pc that comes from the pc | ||
2 | of a signal handler. Do not assert that the pointer is already within | ||
3 | the code gen buffer at all, much less the writable mirror of it. | ||
4 | 1 | ||
5 | Fixes: db0c51a3803 | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | tcg/tcg.c | 20 ++++++++++++++++++-- | ||
9 | 1 file changed, 18 insertions(+), 2 deletions(-) | ||
10 | |||
11 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/tcg/tcg.c | ||
14 | +++ b/tcg/tcg.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static void tcg_region_trees_init(void) | ||
16 | } | ||
17 | } | ||
18 | |||
19 | -static struct tcg_region_tree *tc_ptr_to_region_tree(const void *cp) | ||
20 | +static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p) | ||
21 | { | ||
22 | - void *p = tcg_splitwx_to_rw(cp); | ||
23 | size_t region_idx; | ||
24 | |||
25 | + /* | ||
26 | + * Like tcg_splitwx_to_rw, with no assert. The pc may come from | ||
27 | + * a signal handler over which the caller has no control. | ||
28 | + */ | ||
29 | + if (!in_code_gen_buffer(p)) { | ||
30 | + p -= tcg_splitwx_diff; | ||
31 | + if (!in_code_gen_buffer(p)) { | ||
32 | + return NULL; | ||
33 | + } | ||
34 | + } | ||
35 | + | ||
36 | if (p < region.start_aligned) { | ||
37 | region_idx = 0; | ||
38 | } else { | ||
39 | @@ -XXX,XX +XXX,XX @@ void tcg_tb_insert(TranslationBlock *tb) | ||
40 | { | ||
41 | struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr); | ||
42 | |||
43 | + g_assert(rt != NULL); | ||
44 | qemu_mutex_lock(&rt->lock); | ||
45 | g_tree_insert(rt->tree, &tb->tc, tb); | ||
46 | qemu_mutex_unlock(&rt->lock); | ||
47 | @@ -XXX,XX +XXX,XX @@ void tcg_tb_remove(TranslationBlock *tb) | ||
48 | { | ||
49 | struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr); | ||
50 | |||
51 | + g_assert(rt != NULL); | ||
52 | qemu_mutex_lock(&rt->lock); | ||
53 | g_tree_remove(rt->tree, &tb->tc); | ||
54 | qemu_mutex_unlock(&rt->lock); | ||
55 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr) | ||
56 | TranslationBlock *tb; | ||
57 | struct tb_tc s = { .ptr = (void *)tc_ptr }; | ||
58 | |||
59 | + if (rt == NULL) { | ||
60 | + return NULL; | ||
61 | + } | ||
62 | + | ||
63 | qemu_mutex_lock(&rt->lock); | ||
64 | tb = g_tree_lookup(rt->tree, &s); | ||
65 | qemu_mutex_unlock(&rt->lock); | ||
66 | -- | ||
67 | 2.25.1 | ||
68 | |||
69 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
3 | 1 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 1/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-2-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 85 +++++++++++++++++----------------------- | ||
13 | 1 file changed, 37 insertions(+), 48 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg) | ||
20 | old_code_ptr[1] = s->code_ptr - old_code_ptr; | ||
21 | } | ||
22 | |||
23 | +#if TCG_TARGET_REG_BITS == 64 | ||
24 | +# define CASE_32_64(x) \ | ||
25 | + case glue(glue(INDEX_op_, x), _i64): \ | ||
26 | + case glue(glue(INDEX_op_, x), _i32): | ||
27 | +# define CASE_64(x) \ | ||
28 | + case glue(glue(INDEX_op_, x), _i64): | ||
29 | +#else | ||
30 | +# define CASE_32_64(x) \ | ||
31 | + case glue(glue(INDEX_op_, x), _i32): | ||
32 | +# define CASE_64(x) | ||
33 | +#endif | ||
34 | + | ||
35 | static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
36 | const int *const_args) | ||
37 | { | ||
38 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
39 | case INDEX_op_exit_tb: | ||
40 | tcg_out64(s, args[0]); | ||
41 | break; | ||
42 | + | ||
43 | case INDEX_op_goto_tb: | ||
44 | if (s->tb_jmp_insn_offset) { | ||
45 | /* Direct jump method. */ | ||
46 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
47 | tcg_debug_assert(args[2] == (int32_t)args[2]); | ||
48 | tcg_out32(s, args[2]); | ||
49 | break; | ||
50 | - case INDEX_op_add_i32: | ||
51 | - case INDEX_op_sub_i32: | ||
52 | - case INDEX_op_mul_i32: | ||
53 | - case INDEX_op_and_i32: | ||
54 | - case INDEX_op_andc_i32: /* Optional (TCG_TARGET_HAS_andc_i32). */ | ||
55 | - case INDEX_op_eqv_i32: /* Optional (TCG_TARGET_HAS_eqv_i32). */ | ||
56 | - case INDEX_op_nand_i32: /* Optional (TCG_TARGET_HAS_nand_i32). */ | ||
57 | - case INDEX_op_nor_i32: /* Optional (TCG_TARGET_HAS_nor_i32). */ | ||
58 | - case INDEX_op_or_i32: | ||
59 | - case INDEX_op_orc_i32: /* Optional (TCG_TARGET_HAS_orc_i32). */ | ||
60 | - case INDEX_op_xor_i32: | ||
61 | - case INDEX_op_shl_i32: | ||
62 | - case INDEX_op_shr_i32: | ||
63 | - case INDEX_op_sar_i32: | ||
64 | - case INDEX_op_rotl_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */ | ||
65 | - case INDEX_op_rotr_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */ | ||
66 | + | ||
67 | + CASE_32_64(add) | ||
68 | + CASE_32_64(sub) | ||
69 | + CASE_32_64(mul) | ||
70 | + CASE_32_64(and) | ||
71 | + CASE_32_64(or) | ||
72 | + CASE_32_64(xor) | ||
73 | + CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */ | ||
74 | + CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */ | ||
75 | + CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */ | ||
76 | + CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */ | ||
77 | + CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */ | ||
78 | + CASE_32_64(shl) | ||
79 | + CASE_32_64(shr) | ||
80 | + CASE_32_64(sar) | ||
81 | + CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */ | ||
82 | + CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */ | ||
83 | + CASE_32_64(div) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
84 | + CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
85 | + CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
86 | + CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */ | ||
87 | tcg_out_r(s, args[0]); | ||
88 | tcg_out_r(s, args[1]); | ||
89 | tcg_out_r(s, args[2]); | ||
90 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
91 | break; | ||
92 | |||
93 | #if TCG_TARGET_REG_BITS == 64 | ||
94 | - case INDEX_op_add_i64: | ||
95 | - case INDEX_op_sub_i64: | ||
96 | - case INDEX_op_mul_i64: | ||
97 | - case INDEX_op_and_i64: | ||
98 | - case INDEX_op_andc_i64: /* Optional (TCG_TARGET_HAS_andc_i64). */ | ||
99 | - case INDEX_op_eqv_i64: /* Optional (TCG_TARGET_HAS_eqv_i64). */ | ||
100 | - case INDEX_op_nand_i64: /* Optional (TCG_TARGET_HAS_nand_i64). */ | ||
101 | - case INDEX_op_nor_i64: /* Optional (TCG_TARGET_HAS_nor_i64). */ | ||
102 | - case INDEX_op_or_i64: | ||
103 | - case INDEX_op_orc_i64: /* Optional (TCG_TARGET_HAS_orc_i64). */ | ||
104 | - case INDEX_op_xor_i64: | ||
105 | - case INDEX_op_shl_i64: | ||
106 | - case INDEX_op_shr_i64: | ||
107 | - case INDEX_op_sar_i64: | ||
108 | - case INDEX_op_rotl_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */ | ||
109 | - case INDEX_op_rotr_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */ | ||
110 | - case INDEX_op_div_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
111 | - case INDEX_op_divu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
112 | - case INDEX_op_rem_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
113 | - case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */ | ||
114 | - tcg_out_r(s, args[0]); | ||
115 | - tcg_out_r(s, args[1]); | ||
116 | - tcg_out_r(s, args[2]); | ||
117 | - break; | ||
118 | case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */ | ||
119 | tcg_out_r(s, args[0]); | ||
120 | tcg_out_r(s, args[1]); | ||
121 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
122 | tcg_out_r(s, args[0]); | ||
123 | tcg_out_r(s, args[1]); | ||
124 | break; | ||
125 | - case INDEX_op_div_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
126 | - case INDEX_op_divu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
127 | - case INDEX_op_rem_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
128 | - case INDEX_op_remu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */ | ||
129 | - tcg_out_r(s, args[0]); | ||
130 | - tcg_out_r(s, args[1]); | ||
131 | - tcg_out_r(s, args[2]); | ||
132 | - break; | ||
133 | + | ||
134 | #if TCG_TARGET_REG_BITS == 32 | ||
135 | case INDEX_op_add2_i32: | ||
136 | case INDEX_op_sub2_i32: | ||
137 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
138 | } | ||
139 | tcg_out_i(s, *args++); | ||
140 | break; | ||
141 | + | ||
142 | case INDEX_op_mb: | ||
143 | break; | ||
144 | + | ||
145 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ | ||
146 | case INDEX_op_mov_i64: | ||
147 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ | ||
148 | -- | ||
149 | 2.25.1 | ||
150 | |||
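For reference, the hunk above shows only the tail of the case-merging helpers (the #else branch and the closing #endif). A sketch of what the full pair presumably looks like, reconstructed from that visible fragment rather than quoted from the commit:

    #if TCG_TARGET_REG_BITS == 64
    # define CASE_32_64(x) \
            case glue(glue(INDEX_op_, x), _i32): \
            case glue(glue(INDEX_op_, x), _i64):
    # define CASE_64(x) \
            case glue(glue(INDEX_op_, x), _i64):
    #else
    # define CASE_32_64(x) \
            case glue(glue(INDEX_op_, x), _i32):
    # define CASE_64(x)
    #endif

With that, CASE_32_64(add) expands to both "case INDEX_op_add_i32:" and "case INDEX_op_add_i64:" on a 64-bit host, and to just the _i32 case on a 32-bit host, so a single body can emit the three register operands for both widths.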
Deleted patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
3 | 1 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 2/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-3-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 35 ++++++++++++++--------------------- | ||
13 | 1 file changed, 14 insertions(+), 21 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | tcg_out8(s, args[2]); /* condition */ | ||
21 | tci_out_label(s, arg_label(args[3])); | ||
22 | break; | ||
23 | - case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */ | ||
24 | - case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */ | ||
25 | - case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */ | ||
26 | - case INDEX_op_not_i64: /* Optional (TCG_TARGET_HAS_not_i64). */ | ||
27 | - case INDEX_op_neg_i64: /* Optional (TCG_TARGET_HAS_neg_i64). */ | ||
28 | - case INDEX_op_ext8s_i64: /* Optional (TCG_TARGET_HAS_ext8s_i64). */ | ||
29 | - case INDEX_op_ext8u_i64: /* Optional (TCG_TARGET_HAS_ext8u_i64). */ | ||
30 | - case INDEX_op_ext16s_i64: /* Optional (TCG_TARGET_HAS_ext16s_i64). */ | ||
31 | - case INDEX_op_ext16u_i64: /* Optional (TCG_TARGET_HAS_ext16u_i64). */ | ||
32 | - case INDEX_op_ext32s_i64: /* Optional (TCG_TARGET_HAS_ext32s_i64). */ | ||
33 | - case INDEX_op_ext32u_i64: /* Optional (TCG_TARGET_HAS_ext32u_i64). */ | ||
34 | - case INDEX_op_ext_i32_i64: | ||
35 | - case INDEX_op_extu_i32_i64: | ||
36 | #endif /* TCG_TARGET_REG_BITS == 64 */ | ||
37 | - case INDEX_op_neg_i32: /* Optional (TCG_TARGET_HAS_neg_i32). */ | ||
38 | - case INDEX_op_not_i32: /* Optional (TCG_TARGET_HAS_not_i32). */ | ||
39 | - case INDEX_op_ext8s_i32: /* Optional (TCG_TARGET_HAS_ext8s_i32). */ | ||
40 | - case INDEX_op_ext16s_i32: /* Optional (TCG_TARGET_HAS_ext16s_i32). */ | ||
41 | - case INDEX_op_ext8u_i32: /* Optional (TCG_TARGET_HAS_ext8u_i32). */ | ||
42 | - case INDEX_op_ext16u_i32: /* Optional (TCG_TARGET_HAS_ext16u_i32). */ | ||
43 | - case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */ | ||
44 | - case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */ | ||
45 | + | ||
46 | + CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */ | ||
47 | + CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */ | ||
48 | + CASE_32_64(ext8s) /* Optional (TCG_TARGET_HAS_ext8s_*). */ | ||
49 | + CASE_32_64(ext8u) /* Optional (TCG_TARGET_HAS_ext8u_*). */ | ||
50 | + CASE_32_64(ext16s) /* Optional (TCG_TARGET_HAS_ext16s_*). */ | ||
51 | + CASE_32_64(ext16u) /* Optional (TCG_TARGET_HAS_ext16u_*). */ | ||
52 | + CASE_64(ext32s) /* Optional (TCG_TARGET_HAS_ext32s_i64). */ | ||
53 | + CASE_64(ext32u) /* Optional (TCG_TARGET_HAS_ext32u_i64). */ | ||
54 | + CASE_64(ext_i32) | ||
55 | + CASE_64(extu_i32) | ||
56 | + CASE_32_64(bswap16) /* Optional (TCG_TARGET_HAS_bswap16_*). */ | ||
57 | + CASE_32_64(bswap32) /* Optional (TCG_TARGET_HAS_bswap32_*). */ | ||
58 | + CASE_64(bswap64) /* Optional (TCG_TARGET_HAS_bswap64_i64). */ | ||
59 | tcg_out_r(s, args[0]); | ||
60 | tcg_out_r(s, args[1]); | ||
61 | break; | ||
62 | -- | ||
63 | 2.25.1 | ||
64 | |||
Deleted patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
3 | 1 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 3/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-4-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 12 ++---------- | ||
13 | 1 file changed, 2 insertions(+), 10 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | tcg_out_r(s, args[1]); | ||
21 | tcg_out_r(s, args[2]); | ||
22 | break; | ||
23 | - case INDEX_op_deposit_i32: /* Optional (TCG_TARGET_HAS_deposit_i32). */ | ||
24 | + | ||
25 | + CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */ | ||
26 | tcg_out_r(s, args[0]); | ||
27 | tcg_out_r(s, args[1]); | ||
28 | tcg_out_r(s, args[2]); | ||
29 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
30 | break; | ||
31 | |||
32 | #if TCG_TARGET_REG_BITS == 64 | ||
33 | - case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */ | ||
34 | - tcg_out_r(s, args[0]); | ||
35 | - tcg_out_r(s, args[1]); | ||
36 | - tcg_out_r(s, args[2]); | ||
37 | - tcg_debug_assert(args[3] <= UINT8_MAX); | ||
38 | - tcg_out8(s, args[3]); | ||
39 | - tcg_debug_assert(args[4] <= UINT8_MAX); | ||
40 | - tcg_out8(s, args[4]); | ||
41 | - break; | ||
42 | case INDEX_op_brcond_i64: | ||
43 | tcg_out_r(s, args[0]); | ||
44 | tcg_out_r(s, args[1]); | ||
45 | -- | ||
46 | 2.25.1 | ||
47 | |||
Deleted patch | |||
---|---|---|---|
1 | Use CASE_32_64 and CASE_64 to reduce ifdefs and merge | ||
2 | cases that are identical between 32-bit and 64-bit hosts. | ||
3 | 1 | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org> | ||
7 | [PMD: Split patch as 4/5] | ||
8 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Message-Id: <20210218232840.1760806-5-f4bug@amsat.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | tcg/tci/tcg-target.c.inc | 23 ++++++----------------- | ||
13 | 1 file changed, 6 insertions(+), 17 deletions(-) | ||
14 | |||
15 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tcg/tci/tcg-target.c.inc | ||
18 | +++ b/tcg/tci/tcg-target.c.inc | ||
19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
20 | } | ||
21 | set_jmp_reset_offset(s, args[0]); | ||
22 | break; | ||
23 | + | ||
24 | case INDEX_op_br: | ||
25 | tci_out_label(s, arg_label(args[0])); | ||
26 | break; | ||
27 | - case INDEX_op_setcond_i32: | ||
28 | + | ||
29 | + CASE_32_64(setcond) | ||
30 | tcg_out_r(s, args[0]); | ||
31 | tcg_out_r(s, args[1]); | ||
32 | tcg_out_r(s, args[2]); | ||
33 | tcg_out8(s, args[3]); /* condition */ | ||
34 | break; | ||
35 | + | ||
36 | #if TCG_TARGET_REG_BITS == 32 | ||
37 | case INDEX_op_setcond2_i32: | ||
38 | /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */ | ||
39 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
40 | tcg_out_r(s, args[4]); | ||
41 | tcg_out8(s, args[5]); /* condition */ | ||
42 | break; | ||
43 | -#elif TCG_TARGET_REG_BITS == 64 | ||
44 | - case INDEX_op_setcond_i64: | ||
45 | - tcg_out_r(s, args[0]); | ||
46 | - tcg_out_r(s, args[1]); | ||
47 | - tcg_out_r(s, args[2]); | ||
48 | - tcg_out8(s, args[3]); /* condition */ | ||
49 | - break; | ||
50 | #endif | ||
51 | case INDEX_op_ld8u_i32: | ||
52 | case INDEX_op_ld8s_i32: | ||
53 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
54 | tcg_out8(s, args[4]); | ||
55 | break; | ||
56 | |||
57 | -#if TCG_TARGET_REG_BITS == 64 | ||
58 | - case INDEX_op_brcond_i64: | ||
59 | + CASE_32_64(brcond) | ||
60 | tcg_out_r(s, args[0]); | ||
61 | tcg_out_r(s, args[1]); | ||
62 | tcg_out8(s, args[2]); /* condition */ | ||
63 | tci_out_label(s, arg_label(args[3])); | ||
64 | break; | ||
65 | -#endif /* TCG_TARGET_REG_BITS == 64 */ | ||
66 | |||
67 | CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */ | ||
68 | CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */ | ||
69 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | ||
70 | tcg_out_r(s, args[3]); | ||
71 | break; | ||
72 | #endif | ||
73 | - case INDEX_op_brcond_i32: | ||
74 | - tcg_out_r(s, args[0]); | ||
75 | - tcg_out_r(s, args[1]); | ||
76 | - tcg_out8(s, args[2]); /* condition */ | ||
77 | - tci_out_label(s, arg_label(args[3])); | ||
78 | - break; | ||
79 | + | ||
80 | case INDEX_op_qemu_ld_i32: | ||
81 | tcg_out_r(s, *args++); | ||
82 | tcg_out_r(s, *args++); | ||
83 | -- | ||
84 | 2.25.1 | ||
85 | |||
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext8u opcodes, and allow truncation | ||
2 | to happen with the store for st8 opcodes. | ||
3 | 1 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | --- | ||
7 | tcg/tci.c | 23 +++++------------------ | ||
8 | 1 file changed, 5 insertions(+), 18 deletions(-) | ||
9 | |||
10 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/tcg/tci.c | ||
13 | +++ b/tcg/tci.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
15 | } | ||
16 | #endif | ||
17 | |||
18 | -static uint8_t tci_read_reg8(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (uint8_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | - | ||
23 | static uint16_t tci_read_reg16(const tcg_target_ulong *regs, TCGReg index) | ||
24 | { | ||
25 | return (uint16_t)tci_read_reg(regs, index); | ||
26 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
27 | return value; | ||
28 | } | ||
29 | |||
30 | -/* Read indexed register (8 bit) from bytecode. */ | ||
31 | -static uint8_t tci_read_r8(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
32 | -{ | ||
33 | - uint8_t value = tci_read_reg8(regs, **tb_ptr); | ||
34 | - *tb_ptr += 1; | ||
35 | - return value; | ||
36 | -} | ||
37 | - | ||
38 | #if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 | ||
39 | /* Read indexed register (8 bit signed) from bytecode. */ | ||
40 | static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
41 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
42 | tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2)); | ||
43 | break; | ||
44 | CASE_32_64(st8) | ||
45 | - t0 = tci_read_r8(regs, &tb_ptr); | ||
46 | + t0 = tci_read_r(regs, &tb_ptr); | ||
47 | t1 = tci_read_r(regs, &tb_ptr); | ||
48 | t2 = tci_read_s32(&tb_ptr); | ||
49 | *(uint8_t *)(t1 + t2) = t0; | ||
50 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
51 | #if TCG_TARGET_HAS_ext8u_i32 | ||
52 | case INDEX_op_ext8u_i32: | ||
53 | t0 = *tb_ptr++; | ||
54 | - t1 = tci_read_r8(regs, &tb_ptr); | ||
55 | - tci_write_reg(regs, t0, t1); | ||
56 | + t1 = tci_read_r(regs, &tb_ptr); | ||
57 | + tci_write_reg(regs, t0, (uint8_t)t1); | ||
58 | break; | ||
59 | #endif | ||
60 | #if TCG_TARGET_HAS_ext16u_i32 | ||
61 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
62 | #if TCG_TARGET_HAS_ext8u_i64 | ||
63 | case INDEX_op_ext8u_i64: | ||
64 | t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r8(regs, &tb_ptr); | ||
66 | - tci_write_reg(regs, t0, t1); | ||
67 | + t1 = tci_read_r(regs, &tb_ptr); | ||
68 | + tci_write_reg(regs, t0, (uint8_t)t1); | ||
69 | break; | ||
70 | #endif | ||
71 | #if TCG_TARGET_HAS_ext8s_i64 | ||
72 | -- | ||
73 | 2.25.1 | ||
74 | |||
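The reason the 8-bit read helper can go entirely: the truncation it performed happens anyway in C, implicitly at a narrow store and explicitly via a cast. A standalone sketch of the two patterns used in the hunk (demo code, not QEMU source):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t t0 = 0x1234567890abcdefull;   /* full register value */
        uint8_t mem;

        /* st8: assigning to a uint8_t object truncates at the store */
        mem = t0;
        assert(mem == 0xef);

        /* ext8u: the explicit cast truncates, then widens back */
        uint64_t ext = (uint8_t)t0;
        assert(ext == 0xef);
        return 0;
    }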
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext8s opcodes. | ||
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 25 ++++--------------------- | ||
7 | 1 file changed, 4 insertions(+), 21 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) | ||
14 | return regs[index]; | ||
15 | } | ||
16 | |||
17 | -#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 | ||
18 | -static int8_t tci_read_reg8s(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (int8_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | -#endif | ||
23 | - | ||
24 | #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
25 | static int16_t tci_read_reg16s(const tcg_target_ulong *regs, TCGReg index) | ||
26 | { | ||
27 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
28 | return value; | ||
29 | } | ||
30 | |||
31 | -#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 | ||
32 | -/* Read indexed register (8 bit signed) from bytecode. */ | ||
33 | -static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
34 | -{ | ||
35 | - int8_t value = tci_read_reg8s(regs, **tb_ptr); | ||
36 | - *tb_ptr += 1; | ||
37 | - return value; | ||
38 | -} | ||
39 | -#endif | ||
40 | - | ||
41 | /* Read indexed register (16 bit) from bytecode. */ | ||
42 | static uint16_t tci_read_r16(const tcg_target_ulong *regs, | ||
43 | const uint8_t **tb_ptr) | ||
44 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
45 | #if TCG_TARGET_HAS_ext8s_i32 | ||
46 | case INDEX_op_ext8s_i32: | ||
47 | t0 = *tb_ptr++; | ||
48 | - t1 = tci_read_r8s(regs, &tb_ptr); | ||
49 | - tci_write_reg(regs, t0, t1); | ||
50 | + t1 = tci_read_r(regs, &tb_ptr); | ||
51 | + tci_write_reg(regs, t0, (int8_t)t1); | ||
52 | break; | ||
53 | #endif | ||
54 | #if TCG_TARGET_HAS_ext16s_i32 | ||
55 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
56 | #if TCG_TARGET_HAS_ext8s_i64 | ||
57 | case INDEX_op_ext8s_i64: | ||
58 | t0 = *tb_ptr++; | ||
59 | - t1 = tci_read_r8s(regs, &tb_ptr); | ||
60 | - tci_write_reg(regs, t0, t1); | ||
61 | + t1 = tci_read_r(regs, &tb_ptr); | ||
62 | + tci_write_reg(regs, t0, (int8_t)t1); | ||
63 | break; | ||
64 | #endif | ||
65 | #if TCG_TARGET_HAS_ext16s_i64 | ||
66 | -- | ||
67 | 2.25.1 | ||
68 | |||
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext16u opcodes, and allow truncation | ||
2 | to happen with the store for st16 opcodes, and with the call | ||
3 | for bswap16 opcodes. | ||
4 | 1 | ||
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | tcg/tci.c | 28 +++++++--------------------- | ||
9 | 1 file changed, 7 insertions(+), 21 deletions(-) | ||
10 | |||
11 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/tcg/tci.c | ||
14 | +++ b/tcg/tci.c | ||
15 | @@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
16 | } | ||
17 | #endif | ||
18 | |||
19 | -static uint16_t tci_read_reg16(const tcg_target_ulong *regs, TCGReg index) | ||
20 | -{ | ||
21 | - return (uint16_t)tci_read_reg(regs, index); | ||
22 | -} | ||
23 | - | ||
24 | static uint32_t tci_read_reg32(const tcg_target_ulong *regs, TCGReg index) | ||
25 | { | ||
26 | return (uint32_t)tci_read_reg(regs, index); | ||
27 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
28 | return value; | ||
29 | } | ||
30 | |||
31 | -/* Read indexed register (16 bit) from bytecode. */ | ||
32 | -static uint16_t tci_read_r16(const tcg_target_ulong *regs, | ||
33 | - const uint8_t **tb_ptr) | ||
34 | -{ | ||
35 | - uint16_t value = tci_read_reg16(regs, **tb_ptr); | ||
36 | - *tb_ptr += 1; | ||
37 | - return value; | ||
38 | -} | ||
39 | - | ||
40 | #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
41 | /* Read indexed register (16 bit signed) from bytecode. */ | ||
42 | static int16_t tci_read_r16s(const tcg_target_ulong *regs, | ||
43 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
44 | *(uint8_t *)(t1 + t2) = t0; | ||
45 | break; | ||
46 | CASE_32_64(st16) | ||
47 | - t0 = tci_read_r16(regs, &tb_ptr); | ||
48 | + t0 = tci_read_r(regs, &tb_ptr); | ||
49 | t1 = tci_read_r(regs, &tb_ptr); | ||
50 | t2 = tci_read_s32(&tb_ptr); | ||
51 | *(uint16_t *)(t1 + t2) = t0; | ||
52 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
53 | #if TCG_TARGET_HAS_ext16u_i32 | ||
54 | case INDEX_op_ext16u_i32: | ||
55 | t0 = *tb_ptr++; | ||
56 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
57 | - tci_write_reg(regs, t0, t1); | ||
58 | + t1 = tci_read_r(regs, &tb_ptr); | ||
59 | + tci_write_reg(regs, t0, (uint16_t)t1); | ||
60 | break; | ||
61 | #endif | ||
62 | #if TCG_TARGET_HAS_bswap16_i32 | ||
63 | case INDEX_op_bswap16_i32: | ||
64 | t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
66 | + t1 = tci_read_r(regs, &tb_ptr); | ||
67 | tci_write_reg(regs, t0, bswap16(t1)); | ||
68 | break; | ||
69 | #endif | ||
70 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
71 | #if TCG_TARGET_HAS_ext16u_i64 | ||
72 | case INDEX_op_ext16u_i64: | ||
73 | t0 = *tb_ptr++; | ||
74 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
75 | - tci_write_reg(regs, t0, t1); | ||
76 | + t1 = tci_read_r(regs, &tb_ptr); | ||
77 | + tci_write_reg(regs, t0, (uint16_t)t1); | ||
78 | break; | ||
79 | #endif | ||
80 | #if TCG_TARGET_HAS_ext32s_i64 | ||
81 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
82 | #if TCG_TARGET_HAS_bswap16_i64 | ||
83 | case INDEX_op_bswap16_i64: | ||
84 | t0 = *tb_ptr++; | ||
85 | - t1 = tci_read_r16(regs, &tb_ptr); | ||
86 | + t1 = tci_read_r(regs, &tb_ptr); | ||
87 | tci_write_reg(regs, t0, bswap16(t1)); | ||
88 | break; | ||
89 | #endif | ||
90 | -- | ||
91 | 2.25.1 | ||
92 | |||
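The bswap16 case in the patch above uses the same idea at a call boundary: because bswap16() takes a uint16_t argument, passing the full register value truncates at the call itself, so the dedicated 16-bit read helper is redundant there too. A minimal self-contained illustration, with bswap16 re-implemented locally as a stand-in for qemu/bswap.h:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t bswap16(uint16_t x)
    {
        return (uint16_t)((x >> 8) | (x << 8));
    }

    int main(void)
    {
        uint64_t t1 = 0xdeadbeefcafe1234ull;
        /* the uint16_t parameter truncates t1 to 0x1234 before the swap */
        assert(bswap16(t1) == 0x3412);
        return 0;
    }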
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext16s opcodes. | ||
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 26 ++++---------------------- | ||
7 | 1 file changed, 4 insertions(+), 22 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) | ||
14 | return regs[index]; | ||
15 | } | ||
16 | |||
17 | -#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
18 | -static int16_t tci_read_reg16s(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (int16_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | -#endif | ||
23 | - | ||
24 | #if TCG_TARGET_REG_BITS == 64 | ||
25 | static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
26 | { | ||
27 | @@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr) | ||
28 | return value; | ||
29 | } | ||
30 | |||
31 | -#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
32 | -/* Read indexed register (16 bit signed) from bytecode. */ | ||
33 | -static int16_t tci_read_r16s(const tcg_target_ulong *regs, | ||
34 | - const uint8_t **tb_ptr) | ||
35 | -{ | ||
36 | - int16_t value = tci_read_reg16s(regs, **tb_ptr); | ||
37 | - *tb_ptr += 1; | ||
38 | - return value; | ||
39 | -} | ||
40 | -#endif | ||
41 | - | ||
42 | /* Read indexed register (32 bit) from bytecode. */ | ||
43 | static uint32_t tci_read_r32(const tcg_target_ulong *regs, | ||
44 | const uint8_t **tb_ptr) | ||
45 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
46 | #if TCG_TARGET_HAS_ext16s_i32 | ||
47 | case INDEX_op_ext16s_i32: | ||
48 | t0 = *tb_ptr++; | ||
49 | - t1 = tci_read_r16s(regs, &tb_ptr); | ||
50 | - tci_write_reg(regs, t0, t1); | ||
51 | + t1 = tci_read_r(regs, &tb_ptr); | ||
52 | + tci_write_reg(regs, t0, (int16_t)t1); | ||
53 | break; | ||
54 | #endif | ||
55 | #if TCG_TARGET_HAS_ext8u_i32 | ||
56 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
57 | #if TCG_TARGET_HAS_ext16s_i64 | ||
58 | case INDEX_op_ext16s_i64: | ||
59 | t0 = *tb_ptr++; | ||
60 | - t1 = tci_read_r16s(regs, &tb_ptr); | ||
61 | - tci_write_reg(regs, t0, t1); | ||
62 | + t1 = tci_read_r(regs, &tb_ptr); | ||
63 | + tci_write_reg(regs, t0, (int16_t)t1); | ||
64 | break; | ||
65 | #endif | ||
66 | #if TCG_TARGET_HAS_ext16u_i64 | ||
67 | -- | ||
68 | 2.25.1 | ||
69 | |||
Deleted patch | |||
---|---|---|---|
1 | Use explicit casts for ext32s opcodes. | ||
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 20 ++------------------ | ||
7 | 1 file changed, 2 insertions(+), 18 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index) | ||
14 | return regs[index]; | ||
15 | } | ||
16 | |||
17 | -#if TCG_TARGET_REG_BITS == 64 | ||
18 | -static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index) | ||
19 | -{ | ||
20 | - return (int32_t)tci_read_reg(regs, index); | ||
21 | -} | ||
22 | -#endif | ||
23 | - | ||
24 | #if TCG_TARGET_REG_BITS == 64 | ||
25 | static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index) | ||
26 | { | ||
27 | @@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
28 | return tci_uint64(tci_read_r(regs, tb_ptr), low); | ||
29 | } | ||
30 | #elif TCG_TARGET_REG_BITS == 64 | ||
31 | -/* Read indexed register (32 bit signed) from bytecode. */ | ||
32 | -static int32_t tci_read_r32s(const tcg_target_ulong *regs, | ||
33 | - const uint8_t **tb_ptr) | ||
34 | -{ | ||
35 | - int32_t value = tci_read_reg32s(regs, **tb_ptr); | ||
36 | - *tb_ptr += 1; | ||
37 | - return value; | ||
38 | -} | ||
39 | - | ||
40 | /* Read indexed register (64 bit) from bytecode. */ | ||
41 | static uint64_t tci_read_r64(const tcg_target_ulong *regs, | ||
42 | const uint8_t **tb_ptr) | ||
43 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
44 | #endif | ||
45 | case INDEX_op_ext_i32_i64: | ||
46 | t0 = *tb_ptr++; | ||
47 | - t1 = tci_read_r32s(regs, &tb_ptr); | ||
48 | - tci_write_reg(regs, t0, t1); | ||
49 | + t1 = tci_read_r(regs, &tb_ptr); | ||
50 | + tci_write_reg(regs, t0, (int32_t)t1); | ||
51 | break; | ||
52 | #if TCG_TARGET_HAS_ext32u_i64 | ||
53 | case INDEX_op_ext32u_i64: | ||
54 | -- | ||
55 | 2.25.1 | ||
56 | |||
Deleted patch | |||
---|---|---|---|
1 | This includes ext8s, ext8u, ext16s, ext16u. | ||
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 44 ++++++++------------------------------------ | ||
7 | 1 file changed, 8 insertions(+), 36 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
14 | tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64); | ||
15 | break; | ||
16 | #endif /* TCG_TARGET_REG_BITS == 32 */ | ||
17 | -#if TCG_TARGET_HAS_ext8s_i32 | ||
18 | - case INDEX_op_ext8s_i32: | ||
19 | +#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 | ||
20 | + CASE_32_64(ext8s) | ||
21 | t0 = *tb_ptr++; | ||
22 | t1 = tci_read_r(regs, &tb_ptr); | ||
23 | tci_write_reg(regs, t0, (int8_t)t1); | ||
24 | break; | ||
25 | #endif | ||
26 | -#if TCG_TARGET_HAS_ext16s_i32 | ||
27 | - case INDEX_op_ext16s_i32: | ||
28 | +#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 | ||
29 | + CASE_32_64(ext16s) | ||
30 | t0 = *tb_ptr++; | ||
31 | t1 = tci_read_r(regs, &tb_ptr); | ||
32 | tci_write_reg(regs, t0, (int16_t)t1); | ||
33 | break; | ||
34 | #endif | ||
35 | -#if TCG_TARGET_HAS_ext8u_i32 | ||
36 | - case INDEX_op_ext8u_i32: | ||
37 | +#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64 | ||
38 | + CASE_32_64(ext8u) | ||
39 | t0 = *tb_ptr++; | ||
40 | t1 = tci_read_r(regs, &tb_ptr); | ||
41 | tci_write_reg(regs, t0, (uint8_t)t1); | ||
42 | break; | ||
43 | #endif | ||
44 | -#if TCG_TARGET_HAS_ext16u_i32 | ||
45 | - case INDEX_op_ext16u_i32: | ||
46 | +#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64 | ||
47 | + CASE_32_64(ext16u) | ||
48 | t0 = *tb_ptr++; | ||
49 | t1 = tci_read_r(regs, &tb_ptr); | ||
50 | tci_write_reg(regs, t0, (uint16_t)t1); | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | continue; | ||
53 | } | ||
54 | break; | ||
55 | -#if TCG_TARGET_HAS_ext8u_i64 | ||
56 | - case INDEX_op_ext8u_i64: | ||
57 | - t0 = *tb_ptr++; | ||
58 | - t1 = tci_read_r(regs, &tb_ptr); | ||
59 | - tci_write_reg(regs, t0, (uint8_t)t1); | ||
60 | - break; | ||
61 | -#endif | ||
62 | -#if TCG_TARGET_HAS_ext8s_i64 | ||
63 | - case INDEX_op_ext8s_i64: | ||
64 | - t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r(regs, &tb_ptr); | ||
66 | - tci_write_reg(regs, t0, (int8_t)t1); | ||
67 | - break; | ||
68 | -#endif | ||
69 | -#if TCG_TARGET_HAS_ext16s_i64 | ||
70 | - case INDEX_op_ext16s_i64: | ||
71 | - t0 = *tb_ptr++; | ||
72 | - t1 = tci_read_r(regs, &tb_ptr); | ||
73 | - tci_write_reg(regs, t0, (int16_t)t1); | ||
74 | - break; | ||
75 | -#endif | ||
76 | -#if TCG_TARGET_HAS_ext16u_i64 | ||
77 | - case INDEX_op_ext16u_i64: | ||
78 | - t0 = *tb_ptr++; | ||
79 | - t1 = tci_read_r(regs, &tb_ptr); | ||
80 | - tci_write_reg(regs, t0, (uint16_t)t1); | ||
81 | - break; | ||
82 | -#endif | ||
83 | #if TCG_TARGET_HAS_ext32s_i64 | ||
84 | case INDEX_op_ext32s_i64: | ||
85 | #endif | ||
86 | -- | ||
87 | 2.25.1 | ||
88 | |||
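One subtlety in the merged interpreter cases above: each guard now ORs the 32-bit and 64-bit feature macros, so on a host that implements only one of the two, the shared CASE_32_64 label also accepts the opcode of the other width. That is harmless, since the code generator never emits an opcode whose TCG_TARGET_HAS_* flag is clear. The resulting shape, lifted from the hunk with comments added:

    #if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            t0 = *tb_ptr++;                       /* destination register */
            t1 = tci_read_r(regs, &tb_ptr);       /* full source register */
            tci_write_reg(regs, t0, (int8_t)t1);  /* sign-extend low byte */
            break;
    #endif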
Deleted patch | |||
---|---|---|---|
1 | This includes bswap16 and bswap32. | ||
2 | 1 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
5 | --- | ||
6 | tcg/tci.c | 22 ++++------------------ | ||
7 | 1 file changed, 4 insertions(+), 18 deletions(-) | ||
8 | |||
9 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tcg/tci.c | ||
12 | +++ b/tcg/tci.c | ||
13 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
14 | tci_write_reg(regs, t0, (uint16_t)t1); | ||
15 | break; | ||
16 | #endif | ||
17 | -#if TCG_TARGET_HAS_bswap16_i32 | ||
18 | - case INDEX_op_bswap16_i32: | ||
19 | +#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 | ||
20 | + CASE_32_64(bswap16) | ||
21 | t0 = *tb_ptr++; | ||
22 | t1 = tci_read_r(regs, &tb_ptr); | ||
23 | tci_write_reg(regs, t0, bswap16(t1)); | ||
24 | break; | ||
25 | #endif | ||
26 | -#if TCG_TARGET_HAS_bswap32_i32 | ||
27 | - case INDEX_op_bswap32_i32: | ||
28 | +#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64 | ||
29 | + CASE_32_64(bswap32) | ||
30 | t0 = *tb_ptr++; | ||
31 | t1 = tci_read_r(regs, &tb_ptr); | ||
32 | tci_write_reg(regs, t0, bswap32(t1)); | ||
33 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
34 | t1 = tci_read_r(regs, &tb_ptr); | ||
35 | tci_write_reg(regs, t0, (uint32_t)t1); | ||
36 | break; | ||
37 | -#if TCG_TARGET_HAS_bswap16_i64 | ||
38 | - case INDEX_op_bswap16_i64: | ||
39 | - t0 = *tb_ptr++; | ||
40 | - t1 = tci_read_r(regs, &tb_ptr); | ||
41 | - tci_write_reg(regs, t0, bswap16(t1)); | ||
42 | - break; | ||
43 | -#endif | ||
44 | -#if TCG_TARGET_HAS_bswap32_i64 | ||
45 | - case INDEX_op_bswap32_i64: | ||
46 | - t0 = *tb_ptr++; | ||
47 | - t1 = tci_read_r(regs, &tb_ptr); | ||
48 | - tci_write_reg(regs, t0, bswap32(t1)); | ||
49 | - break; | ||
50 | -#endif | ||
51 | #if TCG_TARGET_HAS_bswap64_i64 | ||
52 | case INDEX_op_bswap64_i64: | ||
53 | t0 = *tb_ptr++; | ||
54 | -- | ||
55 | 2.25.1 | ||
56 | |||
Deleted patch | |||
---|---|---|---|
1 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
3 | --- | ||
4 | tcg/tci.c | 29 +++++------------------------ | ||
5 | 1 file changed, 5 insertions(+), 24 deletions(-) | ||
6 | 1 | ||
7 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
8 | index XXXXXXX..XXXXXXX 100644 | ||
9 | --- a/tcg/tci.c | ||
10 | +++ b/tcg/tci.c | ||
11 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
12 | tci_write_reg(regs, t0, tci_compare64(t1, t2, condition)); | ||
13 | break; | ||
14 | #endif | ||
15 | - case INDEX_op_mov_i32: | ||
16 | + CASE_32_64(mov) | ||
17 | t0 = *tb_ptr++; | ||
18 | t1 = tci_read_r(regs, &tb_ptr); | ||
19 | tci_write_reg(regs, t0, t1); | ||
20 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
21 | tci_write_reg(regs, t0, bswap32(t1)); | ||
22 | break; | ||
23 | #endif | ||
24 | -#if TCG_TARGET_HAS_not_i32 | ||
25 | - case INDEX_op_not_i32: | ||
26 | +#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64 | ||
27 | + CASE_32_64(not) | ||
28 | t0 = *tb_ptr++; | ||
29 | t1 = tci_read_r(regs, &tb_ptr); | ||
30 | tci_write_reg(regs, t0, ~t1); | ||
31 | break; | ||
32 | #endif | ||
33 | -#if TCG_TARGET_HAS_neg_i32 | ||
34 | - case INDEX_op_neg_i32: | ||
35 | +#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64 | ||
36 | + CASE_32_64(neg) | ||
37 | t0 = *tb_ptr++; | ||
38 | t1 = tci_read_r(regs, &tb_ptr); | ||
39 | tci_write_reg(regs, t0, -t1); | ||
40 | break; | ||
41 | #endif | ||
42 | #if TCG_TARGET_REG_BITS == 64 | ||
43 | - case INDEX_op_mov_i64: | ||
44 | - t0 = *tb_ptr++; | ||
45 | - t1 = tci_read_r(regs, &tb_ptr); | ||
46 | - tci_write_reg(regs, t0, t1); | ||
47 | - break; | ||
48 | case INDEX_op_tci_movi_i64: | ||
49 | t0 = *tb_ptr++; | ||
50 | t1 = tci_read_i64(&tb_ptr); | ||
51 | @@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, | ||
52 | tci_write_reg(regs, t0, bswap64(t1)); | ||
53 | break; | ||
54 | #endif | ||
55 | -#if TCG_TARGET_HAS_not_i64 | ||
56 | - case INDEX_op_not_i64: | ||
57 | - t0 = *tb_ptr++; | ||
58 | - t1 = tci_read_r(regs, &tb_ptr); | ||
59 | - tci_write_reg(regs, t0, ~t1); | ||
60 | - break; | ||
61 | -#endif | ||
62 | -#if TCG_TARGET_HAS_neg_i64 | ||
63 | - case INDEX_op_neg_i64: | ||
64 | - t0 = *tb_ptr++; | ||
65 | - t1 = tci_read_r(regs, &tb_ptr); | ||
66 | - tci_write_reg(regs, t0, -t1); | ||
67 | - break; | ||
68 | -#endif | ||
69 | #endif /* TCG_TARGET_REG_BITS == 64 */ | ||
70 | |||
71 | /* QEMU specific operations. */ | ||
72 | -- | ||
73 | 2.25.1 | ||
74 | |||
1 | From: Alex Bennée <alex.bennee@linaro.org> | 1 | From: Clement Deschamps <clement.deschamps@greensocs.com> |
---|---|---|---|
2 | 2 | ||
3 | Nothing about this compile flag is special enough to stop us from | 3 | This fixes a segmentation fault in icount mode when executing
4 | computing it with curr_cflags(), which we should be using when | 4 | from an IO region.
5 | building a new set. | 5 | ||
6 | 6 | The TB is marked as CF_NOCACHE, but tb->orig_tb is left uninitialized
7 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> | 7 | (it keeps whatever value was previously in code_gen_buffer).
8 | Message-Id: <20210224165811.11567-3-alex.bennee@linaro.org> | 7 | (equals previous value in code_gen_buffer). |
8 | |||
9 | The issue happens in cpu_io_recompile() when it tries to invalidate orig_tb. | ||
10 | |||
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Signed-off-by: Clement Deschamps <clement.deschamps@greensocs.com> | ||
13 | Message-Id: <20191022140016.918371-1-clement.deschamps@greensocs.com> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 15 | --- |
11 | include/exec/exec-all.h | 8 +++++--- | 16 | accel/tcg/translate-all.c | 1 + |
12 | include/exec/tb-lookup.h | 3 --- | 17 | 1 file changed, 1 insertion(+) |
13 | accel/tcg/cpu-exec.c | 9 ++++----- | ||
14 | accel/tcg/tcg-runtime.c | 2 +- | ||
15 | accel/tcg/translate-all.c | 6 +++--- | ||
16 | softmmu/physmem.c | 2 +- | ||
17 | 6 files changed, 14 insertions(+), 16 deletions(-) | ||
18 | 18 | ||
19 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/exec/exec-all.h | ||
22 | +++ b/include/exec/exec-all.h | ||
23 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t tb_cflags(const TranslationBlock *tb) | ||
24 | } | ||
25 | |||
26 | /* current cflags for hashing/comparison */ | ||
27 | -static inline uint32_t curr_cflags(void) | ||
28 | +static inline uint32_t curr_cflags(CPUState *cpu) | ||
29 | { | ||
30 | - return (parallel_cpus ? CF_PARALLEL : 0) | ||
31 | - | (icount_enabled() ? CF_USE_ICOUNT : 0); | ||
32 | + uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cpu->cluster_index); | ||
33 | + cflags |= parallel_cpus ? CF_PARALLEL : 0; | ||
34 | + cflags |= icount_enabled() ? CF_USE_ICOUNT : 0; | ||
35 | + return cflags; | ||
36 | } | ||
37 | |||
38 | /* TranslationBlock invalidate API */ | ||
39 | diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/include/exec/tb-lookup.h | ||
42 | +++ b/include/exec/tb-lookup.h | ||
43 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | ||
44 | hash = tb_jmp_cache_hash_func(pc); | ||
45 | tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); | ||
46 | |||
47 | - cf_mask &= ~CF_CLUSTER_MASK; | ||
48 | - cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT; | ||
49 | - | ||
50 | if (likely(tb && | ||
51 | tb->pc == pc && | ||
52 | tb->cs_base == cs_base && | ||
53 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
54 | index XXXXXXX..XXXXXXX 100644 | ||
55 | --- a/accel/tcg/cpu-exec.c | ||
56 | +++ b/accel/tcg/cpu-exec.c | ||
57 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) | ||
58 | TranslationBlock *tb; | ||
59 | target_ulong cs_base, pc; | ||
60 | uint32_t flags; | ||
61 | - uint32_t cflags = 1; | ||
62 | - uint32_t cf_mask = cflags & CF_HASH_MASK; | ||
63 | + uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1; | ||
64 | int tb_exit; | ||
65 | |||
66 | if (sigsetjmp(cpu->jmp_env, 0) == 0) { | ||
67 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu) | ||
68 | cpu->running = true; | ||
69 | |||
70 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
71 | - tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
72 | + tb = tb_lookup(cpu, pc, cs_base, flags, cflags); | ||
73 | |||
74 | if (tb == NULL) { | ||
75 | mmap_lock(); | ||
76 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) | ||
77 | if (replay_has_exception() | ||
78 | && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) { | ||
79 | /* Execute just one insn to trigger exception pending in the log */ | ||
80 | - cpu->cflags_next_tb = (curr_cflags() & ~CF_USE_ICOUNT) | 1; | ||
81 | + cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1; | ||
82 | } | ||
83 | #endif | ||
84 | return false; | ||
85 | @@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu) | ||
86 | have CF_INVALID set, -1 is a convenient invalid value that | ||
87 | does not require tcg headers for cpu_common_reset. */ | ||
88 | if (cflags == -1) { | ||
89 | - cflags = curr_cflags(); | ||
90 | + cflags = curr_cflags(cpu); | ||
91 | } else { | ||
92 | cpu->cflags_next_tb = -1; | ||
93 | } | ||
94 | diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c | ||
95 | index XXXXXXX..XXXXXXX 100644 | ||
96 | --- a/accel/tcg/tcg-runtime.c | ||
97 | +++ b/accel/tcg/tcg-runtime.c | ||
98 | @@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) | ||
99 | |||
100 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
101 | |||
102 | - tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags()); | ||
103 | + tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu)); | ||
104 | if (tb == NULL) { | ||
105 | return tcg_code_gen_epilogue; | ||
106 | } | ||
107 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | 19 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c |
108 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
109 | --- a/accel/tcg/translate-all.c | 21 | --- a/accel/tcg/translate-all.c |
110 | +++ b/accel/tcg/translate-all.c | 22 | +++ b/accel/tcg/translate-all.c |
111 | @@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, | 23 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, |
112 | if (current_tb_modified) { | 24 | tb->cs_base = cs_base; |
113 | page_collection_unlock(pages); | 25 | tb->flags = flags; |
114 | /* Force execution of one insn next time. */ | 26 | tb->cflags = cflags; |
115 | - cpu->cflags_next_tb = 1 | curr_cflags(); | 27 | + tb->orig_tb = NULL; |
116 | + cpu->cflags_next_tb = 1 | curr_cflags(cpu); | 28 | tb->trace_vcpu_dstate = *cpu->trace_dstate; |
117 | mmap_unlock(); | 29 | tcg_ctx->tb_cflags = cflags; |
118 | cpu_loop_exit_noexc(cpu); | 30 | tb_overflow: |
119 | } | ||
120 | @@ -XXX,XX +XXX,XX @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc) | ||
121 | #ifdef TARGET_HAS_PRECISE_SMC | ||
122 | if (current_tb_modified) { | ||
123 | /* Force execution of one insn next time. */ | ||
124 | - cpu->cflags_next_tb = 1 | curr_cflags(); | ||
125 | + cpu->cflags_next_tb = 1 | curr_cflags(cpu); | ||
126 | return true; | ||
127 | } | ||
128 | #endif | ||
129 | @@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) | ||
130 | * operations only (which execute after completion) so we don't | ||
131 | * double instrument the instruction. | ||
132 | */ | ||
133 | - cpu->cflags_next_tb = curr_cflags() | CF_MEMI_ONLY | CF_LAST_IO | n; | ||
134 | + cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n; | ||
135 | |||
136 | qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, | ||
137 | "cpu_io_recompile: rewound execution of TB to " | ||
138 | diff --git a/softmmu/physmem.c b/softmmu/physmem.c | ||
139 | index XXXXXXX..XXXXXXX 100644 | ||
140 | --- a/softmmu/physmem.c | ||
141 | +++ b/softmmu/physmem.c | ||
142 | @@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
143 | cpu_loop_exit_restore(cpu, ra); | ||
144 | } else { | ||
145 | /* Force execution of one insn next time. */ | ||
146 | - cpu->cflags_next_tb = 1 | curr_cflags(); | ||
147 | + cpu->cflags_next_tb = 1 | curr_cflags(cpu); | ||
148 | mmap_unlock(); | ||
149 | if (ra) { | ||
150 | cpu_restore_state(cpu, ra, true); | ||
151 | -- | 31 | -- |
152 | 2.25.1 | 32 | 2.17.1 |
153 | 33 | ||
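Tying the left-hand patch together: curr_cflags() now folds the CPU's cluster index into the top byte of the flags with deposit32(). Below is a self-contained sketch of that computation; deposit32() and CF_PARALLEL mirror qemu/bitops.h and the exec-all.h hunk shown above, while the CF_USE_ICOUNT value and the demo_ names are assumptions made for illustration:

    #include <assert.h>
    #include <stdint.h>

    #define CF_CLUSTER_SHIFT 24
    #define CF_PARALLEL      0x00080000
    #define CF_USE_ICOUNT    0x00020000   /* assumed value */

    static uint32_t deposit32(uint32_t value, int start, int length,
                              uint32_t fieldval)
    {
        uint32_t mask = (~0U >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    static uint32_t demo_curr_cflags(uint8_t cluster_index, int parallel,
                                     int icount)
    {
        uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cluster_index);
        cflags |= parallel ? CF_PARALLEL : 0;
        cflags |= icount ? CF_USE_ICOUNT : 0;
        return cflags;
    }

    int main(void)
    {
        /* cluster 2, parallel, no icount: top byte 0x02 plus CF_PARALLEL */
        assert(demo_curr_cflags(2, 1, 0) == ((2u << 24) | CF_PARALLEL));
        return 0;
    }

Precomputing the cluster bits here is what lets the tb_lookup() hunk drop its per-call CF_CLUSTER fixup.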
1 | From: Alex Bennée <alex.bennee@linaro.org> | 1 | Since 2ac01d6dafab, this function does only two things: assert a |
---|---|---|---|
2 | lock is held, and call tcg_tb_alloc. It is used exactly once, | ||
3 | and its user has already done the assert. | ||
2 | 4 | ||
3 | We don't really deal in cf_mask most of the time. The one time it's | 5 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> |
4 | relevant is when we want to remove an invalidated TB from the QHT | 6 | Reviewed-by: Clement Deschamps <clement.deschamps@greensocs.com> |
5 | lookup. Everywhere else we should be looking up things without | ||
6 | CF_INVALID set. | ||
7 | |||
8 | Signed-off-by: Alex Bennée <alex.bennee@linaro.org> | ||
9 | Message-Id: <20210224165811.11567-4-alex.bennee@linaro.org> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 8 | --- |
12 | include/exec/exec-all.h | 4 +--- | 9 | accel/tcg/translate-all.c | 20 ++------------------ |
13 | include/exec/tb-lookup.h | 9 ++++++--- | 10 | 1 file changed, 2 insertions(+), 18 deletions(-) |
14 | accel/tcg/cpu-exec.c | 16 ++++++++-------- | ||
15 | accel/tcg/tcg-runtime.c | 2 +- | ||
16 | accel/tcg/translate-all.c | 8 +++++--- | ||
17 | 5 files changed, 21 insertions(+), 18 deletions(-) | ||
18 | 11 | ||
19 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/exec/exec-all.h | ||
22 | +++ b/include/exec/exec-all.h | ||
23 | @@ -XXX,XX +XXX,XX @@ struct TranslationBlock { | ||
24 | #define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */ | ||
25 | #define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */ | ||
26 | #define CF_CLUSTER_SHIFT 24 | ||
27 | -/* cflags' mask for hashing/comparison, basically ignore CF_INVALID */ | ||
28 | -#define CF_HASH_MASK (~CF_INVALID) | ||
29 | |||
30 | /* Per-vCPU dynamic tracing state used to generate this TB */ | ||
31 | uint32_t trace_vcpu_dstate; | ||
32 | @@ -XXX,XX +XXX,XX @@ void tb_flush(CPUState *cpu); | ||
33 | void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); | ||
34 | TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||
35 | target_ulong cs_base, uint32_t flags, | ||
36 | - uint32_t cf_mask); | ||
37 | + uint32_t cflags); | ||
38 | void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); | ||
39 | |||
40 | /* GETPC is the true target of the return instruction that we'll execute. */ | ||
41 | diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h | ||
42 | index XXXXXXX..XXXXXXX 100644 | ||
43 | --- a/include/exec/tb-lookup.h | ||
44 | +++ b/include/exec/tb-lookup.h | ||
45 | @@ -XXX,XX +XXX,XX @@ | ||
46 | /* Might cause an exception, so have a longjmp destination ready */ | ||
47 | static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | ||
48 | target_ulong cs_base, | ||
49 | - uint32_t flags, uint32_t cf_mask) | ||
50 | + uint32_t flags, uint32_t cflags) | ||
51 | { | ||
52 | TranslationBlock *tb; | ||
53 | uint32_t hash; | ||
54 | |||
55 | + /* we should never be trying to look up an INVALID tb */ | ||
56 | + tcg_debug_assert(!(cflags & CF_INVALID)); | ||
57 | + | ||
58 | hash = tb_jmp_cache_hash_func(pc); | ||
59 | tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); | ||
60 | |||
61 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, | ||
62 | tb->cs_base == cs_base && | ||
63 | tb->flags == flags && | ||
64 | tb->trace_vcpu_dstate == *cpu->trace_dstate && | ||
65 | - (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) { | ||
66 | + tb_cflags(tb) == cflags)) { | ||
67 | return tb; | ||
68 | } | ||
69 | - tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
70 | + tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); | ||
71 | if (tb == NULL) { | ||
72 | return NULL; | ||
73 | } | ||
74 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
75 | index XXXXXXX..XXXXXXX 100644 | ||
76 | --- a/accel/tcg/cpu-exec.c | ||
77 | +++ b/accel/tcg/cpu-exec.c | ||
78 | @@ -XXX,XX +XXX,XX @@ struct tb_desc { | ||
79 | CPUArchState *env; | ||
80 | tb_page_addr_t phys_page1; | ||
81 | uint32_t flags; | ||
82 | - uint32_t cf_mask; | ||
83 | + uint32_t cflags; | ||
84 | uint32_t trace_vcpu_dstate; | ||
85 | }; | ||
86 | |||
87 | @@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||
88 | tb->cs_base == desc->cs_base && | ||
89 | tb->flags == desc->flags && | ||
90 | tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && | ||
91 | - (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) { | ||
92 | + tb_cflags(tb) == desc->cflags) { | ||
93 | /* check next page if needed */ | ||
94 | if (tb->page_addr[1] == -1) { | ||
95 | return true; | ||
96 | @@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d) | ||
97 | |||
98 | TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||
99 | target_ulong cs_base, uint32_t flags, | ||
100 | - uint32_t cf_mask) | ||
101 | + uint32_t cflags) | ||
102 | { | ||
103 | tb_page_addr_t phys_pc; | ||
104 | struct tb_desc desc; | ||
105 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||
106 | desc.env = (CPUArchState *)cpu->env_ptr; | ||
107 | desc.cs_base = cs_base; | ||
108 | desc.flags = flags; | ||
109 | - desc.cf_mask = cf_mask; | ||
110 | + desc.cflags = cflags; | ||
111 | desc.trace_vcpu_dstate = *cpu->trace_dstate; | ||
112 | desc.pc = pc; | ||
113 | phys_pc = get_page_addr_code(desc.env, pc); | ||
114 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, | ||
115 | return NULL; | ||
116 | } | ||
117 | desc.phys_page1 = phys_pc & TARGET_PAGE_MASK; | ||
118 | - h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate); | ||
119 | + h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate); | ||
120 | return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); | ||
121 | } | ||
122 | |||
123 | @@ -XXX,XX +XXX,XX @@ static inline void tb_add_jump(TranslationBlock *tb, int n, | ||
124 | |||
125 | static inline TranslationBlock *tb_find(CPUState *cpu, | ||
126 | TranslationBlock *last_tb, | ||
127 | - int tb_exit, uint32_t cf_mask) | ||
128 | + int tb_exit, uint32_t cflags) | ||
129 | { | ||
130 | CPUArchState *env = (CPUArchState *)cpu->env_ptr; | ||
131 | TranslationBlock *tb; | ||
132 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_find(CPUState *cpu, | ||
133 | |||
134 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); | ||
135 | |||
136 | - tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask); | ||
137 | + tb = tb_lookup(cpu, pc, cs_base, flags, cflags); | ||
138 | if (tb == NULL) { | ||
139 | mmap_lock(); | ||
140 | - tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask); | ||
141 | + tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); | ||
142 | mmap_unlock(); | ||
143 | /* We add the TB in the virtual pc hash table for the fast lookup */ | ||
144 | qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); | ||
145 | diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c | ||
146 | index XXXXXXX..XXXXXXX 100644 | ||
147 | --- a/accel/tcg/tcg-runtime.c | ||
148 | +++ b/accel/tcg/tcg-runtime.c | ||
149 | @@ -XXX,XX +XXX,XX @@ | ||
150 | #include "exec/helper-proto.h" | ||
151 | #include "exec/cpu_ldst.h" | ||
152 | #include "exec/exec-all.h" | ||
153 | -#include "exec/tb-lookup.h" | ||
154 | #include "disas/disas.h" | ||
155 | #include "exec/log.h" | ||
156 | #include "tcg/tcg.h" | ||
157 | +#include "exec/tb-lookup.h" | ||
158 | |||
159 | /* 32-bit helpers */ | ||
160 | |||
161 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | 12 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c |
162 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
163 | --- a/accel/tcg/translate-all.c | 14 | --- a/accel/tcg/translate-all.c |
164 | +++ b/accel/tcg/translate-all.c | 15 | +++ b/accel/tcg/translate-all.c |
165 | @@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp) | 16 | @@ -XXX,XX +XXX,XX @@ void tcg_exec_init(unsigned long tb_size) |
166 | return a->pc == b->pc && | 17 | #endif |
167 | a->cs_base == b->cs_base && | 18 | } |
168 | a->flags == b->flags && | 19 | |
169 | - (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) && | 20 | -/* |
170 | + (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && | 21 | - * Allocate a new translation block. Flush the translation buffer if |
171 | a->trace_vcpu_dstate == b->trace_vcpu_dstate && | 22 | - * too many translation blocks or too much generated code. |
172 | a->page_addr[0] == b->page_addr[0] && | 23 | - */ |
173 | a->page_addr[1] == b->page_addr[1]; | 24 | -static TranslationBlock *tb_alloc(target_ulong pc) |
174 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | 25 | -{ |
175 | PageDesc *p; | 26 | - TranslationBlock *tb; |
176 | uint32_t h; | 27 | - |
177 | tb_page_addr_t phys_pc; | 28 | - assert_memory_lock(); |
178 | + uint32_t orig_cflags = tb_cflags(tb); | 29 | - |
179 | 30 | - tb = tcg_tb_alloc(tcg_ctx); | |
31 | - if (unlikely(tb == NULL)) { | ||
32 | - return NULL; | ||
33 | - } | ||
34 | - return tb; | ||
35 | -} | ||
36 | - | ||
37 | /* call with @p->lock held */ | ||
38 | static inline void invalidate_page_bitmap(PageDesc *p) | ||
39 | { | ||
40 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
41 | TCGProfile *prof = &tcg_ctx->prof; | ||
42 | int64_t ti; | ||
43 | #endif | ||
44 | + | ||
180 | assert_memory_lock(); | 45 | assert_memory_lock(); |
181 | 46 | ||
182 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | 47 | phys_pc = get_page_addr_code(env, pc); |
183 | 48 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | |
184 | /* remove the TB from the hash list */ | ||
185 | phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); | ||
186 | - h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK, | ||
187 | + h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags, | ||
188 | tb->trace_vcpu_dstate); | ||
189 | if (!qht_remove(&tb_ctx.htable, tb, h)) { | ||
190 | return; | ||
191 | @@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | ||
192 | uint32_t h; | ||
193 | |||
194 | assert_memory_lock(); | ||
195 | + tcg_debug_assert(!(tb->cflags & CF_INVALID)); | ||
196 | |||
197 | /* | ||
198 | * Add the TB to the page list, acquiring first the pages' locks. | ||
199 | @@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, | ||
200 | } | 49 | } |
201 | 50 | ||
202 | /* add in the hash table */ | 51 | buffer_overflow: |
203 | - h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK, | 52 | - tb = tb_alloc(pc); |
204 | + h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags, | 53 | + tb = tcg_tb_alloc(tcg_ctx); |
205 | tb->trace_vcpu_dstate); | 54 | if (unlikely(!tb)) { |
206 | qht_insert(&tb_ctx.htable, tb, h, &existing_tb); | 55 | /* flush must be done */ |
207 | 56 | tb_flush(cpu); | |
208 | -- | 57 | -- |
209 | 2.25.1 | 58 | 2.17.1 |
210 | 59 | ||
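A closing sketch of what the cf_mask removal buys: lookups now pass real cflags, so matching becomes plain equality, and the new tcg_debug_assert guarantees a lookup key never carries CF_INVALID, which is exactly why an invalidated TB can never match without any masking. Demo code under an assumed CF_INVALID value; demo_lookup_cmp is illustrative, not the QEMU function:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define CF_INVALID 0x00040000   /* assumed value, mirrors exec-all.h */

    static bool demo_lookup_cmp(uint32_t tb_cflags, uint32_t key_cflags)
    {
        assert(!(key_cflags & CF_INVALID));  /* keys are never invalid */
        return tb_cflags == key_cflags;      /* was: masked comparison */
    }

    int main(void)
    {
        uint32_t key = 1;   /* e.g. a single-insn request, CF_COUNT == 1 */
        assert(demo_lookup_cmp(1, key));                /* live TB matches */
        assert(!demo_lookup_cmp(1 | CF_INVALID, key));  /* invalidated: no */
        return 0;
    }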