v2: Fix FreeBSD build error in patch 18.

r~


The following changes since commit 0d239e513e0117e66fa739fb71a43b9383a108ff:

Merge tag 'pull-lu-20231018' of https://gitlab.com/rth7680/qemu into staging (2023-10-19 10:20:57 -0700)

are available in the Git repository at:

https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20231018-2

for you to fetch changes up to a75f704d972b9408f5e2843784b3add48c724c52:

target/i386: Use i128 for 128 and 256-bit loads and stores (2023-10-19 21:11:44 -0700)

----------------------------------------------------------------
tcg: Drop unused tcg_temp_free define
tcg: Introduce tcg_use_softmmu
tcg: Optimize past conditional branches
tcg: Use constant zero when expanding with divu2
tcg/ppc: Enable direct branching tcg_out_goto_tb with TCG_REG_TB
tcg/ppc: Use ADDPCIS for power9
tcg/ppc: Use prefixed instructions for power10
tcg/ppc: Disable TCG_REG_TB for Power9/Power10

----------------------------------------------------------------
Jordan Niethe (1):
tcg/ppc: Enable direct branching tcg_out_goto_tb with TCG_REG_TB

Mike Frysinger (1):
tcg: drop unused tcg_temp_free define

Richard Henderson (27):
tcg/ppc: Untabify tcg-target.c.inc
tcg/ppc: Reinterpret tb-relative to TB+4
tcg/ppc: Use ADDPCIS in tcg_out_tb_start
tcg/ppc: Use ADDPCIS in tcg_out_movi_int
tcg/ppc: Use ADDPCIS for the constant pool
tcg/ppc: Use ADDPCIS in tcg_out_goto_tb
tcg/ppc: Use PADDI in tcg_out_movi
tcg/ppc: Use prefixed instructions in tcg_out_mem_long
tcg/ppc: Use PLD in tcg_out_movi for constant pool
tcg/ppc: Use prefixed instructions in tcg_out_dupi_vec
tcg/ppc: Use PLD in tcg_out_goto_tb
tcg/ppc: Disable TCG_REG_TB for Power9/Power10
tcg: Introduce tcg_use_softmmu
tcg: Provide guest_base fallback for system mode
tcg/arm: Use tcg_use_softmmu
tcg/aarch64: Use tcg_use_softmmu
tcg/i386: Use tcg_use_softmmu
tcg/loongarch64: Use tcg_use_softmmu
tcg/mips: Use tcg_use_softmmu
tcg/ppc: Use tcg_use_softmmu
tcg/riscv: Do not reserve TCG_GUEST_BASE_REG for guest_base zero
tcg/riscv: Use tcg_use_softmmu
tcg/s390x: Use tcg_use_softmmu
tcg: Use constant zero when expanding with divu2
tcg: Optimize past conditional branches
tcg: Add tcg_gen_{ld,st}_i128
target/i386: Use i128 for 128 and 256-bit loads and stores

include/tcg/tcg-op-common.h | 3 +
include/tcg/tcg-op.h | 2 -
include/tcg/tcg.h | 8 +-
target/i386/tcg/translate.c | 63 ++---
tcg/optimize.c | 8 +-
tcg/tcg-op-ldst.c | 14 +-
tcg/tcg-op.c | 38 ++-
tcg/tcg.c | 13 +-
tcg/aarch64/tcg-target.c.inc | 177 ++++++------
tcg/arm/tcg-target.c.inc | 203 +++++++-------
tcg/i386/tcg-target.c.inc | 198 +++++++-------
tcg/loongarch64/tcg-target.c.inc | 126 +++++----
tcg/mips/tcg-target.c.inc | 231 ++++++++--------
tcg/ppc/tcg-target.c.inc | 561 ++++++++++++++++++++++++++-------------
tcg/riscv/tcg-target.c.inc | 189 ++++++-------
tcg/s390x/tcg-target.c.inc | 161 ++++++-----
16 files changed, 1102 insertions(+), 893 deletions(-)


V2 fixes a build problem that affected win32.

r~


The following changes since commit 187f35512106501fe9a11057f4d8705431e0026d:

Merge remote-tracking branch 'remotes/stsquad/tags/pull-testing-next-251019-3' into staging (2019-10-26 10:13:48 +0100)

are available in the Git repository at:

https://github.com/rth7680/qemu.git tags/pull-tcg-20191028

for you to fetch changes up to fe9b676fb3160496b4b2bf0c57d33be724bf04c3:

translate-all: Remove tb_alloc (2019-10-28 10:35:23 +0100)

----------------------------------------------------------------
Improvements for TARGET_PAGE_BITS_VARY
Fix for TCI ld16u_i64.
Fix for segv on icount execute from i/o memory.
Two misc cleanups.

----------------------------------------------------------------
Alex Bennée (1):
cputlb: ensure _cmmu helper functions follow the naming standard

Clement Deschamps (1):
translate-all: fix uninitialized tb->orig_tb

Richard Henderson (8):
exec: Split out variable page size support to exec-vary.c
configure: Detect compiler support for __attribute__((alias))
exec: Use const alias for TARGET_PAGE_BITS_VARY
exec: Restrict TARGET_PAGE_BITS_VARY assert to CONFIG_DEBUG_TCG
exec: Promote TARGET_PAGE_MASK to target_long
exec: Cache TARGET_PAGE_MASK for TARGET_PAGE_BITS_VARY
cputlb: Fix tlb_vaddr_to_host
translate-all: Remove tb_alloc

Stefan Weil (1):
tci: Add implementation for INDEX_op_ld16u_i64

Wei Yang (1):
cpu: use ROUND_UP() to define xxx_PAGE_ALIGN

Makefile.target | 2 +-
include/exec/cpu-all.h | 33 ++++++++----
include/exec/cpu_ldst_template.h | 4 +-
include/qemu-common.h | 6 +++
tcg/tcg.h | 20 +++++---
accel/tcg/cputlb.c | 26 ++++++++--
accel/tcg/translate-all.c | 21 ++------
exec-vary.c | 108 +++++++++++++++++++++++++++++++++++++++
exec.c | 34 ------------
target/cris/translate_v10.inc.c | 3 +-
tcg/tci.c | 15 ++++++
configure | 19 +++++++
12 files changed, 214 insertions(+), 77 deletions(-)
create mode 100644 exec-vary.c

Deleted patch
From: Stefan Weil <sw@weilnetz.de>

This fixes "make check-tcg" on a Debian x86_64 host.

Signed-off-by: Stefan Weil <sw@weilnetz.de>
Tested-by: Thomas Huth <thuth@redhat.com>
Message-Id: <20190410194838.10123-1-sw@weilnetz.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tci.c | 15 +++++++++++++++
1 file changed, 15 insertions(+)

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ static void tci_write_reg8(tcg_target_ulong *regs, TCGReg index, uint8_t value)
tci_write_reg(regs, index, value);
}

+static void
+tci_write_reg16(tcg_target_ulong *regs, TCGReg index, uint16_t value)
+{
+ tci_write_reg(regs, index, value);
+}
+
static void
tci_write_reg32(tcg_target_ulong *regs, TCGReg index, uint32_t value)
{
@@ -XXX,XX +XXX,XX @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
break;
case INDEX_op_ld8s_i32:
+ TODO();
+ break;
case INDEX_op_ld16u_i32:
TODO();
break;
@@ -XXX,XX +XXX,XX @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
break;
case INDEX_op_ld8s_i64:
+ TODO();
+ break;
case INDEX_op_ld16u_i64:
+ t0 = *tb_ptr++;
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_s32(&tb_ptr);
+ tci_write_reg16(regs, t0, *(uint16_t *)(t1 + t2));
+ break;
case INDEX_op_ld16s_i64:
TODO();
break;
--
2.17.1

Deleted patch
From: Alex Bennée <alex.bennee@linaro.org>

We document this in docs/devel/load-stores.rst so let's follow it. The
32 bit and 64 bit access functions have historically not included the
sign so we leave those as is. We also introduce some signed helpers
which are used for loading immediate values in the translator.

Fixes: 282dffc8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20191021150910.23216-1-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu_ldst_template.h | 4 ++--
tcg/tcg.h | 20 ++++++++++++++------
accel/tcg/cputlb.c | 24 +++++++++++++++++++++---
target/cris/translate_v10.inc.c | 3 +--
4 files changed, 38 insertions(+), 13 deletions(-)

diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_template.h
+++ b/include/exec/cpu_ldst_template.h
@@ -XXX,XX +XXX,XX @@
#ifdef SOFTMMU_CODE_ACCESS
#define ADDR_READ addr_code
#define MMUSUFFIX _cmmu
-#define URETSUFFIX SUFFIX
-#define SRETSUFFIX SUFFIX
+#define URETSUFFIX USUFFIX
+#define SRETSUFFIX glue(s, SUFFIX)
#else
#define ADDR_READ addr_read
#define MMUSUFFIX _mmu
diff --git a/tcg/tcg.h b/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr);

-uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
-uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
-uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
# define helper_ret_stw_mmu helper_be_stw_mmu
# define helper_ret_stl_mmu helper_be_stl_mmu
# define helper_ret_stq_mmu helper_be_stq_mmu
-# define helper_ret_ldw_cmmu helper_be_ldw_cmmu
+# define helper_ret_lduw_cmmu helper_be_lduw_cmmu
+# define helper_ret_ldsw_cmmu helper_be_ldsw_cmmu
# define helper_ret_ldl_cmmu helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu helper_be_ldq_cmmu
#else
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
# define helper_ret_stw_mmu helper_le_stw_mmu
# define helper_ret_stl_mmu helper_le_stl_mmu
# define helper_ret_stq_mmu helper_le_stq_mmu
-# define helper_ret_ldw_cmmu helper_le_ldw_cmmu
+# define helper_ret_lduw_cmmu helper_le_lduw_cmmu
+# define helper_ret_ldsw_cmmu helper_le_ldsw_cmmu
# define helper_ret_ldl_cmmu helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu helper_le_ldq_cmmu
#endif
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
}

-uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return full_ldub_cmmu(env, addr, oi, retaddr);
}

+int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int8_t) full_ldub_cmmu(env, addr, oi, retaddr);
+}
+
static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
@@ -XXX,XX +XXX,XX @@ static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
full_le_lduw_cmmu);
}

-uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return full_le_lduw_cmmu(env, addr, oi, retaddr);
}

+int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t) full_le_lduw_cmmu(env, addr, oi, retaddr);
+}
+
static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
@@ -XXX,XX +XXX,XX @@ static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
full_be_lduw_cmmu);
}

-uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
+uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return full_be_lduw_cmmu(env, addr, oi, retaddr);
}

+int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t) full_be_lduw_cmmu(env, addr, oi, retaddr);
+}
+
static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
diff --git a/target/cris/translate_v10.inc.c b/target/cris/translate_v10.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate_v10.inc.c
+++ b/target/cris/translate_v10.inc.c
@@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
case CRISV10_IND_BCC_M:

cris_cc_mask(dc, 0);
- imm = cpu_ldsw_code(env, dc->pc + 2);
- simm = (int16_t)imm;
+ simm = cpu_ldsw_code(env, dc->pc + 2);
simm += 4;

LOG_DIS("bcc_m: b%s %x\n", cc_name(dc->cond), dc->pc + simm);
--
2.17.1

Deleted patch
From: Wei Yang <richardw.yang@linux.intel.com>

Use ROUND_UP() to define these, which is a little easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Message-Id: <20191013021145.16011-2-richardw.yang@linux.intel.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
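
For reference, the identity both forms rely on can be checked in isolation.
The following is a standalone sketch, not QEMU code, and it assumes the
alignment is a power of two (which is what the mask form requires):

    #include <assert.h>
    #include <stdint.h>

    /* Round addr up to the next multiple of align (align is a power of two). */
    #define ALIGN_MASK(addr, align)  (((addr) + (align) - 1) & ~((uintptr_t)(align) - 1))
    #define ALIGN_DIV(addr, align)   (((addr) + (align) - 1) / (align) * (align))

    int main(void)
    {
        const uintptr_t page = 4096;
        for (uintptr_t a = 0; a < 3 * page; a += 123) {
            assert(ALIGN_MASK(a, page) == ALIGN_DIV(a, page));
        }
        return 0;
    }
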
---
include/exec/cpu-all.h | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ extern int target_page_bits;

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
-#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
+#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)

/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
* when intptr_t is 32-bit and we are aligning a long long.
@@ -XXX,XX +XXX,XX @@ extern int target_page_bits;
extern uintptr_t qemu_host_page_size;
extern intptr_t qemu_host_page_mask;

-#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
-#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
- qemu_real_host_page_mask)
+#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
+#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)

/* same as PROT_xxx */
#define PAGE_READ 0x0001
--
2.17.1

Deleted patch
The next patch will play a trick with "const" that will
confuse the compiler about the uses of target_page_bits
within exec.c. Moving everything to a new file prevents
this confusion.

No functional change so far.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
Makefile.target | 2 +-
include/qemu-common.h | 6 +++++
exec-vary.c | 57 +++++++++++++++++++++++++++++++++++++++
exec.c | 34 --------------------------
4 files changed, 64 insertions(+), 35 deletions(-)
create mode 100644 exec-vary.c

diff --git a/Makefile.target b/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -XXX,XX +XXX,XX @@ obj-y += trace/

#########################################################
# cpu emulator library
-obj-y += exec.o
+obj-y += exec.o exec-vary.o
obj-y += accel/
obj-$(CONFIG_TCG) += tcg/tcg.o tcg/tcg-op.o tcg/tcg-op-vec.o tcg/tcg-op-gvec.o
obj-$(CONFIG_TCG) += tcg/tcg-common.o tcg/optimize.o
diff --git a/include/qemu-common.h b/include/qemu-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu);
*/
bool set_preferred_target_page_bits(int bits);

+/**
+ * finalize_target_page_bits:
+ * Commit the final value set by set_preferred_target_page_bits.
+ */
+void finalize_target_page_bits(void);
+
/**
* Sends a (part of) iovec down a socket, yielding when the socket is full, or
* Receives data into a (part of) iovec from a socket,
diff --git a/exec-vary.c b/exec-vary.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/exec-vary.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * Variable page size handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "exec/exec-all.h"
+
+#ifdef TARGET_PAGE_BITS_VARY
+int target_page_bits;
+bool target_page_bits_decided;
+#endif
+
+bool set_preferred_target_page_bits(int bits)
+{
+ /*
+ * The target page size is the lowest common denominator for all
+ * the CPUs in the system, so we can only make it smaller, never
+ * larger. And we can't make it smaller once we've committed to
+ * a particular size.
+ */
+#ifdef TARGET_PAGE_BITS_VARY
+ assert(bits >= TARGET_PAGE_BITS_MIN);
+ if (target_page_bits == 0 || target_page_bits > bits) {
+ if (target_page_bits_decided) {
+ return false;
+ }
+ target_page_bits = bits;
+ }
+#endif
+ return true;
+}
+
+void finalize_target_page_bits(void)
+{
+#ifdef TARGET_PAGE_BITS_VARY
+ if (target_page_bits == 0) {
+ target_page_bits = TARGET_PAGE_BITS_MIN;
+ }
+ target_page_bits_decided = true;
+#endif
+}
diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ AddressSpace address_space_memory;
static MemoryRegion io_mem_unassigned;
#endif

-#ifdef TARGET_PAGE_BITS_VARY
-int target_page_bits;
-bool target_page_bits_decided;
-#endif
-
CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
@@ -XXX,XX +XXX,XX @@ int use_icount;
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

-bool set_preferred_target_page_bits(int bits)
-{
- /* The target page size is the lowest common denominator for all
- * the CPUs in the system, so we can only make it smaller, never
- * larger. And we can't make it smaller once we've committed to
- * a particular size.
- */
-#ifdef TARGET_PAGE_BITS_VARY
- assert(bits >= TARGET_PAGE_BITS_MIN);
- if (target_page_bits == 0 || target_page_bits > bits) {
- if (target_page_bits_decided) {
- return false;
- }
- target_page_bits = bits;
- }
-#endif
- return true;
-}
-
#if !defined(CONFIG_USER_ONLY)

-static void finalize_target_page_bits(void)
-{
-#ifdef TARGET_PAGE_BITS_VARY
- if (target_page_bits == 0) {
- target_page_bits = TARGET_PAGE_BITS_MIN;
- }
- target_page_bits_decided = true;
-#endif
-}
-
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
--
2.17.1

Deleted patch
Such support is present almost everywhere, except for Xcode 9.
It is added in Xcode 10, but travis uses xcode9 by default,
so we should support it for a while yet.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
configure | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ if compile_prog "" "" ; then
vector16=yes
fi

+########################################
+# See if __attribute__((alias)) is supported.
+# This false for Xcode 9, but has been remedied for Xcode 10.
+# Unfortunately, travis uses Xcode 9 by default.
+
+attralias=no
+cat > $TMPC << EOF
+int x = 1;
+extern const int y __attribute__((alias("x")));
+int main(void) { return 0; }
+EOF
+if compile_prog "" "" ; then
+ attralias=yes
+fi
+
########################################
# check if getauxval is available.

@@ -XXX,XX +XXX,XX @@ if test "$vector16" = "yes" ; then
echo "CONFIG_VECTOR16=y" >> $config_host_mak
fi

+if test "$attralias" = "yes" ; then
+ echo "CONFIG_ATTRIBUTE_ALIAS=y" >> $config_host_mak
+fi
+
if test "$getauxval" = "yes" ; then
echo "CONFIG_GETAUXVAL=y" >> $config_host_mak
fi
--
2.17.1

Deleted patch
Using a variable that is declared "const" for this tells the
compiler that it may read the value once and assume that it
does not change across function calls.

For target_page_size, this means we have only one assert per
function, and one read of the variable.

This reduces the size of qemu-system-aarch64 by 8k.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
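
The shape of the trick can be sketched outside of QEMU (hypothetical names,
standalone C for GCC/Clang; the real code is in the exec-vary.c hunk below):
one translation unit keeps a writable definition and exports a const-qualified
alias of it, so everyone else reads through a symbol the compiler may treat as
never changing.

    /* page_bits.c: the only file that writes the value. */
    typedef struct { int bits; } PageBits;

    static PageBits init_page;                 /* writable definition */

    /* Read-only view of the same storage; other files would only see
     * a header declaration such as:  extern const PageBits page;  */
    extern const PageBits page __attribute__((alias("init_page")));

    void page_init(int bits)
    {
        init_page.bits = bits;                 /* write via the non-const name */
    }

    int page_shift(void)
    {
        return page.bits;                      /* reads may be cached by the compiler */
    }
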
---
include/exec/cpu-all.h | 14 ++++++---
exec-vary.c | 66 +++++++++++++++++++++++++++++++++++++-----
2 files changed, 68 insertions(+), 12 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
/* page related stuff */

#ifdef TARGET_PAGE_BITS_VARY
-extern bool target_page_bits_decided;
-extern int target_page_bits;
-#define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \
- target_page_bits; })
+typedef struct {
+ bool decided;
+ int bits;
+} TargetPageBits;
+#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
+extern const TargetPageBits target_page;
+#else
+extern TargetPageBits target_page;
+#endif
+#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#endif
diff --git a/exec-vary.c b/exec-vary.c
index XXXXXXX..XXXXXXX 100644
--- a/exec-vary.c
+++ b/exec-vary.c
@@ -XXX,XX +XXX,XX @@

#include "qemu/osdep.h"
#include "qemu-common.h"
+
+#define IN_EXEC_VARY 1
+
#include "exec/exec-all.h"

#ifdef TARGET_PAGE_BITS_VARY
-int target_page_bits;
-bool target_page_bits_decided;
+# ifdef CONFIG_ATTRIBUTE_ALIAS
+/*
+ * We want to declare the "target_page" variable as const, which tells
+ * the compiler that it can cache any value that it reads across calls.
+ * This avoids multiple assertions and multiple reads within any one user.
+ *
+ * This works because we finish initializing the data before we ever read
+ * from the "target_page" symbol.
+ *
+ * This also requires that we have a non-constant symbol by which we can
+ * perform the actual initialization, and which forces the data to be
+ * allocated within writable memory. Thus "init_target_page", and we use
+ * that symbol exclusively in the two functions that initialize this value.
+ *
+ * The "target_page" symbol is created as an alias of "init_target_page".
+ */
+static TargetPageBits init_target_page;
+
+/*
+ * Note that this is *not* a redundant decl, this is the definition of
+ * the "target_page" symbol. The syntax for this definition requires
+ * the use of the extern keyword. This seems to be a GCC bug in
+ * either the syntax for the alias attribute or in -Wredundant-decls.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91765
+ */
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wredundant-decls"
+
+extern const TargetPageBits target_page
+ __attribute__((alias("init_target_page")));
+
+# pragma GCC diagnostic pop
+# else
+/*
+ * When aliases are not supported then we force two different declarations,
+ * by way of suppressing the header declaration with IN_EXEC_VARY.
+ * We assume that on such an old compiler, LTO cannot be used, and so the
+ * compiler cannot not detect the mismatched declarations, and all is well.
+ */
+TargetPageBits target_page;
+# define init_target_page target_page
+# endif
#endif

bool set_preferred_target_page_bits(int bits)
@@ -XXX,XX +XXX,XX @@ bool set_preferred_target_page_bits(int bits)
*/
#ifdef TARGET_PAGE_BITS_VARY
assert(bits >= TARGET_PAGE_BITS_MIN);
- if (target_page_bits == 0 || target_page_bits > bits) {
- if (target_page_bits_decided) {
+ if (init_target_page.bits == 0 || init_target_page.bits > bits) {
+ if (init_target_page.decided) {
return false;
}
- target_page_bits = bits;
+ init_target_page.bits = bits;
}
#endif
return true;
@@ -XXX,XX +XXX,XX @@ bool set_preferred_target_page_bits(int bits)
void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
- if (target_page_bits == 0) {
- target_page_bits = TARGET_PAGE_BITS_MIN;
+ if (init_target_page.bits == 0) {
+ init_target_page.bits = TARGET_PAGE_BITS_MIN;
}
- target_page_bits_decided = true;
+ init_target_page.decided = true;
+
+ /*
+ * For the benefit of an -flto build, prevent the compiler from
+ * hoisting a read from target_page before we finish initializing.
+ */
+ barrier();
#endif
}
--
2.17.1

Deleted patch
This reduces the size of a release build by about 10k.
Noticeably, within the tlb miss helpers.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ extern const TargetPageBits target_page;
#else
extern TargetPageBits target_page;
#endif
+#ifdef CONFIG_DEBUG_TCG
#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
#else
+#define TARGET_PAGE_BITS target_page.bits
+#endif
+#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#endif

--
2.17.1

Deleted patch
There are some uint64_t uses that expect TARGET_PAGE_MASK to
sign-extend from a 32-bit target_long, so this must continue to be
a signed type. Define based on TARGET_PAGE_BITS not TARGET_PAGE_SIZE;
this will make a following patch more clear.

This should not have a functional effect so far.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
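
The sign-extension requirement can be seen in isolation (standalone C, not
QEMU code; the 32-bit types here stand in for a 32-bit target_long):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int bits = 12;
        int32_t  smask = (int32_t)-1 << bits;   /* signed mask, like TARGET_PAGE_MASK */
        uint32_t umask = ~0u << bits;           /* unsigned 32-bit mask */

        /* Only the signed form keeps the high bits set when widened to 64 bits. */
        printf("%016" PRIx64 "\n", (uint64_t)smask);  /* fffffffffffff000 */
        printf("%016" PRIx64 "\n", (uint64_t)umask);  /* 00000000fffff000 */
        return 0;
    }
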
---
include/exec/cpu-all.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ extern TargetPageBits target_page;
#endif

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)

/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
--
2.17.1

This eliminates a set of runtime shifts. It turns out that we
require TARGET_PAGE_MASK more often than TARGET_PAGE_SIZE, so
redefine TARGET_PAGE_SIZE based on TARGET_PAGE_MASK instead of
the other way around.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
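
The two's-complement identity behind redefining TARGET_PAGE_SIZE as the
negation of the mask can be checked on its own (standalone C, not QEMU code):

    #include <assert.h>

    int main(void)
    {
        for (int bits = 10; bits <= 16; bits++) {
            long mask = (long)-1 << bits;   /* ...111000...0, like TARGET_PAGE_MASK */
            int  size = -(int)mask;         /* negation recovers 1 << bits */
            assert(size == 1 << bits);
        }
        return 0;
    }
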
---
include/exec/cpu-all.h | 8 ++++++--
exec-vary.c | 1 +
2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
typedef struct {
bool decided;
int bits;
+ target_long mask;
} TargetPageBits;
#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
extern const TargetPageBits target_page;
@@ -XXX,XX +XXX,XX @@ extern TargetPageBits target_page;
#endif
#ifdef CONFIG_DEBUG_TCG
#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
+#define TARGET_PAGE_MASK ({ assert(target_page.decided); target_page.mask; })
#else
#define TARGET_PAGE_BITS target_page.bits
+#define TARGET_PAGE_MASK target_page.mask
#endif
+#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#endif

-#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)

/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
diff --git a/exec-vary.c b/exec-vary.c
index XXXXXXX..XXXXXXX 100644
--- a/exec-vary.c
+++ b/exec-vary.c
@@ -XXX,XX +XXX,XX @@ void finalize_target_page_bits(void)
if (init_target_page.bits == 0) {
init_target_page.bits = TARGET_PAGE_BITS_MIN;
}
+ init_target_page.mask = (target_long)-1 << init_target_page.bits;
init_target_page.decided = true;

/*
--
2.17.1


Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 198 +++++++++++++++++++-------------------
1 file changed, 98 insertions(+), 100 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
# define ALL_VECTOR_REGS 0x00ff0000u
# define ALL_BYTEL_REGS 0x0000000fu
#endif
-#ifdef CONFIG_SOFTMMU
-# define SOFTMMU_RESERVE_REGS ((1 << TCG_REG_L0) | (1 << TCG_REG_L1))
-#else
-# define SOFTMMU_RESERVE_REGS 0
-#endif
+#define SOFTMMU_RESERVE_REGS \
+ (tcg_use_softmmu ? (1 << TCG_REG_L0) | (1 << TCG_REG_L1) : 0)

/* For 64-bit, we always know that CMOV is available. */
#if TCG_TARGET_REG_BITS == 64
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
return true;
}

-#ifndef CONFIG_SOFTMMU
+#ifdef CONFIG_USER_ONLY
static HostAddress x86_guest_base = {
.index = -1
};
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
}
return 0;
}
+#define setup_guest_base_seg setup_guest_base_seg
#elif defined(__x86_64__) && \
(defined (__FreeBSD__) || defined (__FreeBSD_kernel__))
# include <machine/sysarch.h>
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
}
return 0;
}
+#define setup_guest_base_seg setup_guest_base_seg
+#endif
#else
-static inline int setup_guest_base_seg(void)
-{
- return 0;
-}
-#endif /* setup_guest_base_seg */
-#endif /* !SOFTMMU */
+# define x86_guest_base (*(HostAddress *)({ qemu_build_not_reached(); NULL; }))
+#endif /* CONFIG_USER_ONLY */
+#ifndef setup_guest_base_seg
+# define setup_guest_base_seg() 0
+#endif

#define MIN_TLB_MASK_TABLE_OFS INT_MIN

@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
MemOp s_bits = opc & MO_SIZE;
unsigned a_mask;

-#ifdef CONFIG_SOFTMMU
- h->index = TCG_REG_L0;
- h->ofs = 0;
- h->seg = 0;
-#else
- *h = x86_guest_base;
-#endif
+ if (tcg_use_softmmu) {
+ h->index = TCG_REG_L0;
+ h->ofs = 0;
+ h->seg = 0;
+ } else {
+ *h = x86_guest_base;
+ }
h->base = addrlo;
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
a_mask = (1 << h->aa.align) - 1;

-#ifdef CONFIG_SOFTMMU
- int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write);
- TCGType ttype = TCG_TYPE_I32;
- TCGType tlbtype = TCG_TYPE_I32;
- int trexw = 0, hrexw = 0, tlbrexw = 0;
- unsigned mem_index = get_mmuidx(oi);
- unsigned s_mask = (1 << s_bits) - 1;
- int fast_ofs = tlb_mask_table_ofs(s, mem_index);
- int tlb_mask;
+ if (tcg_use_softmmu) {
+ int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write);
+ TCGType ttype = TCG_TYPE_I32;
+ TCGType tlbtype = TCG_TYPE_I32;
+ int trexw = 0, hrexw = 0, tlbrexw = 0;
+ unsigned mem_index = get_mmuidx(oi);
+ unsigned s_mask = (1 << s_bits) - 1;
+ int fast_ofs = tlb_mask_table_ofs(s, mem_index);
+ int tlb_mask;

- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addrlo;
+ ldst->addrhi_reg = addrhi;

- if (TCG_TARGET_REG_BITS == 64) {
- ttype = s->addr_type;
- trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
- if (TCG_TYPE_PTR == TCG_TYPE_I64) {
- hrexw = P_REXW;
- if (s->page_bits + s->tlb_dyn_max_bits > 32) {
- tlbtype = TCG_TYPE_I64;
- tlbrexw = P_REXW;
+ if (TCG_TARGET_REG_BITS == 64) {
+ ttype = s->addr_type;
+ trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
+ if (TCG_TYPE_PTR == TCG_TYPE_I64) {
+ hrexw = P_REXW;
+ if (s->page_bits + s->tlb_dyn_max_bits > 32) {
+ tlbtype = TCG_TYPE_I64;
+ tlbrexw = P_REXW;
+ }
}
}
- }

- tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
- tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
+ tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
+ s->page_bits - CPU_TLB_ENTRY_BITS);

- tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
- fast_ofs + offsetof(CPUTLBDescFast, mask));
+ tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
+ fast_ofs + offsetof(CPUTLBDescFast, mask));

- tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
- fast_ofs + offsetof(CPUTLBDescFast, table));
+ tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
+ fast_ofs + offsetof(CPUTLBDescFast, table));

- /*
- * If the required alignment is at least as large as the access, simply
- * copy the address and mask. For lesser alignments, check that we don't
- * cross pages for the complete access.
- */
- if (a_mask >= s_mask) {
- tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
- } else {
- tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
- addrlo, s_mask - a_mask);
- }
- tlb_mask = s->page_mask | a_mask;
- tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
+ /*
+ * If the required alignment is at least as large as the access,
+ * simply copy the address and mask. For lesser alignments,
+ * check that we don't cross pages for the complete access.
+ */
+ if (a_mask >= s_mask) {
+ tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
+ } else {
+ tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
+ addrlo, s_mask - a_mask);
+ }
+ tlb_mask = s->page_mask | a_mask;
+ tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);

- /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw,
- TCG_REG_L1, TCG_REG_L0, cmp_ofs);
-
- /* jne slow_path */
- tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
- ldst->label_ptr[0] = s->code_ptr;
- s->code_ptr += 4;
-
- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) {
- /* cmp 4(TCG_REG_L0), addrhi */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, cmp_ofs + 4);
+ /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw,
+ TCG_REG_L1, TCG_REG_L0, cmp_ofs);

/* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
- ldst->label_ptr[1] = s->code_ptr;
+ ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
- }

- /* TLB Hit. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
- offsetof(CPUTLBEntry, addend));
-#else
- if (a_mask) {
+ if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) {
+ /* cmp 4(TCG_REG_L0), addrhi */
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi,
+ TCG_REG_L0, cmp_ofs + 4);
+
+ /* jne slow_path */
+ tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
+ ldst->label_ptr[1] = s->code_ptr;
+ s->code_ptr += 4;
+ }
+
+ /* TLB Hit. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
+ offsetof(CPUTLBEntry, addend));
+ } else if (a_mask) {
ldst = new_ldst_label(s);

ldst->is_ld = is_ld;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
}
-#endif

return ldst;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_push(s, tcg_target_callee_save_regs[i]);
}

-#if TCG_TARGET_REG_BITS == 32
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
- (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
- tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
- /* jmp *tb. */
- tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
- (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
- + stack_addend);
-#else
-# if !defined(CONFIG_SOFTMMU)
- if (guest_base) {
+ if (!tcg_use_softmmu && guest_base) {
int seg = setup_guest_base_seg();
if (seg != 0) {
x86_guest_base.seg = seg;
} else if (guest_base == (int32_t)guest_base) {
x86_guest_base.ofs = guest_base;
} else {
+ assert(TCG_TARGET_REG_BITS == 64);
/* Choose R12 because, as a base, it requires a SIB byte. */
x86_guest_base.index = TCG_REG_R12;
tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base.index, guest_base);
tcg_regset_set_reg(s->reserved_regs, x86_guest_base.index);
}
}
-# endif
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
- tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
- /* jmp *tb. */
- tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
-#endif
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
+ (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
+ tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
+ /* jmp *tb. */
+ tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
+ (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
+ + stack_addend);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
+ tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
+ /* jmp *tb. */
+ tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
+ }

/*
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
--
2.34.1

Deleted patch
Using uintptr_t instead of target_ulong meant that, for 64-bit guest
and 32-bit host, we truncated the guest address comparator and so may
not hit the tlb when we should.

Fixes: 4811e9095c0
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
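
The failure mode can be sketched outside of QEMU (standalone C; uint32_t
stands in for uintptr_t on a 32-bit host):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t guest_addr = 0x1234567800ULL;     /* 64-bit guest address */
        uint32_t narrow = (uint32_t)guest_addr;    /* truncated comparator */

        /* The comparison that should match the TLB entry now fails. */
        printf("%d\n", (uint64_t)narrow == guest_addr);   /* prints 0 */
        return 0;
    }
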
---
accel/tcg/cputlb.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx)
{
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- uintptr_t tlb_addr, page;
+ target_ulong tlb_addr, page;
size_t elt_ofs;

switch (access_type) {
--
2.17.1

Deleted patch
From: Clement Deschamps <clement.deschamps@greensocs.com>

This fixes a segmentation fault in icount mode when executing
from an IO region.

TB is marked as CF_NOCACHE but tb->orig_tb is not initialized
(equals previous value in code_gen_buffer).

The issue happens in cpu_io_recompile() when it tries to invalidate orig_tb.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Clement Deschamps <clement.deschamps@greensocs.com>
Message-Id: <20191022140016.918371-1-clement.deschamps@greensocs.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/translate-all.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
+ tb->orig_tb = NULL;
tb->trace_vcpu_dstate = *cpu->trace_dstate;
tcg_ctx->tb_cflags = cflags;
tb_overflow:
--
2.17.1

Deleted patch
Since 2ac01d6dafab, this function does only two things: assert a
lock is held, and call tcg_tb_alloc. It is used exactly once,
and its user has already done the assert.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Clement Deschamps <clement.deschamps@greensocs.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/translate-all.c | 20 ++------------------
1 file changed, 2 insertions(+), 18 deletions(-)

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void tcg_exec_init(unsigned long tb_size)
#endif
}

-/*
- * Allocate a new translation block. Flush the translation buffer if
- * too many translation blocks or too much generated code.
- */
-static TranslationBlock *tb_alloc(target_ulong pc)
-{
- TranslationBlock *tb;
-
- assert_memory_lock();
-
- tb = tcg_tb_alloc(tcg_ctx);
- if (unlikely(tb == NULL)) {
- return NULL;
- }
- return tb;
-}
-
/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
TCGProfile *prof = &tcg_ctx->prof;
int64_t ti;
#endif
+
assert_memory_lock();

phys_pc = get_page_addr_code(env, pc);
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}

buffer_overflow:
- tb = tb_alloc(pc);
+ tb = tcg_tb_alloc(tcg_ctx);
if (unlikely(!tb)) {
/* flush must be done */
tb_flush(cpu);
--
2.17.1