The following changes since commit fec105c2abda8567ec15230429c41429b5ee307c:

  Merge remote-tracking branch 'remotes/kraxel/tags/audio-20190828-pull-request' into staging (2019-09-03 14:03:15 +0100)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20190903

for you to fetch changes up to c25c283df0f08582df29f1d5d7be1516b851532d:

  tcg: Factor out probe_write() logic into probe_access() (2019-09-03 08:34:18 -0700)

----------------------------------------------------------------
Allow page table bit to swap endianness.
Reorganize watchpoints out of i/o path.
Return host address from probe_write / probe_access.

----------------------------------------------------------------
David Hildenbrand (11):
      exec: Factor out core logic of check_watchpoint()
      tcg: Check for watchpoints in probe_write()
      s390x/tcg: Use guest_addr_valid() instead of h2g_valid() in probe_write_access()
      s390x/tcg: Fix length calculation in probe_write_access()
      tcg: Factor out CONFIG_USER_ONLY probe_write() from s390x code
      tcg: Enforce single page access in probe_write()
      mips/tcg: Call probe_write() for CONFIG_USER_ONLY as well
      hppa/tcg: Call probe_write() also for CONFIG_USER_ONLY
      s390x/tcg: Pass a size to probe_write() in do_csst()
      tcg: Make probe_write() return a pointer to the host page
      tcg: Factor out probe_write() logic into probe_access()

Richard Henderson (6):
      exec: Move user-only watchpoint stubs inline
      cputlb: Fold TLB_RECHECK into TLB_INVALID_MASK
      exec: Factor out cpu_watchpoint_address_matches
      cputlb: Fix size operand for tlb_fill on unaligned store
      cputlb: Remove double-alignment in store_helper
      cputlb: Handle watchpoints via TLB_WATCHPOINT

Tony Nguyen (19):
      tcg: TCGMemOp is now accelerator independent MemOp
      memory: Introduce size_memop
      target/mips: Access MemoryRegion with MemOp
      hw/s390x: Access MemoryRegion with MemOp
      hw/intc/armv7m_nic: Access MemoryRegion with MemOp
      hw/virtio: Access MemoryRegion with MemOp
      hw/vfio: Access MemoryRegion with MemOp
      exec: Access MemoryRegion with MemOp
      cputlb: Access MemoryRegion with MemOp
      memory: Access MemoryRegion with MemOp
      hw/s390x: Hard code size with MO_{8|16|32|64}
      target/mips: Hard code size with MO_{8|16|32|64}
      exec: Hard code size with MO_{8|16|32|64}
      memory: Access MemoryRegion with endianness
      cputlb: Replace size and endian operands for MemOp
      memory: Single byte swap along the I/O path
      cputlb: Byte swap memory transaction attribute
      target/sparc: Add TLB entry with attributes
      target/sparc: sun4u Invert Endian TTE bit

 include/exec/cpu-all.h                  |   8 +-
 include/exec/exec-all.h                 |  10 +-
 include/exec/memattrs.h                 |   2 +
 include/exec/memop.h                    | 134 +++++++++++
 include/exec/memory.h                   |  12 +-
 include/hw/core/cpu.h                   |  37 +++
 target/arm/translate-a64.h              |   2 +-
 target/arm/translate.h                  |   2 +-
 target/sparc/cpu.h                      |   2 +
 tcg/tcg-op.h                            |  80 +++---
 tcg/tcg.h                               | 101 +-------
 trace/mem-internal.h                    |   4 +-
 trace/mem.h                             |   4 +-
 accel/tcg/cputlb.c                      | 414 ++++++++++++++++++--------------
 accel/tcg/user-exec.c                   |  32 +++
 exec.c                                  | 177 +++-----------
 hw/intc/armv7m_nvic.c                   |  13 +-
 hw/s390x/s390-pci-inst.c                |  11 +-
 hw/vfio/pci-quirks.c                    |   7 +-
 hw/virtio/virtio-pci.c                  |  15 +-
 memory.c                                |  58 +++--
 memory_ldst.inc.c                       |  81 ++-----
 target/alpha/translate.c                |   2 +-
 target/arm/translate-a64.c              |  48 ++--
 target/arm/translate-sve.c              |   2 +-
 target/arm/translate.c                  |  32 +--
 target/hppa/op_helper.c                 |   2 -
 target/hppa/translate.c                 |  14 +-
 target/i386/translate.c                 | 132 +++++-----
 target/m68k/translate.c                 |   2 +-
 target/microblaze/translate.c           |   4 +-
 target/mips/op_helper.c                 |  13 +-
 target/mips/translate.c                 |   8 +-
 target/openrisc/translate.c             |   4 +-
 target/ppc/translate.c                  |  12 +-
 target/riscv/insn_trans/trans_rva.inc.c |   8 +-
 target/riscv/insn_trans/trans_rvi.inc.c |   4 +-
 target/s390x/mem_helper.c               |  13 +-
 target/s390x/translate.c                |   6 +-
 target/s390x/translate_vx.inc.c         |  10 +-
 target/sparc/mmu_helper.c               |  40 +--
 target/sparc/translate.c                |  14 +-
 target/tilegx/translate.c               |  10 +-
 target/tricore/translate.c              |   8 +-
 tcg/aarch64/tcg-target.inc.c            |  26 +-
 tcg/arm/tcg-target.inc.c                |  26 +-
 tcg/i386/tcg-target.inc.c               |  24 +-
 tcg/mips/tcg-target.inc.c               |  16 +-
 tcg/optimize.c                          |   2 +-
 tcg/ppc/tcg-target.inc.c                |  12 +-
 tcg/riscv/tcg-target.inc.c              |  20 +-
 tcg/s390/tcg-target.inc.c               |  14 +-
 tcg/sparc/tcg-target.inc.c              |   6 +-
 tcg/tcg-op.c                            |  38 +--
 tcg/tcg.c                               |   2 +-
 MAINTAINERS                             |   1 +
 tcg/README                              |   2 +-
 57 files changed, 918 insertions(+), 865 deletions(-)
 create mode 100644 include/exec/memop.h

The following changes since commit 1cbd2d914939ee6028e9688d4ba859a528c28405:

  Merge remote-tracking branch 'remotes/jasowang/tags/net-pull-request' into staging (2021-06-04 13:38:49 +0100)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20210604

for you to fetch changes up to 0006039e29b9e6118beab300146f7c4931f7a217:

  tcg/arm: Implement TCG_TARGET_HAS_rotv_vec (2021-06-04 11:50:11 -0700)

----------------------------------------------------------------
Host vector support for arm neon.

----------------------------------------------------------------
Richard Henderson (15):
      tcg: Change parameters for tcg_target_const_match
      tcg/arm: Add host vector framework
      tcg/arm: Implement tcg_out_ld/st for vector types
      tcg/arm: Implement tcg_out_mov for vector types
      tcg/arm: Implement tcg_out_dup*_vec
      tcg/arm: Implement minimal vector operations
      tcg/arm: Implement andc, orc, abs, neg, not vector operations
      tcg/arm: Implement TCG_TARGET_HAS_shi_vec
      tcg/arm: Implement TCG_TARGET_HAS_mul_vec
      tcg/arm: Implement TCG_TARGET_HAS_sat_vec
      tcg/arm: Implement TCG_TARGET_HAS_minmax_vec
      tcg/arm: Implement TCG_TARGET_HAS_bitsel_vec
      tcg/arm: Implement TCG_TARGET_HAS_shv_vec
      tcg/arm: Implement TCG_TARGET_HAS_roti_vec
      tcg/arm: Implement TCG_TARGET_HAS_rotv_vec

 tcg/arm/tcg-target-con-set.h |  10 +
 tcg/arm/tcg-target-con-str.h |   3 +
 tcg/arm/tcg-target.h         |  52 ++-
 tcg/arm/tcg-target.opc.h     |  16 +
 tcg/tcg.c                    |   5 +-
 tcg/aarch64/tcg-target.c.inc |   5 +-
 tcg/arm/tcg-target.c.inc     | 956 +++++++++++++++++++++++++++++++++++++++++--
 tcg/i386/tcg-target.c.inc    |   4 +-
 tcg/mips/tcg-target.c.inc    |   5 +-
 tcg/ppc/tcg-target.c.inc     |   4 +-
 tcg/riscv/tcg-target.c.inc   |   4 +-
 tcg/s390/tcg-target.c.inc    |   5 +-
 tcg/sparc/tcg-target.c.inc   |   5 +-
 tcg/tci/tcg-target.c.inc     |   6 +-
 14 files changed, 1001 insertions(+), 79 deletions(-)
 create mode 100644 tcg/arm/tcg-target.opc.h
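A note on the third summary line of the first pull request above: after that series, probe_write()/probe_access() hand back a host pointer for the probed page. A sketch of the resulting API and a typical call, assuming the declaration lands in include/exec/exec-all.h as the diffstat suggests; this is an illustration, not text from either series:

    /* Probe the guest page containing [addr, addr + size).  Raise the
     * guest fault before any data is touched.  Return a host pointer
     * when the page is plain RAM, or NULL when it cannot be accessed
     * directly (e.g. MMIO). */
    void *probe_access(CPUArchState *env, target_ulong addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       uintptr_t retaddr);

    /* e.g. in a helper that must not fault halfway through a store: */
    void *host = probe_access(env, addr, 16, MMU_DATA_STORE,
                              cpu_mmu_index(env, false), GETPC());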
From: Richard Henderson <richard.henderson@linaro.org>

Change the return value to bool, because that's what it should
have been from the start. Pass the ct mask instead of the whole
TCGArgConstraint, as that's the only part that's relevant.

Change the value argument to int64_t. We will need the extra
width for 32-bit hosts wanting to match vector constants.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                    | 5 ++---
 tcg/aarch64/tcg-target.c.inc | 5 +----
 tcg/arm/tcg-target.c.inc     | 5 +----
 tcg/i386/tcg-target.c.inc    | 4 +---
 tcg/mips/tcg-target.c.inc    | 5 +----
 tcg/ppc/tcg-target.c.inc     | 4 +---
 tcg/riscv/tcg-target.c.inc   | 4 +---
 tcg/s390/tcg-target.c.inc    | 5 +----
 tcg/sparc/tcg-target.c.inc   | 5 +----
 tcg/tci/tcg-target.c.inc     | 6 ++----
 10 files changed, 12 insertions(+), 36 deletions(-)

From: Tony Nguyen <tony.nguyen@bt.com>

Preparation for collapsing the two byte swaps, adjust_endianness and
handle_bswap, along the I/O path.

Target-dependent attributes are conditionalized upon NEED_CPU_H.

Signed-off-by: Tony Nguyen <tony.nguyen@bt.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: Cornelia Huck <cohuck@redhat.com>
Message-Id: <81d9cd7d7f5aaadfa772d6c48ecee834e9cf7882.1566466906.git.tony.nguyen@bt.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/memop.h                    | 110 ++++++++++++
 target/arm/translate-a64.h              |   2 +-
 target/arm/translate.h                  |   2 +-
 tcg/tcg-op.h                            |  80 +++++-------
 tcg/tcg.h                               | 101 ++----------------
 trace/mem-internal.h                    |   4 +-
 trace/mem.h                             |   4 +-
 accel/tcg/cputlb.c                      |   2 +-
 target/alpha/translate.c                |   2 +-
 target/arm/translate-a64.c              |  48 ++++-----
 target/arm/translate-sve.c              |   2 +-
 target/arm/translate.c                  |  32 +++---
 target/hppa/translate.c                 |  14 +--
 target/i386/translate.c                 | 132 ++++++------
 target/m68k/translate.c                 |   2 +-
 target/microblaze/translate.c           |   4 +-
 target/mips/translate.c                 |   8 +-
 target/openrisc/translate.c             |   4 +-
 target/ppc/translate.c                  |  12 +--
 target/riscv/insn_trans/trans_rva.inc.c |   8 +-
 target/riscv/insn_trans/trans_rvi.inc.c |   4 +-
 target/s390x/translate.c                |   6 +-
 target/s390x/translate_vx.inc.c         |  10 +-
 target/sparc/translate.c                |  14 +--
 target/tilegx/translate.c               |  10 +-
 target/tricore/translate.c              |   8 +-
 tcg/aarch64/tcg-target.inc.c            |  26 ++---
 tcg/arm/tcg-target.inc.c                |  26 ++---
 tcg/i386/tcg-target.inc.c               |  24 ++---
 tcg/mips/tcg-target.inc.c               |  16 +--
 tcg/optimize.c                          |   2 +-
 tcg/ppc/tcg-target.inc.c                |  12 +--
 tcg/riscv/tcg-target.inc.c              |  20 ++--
 tcg/s390/tcg-target.inc.c               |  14 +--
 tcg/sparc/tcg-target.inc.c              |   6 +-
 tcg/tcg-op.c                            |  38 +++----
 tcg/tcg.c                               |   2 +-
 MAINTAINERS                             |   1 +
 tcg/README                              |   2 +-
 39 files changed, 418 insertions(+), 396 deletions(-)
 create mode 100644 include/exec/memop.h

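A note on the commit message above: once MemOp is accelerator-independent, a single operand can carry both the size and the endianness of an access down the I/O path. A sketch of the end state this series builds toward; size_memop() arrives in "memory: Introduce size_memop" and the MemOp operand on the dispatch functions in the later "Access MemoryRegion with MemOp" patches, so treat this as an illustration rather than code from this patch:

    uint64_t val;
    MemTxResult r;

    /* One MemOp encodes both the 4-byte size and the target endianness. */
    r = memory_region_dispatch_read(mr, addr, &val,
                                    size_memop(4) | MO_TE, attrs);
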
diff --git a/include/exec/memop.h b/include/exec/memop.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/exec/memop.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * Constants for memory operations
+ *
+ * Authors:
+ *  Richard Henderson <rth@twiddle.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMOP_H
+#define MEMOP_H
+
+typedef enum MemOp {
+    MO_8     = 0,
+    MO_16    = 1,
+    MO_32    = 2,
+    MO_64    = 3,
+    MO_SIZE  = 3,   /* Mask for the above. */
+
+    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended. */
+
+    MO_BSWAP = 8,   /* Host reverse endian. */
+#ifdef HOST_WORDS_BIGENDIAN
+    MO_LE    = MO_BSWAP,
+    MO_BE    = 0,
+#else
+    MO_LE    = 0,
+    MO_BE    = MO_BSWAP,
+#endif
+#ifdef NEED_CPU_H
+#ifdef TARGET_WORDS_BIGENDIAN
+    MO_TE    = MO_BE,
+#else
+    MO_TE    = MO_LE,
+#endif
+#endif
+
+    /*
+     * MO_UNALN accesses are never checked for alignment.
+     * MO_ALIGN accesses will result in a call to the CPU's
+     * do_unaligned_access hook if the guest address is not aligned.
+     * The default depends on whether the target CPU defines
+     * TARGET_ALIGNED_ONLY.
+     *
+     * Some architectures (e.g. ARMv8) need the address which is aligned
+     * to a size more than the size of the memory access.
+     * Some architectures (e.g. SPARCv9) need an address which is aligned,
+     * but less strictly than the natural alignment.
+     *
+     * MO_ALIGN supposes the alignment size is the size of a memory access.
+     *
+     * There are three options:
+     * - unaligned access permitted (MO_UNALN).
+     * - an alignment to the size of an access (MO_ALIGN);
+     * - an alignment to a specified size, which may be more or less than
+     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
+     */
+    MO_ASHIFT = 4,
+    MO_AMASK = 7 << MO_ASHIFT,
+#ifdef NEED_CPU_H
+#ifdef TARGET_ALIGNED_ONLY
+    MO_ALIGN = 0,
+    MO_UNALN = MO_AMASK,
+#else
+    MO_ALIGN = MO_AMASK,
+    MO_UNALN = 0,
+#endif
+#endif
+    MO_ALIGN_2  = 1 << MO_ASHIFT,
+    MO_ALIGN_4  = 2 << MO_ASHIFT,
+    MO_ALIGN_8  = 3 << MO_ASHIFT,
+    MO_ALIGN_16 = 4 << MO_ASHIFT,
+    MO_ALIGN_32 = 5 << MO_ASHIFT,
+    MO_ALIGN_64 = 6 << MO_ASHIFT,
+
+    /* Combinations of the above, for ease of use. */
+    MO_UB    = MO_8,
+    MO_UW    = MO_16,
+    MO_UL    = MO_32,
+    MO_SB    = MO_SIGN | MO_8,
+    MO_SW    = MO_SIGN | MO_16,
+    MO_SL    = MO_SIGN | MO_32,
+    MO_Q     = MO_64,
+
+    MO_LEUW  = MO_LE | MO_UW,
+    MO_LEUL  = MO_LE | MO_UL,
+    MO_LESW  = MO_LE | MO_SW,
+    MO_LESL  = MO_LE | MO_SL,
+    MO_LEQ   = MO_LE | MO_Q,
+
+    MO_BEUW  = MO_BE | MO_UW,
+    MO_BEUL  = MO_BE | MO_UL,
+    MO_BESW  = MO_BE | MO_SW,
+    MO_BESL  = MO_BE | MO_SL,
+    MO_BEQ   = MO_BE | MO_Q,
+
+#ifdef NEED_CPU_H
+    MO_TEUW  = MO_TE | MO_UW,
+    MO_TEUL  = MO_TE | MO_UL,
+    MO_TESW  = MO_TE | MO_SW,
+    MO_TESL  = MO_TE | MO_SL,
+    MO_TEQ   = MO_TE | MO_Q,
+#endif
+
+    MO_SSIZE = MO_SIZE | MO_SIGN,
+} MemOp;
+
+#endif
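A note on the alignment field just defined: MO_AMASK stores a power of two in bits 4-6, so MO_ALIGN_16 is 4 << MO_ASHIFT because log2(16) = 4, while the values 0 and MO_AMASK encode the MO_ALIGN/MO_UNALN defaults, whichever way TARGET_ALIGNED_ONLY assigns them. A minimal decoder, mirroring the get_alignment_bits() helper this patch touches in tcg/tcg.h further below (an illustration, not part of the diff):

    static unsigned alignment_bits(MemOp op)
    {
        unsigned a = op & MO_AMASK;

        if (a == MO_UNALN) {
            return 0;              /* no alignment required */
        } else if (a == MO_ALIGN) {
            return op & MO_SIZE;   /* natural alignment of the access */
        } else {
            return a >> MO_ASHIFT; /* explicit MO_ALIGN_x: log2(x) */
        }
    }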
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ static inline void assert_fp_access_checked(DisasContext *s)
  * the FP/vector register Qn.
  */
 static inline int vec_reg_offset(DisasContext *s, int regno,
-                                 int element, TCGMemOp size)
+                                 int element, MemOp size)
 {
     int element_size = 1 << size;
     int offs = element * element_size;
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     int condexec_cond;
     int thumb;
     int sctlr_b;
-    TCGMemOp be_data;
+    MemOp be_data;
 #if !defined(CONFIG_USER_ONLY)
     int user;
 #endif
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void);
 #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
 #endif

-void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
-void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
-void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
-void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
+void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, MemOp);
+void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, MemOp);
+void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, MemOp);
+void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp);

 static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
 {
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
 }

 void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
-                                TCGArg, TCGMemOp);
+                                TCGArg, MemOp);
 void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
-                                TCGArg, TCGMemOp);
+                                TCGArg, MemOp);

-void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);

-void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);

-void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
-void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
-void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);
+void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp);
+void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp);

 void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
 void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);
diff --git a/tcg/tcg.h b/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_H

 #include "cpu.h"
+#include "exec/memop.h"
 #include "exec/tb-context.h"
 #include "qemu/bitops.h"
 #include "qemu/queue.h"
@@ -XXX,XX +XXX,XX @@ typedef enum TCGType {
 #endif
 } TCGType;

-/* Constants for qemu_ld and qemu_st for the Memory Operation field. */
-typedef enum TCGMemOp {
-    MO_8     = 0,
-    MO_16    = 1,
-    MO_32    = 2,
-    MO_64    = 3,
-    MO_SIZE  = 3,   /* Mask for the above. */
-
-    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended. */
-
-    MO_BSWAP = 8,   /* Host reverse endian. */
-#ifdef HOST_WORDS_BIGENDIAN
-    MO_LE    = MO_BSWAP,
-    MO_BE    = 0,
-#else
-    MO_LE    = 0,
-    MO_BE    = MO_BSWAP,
-#endif
-#ifdef TARGET_WORDS_BIGENDIAN
-    MO_TE    = MO_BE,
-#else
-    MO_TE    = MO_LE,
-#endif
-
-    /*
-     * MO_UNALN accesses are never checked for alignment.
-     * MO_ALIGN accesses will result in a call to the CPU's
-     * do_unaligned_access hook if the guest address is not aligned.
-     * The default depends on whether the target CPU defines
-     * TARGET_ALIGNED_ONLY.
-     *
-     * Some architectures (e.g. ARMv8) need the address which is aligned
-     * to a size more than the size of the memory access.
-     * Some architectures (e.g. SPARCv9) need an address which is aligned,
-     * but less strictly than the natural alignment.
-     *
-     * MO_ALIGN supposes the alignment size is the size of a memory access.
-     *
-     * There are three options:
-     * - unaligned access permitted (MO_UNALN).
-     * - an alignment to the size of an access (MO_ALIGN);
-     * - an alignment to a specified size, which may be more or less than
-     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
-     */
-    MO_ASHIFT = 4,
-    MO_AMASK = 7 << MO_ASHIFT,
-#ifdef TARGET_ALIGNED_ONLY
-    MO_ALIGN = 0,
-    MO_UNALN = MO_AMASK,
-#else
-    MO_ALIGN = MO_AMASK,
-    MO_UNALN = 0,
-#endif
-    MO_ALIGN_2  = 1 << MO_ASHIFT,
-    MO_ALIGN_4  = 2 << MO_ASHIFT,
-    MO_ALIGN_8  = 3 << MO_ASHIFT,
-    MO_ALIGN_16 = 4 << MO_ASHIFT,
-    MO_ALIGN_32 = 5 << MO_ASHIFT,
-    MO_ALIGN_64 = 6 << MO_ASHIFT,
-
-    /* Combinations of the above, for ease of use. */
-    MO_UB    = MO_8,
-    MO_UW    = MO_16,
-    MO_UL    = MO_32,
-    MO_SB    = MO_SIGN | MO_8,
-    MO_SW    = MO_SIGN | MO_16,
-    MO_SL    = MO_SIGN | MO_32,
-    MO_Q     = MO_64,
-
-    MO_LEUW  = MO_LE | MO_UW,
-    MO_LEUL  = MO_LE | MO_UL,
-    MO_LESW  = MO_LE | MO_SW,
-    MO_LESL  = MO_LE | MO_SL,
-    MO_LEQ   = MO_LE | MO_Q,
-
-    MO_BEUW  = MO_BE | MO_UW,
-    MO_BEUL  = MO_BE | MO_UL,
-    MO_BESW  = MO_BE | MO_SW,
-    MO_BESL  = MO_BE | MO_SL,
-    MO_BEQ   = MO_BE | MO_Q,
-
-    MO_TEUW  = MO_TE | MO_UW,
-    MO_TEUL  = MO_TE | MO_UL,
-    MO_TESW  = MO_TE | MO_SW,
-    MO_TESL  = MO_TE | MO_SL,
-    MO_TEQ   = MO_TE | MO_Q,
-
-    MO_SSIZE = MO_SIZE | MO_SIGN,
-} TCGMemOp;
-
 /**
  * get_alignment_bits
- * @memop: TCGMemOp value
+ * @memop: MemOp value
  *
  * Extract the alignment size from the memop.
  */
-static inline unsigned get_alignment_bits(TCGMemOp memop)
+static inline unsigned get_alignment_bits(MemOp memop)
 {
     unsigned a = memop & MO_AMASK;

@@ -XXX,XX +XXX,XX @@ static inline size_t tcg_current_code_size(TCGContext *s)
     return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
 }

-/* Combine the TCGMemOp and mmu_idx parameters into a single value. */
+/* Combine the MemOp and mmu_idx parameters into a single value. */
 typedef uint32_t TCGMemOpIdx;

 /**
@@ -XXX,XX +XXX,XX @@ typedef uint32_t TCGMemOpIdx;
  *
  * Encode these values into a single parameter.
  */
-static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
+static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
 {
     tcg_debug_assert(idx <= 15);
     return (op << 4) | idx;
@@ -XXX,XX +XXX,XX @@ static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
  *
  * Extract the memory operation from the combined value.
  */
-static inline TCGMemOp get_memop(TCGMemOpIdx oi)
+static inline MemOp get_memop(TCGMemOpIdx oi)
 {
     return oi >> 4;
 }
diff --git a/trace/mem-internal.h b/trace/mem-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/trace/mem-internal.h
+++ b/trace/mem-internal.h
@@ -XXX,XX +XXX,XX @@
 #define TRACE_MEM_ST (1ULL << 5)    /* store (y/n) */

 static inline uint8_t trace_mem_build_info(
-    int size_shift, bool sign_extend, TCGMemOp endianness, bool store)
+    int size_shift, bool sign_extend, MemOp endianness, bool store)
 {
     uint8_t res;

@@ -XXX,XX +XXX,XX @@ static inline uint8_t trace_mem_build_info(
     return res;
 }

-static inline uint8_t trace_mem_get_info(TCGMemOp op, bool store)
+static inline uint8_t trace_mem_get_info(MemOp op, bool store)
 {
     return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN),
                                 op & MO_BSWAP, store);
diff --git a/trace/mem.h b/trace/mem.h
index XXXXXXX..XXXXXXX 100644
--- a/trace/mem.h
+++ b/trace/mem.h
@@ -XXX,XX +XXX,XX @@
  *
  * Return a value for the 'info' argument in guest memory access traces.
  */
-static uint8_t trace_mem_get_info(TCGMemOp op, bool store);
+static uint8_t trace_mem_get_info(MemOp op, bool store);

 /**
  * trace_mem_build_info:
@@ -XXX,XX +XXX,XX @@ static uint8_t trace_mem_get_info(TCGMemOp op, bool store);
  * Return a value for the 'info' argument in guest memory access traces.
  */
 static uint8_t trace_mem_build_info(int size_shift, bool sign_extend,
-                                    TCGMemOp endianness, bool store);
+                                    MemOp endianness, bool store);


 #include "trace/mem-internal.h"
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     uintptr_t index = tlb_index(env, mmu_idx, addr);
     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
     target_ulong tlb_addr = tlb_addr_write(tlbe);
-    TCGMemOp mop = get_memop(oi);
+    MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
     int s_bits = mop & MO_SIZE;
     void *hostaddr;
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_store_mem(DisasContext *ctx,

 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                            int32_t disp16, int mem_idx,
-                                           TCGMemOp op)
+                                           MemOp op)
 {
     TCGLabel *lab_fail, *lab_done;
     TCGv addr, val;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
 typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
 typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
-typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);
+typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);

 /* initialize TCG globals. */
 void a64_translate_init(void)
@@ -XXX,XX +XXX,XX @@ TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
  * Dn, Sn, Hn or Bn).
  * (Note that this is not the same mapping as for A32; see cpu.h)
  */
-static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
+static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
 {
     return vec_reg_offset(s, regno, 0, size);
 }
@@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s,
                              bool iss_valid, unsigned int iss_srt,
                              bool iss_sf, bool iss_ar)
 {
-    TCGMemOp memop = s->be_data + size;
+    MemOp memop = s->be_data + size;

     g_assert(size <= 3);

@@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
     TCGv_i64 tmphi;

     if (size < 4) {
-        TCGMemOp memop = s->be_data + size;
+        MemOp memop = s->be_data + size;
         tmphi = tcg_const_i64(0);
         tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
     } else {
@@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)

 /* Get value of an element within a vector register */
 static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
-                             int element, TCGMemOp memop)
+                             int element, MemOp memop)
 {
     int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
     switch (memop) {
@@ -XXX,XX +XXX,XX @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
 }

 static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
-                                 int element, TCGMemOp memop)
+                                 int element, MemOp memop)
 {
     int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
     switch (memop) {
@@ -XXX,XX +XXX,XX @@ static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,

 /* Set value of an element within a vector register */
 static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
-                              int element, TCGMemOp memop)
+                              int element, MemOp memop)
 {
     int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
     switch (memop) {
@@ -XXX,XX +XXX,XX @@ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
 }

 static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
-                                  int destidx, int element, TCGMemOp memop)
+                                  int destidx, int element, MemOp memop)
 {
     int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
     switch (memop) {
@@ -XXX,XX +XXX,XX @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,

 /* Store from vector register to memory */
 static void do_vec_st(DisasContext *s, int srcidx, int element,
-                      TCGv_i64 tcg_addr, int size, TCGMemOp endian)
+                      TCGv_i64 tcg_addr, int size, MemOp endian)
 {
     TCGv_i64 tcg_tmp = tcg_temp_new_i64();

@@ -XXX,XX +XXX,XX @@ static void do_vec_st(DisasContext *s, int srcidx, int element,

 /* Load from memory to vector register */
 static void do_vec_ld(DisasContext *s, int destidx, int element,
-                      TCGv_i64 tcg_addr, int size, TCGMemOp endian)
+                      TCGv_i64 tcg_addr, int size, MemOp endian)
 {
     TCGv_i64 tcg_tmp = tcg_temp_new_i64();

@@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                                TCGv_i64 addr, int size, bool is_pair)
 {
     int idx = get_mem_index(s);
-    TCGMemOp memop = s->be_data;
+    MemOp memop = s->be_data;

     g_assert(size <= 3);
     if (is_pair) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     bool is_postidx = extract32(insn, 23, 1);
     bool is_q = extract32(insn, 30, 1);
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
-    TCGMemOp endian = s->be_data;
+    MemOp endian = s->be_data;

     int ebytes;   /* bytes per element */
     int elements; /* elements per vector */
@@ -XXX,XX +XXX,XX @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
     unsigned int mos, type, rm, cond, rn, rd;
     TCGv_i64 t_true, t_false, t_zero;
     DisasCompare64 c;
-    TCGMemOp sz;
+    MemOp sz;

     mos = extract32(insn, 29, 3);
     type = extract32(insn, 22, 2);
@@ -XXX,XX +XXX,XX @@ static void disas_fp_imm(DisasContext *s, uint32_t insn)
     int mos = extract32(insn, 29, 3);
     uint64_t imm;
     TCGv_i64 tcg_res;
-    TCGMemOp sz;
+    MemOp sz;

     if (mos || imm5) {
         unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
 {
     if (esize == size) {
         int element;
-        TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
+        MemOp msize = esize == 16 ? MO_16 : MO_32;
         TCGv_i32 tcg_elem;

         /* We should have one register left here */
@@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
     int shift = (2 * esize) - immhb;
     int elements = is_scalar ? 1 : (64 / esize);
     bool round = extract32(opcode, 0, 1);
-    TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
+    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
     TCGv_i64 tcg_rn, tcg_rd, tcg_round;
     TCGv_i32 tcg_rd_narrowed;
     TCGv_i64 tcg_final;
@@ -XXX,XX +XXX,XX @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
         }
     };
     NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
-    TCGMemOp memop = scalar ? size : MO_32;
+    MemOp memop = scalar ? size : MO_32;
     int maxpass = scalar ? 1 : is_q ? 4 : 2;

     for (pass = 0; pass < maxpass; pass++) {
@@ -XXX,XX +XXX,XX @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
     TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
     TCGv_i32 tcg_shift = NULL;

-    TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
+    MemOp mop = size | (is_signed ? MO_SIGN : 0);
     int pass;

     if (fracbits || size == MO_64) {
@@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
     int dsize = is_q ? 128 : 64;
     int esize = 8 << size;
     int elements = dsize/esize;
-    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
+    MemOp memop = size | (is_u ? 0 : MO_SIGN);
     TCGv_i64 tcg_rn = new_tmp_a64(s);
     TCGv_i64 tcg_rd = new_tmp_a64(s);
     TCGv_i64 tcg_round;
@@ -XXX,XX +XXX,XX @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
         TCGv_i64 tcg_passres;
-        TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
+        MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

         int elt = pass + is_q * 2;

@@ -XXX,XX +XXX,XX @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,

     if (size == 2) {
         /* 32 + 32 -> 64 op */
-        TCGMemOp memop = size + (u ? 0 : MO_SIGN);
+        MemOp memop = size + (u ? 0 : MO_SIGN);

         for (pass = 0; pass < maxpass; pass++) {
             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)

     switch (is_fp) {
     case 1: /* normal fp */
-        /* convert insn encoded size to TCGMemOp size */
+        /* convert insn encoded size to MemOp size */
         switch (size) {
         case 0: /* half-precision */
             size = MO_16;
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
         return;
     }

-    /* Given TCGMemOp size, adjust register and indexing. */
+    /* Given MemOp size, adjust register and indexing. */
     switch (size) {
     case MO_16:
         index = h << 2 | l << 1 | m;
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
         TCGv_i64 tcg_res[2];
         int pass;
         bool satop = extract32(opcode, 0, 1);
-        TCGMemOp memop = MO_32;
+        MemOp memop = MO_32;

         if (satop || !u) {
             memop |= MO_SIGN;
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
  */

 /* The memory mode of the dtype. */
-static const TCGMemOp dtype_mop[16] = {
+static const MemOp dtype_mop[16] = {
     MO_UB, MO_UB, MO_UB, MO_UB,
     MO_SL, MO_UW, MO_UW, MO_UW,
     MO_SW, MO_SW, MO_UL, MO_UL,
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ typedef enum ISSInfo {
 } ISSInfo;

 /* Save the syndrome information for a Data Abort */
-static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
+static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
 {
     uint32_t syn;
     int sas = memop & MO_SIZE;
@@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
  * that the address argument is TCGv_i32 rather than TCGv.
  */

-static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
+static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
 {
     TCGv addr = tcg_temp_new();
     tcg_gen_extu_i32_tl(addr, a32);
@@ -XXX,XX +XXX,XX @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
 }

 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
-                            int index, TCGMemOp opc)
+                            int index, MemOp opc)
 {
     TCGv addr;

@@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
 }

 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
-                            int index, TCGMemOp opc)
+                            int index, MemOp opc)
 {
     TCGv addr;

@@ -XXX,XX +XXX,XX @@ static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
 }

 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
-                            int index, TCGMemOp opc)
+                            int index, MemOp opc)
 {
     TCGv addr = gen_aa32_addr(s, a32, opc);
     tcg_gen_qemu_ld_i64(val, addr, index, opc);
@@ -XXX,XX +XXX,XX @@ static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
 }

 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
-                            int index, TCGMemOp opc)
+                            int index, MemOp opc)
 {
     TCGv addr = gen_aa32_addr(s, a32, opc);

@@ -XXX,XX +XXX,XX @@ neon_reg_offset (int reg, int n)
  * where 0 is the least significant end of the register.
  */
 static inline long
-neon_element_offset(int reg, int element, TCGMemOp size)
+neon_element_offset(int reg, int element, MemOp size)
 {
     int element_size = 1 << size;
     int ofs = element * element_size;
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 neon_load_reg(int reg, int pass)
     return tmp;
 }

-static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
+static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
 {
     long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

@@ -XXX,XX +XXX,XX @@ static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
     }
 }

-static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
+static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
 {
     long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

@@ -XXX,XX +XXX,XX @@ static void neon_store_reg(int reg, int pass, TCGv_i32 var)
     tcg_temp_free_i32(var);
 }

-static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
+static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
 {
     long offset = neon_element_offset(reg, ele, size);

@@ -XXX,XX +XXX,XX @@ static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
     }
 }

-static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
+static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
 {
     long offset = neon_element_offset(reg, ele, size);

@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     int n;
     int vec_size;
     int mmu_idx;
-    TCGMemOp endian;
+    MemOp endian;
     TCGv_i32 addr;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         } else if ((insn & 0x380) == 0) {
             /* VDUP */
             int element;
-            TCGMemOp size;
+            MemOp size;

             if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
                 return 1;
@@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                                TCGv_i32 addr, int size)
 {
     TCGv_i32 tmp = tcg_temp_new_i32();
-    TCGMemOp opc = size | MO_ALIGN | s->be_data;
+    MemOp opc = size | MO_ALIGN | s->be_data;

     s->is_ldex = true;

@@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
     TCGv taddr;
     TCGLabel *done_label;
     TCGLabel *fail_label;
-    TCGMemOp opc = size | MO_ALIGN | s->be_data;
+    MemOp opc = size | MO_ALIGN | s->be_data;

     /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
          [addr] = {Rt};
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
              */

             TCGv taddr;
-            TCGMemOp opc = s->be_data;
+            MemOp opc = s->be_data;

             rm = (insn) & 0xf;

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
  */
 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
-                       unsigned sp, int modify, TCGMemOp mop)
+                       unsigned sp, int modify, MemOp mop)
 {
     TCGv_reg ofs;
     TCGv_tl addr;
@@ -XXX,XX +XXX,XX @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,

 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
-                       unsigned sp, int modify, TCGMemOp mop)
+                       unsigned sp, int modify, MemOp mop)
 {
     TCGv_reg ofs;
     TCGv_tl addr;
@@ -XXX,XX +XXX,XX @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,

 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                         unsigned rx, int scale, target_sreg disp,
-                        unsigned sp, int modify, TCGMemOp mop)
+                        unsigned sp, int modify, MemOp mop)
 {
     TCGv_reg ofs;
     TCGv_tl addr;
@@ -XXX,XX +XXX,XX @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,

 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                         unsigned rx, int scale, target_sreg disp,
-                        unsigned sp, int modify, TCGMemOp mop)
+                        unsigned sp, int modify, MemOp mop)
 {
     TCGv_reg ofs;
     TCGv_tl addr;
@@ -XXX,XX +XXX,XX @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,

 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                     unsigned rx, int scale, target_sreg disp,
-                    unsigned sp, int modify, TCGMemOp mop)
+                    unsigned sp, int modify, MemOp mop)
 {
     TCGv_reg dest;

@@ -XXX,XX +XXX,XX @@ static bool trans_fldd(DisasContext *ctx, arg_ldst *a)

 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                      target_sreg disp, unsigned sp,
-                     int modify, TCGMemOp mop)
+                     int modify, MemOp mop)
 {
     nullify_over(ctx);
     do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
@@ -XXX,XX +XXX,XX @@ static bool trans_st(DisasContext *ctx, arg_ldst *a)

 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
 {
-    TCGMemOp mop = MO_TEUL | MO_ALIGN_16 | a->size;
+    MemOp mop = MO_TEUL | MO_ALIGN_16 | a->size;
     TCGv_reg zero, dest, ofs;
     TCGv_tl addr;

diff --git a/target/i386/translate.c b/target/i386/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     /* current insn context */
     int override; /* -1 if no override */
     int prefix;
-    TCGMemOp aflag;
-    TCGMemOp dflag;
+    MemOp aflag;
+    MemOp dflag;
     target_ulong pc_start;
     target_ulong pc; /* pc = eip + cs_base */
     /* current block context */
@@ -XXX,XX +XXX,XX @@ static void gen_eob(DisasContext *s);
 static void gen_jr(DisasContext *s, TCGv dest);
 static void gen_jmp(DisasContext *s, target_ulong eip);
 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
-static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
+static void gen_op(DisasContext *s1, int op, MemOp ot, int d);

 /* i386 arith/logic operations */
 enum {
@@ -XXX,XX +XXX,XX @@ static inline bool byte_reg_is_xH(DisasContext *s, int reg)
 }

 /* Select the size of a push/pop operation. */
-static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
+static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
 {
     if (CODE64(s)) {
         return ot == MO_16 ? MO_16 : MO_64;
@@ -XXX,XX +XXX,XX @@ static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
 }

 /* Select the size of the stack pointer. */
-static inline TCGMemOp mo_stacksize(DisasContext *s)
+static inline MemOp mo_stacksize(DisasContext *s)
 {
     return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
 }

 /* Select only size 64 else 32. Used for SSE operand sizes. */
-static inline TCGMemOp mo_64_32(TCGMemOp ot)
+static inline MemOp mo_64_32(MemOp ot)
 {
 #ifdef TARGET_X86_64
     return ot == MO_64 ? MO_64 : MO_32;
@@ -XXX,XX +XXX,XX @@ static inline TCGMemOp mo_64_32(TCGMemOp ot)

 /* Select size 8 if lsb of B is clear, else OT. Used for decoding
    byte vs word opcodes. */
-static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
+static inline MemOp mo_b_d(int b, MemOp ot)
 {
     return b & 1 ? ot : MO_8;
 }

 /* Select size 8 if lsb of B is clear, else OT capped at 32.
    Used for decoding operand size of port opcodes. */
-static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
+static inline MemOp mo_b_d32(int b, MemOp ot)
 {
     return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
 }

-static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0)
+static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
 {
     switch(ot) {
     case MO_8:
@@ -XXX,XX +XXX,XX @@ static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0)
 }

 static inline
-void gen_op_mov_v_reg(DisasContext *s, TCGMemOp ot, TCGv t0, int reg)
+void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
 {
     if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
         tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
@@ -XXX,XX +XXX,XX @@ static inline void gen_op_jmp_v(TCGv dest)
 }

 static inline
-void gen_op_add_reg_im(DisasContext *s, TCGMemOp size, int reg, int32_t val)
+void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
 {
     tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
     gen_op_mov_reg_v(s, size, reg, s->tmp0);
 }

-static inline void gen_op_add_reg_T0(DisasContext *s, TCGMemOp size, int reg)
+static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
 {
     tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
     gen_op_mov_reg_v(s, size, reg, s->tmp0);
@@ -XXX,XX +XXX,XX @@ static inline void gen_jmp_im(DisasContext *s, target_ulong pc)
 /* Compute SEG:REG into A0.  SEG is selected from the override segment
    (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
    indicate no override. */
-static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
+static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                           int def_seg, int ovr_seg)
 {
     switch (aflag) {
@@ -XXX,XX +XXX,XX @@ static inline void gen_string_movl_A0_EDI(DisasContext *s)
     gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
 }

-static inline void gen_op_movl_T0_Dshift(DisasContext *s, TCGMemOp ot)
+static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
 {
     tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
     tcg_gen_shli_tl(s->T0, s->T0, ot);
 };

-static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
+static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
 {
     switch (size) {
     case MO_8:
@@ -XXX,XX +XXX,XX @@ static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
     }
 }

-static void gen_extu(TCGMemOp ot, TCGv reg)
+static void gen_extu(MemOp ot, TCGv reg)
 {
     gen_ext_tl(reg, reg, ot, false);
 }

-static void gen_exts(TCGMemOp ot, TCGv reg)
+static void gen_exts(MemOp ot, TCGv reg)
 {
     gen_ext_tl(reg, reg, ot, true);
 }

 static inline
-void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1)
+void gen_op_jnz_ecx(DisasContext *s, MemOp size, TCGLabel *label1)
 {
     tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
     gen_extu(size, s->tmp0);
@@ -XXX,XX +XXX,XX @@ void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1)
 }

 static inline
-void gen_op_jz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1)
+void gen_op_jz_ecx(DisasContext *s, MemOp size, TCGLabel *label1)
 {
     tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
     gen_extu(size, s->tmp0);
     tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
 }

-static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
+static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
 {
     switch (ot) {
     case MO_8:
@@ -XXX,XX +XXX,XX @@ static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
     }
 }

-static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
+static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
 {
     switch (ot) {
     case MO_8:
@@ -XXX,XX +XXX,XX @@ static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
     }
 }

-static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
+static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip,
                          uint32_t svm_flags)
1141 | { | ||
1142 | target_ulong next_eip; | ||
1143 | @@ -XXX,XX +XXX,XX @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, | ||
1144 | } | ||
1145 | } | ||
1146 | |||
1147 | -static inline void gen_movs(DisasContext *s, TCGMemOp ot) | ||
1148 | +static inline void gen_movs(DisasContext *s, MemOp ot) | ||
1149 | { | ||
1150 | gen_string_movl_A0_ESI(s); | ||
1151 | gen_op_ld_v(s, ot, s->T0, s->A0); | ||
1152 | @@ -XXX,XX +XXX,XX @@ static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) | ||
1153 | return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; | ||
1154 | default: | ||
1155 | { | ||
1156 | - TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; | ||
1157 | + MemOp size = (s->cc_op - CC_OP_ADDB) & 3; | ||
1158 | TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true); | ||
1159 | return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 }; | ||
1160 | } | ||
1161 | @@ -XXX,XX +XXX,XX @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) | ||
1162 | .mask = -1 }; | ||
1163 | default: | ||
1164 | { | ||
1165 | - TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; | ||
1166 | + MemOp size = (s->cc_op - CC_OP_ADDB) & 3; | ||
1167 | TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false); | ||
1168 | return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; | ||
1169 | } | ||
1170 | @@ -XXX,XX +XXX,XX @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) | ||
1171 | static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) | ||
1172 | { | ||
1173 | int inv, jcc_op, cond; | ||
1174 | - TCGMemOp size; | ||
1175 | + MemOp size; | ||
1176 | CCPrepare cc; | ||
1177 | TCGv t0; | ||
1178 | |||
1179 | @@ -XXX,XX +XXX,XX @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) | ||
1180 | return l2; | ||
1181 | } | ||
1182 | |||
1183 | -static inline void gen_stos(DisasContext *s, TCGMemOp ot) | ||
1184 | +static inline void gen_stos(DisasContext *s, MemOp ot) | ||
1185 | { | ||
1186 | gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); | ||
1187 | gen_string_movl_A0_EDI(s); | ||
1188 | @@ -XXX,XX +XXX,XX @@ static inline void gen_stos(DisasContext *s, TCGMemOp ot) | ||
1189 | gen_op_add_reg_T0(s, s->aflag, R_EDI); | ||
1190 | } | ||
1191 | |||
1192 | -static inline void gen_lods(DisasContext *s, TCGMemOp ot) | ||
1193 | +static inline void gen_lods(DisasContext *s, MemOp ot) | ||
1194 | { | ||
1195 | gen_string_movl_A0_ESI(s); | ||
1196 | gen_op_ld_v(s, ot, s->T0, s->A0); | ||
1197 | @@ -XXX,XX +XXX,XX @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot) | ||
1198 | gen_op_add_reg_T0(s, s->aflag, R_ESI); | ||
1199 | } | ||
1200 | |||
1201 | -static inline void gen_scas(DisasContext *s, TCGMemOp ot) | ||
1202 | +static inline void gen_scas(DisasContext *s, MemOp ot) | ||
1203 | { | ||
1204 | gen_string_movl_A0_EDI(s); | ||
1205 | gen_op_ld_v(s, ot, s->T1, s->A0); | ||
1206 | @@ -XXX,XX +XXX,XX @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot) | ||
1207 | gen_op_add_reg_T0(s, s->aflag, R_EDI); | ||
1208 | } | ||
1209 | |||
1210 | -static inline void gen_cmps(DisasContext *s, TCGMemOp ot) | ||
1211 | +static inline void gen_cmps(DisasContext *s, MemOp ot) | ||
1212 | { | ||
1213 | gen_string_movl_A0_EDI(s); | ||
1214 | gen_op_ld_v(s, ot, s->T1, s->A0); | ||
1215 | @@ -XXX,XX +XXX,XX @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) | ||
1216 | } | ||
1217 | |||
1218 | |||
1219 | -static inline void gen_ins(DisasContext *s, TCGMemOp ot) | ||
1220 | +static inline void gen_ins(DisasContext *s, MemOp ot) | ||
1221 | { | ||
1222 | if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { | ||
1223 | gen_io_start(); | ||
1224 | @@ -XXX,XX +XXX,XX @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot) | ||
1225 | } | ||
1226 | } | ||
1227 | |||
1228 | -static inline void gen_outs(DisasContext *s, TCGMemOp ot) | ||
1229 | +static inline void gen_outs(DisasContext *s, MemOp ot) | ||
1230 | { | ||
1231 | if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { | ||
1232 | gen_io_start(); | ||
1233 | @@ -XXX,XX +XXX,XX @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot) | ||
1234 | /* same method as Valgrind : we generate jumps to current or next | ||
1235 | instruction */ | ||
1236 | #define GEN_REPZ(op) \ | ||
1237 | -static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ | ||
1238 | +static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ | ||
1239 | target_ulong cur_eip, target_ulong next_eip) \ | ||
1240 | { \ | ||
1241 | TCGLabel *l2; \ | ||
1242 | @@ -XXX,XX +XXX,XX @@ static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ | ||
1243 | } | ||
1244 | |||
1245 | #define GEN_REPZ2(op) \ | ||
1246 | -static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ | ||
1247 | +static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ | ||
1248 | target_ulong cur_eip, \ | ||
1249 | target_ulong next_eip, \ | ||
1250 | int nz) \ | ||
1251 | @@ -XXX,XX +XXX,XX @@ static void gen_illegal_opcode(DisasContext *s) | ||
1252 | } | ||
1253 | |||
1254 | /* if d == OR_TMP0, it means memory operand (address in A0) */ | ||
1255 | -static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) | ||
1256 | +static void gen_op(DisasContext *s1, int op, MemOp ot, int d) | ||
1257 | { | ||
1258 | if (d != OR_TMP0) { | ||
1259 | if (s1->prefix & PREFIX_LOCK) { | ||
1260 | @@ -XXX,XX +XXX,XX @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) | ||
1261 | } | ||
1262 | |||
1263 | /* if d == OR_TMP0, it means memory operand (address in A0) */ | ||
1264 | -static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) | ||
1265 | +static void gen_inc(DisasContext *s1, MemOp ot, int d, int c) | ||
1266 | { | ||
1267 | if (s1->prefix & PREFIX_LOCK) { | ||
1268 | if (d != OR_TMP0) { | ||
1269 | @@ -XXX,XX +XXX,XX @@ static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) | ||
1270 | set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot); | ||
1271 | } | ||
1272 | |||
1273 | -static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, | ||
1274 | +static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, | ||
1275 | TCGv shm1, TCGv count, bool is_right) | ||
1276 | { | ||
1277 | TCGv_i32 z32, s32, oldop; | ||
1278 | @@ -XXX,XX +XXX,XX @@ static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, | ||
1279 | set_cc_op(s, CC_OP_DYNAMIC); | ||
1280 | } | ||
1281 | |||
1282 | -static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1283 | +static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1, | ||
1284 | int is_right, int is_arith) | ||
1285 | { | ||
1286 | target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); | ||
1287 | @@ -XXX,XX +XXX,XX @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1288 | gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right); | ||
1289 | } | ||
1290 | |||
1291 | -static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, | ||
1292 | +static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2, | ||
1293 | int is_right, int is_arith) | ||
1294 | { | ||
1295 | int mask = (ot == MO_64 ? 0x3f : 0x1f); | ||
1296 | @@ -XXX,XX +XXX,XX @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, | ||
1297 | } | ||
1298 | } | ||
1299 | |||
1300 | -static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) | ||
1301 | +static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) | ||
1302 | { | ||
1303 | target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); | ||
1304 | TCGv_i32 t0, t1; | ||
1305 | @@ -XXX,XX +XXX,XX @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) | ||
1306 | set_cc_op(s, CC_OP_DYNAMIC); | ||
1307 | } | ||
1308 | |||
1309 | -static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, | ||
1310 | +static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2, | ||
1311 | int is_right) | ||
1312 | { | ||
1313 | int mask = (ot == MO_64 ? 0x3f : 0x1f); | ||
1314 | @@ -XXX,XX +XXX,XX @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, | ||
1315 | } | ||
1316 | |||
1317 | /* XXX: add faster immediate = 1 case */ | ||
1318 | -static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1319 | +static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, | ||
1320 | int is_right) | ||
1321 | { | ||
1322 | gen_compute_eflags(s); | ||
1323 | @@ -XXX,XX +XXX,XX @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1324 | } | ||
1325 | |||
1326 | /* XXX: add faster immediate case */ | ||
1327 | -static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1328 | +static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1, | ||
1329 | bool is_right, TCGv count_in) | ||
1330 | { | ||
1331 | target_ulong mask = (ot == MO_64 ? 63 : 31); | ||
1332 | @@ -XXX,XX +XXX,XX @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1333 | tcg_temp_free(count); | ||
1334 | } | ||
1335 | |||
1336 | -static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) | ||
1337 | +static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s) | ||
1338 | { | ||
1339 | if (s != OR_TMP1) | ||
1340 | gen_op_mov_v_reg(s1, ot, s1->T1, s); | ||
1341 | @@ -XXX,XX +XXX,XX @@ static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) | ||
1342 | } | ||
1343 | } | ||
1344 | |||
1345 | -static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c) | ||
1346 | +static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c) | ||
1347 | { | ||
1348 | switch(op) { | ||
1349 | case OP_ROL: | ||
1350 | @@ -XXX,XX +XXX,XX @@ static void gen_add_A0_ds_seg(DisasContext *s) | ||
1351 | /* generate modrm memory load or store of 'reg'. TMP0 is used if reg == | ||
1352 | OR_TMP0 */ | ||
1353 | static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, | ||
1354 | - TCGMemOp ot, int reg, int is_store) | ||
1355 | + MemOp ot, int reg, int is_store) | ||
1356 | { | ||
1357 | int mod, rm; | ||
1358 | |||
1359 | @@ -XXX,XX +XXX,XX @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, | ||
1360 | } | ||
1361 | } | ||
1362 | |||
1363 | -static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) | ||
1364 | +static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) | ||
1365 | { | ||
1366 | uint32_t ret; | ||
1367 | |||
1368 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) | ||
1369 | return ret; | ||
1370 | } | ||
1371 | |||
1372 | -static inline int insn_const_size(TCGMemOp ot) | ||
1373 | +static inline int insn_const_size(MemOp ot) | ||
1374 | { | ||
1375 | if (ot <= MO_32) { | ||
1376 | return 1 << ot; | ||
1377 | @@ -XXX,XX +XXX,XX @@ static inline void gen_jcc(DisasContext *s, int b, | ||
1378 | } | ||
1379 | } | ||
1380 | |||
1381 | -static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b, | ||
1382 | +static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, | ||
1383 | int modrm, int reg) | ||
1384 | { | ||
1385 | CCPrepare cc; | ||
1386 | @@ -XXX,XX +XXX,XX @@ static inline void gen_stack_update(DisasContext *s, int addend) | ||
1387 | /* Generate a push. It depends on ss32, addseg and dflag. */ | ||
1388 | static void gen_push_v(DisasContext *s, TCGv val) | ||
1389 | { | ||
1390 | - TCGMemOp d_ot = mo_pushpop(s, s->dflag); | ||
1391 | - TCGMemOp a_ot = mo_stacksize(s); | ||
1392 | + MemOp d_ot = mo_pushpop(s, s->dflag); | ||
1393 | + MemOp a_ot = mo_stacksize(s); | ||
1394 | int size = 1 << d_ot; | ||
1395 | TCGv new_esp = s->A0; | ||
1396 | |||
1397 | @@ -XXX,XX +XXX,XX @@ static void gen_push_v(DisasContext *s, TCGv val) | ||
1398 | } | ||
1399 | |||
1400 | /* two step pop is necessary for precise exceptions */ | ||
1401 | -static TCGMemOp gen_pop_T0(DisasContext *s) | ||
1402 | +static MemOp gen_pop_T0(DisasContext *s) | ||
1403 | { | ||
1404 | - TCGMemOp d_ot = mo_pushpop(s, s->dflag); | ||
1405 | + MemOp d_ot = mo_pushpop(s, s->dflag); | ||
1406 | |||
1407 | gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1); | ||
1408 | gen_op_ld_v(s, d_ot, s->T0, s->A0); | ||
1409 | @@ -XXX,XX +XXX,XX @@ static TCGMemOp gen_pop_T0(DisasContext *s) | ||
1410 | return d_ot; | ||
1411 | } | ||
1412 | |||
1413 | -static inline void gen_pop_update(DisasContext *s, TCGMemOp ot) | ||
1414 | +static inline void gen_pop_update(DisasContext *s, MemOp ot) | ||
1415 | { | ||
1416 | gen_stack_update(s, 1 << ot); | ||
1417 | } | ||
1418 | @@ -XXX,XX +XXX,XX @@ static inline void gen_stack_A0(DisasContext *s) | ||
1419 | |||
1420 | static void gen_pusha(DisasContext *s) | ||
1421 | { | ||
1422 | - TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16; | ||
1423 | - TCGMemOp d_ot = s->dflag; | ||
1424 | + MemOp s_ot = s->ss32 ? MO_32 : MO_16; | ||
1425 | + MemOp d_ot = s->dflag; | ||
1426 | int size = 1 << d_ot; | ||
1427 | int i; | ||
1428 | |||
1429 | @@ -XXX,XX +XXX,XX @@ static void gen_pusha(DisasContext *s) | ||
1430 | |||
1431 | static void gen_popa(DisasContext *s) | ||
1432 | { | ||
1433 | - TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16; | ||
1434 | - TCGMemOp d_ot = s->dflag; | ||
1435 | + MemOp s_ot = s->ss32 ? MO_32 : MO_16; | ||
1436 | + MemOp d_ot = s->dflag; | ||
1437 | int size = 1 << d_ot; | ||
1438 | int i; | ||
1439 | |||
1440 | @@ -XXX,XX +XXX,XX @@ static void gen_popa(DisasContext *s) | ||
1441 | |||
1442 | static void gen_enter(DisasContext *s, int esp_addend, int level) | ||
1443 | { | ||
1444 | - TCGMemOp d_ot = mo_pushpop(s, s->dflag); | ||
1445 | - TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; | ||
1446 | + MemOp d_ot = mo_pushpop(s, s->dflag); | ||
1447 | + MemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; | ||
1448 | int size = 1 << d_ot; | ||
1449 | |||
1450 | /* Push BP; compute FrameTemp into T1. */ | ||
1451 | @@ -XXX,XX +XXX,XX @@ static void gen_enter(DisasContext *s, int esp_addend, int level) | ||
1452 | |||
1453 | static void gen_leave(DisasContext *s) | ||
1454 | { | ||
1455 | - TCGMemOp d_ot = mo_pushpop(s, s->dflag); | ||
1456 | - TCGMemOp a_ot = mo_stacksize(s); | ||
1457 | + MemOp d_ot = mo_pushpop(s, s->dflag); | ||
1458 | + MemOp a_ot = mo_stacksize(s); | ||
1459 | |||
1460 | gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1); | ||
1461 | gen_op_ld_v(s, d_ot, s->T0, s->A0); | ||
1462 | @@ -XXX,XX +XXX,XX @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, | ||
1463 | SSEFunc_0_eppi sse_fn_eppi; | ||
1464 | SSEFunc_0_ppi sse_fn_ppi; | ||
1465 | SSEFunc_0_eppt sse_fn_eppt; | ||
1466 | - TCGMemOp ot; | ||
1467 | + MemOp ot; | ||
1468 | |||
1469 | b &= 0xff; | ||
1470 | if (s->prefix & PREFIX_DATA) | ||
1471 | @@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) | ||
1472 | CPUX86State *env = cpu->env_ptr; | ||
1473 | int b, prefixes; | ||
1474 | int shift; | ||
1475 | - TCGMemOp ot, aflag, dflag; | ||
1476 | + MemOp ot, aflag, dflag; | ||
1477 | int modrm, reg, rm, mod, op, opreg, val; | ||
1478 | target_ulong next_eip, tval; | ||
1479 | int rex_w, rex_r; | ||
1480 | @@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) | ||
1481 | case 0x1be: /* movsbS Gv, Eb */ | ||
1482 | case 0x1bf: /* movswS Gv, Eb */ | ||
1483 | { | ||
1484 | - TCGMemOp d_ot; | ||
1485 | - TCGMemOp s_ot; | ||
1486 | + MemOp d_ot; | ||
1487 | + MemOp s_ot; | ||
1488 | |||
1489 | /* d_ot is the size of destination */ | ||
1490 | d_ot = dflag; | ||
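[Editor's note: throughout the target/i386 hunks above, the operand-size argument "ot" is a MemOp whose low bits hold log2 of the access width, which is why helpers such as insn_const_size() and mo_pushpop() derive byte counts as 1 << ot. A minimal standalone sketch of that encoding follows; the constant values mirror include/exec/memop.h and are an assumption of this note, not part of the patch.]

    /* Illustrative model of the MemOp size encoding; not QEMU code. */
    #include <assert.h>
    #include <stdio.h>

    enum {
        MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
        MO_SIZE = 3,                 /* mask for the size field */
    };

    /* Access width in bytes: the size field is log2-encoded. */
    static int memop_bytes(int ot)
    {
        return 1 << (ot & MO_SIZE);
    }

    /* Analogue of mo_b_d() above: opcode bit 0 selects byte vs. word. */
    static int mo_b_d_model(int b, int ot)
    {
        return (b & 1) ? ot : MO_8;
    }

    int main(void)
    {
        assert(memop_bytes(MO_16) == 2);
        assert(memop_bytes(MO_64) == 8);
        assert(mo_b_d_model(0x88, MO_32) == MO_8);  /* even opcode: byte op */
        assert(mo_b_d_model(0x89, MO_32) == MO_32); /* odd opcode: full size */
        printf("MemOp size model OK\n");
        return 0;
    }
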
1491 | diff --git a/target/m68k/translate.c b/target/m68k/translate.c | ||
1492 | index XXXXXXX..XXXXXXX 100644 | ||
1493 | --- a/target/m68k/translate.c | ||
1494 | +++ b/target/m68k/translate.c | ||
1495 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(cas) | ||
1496 | uint16_t ext; | ||
1497 | TCGv load; | ||
1498 | TCGv cmp; | ||
1499 | - TCGMemOp opc; | ||
1500 | + MemOp opc; | ||
1501 | |||
1502 | switch ((insn >> 9) & 3) { | ||
1503 | case 1: | ||
1504 | diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c | ||
1505 | index XXXXXXX..XXXXXXX 100644 | ||
1506 | --- a/target/microblaze/translate.c | ||
1507 | +++ b/target/microblaze/translate.c | ||
1508 | @@ -XXX,XX +XXX,XX @@ static void dec_load(DisasContext *dc) | ||
1509 | unsigned int size; | ||
1510 | bool rev = false, ex = false, ea = false; | ||
1511 | int mem_index = cpu_mmu_index(&dc->cpu->env, false); | ||
1512 | - TCGMemOp mop; | ||
1513 | + MemOp mop; | ||
1514 | |||
1515 | mop = dc->opcode & 3; | ||
1516 | size = 1 << mop; | ||
1517 | @@ -XXX,XX +XXX,XX @@ static void dec_store(DisasContext *dc) | ||
1518 | unsigned int size; | ||
1519 | bool rev = false, ex = false, ea = false; | ||
1520 | int mem_index = cpu_mmu_index(&dc->cpu->env, false); | ||
1521 | - TCGMemOp mop; | ||
1522 | + MemOp mop; | ||
1523 | |||
1524 | mop = dc->opcode & 3; | ||
1525 | size = 1 << mop; | ||
1526 | diff --git a/target/mips/translate.c b/target/mips/translate.c | ||
1527 | index XXXXXXX..XXXXXXX 100644 | ||
1528 | --- a/target/mips/translate.c | ||
1529 | +++ b/target/mips/translate.c | ||
1530 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { | ||
1531 | int32_t CP0_Config5; | ||
1532 | /* Routine used to access memory */ | ||
1533 | int mem_idx; | ||
1534 | - TCGMemOp default_tcg_memop_mask; | ||
1535 | + MemOp default_tcg_memop_mask; | ||
1536 | uint32_t hflags, saved_hflags; | ||
1537 | target_ulong btarget; | ||
1538 | bool ulri; | ||
1539 | @@ -XXX,XX +XXX,XX @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, | ||
1540 | |||
1541 | /* Store conditional */ | ||
1542 | static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, | ||
1543 | - TCGMemOp tcg_mo, bool eva) | ||
1544 | + MemOp tcg_mo, bool eva) | ||
1545 | { | ||
1546 | TCGv addr, t0, val; | ||
1547 | TCGLabel *l1 = gen_new_label(); | ||
1548 | @@ -XXX,XX +XXX,XX @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg) | ||
1549 | } | ||
1550 | |||
1551 | static inline void gen_r6_ld(target_long addr, int reg, int memidx, | ||
1552 | - TCGMemOp memop) | ||
1553 | + MemOp memop) | ||
1554 | { | ||
1555 | TCGv t0 = tcg_const_tl(addr); | ||
1556 | tcg_gen_qemu_ld_tl(t0, t0, memidx, memop); | ||
1557 | @@ -XXX,XX +XXX,XX @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
1558 | extract32(ctx->opcode, 0, 8); | ||
1559 | TCGv va = tcg_temp_new(); | ||
1560 | TCGv t1 = tcg_temp_new(); | ||
1561 | - TCGMemOp memop = (extract32(ctx->opcode, 8, 3)) == | ||
1562 | + MemOp memop = (extract32(ctx->opcode, 8, 3)) == | ||
1563 | NM_P_LS_UAWM ? MO_UNALN : 0; | ||
1564 | |||
1565 | count = (count == 0) ? 8 : count; | ||
1566 | diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c | ||
1567 | index XXXXXXX..XXXXXXX 100644 | ||
1568 | --- a/target/openrisc/translate.c | ||
1569 | +++ b/target/openrisc/translate.c | ||
1570 | @@ -XXX,XX +XXX,XX @@ static bool trans_l_lwa(DisasContext *dc, arg_load *a) | ||
1571 | return true; | ||
1572 | } | ||
1573 | |||
1574 | -static void do_load(DisasContext *dc, arg_load *a, TCGMemOp mop) | ||
1575 | +static void do_load(DisasContext *dc, arg_load *a, MemOp mop) | ||
1576 | { | ||
1577 | TCGv ea; | ||
1578 | |||
1579 | @@ -XXX,XX +XXX,XX @@ static bool trans_l_swa(DisasContext *dc, arg_store *a) | ||
1580 | return true; | ||
1581 | } | ||
1582 | |||
1583 | -static void do_store(DisasContext *dc, arg_store *a, TCGMemOp mop) | ||
1584 | +static void do_store(DisasContext *dc, arg_store *a, MemOp mop) | ||
1585 | { | ||
1586 | TCGv t0 = tcg_temp_new(); | ||
1587 | tcg_gen_addi_tl(t0, cpu_R[a->a], a->i); | ||
1588 | diff --git a/target/ppc/translate.c b/target/ppc/translate.c | ||
1589 | index XXXXXXX..XXXXXXX 100644 | ||
1590 | --- a/target/ppc/translate.c | ||
1591 | +++ b/target/ppc/translate.c | ||
1592 | @@ -XXX,XX +XXX,XX @@ struct DisasContext { | ||
1593 | int mem_idx; | ||
1594 | int access_type; | ||
1595 | /* Translation flags */ | ||
1596 | - TCGMemOp default_tcg_memop_mask; | ||
1597 | + MemOp default_tcg_memop_mask; | ||
1598 | #if defined(TARGET_PPC64) | ||
1599 | bool sf_mode; | ||
1600 | bool has_cfar; | ||
1601 | @@ -XXX,XX +XXX,XX @@ static void gen_isync(DisasContext *ctx) | ||
1602 | |||
1603 | #define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE)) | ||
1604 | |||
1605 | -static void gen_load_locked(DisasContext *ctx, TCGMemOp memop) | ||
1606 | +static void gen_load_locked(DisasContext *ctx, MemOp memop) | ||
1607 | { | ||
1608 | TCGv gpr = cpu_gpr[rD(ctx->opcode)]; | ||
1609 | TCGv t0 = tcg_temp_new(); | ||
1610 | @@ -XXX,XX +XXX,XX @@ LARX(lbarx, DEF_MEMOP(MO_UB)) | ||
1611 | LARX(lharx, DEF_MEMOP(MO_UW)) | ||
1612 | LARX(lwarx, DEF_MEMOP(MO_UL)) | ||
1613 | |||
1614 | -static void gen_fetch_inc_conditional(DisasContext *ctx, TCGMemOp memop, | ||
1615 | +static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, | ||
1616 | TCGv EA, TCGCond cond, int addend) | ||
1617 | { | ||
1618 | TCGv t = tcg_temp_new(); | ||
1619 | @@ -XXX,XX +XXX,XX @@ static void gen_fetch_inc_conditional(DisasContext *ctx, TCGMemOp memop, | ||
1620 | tcg_temp_free(u); | ||
1621 | } | ||
1622 | |||
1623 | -static void gen_ld_atomic(DisasContext *ctx, TCGMemOp memop) | ||
1624 | +static void gen_ld_atomic(DisasContext *ctx, MemOp memop) | ||
1625 | { | ||
1626 | uint32_t gpr_FC = FC(ctx->opcode); | ||
1627 | TCGv EA = tcg_temp_new(); | ||
1628 | @@ -XXX,XX +XXX,XX @@ static void gen_ldat(DisasContext *ctx) | ||
1629 | } | ||
1630 | #endif | ||
1631 | |||
1632 | -static void gen_st_atomic(DisasContext *ctx, TCGMemOp memop) | ||
1633 | +static void gen_st_atomic(DisasContext *ctx, MemOp memop) | ||
1634 | { | ||
1635 | uint32_t gpr_FC = FC(ctx->opcode); | ||
1636 | TCGv EA = tcg_temp_new(); | ||
1637 | @@ -XXX,XX +XXX,XX @@ static void gen_stdat(DisasContext *ctx) | ||
1638 | } | ||
1639 | #endif | ||
1640 | |||
1641 | -static void gen_conditional_store(DisasContext *ctx, TCGMemOp memop) | ||
1642 | +static void gen_conditional_store(DisasContext *ctx, MemOp memop) | ||
1643 | { | ||
1644 | TCGLabel *l1 = gen_new_label(); | ||
1645 | TCGLabel *l2 = gen_new_label(); | ||
1646 | diff --git a/target/riscv/insn_trans/trans_rva.inc.c b/target/riscv/insn_trans/trans_rva.inc.c | ||
1647 | index XXXXXXX..XXXXXXX 100644 | ||
1648 | --- a/target/riscv/insn_trans/trans_rva.inc.c | ||
1649 | +++ b/target/riscv/insn_trans/trans_rva.inc.c | ||
1650 | @@ -XXX,XX +XXX,XX @@ | ||
1651 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
1652 | */ | ||
1653 | |||
1654 | -static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) | ||
1655 | +static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) | ||
1656 | { | ||
1657 | TCGv src1 = tcg_temp_new(); | ||
1658 | /* Put addr in load_res, data in load_val. */ | ||
1659 | @@ -XXX,XX +XXX,XX @@ static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) | ||
1660 | return true; | ||
1661 | } | ||
1662 | |||
1663 | -static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) | ||
1664 | +static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) | ||
1665 | { | ||
1666 | TCGv src1 = tcg_temp_new(); | ||
1667 | TCGv src2 = tcg_temp_new(); | ||
1668 | @@ -XXX,XX +XXX,XX @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) | ||
1669 | } | ||
1670 | |||
1671 | static bool gen_amo(DisasContext *ctx, arg_atomic *a, | ||
1672 | - void(*func)(TCGv, TCGv, TCGv, TCGArg, TCGMemOp), | ||
1673 | - TCGMemOp mop) | ||
1674 | + void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp), | ||
1675 | + MemOp mop) | ||
1676 | { | ||
1677 | TCGv src1 = tcg_temp_new(); | ||
1678 | TCGv src2 = tcg_temp_new(); | ||
1679 | diff --git a/target/riscv/insn_trans/trans_rvi.inc.c b/target/riscv/insn_trans/trans_rvi.inc.c | ||
1680 | index XXXXXXX..XXXXXXX 100644 | ||
1681 | --- a/target/riscv/insn_trans/trans_rvi.inc.c | ||
1682 | +++ b/target/riscv/insn_trans/trans_rvi.inc.c | ||
1683 | @@ -XXX,XX +XXX,XX @@ static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a) | ||
1684 | return gen_branch(ctx, a, TCG_COND_GEU); | ||
1685 | } | ||
1686 | |||
1687 | -static bool gen_load(DisasContext *ctx, arg_lb *a, TCGMemOp memop) | ||
1688 | +static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) | ||
1689 | { | ||
1690 | TCGv t0 = tcg_temp_new(); | ||
1691 | TCGv t1 = tcg_temp_new(); | ||
1692 | @@ -XXX,XX +XXX,XX @@ static bool trans_lhu(DisasContext *ctx, arg_lhu *a) | ||
1693 | return gen_load(ctx, a, MO_TEUW); | ||
1694 | } | ||
1695 | |||
1696 | -static bool gen_store(DisasContext *ctx, arg_sb *a, TCGMemOp memop) | ||
1697 | +static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) | ||
1698 | { | ||
1699 | TCGv t0 = tcg_temp_new(); | ||
1700 | TCGv dat = tcg_temp_new(); | ||
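[Editor's note: gen_lr()/gen_sc() above translate RISC-V load-reserved / store-conditional. A toy model of the semantics being generated (address and observed value recorded by LR, rechecked by SC) is sketched below; this illustrates the pattern only, not QEMU's implementation.]

    /* Toy LR/SC model: SC succeeds only if the reservation still holds. */
    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t load_res = (uintptr_t)-1; /* address reserved by LR */
    static uint32_t load_val;                  /* value observed by LR */

    static uint32_t lr_w(uint32_t *addr)
    {
        load_res = (uintptr_t)addr;
        load_val = *addr;
        return load_val;
    }

    /* Returns 0 on success, 1 on failure (RISC-V convention). */
    static int sc_w(uint32_t *addr, uint32_t val)
    {
        if ((uintptr_t)addr == load_res && *addr == load_val) {
            *addr = val;
            load_res = (uintptr_t)-1;          /* reservation consumed */
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        uint32_t cell = 5;
        uint32_t old = lr_w(&cell);
        int fail = sc_w(&cell, old + 1);
        printf("sc fail=%d cell=%u\n", fail, cell); /* fail=0 cell=6 */
        return 0;
    }
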
1701 | diff --git a/target/s390x/translate.c b/target/s390x/translate.c | ||
1702 | index XXXXXXX..XXXXXXX 100644 | ||
1703 | --- a/target/s390x/translate.c | ||
1704 | +++ b/target/s390x/translate.c | ||
1705 | @@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_offset(uint8_t reg) | ||
1706 | return offsetof(CPUS390XState, vregs[reg][0]); | ||
1707 | } | ||
1708 | |||
1709 | -static inline int vec_reg_offset(uint8_t reg, uint8_t enr, TCGMemOp es) | ||
1710 | +static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es) | ||
1711 | { | ||
1712 | /* Convert element size (es) - e.g. MO_8 - to bytes */ | ||
1713 | const uint8_t bytes = 1 << es; | ||
1714 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_csst(DisasContext *s, DisasOps *o) | ||
1715 | #ifndef CONFIG_USER_ONLY | ||
1716 | static DisasJumpType op_csp(DisasContext *s, DisasOps *o) | ||
1717 | { | ||
1718 | - TCGMemOp mop = s->insn->data; | ||
1719 | + MemOp mop = s->insn->data; | ||
1720 | TCGv_i64 addr, old, cc; | ||
1721 | TCGLabel *lab = gen_new_label(); | ||
1722 | |||
1723 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) | ||
1724 | static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) | ||
1725 | { | ||
1726 | TCGv_i64 a1, a2; | ||
1727 | - TCGMemOp mop = s->insn->data; | ||
1728 | + MemOp mop = s->insn->data; | ||
1729 | |||
1730 | /* In a parallel context, stop the world and single step. */ | ||
1731 | if (tb_cflags(s->base.tb) & CF_PARALLEL) { | ||
1732 | diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c | ||
1733 | index XXXXXXX..XXXXXXX 100644 | ||
1734 | --- a/target/s390x/translate_vx.inc.c | ||
1735 | +++ b/target/s390x/translate_vx.inc.c | ||
1736 | @@ -XXX,XX +XXX,XX @@ | ||
1737 | #define FPF_LONG 3 | ||
1738 | #define FPF_EXT 4 | ||
1739 | |||
1740 | -static inline bool valid_vec_element(uint8_t enr, TCGMemOp es) | ||
1741 | +static inline bool valid_vec_element(uint8_t enr, MemOp es) | ||
1742 | { | ||
1743 | return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1)); | ||
1744 | } | ||
1745 | |||
1746 | static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr, | ||
1747 | - TCGMemOp memop) | ||
1748 | + MemOp memop) | ||
1749 | { | ||
1750 | const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); | ||
1751 | |||
1752 | @@ -XXX,XX +XXX,XX @@ static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr, | ||
1753 | } | ||
1754 | |||
1755 | static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr, | ||
1756 | - TCGMemOp memop) | ||
1757 | + MemOp memop) | ||
1758 | { | ||
1759 | const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); | ||
1760 | |||
1761 | @@ -XXX,XX +XXX,XX @@ static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr, | ||
1762 | } | ||
1763 | |||
1764 | static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr, | ||
1765 | - TCGMemOp memop) | ||
1766 | + MemOp memop) | ||
1767 | { | ||
1768 | const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); | ||
1769 | |||
1770 | @@ -XXX,XX +XXX,XX @@ static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr, | ||
1771 | } | ||
1772 | |||
1773 | static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr, | ||
1774 | - TCGMemOp memop) | ||
1775 | + MemOp memop) | ||
1776 | { | ||
1777 | const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); | ||
1778 | |||
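[Editor's note: valid_vec_element() above bounds-checks an element number against the element size. A standalone sketch follows, assuming 128-bit (16-byte) vector registers as on s390x; NUM_VEC_ELEMENTS is redefined locally here, since its QEMU definition is not shown in this patch.]

    /* Sketch of the element-number check; assumes 16-byte vregs. */
    #include <assert.h>
    #include <stdbool.h>

    #define NUM_VEC_ELEMENTS(es) (16 >> (es))  /* es is log2 element size */

    static bool valid_vec_element_model(unsigned enr, unsigned es)
    {
        return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1));
    }

    int main(void)
    {
        assert(valid_vec_element_model(15, 0));  /* MO_8: elements 0..15 */
        assert(!valid_vec_element_model(16, 0));
        assert(valid_vec_element_model(1, 3));   /* MO_64: elements 0..1 */
        assert(!valid_vec_element_model(2, 3));
        return 0;
    }
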
1779 | diff --git a/target/sparc/translate.c b/target/sparc/translate.c | ||
1780 | index XXXXXXX..XXXXXXX 100644 | ||
1781 | --- a/target/sparc/translate.c | ||
1782 | +++ b/target/sparc/translate.c | ||
1783 | @@ -XXX,XX +XXX,XX @@ static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs, | ||
1784 | } | ||
1785 | |||
1786 | static void gen_swap(DisasContext *dc, TCGv dst, TCGv src, | ||
1787 | - TCGv addr, int mmu_idx, TCGMemOp memop) | ||
1788 | + TCGv addr, int mmu_idx, MemOp memop) | ||
1789 | { | ||
1790 | gen_address_mask(dc, addr); | ||
1791 | tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop); | ||
1792 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
1793 | ASIType type; | ||
1794 | int asi; | ||
1795 | int mem_idx; | ||
1796 | - TCGMemOp memop; | ||
1797 | + MemOp memop; | ||
1798 | } DisasASI; | ||
1799 | |||
1800 | -static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop) | ||
1801 | +static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) | ||
1802 | { | ||
1803 | int asi = GET_FIELD(insn, 19, 26); | ||
1804 | ASIType type = GET_ASI_HELPER; | ||
1805 | @@ -XXX,XX +XXX,XX @@ static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop) | ||
1806 | } | ||
1807 | |||
1808 | static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, | ||
1809 | - int insn, TCGMemOp memop) | ||
1810 | + int insn, MemOp memop) | ||
1811 | { | ||
1812 | DisasASI da = get_asi(dc, insn, memop); | ||
1813 | |||
1814 | @@ -XXX,XX +XXX,XX @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, | ||
1815 | } | ||
1816 | |||
1817 | static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, | ||
1818 | - int insn, TCGMemOp memop) | ||
1819 | + int insn, MemOp memop) | ||
1820 | { | ||
1821 | DisasASI da = get_asi(dc, insn, memop); | ||
1822 | |||
1823 | @@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, | ||
1824 | case GET_ASI_BLOCK: | ||
1825 | /* Valid for lddfa on aligned registers only. */ | ||
1826 | if (size == 8 && (rd & 7) == 0) { | ||
1827 | - TCGMemOp memop; | ||
1828 | + MemOp memop; | ||
1829 | TCGv eight; | ||
1830 | int i; | ||
1831 | |||
1832 | @@ -XXX,XX +XXX,XX @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, | ||
1833 | case GET_ASI_BLOCK: | ||
1834 | /* Valid for stdfa on aligned registers only. */ | ||
1835 | if (size == 8 && (rd & 7) == 0) { | ||
1836 | - TCGMemOp memop; | ||
1837 | + MemOp memop; | ||
1838 | TCGv eight; | ||
1839 | int i; | ||
1840 | |||
1841 | diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c | ||
1842 | index XXXXXXX..XXXXXXX 100644 | ||
1843 | --- a/target/tilegx/translate.c | ||
1844 | +++ b/target/tilegx/translate.c | ||
1845 | @@ -XXX,XX +XXX,XX @@ static void gen_cmul2(TCGv tdest, TCGv tsrca, TCGv tsrcb, int sh, int rd) | ||
1846 | } | ||
1847 | |||
1848 | static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca, | ||
1849 | - unsigned srcb, TCGMemOp memop, const char *name) | ||
1850 | + unsigned srcb, MemOp memop, const char *name) | ||
1851 | { | ||
1852 | if (dest) { | ||
1853 | return TILEGX_EXCP_OPCODE_UNKNOWN; | ||
1854 | @@ -XXX,XX +XXX,XX @@ static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca, | ||
1855 | } | ||
1856 | |||
1857 | static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb, | ||
1858 | - int imm, TCGMemOp memop, const char *name) | ||
1859 | + int imm, MemOp memop, const char *name) | ||
1860 | { | ||
1861 | TCGv tsrca = load_gr(dc, srca); | ||
1862 | TCGv tsrcb = load_gr(dc, srcb); | ||
1863 | @@ -XXX,XX +XXX,XX @@ static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext, | ||
1864 | { | ||
1865 | TCGv tdest, tsrca; | ||
1866 | const char *mnemonic; | ||
1867 | - TCGMemOp memop; | ||
1868 | + MemOp memop; | ||
1869 | TileExcp ret = TILEGX_EXCP_NONE; | ||
1870 | bool prefetch_nofault = false; | ||
1871 | |||
1872 | @@ -XXX,XX +XXX,XX @@ static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext, | ||
1873 | TCGv tsrca = load_gr(dc, srca); | ||
1874 | bool prefetch_nofault = false; | ||
1875 | const char *mnemonic; | ||
1876 | - TCGMemOp memop; | ||
1877 | + MemOp memop; | ||
1878 | int i2, i3; | ||
1879 | TCGv t0; | ||
1880 | |||
1881 | @@ -XXX,XX +XXX,XX @@ static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle) | ||
1882 | unsigned srca = get_SrcA_Y2(bundle); | ||
1883 | unsigned srcbdest = get_SrcBDest_Y2(bundle); | ||
1884 | const char *mnemonic; | ||
1885 | - TCGMemOp memop; | ||
1886 | + MemOp memop; | ||
1887 | bool prefetch_nofault = false; | ||
1888 | |||
1889 | switch (OEY2(opc, mode)) { | ||
1890 | diff --git a/target/tricore/translate.c b/target/tricore/translate.c | ||
1891 | index XXXXXXX..XXXXXXX 100644 | ||
1892 | --- a/target/tricore/translate.c | ||
1893 | +++ b/target/tricore/translate.c | ||
1894 | @@ -XXX,XX +XXX,XX @@ static inline void generate_trap(DisasContext *ctx, int class, int tin); | ||
1895 | /* Functions for load/save to/from memory */ | ||
1896 | |||
1897 | static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, | ||
1898 | - int16_t con, TCGMemOp mop) | ||
1899 | + int16_t con, MemOp mop) | ||
1900 | { | ||
1901 | TCGv temp = tcg_temp_new(); | ||
1902 | tcg_gen_addi_tl(temp, r2, con); | ||
1903 | @@ -XXX,XX +XXX,XX @@ static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, | ||
1904 | } | ||
1905 | |||
1906 | static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2, | ||
1907 | - int16_t con, TCGMemOp mop) | ||
1908 | + int16_t con, MemOp mop) | ||
1909 | { | ||
1910 | TCGv temp = tcg_temp_new(); | ||
1911 | tcg_gen_addi_tl(temp, r2, con); | ||
1912 | @@ -XXX,XX +XXX,XX @@ static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, | ||
1913 | } | ||
1914 | |||
1915 | static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, | ||
1916 | - TCGMemOp mop) | ||
1917 | + MemOp mop) | ||
1918 | { | ||
1919 | TCGv temp = tcg_temp_new(); | ||
1920 | tcg_gen_addi_tl(temp, r2, off); | ||
1921 | @@ -XXX,XX +XXX,XX @@ static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, | ||
1922 | } | ||
1923 | |||
1924 | static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, | ||
1925 | - TCGMemOp mop) | ||
1926 | + MemOp mop) | ||
1927 | { | ||
1928 | TCGv temp = tcg_temp_new(); | ||
1929 | tcg_gen_addi_tl(temp, r2, off); | ||
1930 | diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c | ||
1931 | index XXXXXXX..XXXXXXX 100644 | ||
1932 | --- a/tcg/aarch64/tcg-target.inc.c | ||
1933 | +++ b/tcg/aarch64/tcg-target.inc.c | ||
1934 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) | ||
1935 | tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); | ||
1936 | } | ||
1937 | |||
1938 | -static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, | ||
1939 | +static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits, | ||
1940 | TCGReg rd, TCGReg rn) | ||
1941 | { | ||
1942 | /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ | ||
1943 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, | ||
1944 | tcg_out_sbfm(s, ext, rd, rn, 0, bits); | ||
1945 | } | ||
1946 | |||
1947 | -static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits, | ||
1948 | +static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits, | ||
1949 | TCGReg rd, TCGReg rn) | ||
1950 | { | ||
1951 | /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ | ||
1952 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target) | ||
1953 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
1954 | { | ||
1955 | TCGMemOpIdx oi = lb->oi; | ||
1956 | - TCGMemOp opc = get_memop(oi); | ||
1957 | - TCGMemOp size = opc & MO_SIZE; | ||
1958 | + MemOp opc = get_memop(oi); | ||
1959 | + MemOp size = opc & MO_SIZE; | ||
1960 | |||
1961 | if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { | ||
1962 | return false; | ||
1963 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
1964 | static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
1965 | { | ||
1966 | TCGMemOpIdx oi = lb->oi; | ||
1967 | - TCGMemOp opc = get_memop(oi); | ||
1968 | - TCGMemOp size = opc & MO_SIZE; | ||
1969 | + MemOp opc = get_memop(oi); | ||
1970 | + MemOp size = opc & MO_SIZE; | ||
1971 | |||
1972 | if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { | ||
1973 | return false; | ||
1974 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); | ||
1975 | slow path for the failure case, which will be patched later when finalizing | ||
1976 | the slow path. Generated code returns the host addend in X1, | ||
1977 | clobbers X0,X2,X3,TMP. */ | ||
1978 | -static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, | ||
1979 | +static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, | ||
1980 | tcg_insn_unit **label_ptr, int mem_index, | ||
1981 | bool is_read) | ||
1982 | { | ||
1983 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, | ||
1984 | |||
1985 | #endif /* CONFIG_SOFTMMU */ | ||
1986 | |||
1987 | -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, | ||
1988 | +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, | ||
1989 | TCGReg data_r, TCGReg addr_r, | ||
1990 | TCGType otype, TCGReg off_r) | ||
1991 | { | ||
1992 | - const TCGMemOp bswap = memop & MO_BSWAP; | ||
1993 | + const MemOp bswap = memop & MO_BSWAP; | ||
1994 | |||
1995 | switch (memop & MO_SSIZE) { | ||
1996 | case MO_UB: | ||
1997 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, | ||
1998 | } | ||
1999 | } | ||
2000 | |||
2001 | -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, | ||
2002 | +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, | ||
2003 | TCGReg data_r, TCGReg addr_r, | ||
2004 | TCGType otype, TCGReg off_r) | ||
2005 | { | ||
2006 | - const TCGMemOp bswap = memop & MO_BSWAP; | ||
2007 | + const MemOp bswap = memop & MO_BSWAP; | ||
2008 | |||
2009 | switch (memop & MO_SIZE) { | ||
2010 | case MO_8: | ||
2011 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, | ||
2012 | static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
2013 | TCGMemOpIdx oi, TCGType ext) | ||
2014 | { | ||
2015 | - TCGMemOp memop = get_memop(oi); | ||
2016 | + MemOp memop = get_memop(oi); | ||
2017 | const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; | ||
2018 | #ifdef CONFIG_SOFTMMU | ||
2019 | unsigned mem_index = get_mmuidx(oi); | ||
2020 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
2021 | static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
2022 | TCGMemOpIdx oi) | ||
2023 | { | ||
2024 | - TCGMemOp memop = get_memop(oi); | ||
2025 | + MemOp memop = get_memop(oi); | ||
2026 | const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; | ||
2027 | #ifdef CONFIG_SOFTMMU | ||
2028 | unsigned mem_index = get_mmuidx(oi); | ||
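[Editor's note: the backend code above repeatedly unpacks a TCGMemOpIdx via get_memop() and get_mmuidx(). The sketch below models that packing (MemOp in the high bits, mmu index in the low four, as in tcg.h of this era); the exact layout is an assumption of this note.]

    /* Model of TCGMemOpIdx packing; not the QEMU definitions. */
    #include <assert.h>

    typedef unsigned MemOp;
    typedef unsigned TCGMemOpIdx;

    static TCGMemOpIdx make_memop_idx_model(MemOp op, unsigned idx)
    {
        assert(idx <= 15);
        return (op << 4) | idx;
    }

    static MemOp get_memop_model(TCGMemOpIdx oi)     { return oi >> 4; }
    static unsigned get_mmuidx_model(TCGMemOpIdx oi) { return oi & 15; }

    int main(void)
    {
        TCGMemOpIdx oi = make_memop_idx_model(0x2a, 3);
        assert(get_memop_model(oi) == 0x2a);
        assert(get_mmuidx_model(oi) == 3);
        return 0;
    }
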
2029 | diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c | ||
2030 | index XXXXXXX..XXXXXXX 100644 | ||
2031 | --- a/tcg/arm/tcg-target.inc.c | ||
2032 | +++ b/tcg/arm/tcg-target.inc.c | ||
2033 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); | ||
2034 | containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */ | ||
2035 | |||
2036 | static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
2037 | - TCGMemOp opc, int mem_index, bool is_load) | ||
2038 | + MemOp opc, int mem_index, bool is_load) | ||
2039 | { | ||
2040 | int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) | ||
2041 | : offsetof(CPUTLBEntry, addr_write)); | ||
2042 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2043 | { | ||
2044 | TCGReg argreg, datalo, datahi; | ||
2045 | TCGMemOpIdx oi = lb->oi; | ||
2046 | - TCGMemOp opc = get_memop(oi); | ||
2047 | + MemOp opc = get_memop(oi); | ||
2048 | void *func; | ||
2049 | |||
2050 | if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { | ||
2051 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2052 | { | ||
2053 | TCGReg argreg, datalo, datahi; | ||
2054 | TCGMemOpIdx oi = lb->oi; | ||
2055 | - TCGMemOp opc = get_memop(oi); | ||
2056 | + MemOp opc = get_memop(oi); | ||
2057 | |||
2058 | if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { | ||
2059 | return false; | ||
2060 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2061 | } | ||
2062 | #endif /* SOFTMMU */ | ||
2063 | |||
2064 | -static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, | ||
2065 | +static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, | ||
2066 | TCGReg datalo, TCGReg datahi, | ||
2067 | TCGReg addrlo, TCGReg addend) | ||
2068 | { | ||
2069 | - TCGMemOp bswap = opc & MO_BSWAP; | ||
2070 | + MemOp bswap = opc & MO_BSWAP; | ||
2071 | |||
2072 | switch (opc & MO_SSIZE) { | ||
2073 | case MO_UB: | ||
2074 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, | ||
2075 | } | ||
2076 | } | ||
2077 | |||
2078 | -static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, | ||
2079 | +static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, | ||
2080 | TCGReg datalo, TCGReg datahi, | ||
2081 | TCGReg addrlo) | ||
2082 | { | ||
2083 | - TCGMemOp bswap = opc & MO_BSWAP; | ||
2084 | + MemOp bswap = opc & MO_BSWAP; | ||
2085 | |||
2086 | switch (opc & MO_SSIZE) { | ||
2087 | case MO_UB: | ||
2088 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
2089 | { | ||
2090 | TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); | ||
2091 | TCGMemOpIdx oi; | ||
2092 | - TCGMemOp opc; | ||
2093 | + MemOp opc; | ||
2094 | #ifdef CONFIG_SOFTMMU | ||
2095 | int mem_index; | ||
2096 | TCGReg addend; | ||
2097 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
2098 | #endif | ||
2099 | } | ||
2100 | |||
2101 | -static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, | ||
2102 | +static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc, | ||
2103 | TCGReg datalo, TCGReg datahi, | ||
2104 | TCGReg addrlo, TCGReg addend) | ||
2105 | { | ||
2106 | - TCGMemOp bswap = opc & MO_BSWAP; | ||
2107 | + MemOp bswap = opc & MO_BSWAP; | ||
2108 | |||
2109 | switch (opc & MO_SIZE) { | ||
2110 | case MO_8: | ||
2111 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, | ||
2112 | } | ||
2113 | } | ||
2114 | |||
2115 | -static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, | ||
2116 | +static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, | ||
2117 | TCGReg datalo, TCGReg datahi, | ||
2118 | TCGReg addrlo) | ||
2119 | { | ||
2120 | - TCGMemOp bswap = opc & MO_BSWAP; | ||
2121 | + MemOp bswap = opc & MO_BSWAP; | ||
2122 | |||
2123 | switch (opc & MO_SIZE) { | ||
2124 | case MO_8: | ||
2125 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
2126 | { | ||
2127 | TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); | ||
2128 | TCGMemOpIdx oi; | ||
2129 | - TCGMemOp opc; | ||
2130 | + MemOp opc; | ||
2131 | #ifdef CONFIG_SOFTMMU | ||
2132 | int mem_index; | ||
2133 | TCGReg addend; | ||
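[Editor's note: in the arm backend hunks above, "bswap = opc & MO_BSWAP" decides whether a byte-reverse is emitted around the access for cross-endian guests. A minimal model follows; MO_BSWAP = 8 mirrors include/exec/memop.h and is an assumption of this note.]

    /* Model of a byteswapping 32-bit load; not QEMU code. */
    #include <assert.h>
    #include <stdint.h>

    enum { MO_BSWAP = 8 };

    static uint32_t bswap32_model(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0xff00) |
               ((x << 8) & 0xff0000) | (x << 24);
    }

    static uint32_t ld32_model(const uint32_t *p, unsigned memop)
    {
        uint32_t v = *p;
        return (memop & MO_BSWAP) ? bswap32_model(v) : v;
    }

    int main(void)
    {
        uint32_t cell = 0x11223344;
        assert(ld32_model(&cell, 0) == 0x11223344);
        assert(ld32_model(&cell, MO_BSWAP) == 0x44332211);
        return 0;
    }
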
2134 | diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c | ||
2135 | index XXXXXXX..XXXXXXX 100644 | ||
2136 | --- a/tcg/i386/tcg-target.inc.c | ||
2137 | +++ b/tcg/i386/tcg-target.inc.c | ||
2138 | @@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[16] = { | ||
2139 | First argument register is clobbered. */ | ||
2140 | |||
2141 | static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
2142 | - int mem_index, TCGMemOp opc, | ||
2143 | + int mem_index, MemOp opc, | ||
2144 | tcg_insn_unit **label_ptr, int which) | ||
2145 | { | ||
2146 | const TCGReg r0 = TCG_REG_L0; | ||
2147 | @@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, | ||
2148 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2149 | { | ||
2150 | TCGMemOpIdx oi = l->oi; | ||
2151 | - TCGMemOp opc = get_memop(oi); | ||
2152 | + MemOp opc = get_memop(oi); | ||
2153 | TCGReg data_reg; | ||
2154 | tcg_insn_unit **label_ptr = &l->label_ptr[0]; | ||
2155 | int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0); | ||
2156 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2157 | static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2158 | { | ||
2159 | TCGMemOpIdx oi = l->oi; | ||
2160 | - TCGMemOp opc = get_memop(oi); | ||
2161 | - TCGMemOp s_bits = opc & MO_SIZE; | ||
2162 | + MemOp opc = get_memop(oi); | ||
2163 | + MemOp s_bits = opc & MO_SIZE; | ||
2164 | tcg_insn_unit **label_ptr = &l->label_ptr[0]; | ||
2165 | TCGReg retaddr; | ||
2166 | |||
2167 | @@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void) | ||
2168 | |||
2169 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
2170 | TCGReg base, int index, intptr_t ofs, | ||
2171 | - int seg, bool is64, TCGMemOp memop) | ||
2172 | + int seg, bool is64, MemOp memop) | ||
2173 | { | ||
2174 | - const TCGMemOp real_bswap = memop & MO_BSWAP; | ||
2175 | - TCGMemOp bswap = real_bswap; | ||
2176 | + const MemOp real_bswap = memop & MO_BSWAP; | ||
2177 | + MemOp bswap = real_bswap; | ||
2178 | int rexw = is64 * P_REXW; | ||
2179 | int movop = OPC_MOVL_GvEv; | ||
2180 | |||
2181 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
2182 | TCGReg datalo, datahi, addrlo; | ||
2183 | TCGReg addrhi __attribute__((unused)); | ||
2184 | TCGMemOpIdx oi; | ||
2185 | - TCGMemOp opc; | ||
2186 | + MemOp opc; | ||
2187 | #if defined(CONFIG_SOFTMMU) | ||
2188 | int mem_index; | ||
2189 | tcg_insn_unit *label_ptr[2]; | ||
2190 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
2191 | |||
2192 | static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
2193 | TCGReg base, int index, intptr_t ofs, | ||
2194 | - int seg, TCGMemOp memop) | ||
2195 | + int seg, MemOp memop) | ||
2196 | { | ||
2197 | /* ??? Ideally we wouldn't need a scratch register. For user-only, | ||
2198 | we could perform the bswap twice to restore the original value | ||
2199 | instead of moving to the scratch. But as it is, the L constraint | ||
2200 | means that TCG_REG_L0 is definitely free here. */ | ||
2201 | const TCGReg scratch = TCG_REG_L0; | ||
2202 | - const TCGMemOp real_bswap = memop & MO_BSWAP; | ||
2203 | - TCGMemOp bswap = real_bswap; | ||
2204 | + const MemOp real_bswap = memop & MO_BSWAP; | ||
2205 | + MemOp bswap = real_bswap; | ||
2206 | int movop = OPC_MOVL_EvGv; | ||
2207 | |||
2208 | if (have_movbe && real_bswap) { | ||
2209 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
2210 | TCGReg datalo, datahi, addrlo; | ||
2211 | TCGReg addrhi __attribute__((unused)); | ||
2212 | TCGMemOpIdx oi; | ||
2213 | - TCGMemOp opc; | ||
2214 | + MemOp opc; | ||
2215 | #if defined(CONFIG_SOFTMMU) | ||
2216 | int mem_index; | ||
2217 | tcg_insn_unit *label_ptr[2]; | ||
2218 | diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c | ||
2219 | index XXXXXXX..XXXXXXX 100644 | ||
2220 | --- a/tcg/mips/tcg-target.inc.c | ||
2221 | +++ b/tcg/mips/tcg-target.inc.c | ||
2222 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, | ||
2223 | TCGReg addrh, TCGMemOpIdx oi, | ||
2224 | tcg_insn_unit *label_ptr[2], bool is_load) | ||
2225 | { | ||
2226 | - TCGMemOp opc = get_memop(oi); | ||
2227 | + MemOp opc = get_memop(oi); | ||
2228 | unsigned s_bits = opc & MO_SIZE; | ||
2229 | unsigned a_bits = get_alignment_bits(opc); | ||
2230 | int mem_index = get_mmuidx(oi); | ||
2231 | @@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, | ||
2232 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2233 | { | ||
2234 | TCGMemOpIdx oi = l->oi; | ||
2235 | - TCGMemOp opc = get_memop(oi); | ||
2236 | + MemOp opc = get_memop(oi); | ||
2237 | TCGReg v0; | ||
2238 | int i; | ||
2239 | |||
2240 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2241 | static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2242 | { | ||
2243 | TCGMemOpIdx oi = l->oi; | ||
2244 | - TCGMemOp opc = get_memop(oi); | ||
2245 | - TCGMemOp s_bits = opc & MO_SIZE; | ||
2246 | + MemOp opc = get_memop(oi); | ||
2247 | + MemOp s_bits = opc & MO_SIZE; | ||
2248 | int i; | ||
2249 | |||
2250 | /* resolve label address */ | ||
2251 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2252 | #endif | ||
2253 | |||
2254 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
2255 | - TCGReg base, TCGMemOp opc, bool is_64) | ||
2256 | + TCGReg base, MemOp opc, bool is_64) | ||
2257 | { | ||
2258 | switch (opc & (MO_SSIZE | MO_BSWAP)) { | ||
2259 | case MO_UB: | ||
2260 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
2261 | TCGReg addr_regl, addr_regh __attribute__((unused)); | ||
2262 | TCGReg data_regl, data_regh; | ||
2263 | TCGMemOpIdx oi; | ||
2264 | - TCGMemOp opc; | ||
2265 | + MemOp opc; | ||
2266 | #if defined(CONFIG_SOFTMMU) | ||
2267 | tcg_insn_unit *label_ptr[2]; | ||
2268 | #endif | ||
2269 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
2270 | } | ||
2271 | |||
2272 | static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
2273 | - TCGReg base, TCGMemOp opc) | ||
2274 | + TCGReg base, MemOp opc) | ||
2275 | { | ||
2276 | /* Don't clutter the code below with checks to avoid bswapping ZERO. */ | ||
2277 | if ((lo | hi) == 0) { | ||
2278 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
2279 | TCGReg addr_regl, addr_regh __attribute__((unused)); | ||
2280 | TCGReg data_regl, data_regh; | ||
2281 | TCGMemOpIdx oi; | ||
2282 | - TCGMemOp opc; | ||
2283 | + MemOp opc; | ||
2284 | #if defined(CONFIG_SOFTMMU) | ||
2285 | tcg_insn_unit *label_ptr[2]; | ||
2286 | #endif | ||
2287 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
2288 | index XXXXXXX..XXXXXXX 100644 | ||
2289 | --- a/tcg/optimize.c | ||
2290 | +++ b/tcg/optimize.c | ||
2291 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
2292 | CASE_OP_32_64(qemu_ld): | ||
2293 | { | ||
2294 | TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs]; | ||
2295 | - TCGMemOp mop = get_memop(oi); | ||
2296 | + MemOp mop = get_memop(oi); | ||
2297 | if (!(mop & MO_SIGN)) { | ||
2298 | mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; | ||
2299 | } | ||
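[Editor's note: the tcg/optimize.c hunk above computes the known-zero mask for an unsigned qemu_ld as (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1. A standalone check of that arithmetic follows (MO_8..MO_64 give 8..64-bit masks); the 2ULL << (bits - 1) form sidesteps the undefined 1 << 64 case.]

    /* Worked check of the known-bits mask formula; not QEMU code. */
    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        for (int size = 0; size <= 3; size++) {     /* MO_8 .. MO_64 */
            int bits = 8 << size;                   /* 8, 16, 32, 64 */
            unsigned long long mask = (2ULL << (bits - 1)) - 1;
            printf("size %d -> %2d bits, mask 0x%llx\n", size, bits, mask);
        }
        /* For 64 bits, 2ULL << 63 wraps to 0, so the mask is all-ones. */
        assert(((2ULL << 63) - 1) == 0xffffffffffffffffULL);
        return 0;
    }
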
2300 | diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c | ||
2301 | index XXXXXXX..XXXXXXX 100644 | ||
2302 | --- a/tcg/ppc/tcg-target.inc.c | ||
2303 | +++ b/tcg/ppc/tcg-target.inc.c | ||
2304 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); | ||
2305 | in CR7, loads the addend of the TLB into R3, and returns the register | ||
2306 | containing the guest address (zero-extended into R4). Clobbers R0 and R2. */ | ||
2307 | |||
2308 | -static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc, | ||
2309 | +static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc, | ||
2310 | TCGReg addrlo, TCGReg addrhi, | ||
2311 | int mem_index, bool is_read) | ||
2312 | { | ||
2313 | @@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, | ||
2314 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2315 | { | ||
2316 | TCGMemOpIdx oi = lb->oi; | ||
2317 | - TCGMemOp opc = get_memop(oi); | ||
2318 | + MemOp opc = get_memop(oi); | ||
2319 | TCGReg hi, lo, arg = TCG_REG_R3; | ||
2320 | |||
2321 | if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { | ||
2322 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2323 | static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2324 | { | ||
2325 | TCGMemOpIdx oi = lb->oi; | ||
2326 | - TCGMemOp opc = get_memop(oi); | ||
2327 | - TCGMemOp s_bits = opc & MO_SIZE; | ||
2328 | + MemOp opc = get_memop(oi); | ||
2329 | + MemOp s_bits = opc & MO_SIZE; | ||
2330 | TCGReg hi, lo, arg = TCG_REG_R3; | ||
2331 | |||
2332 | if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { | ||
2333 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
2334 | TCGReg datalo, datahi, addrlo, rbase; | ||
2335 | TCGReg addrhi __attribute__((unused)); | ||
2336 | TCGMemOpIdx oi; | ||
2337 | - TCGMemOp opc, s_bits; | ||
2338 | + MemOp opc, s_bits; | ||
2339 | #ifdef CONFIG_SOFTMMU | ||
2340 | int mem_index; | ||
2341 | tcg_insn_unit *label_ptr; | ||
2342 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
2343 | TCGReg datalo, datahi, addrlo, rbase; | ||
2344 | TCGReg addrhi __attribute__((unused)); | ||
2345 | TCGMemOpIdx oi; | ||
2346 | - TCGMemOp opc, s_bits; | ||
2347 | + MemOp opc, s_bits; | ||
2348 | #ifdef CONFIG_SOFTMMU | ||
2349 | int mem_index; | ||
2350 | tcg_insn_unit *label_ptr; | ||
2351 | diff --git a/tcg/riscv/tcg-target.inc.c b/tcg/riscv/tcg-target.inc.c | ||
2352 | index XXXXXXX..XXXXXXX 100644 | ||
2353 | --- a/tcg/riscv/tcg-target.inc.c | ||
2354 | +++ b/tcg/riscv/tcg-target.inc.c | ||
2355 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, | ||
2356 | TCGReg addrh, TCGMemOpIdx oi, | ||
2357 | tcg_insn_unit **label_ptr, bool is_load) | ||
2358 | { | ||
2359 | - TCGMemOp opc = get_memop(oi); | ||
2360 | + MemOp opc = get_memop(oi); | ||
2361 | unsigned s_bits = opc & MO_SIZE; | ||
2362 | unsigned a_bits = get_alignment_bits(opc); | ||
2363 | tcg_target_long compare_mask; | ||
2364 | @@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, | ||
2365 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2366 | { | ||
2367 | TCGMemOpIdx oi = l->oi; | ||
2368 | - TCGMemOp opc = get_memop(oi); | ||
2369 | + MemOp opc = get_memop(oi); | ||
2370 | TCGReg a0 = tcg_target_call_iarg_regs[0]; | ||
2371 | TCGReg a1 = tcg_target_call_iarg_regs[1]; | ||
2372 | TCGReg a2 = tcg_target_call_iarg_regs[2]; | ||
2373 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2374 | static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2375 | { | ||
2376 | TCGMemOpIdx oi = l->oi; | ||
2377 | - TCGMemOp opc = get_memop(oi); | ||
2378 | - TCGMemOp s_bits = opc & MO_SIZE; | ||
2379 | + MemOp opc = get_memop(oi); | ||
2380 | + MemOp s_bits = opc & MO_SIZE; | ||
2381 | TCGReg a0 = tcg_target_call_iarg_regs[0]; | ||
2382 | TCGReg a1 = tcg_target_call_iarg_regs[1]; | ||
2383 | TCGReg a2 = tcg_target_call_iarg_regs[2]; | ||
2384 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2385 | #endif /* CONFIG_SOFTMMU */ | ||
2386 | |||
2387 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
2388 | - TCGReg base, TCGMemOp opc, bool is_64) | ||
2389 | + TCGReg base, MemOp opc, bool is_64) | ||
2390 | { | ||
2391 | - const TCGMemOp bswap = opc & MO_BSWAP; | ||
2392 | + const MemOp bswap = opc & MO_BSWAP; | ||
2393 | |||
2394 | /* We don't yet handle byteswapping, assert */ | ||
2395 | g_assert(!bswap); | ||
2396 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
2397 | TCGReg addr_regl, addr_regh __attribute__((unused)); | ||
2398 | TCGReg data_regl, data_regh; | ||
2399 | TCGMemOpIdx oi; | ||
2400 | - TCGMemOp opc; | ||
2401 | + MemOp opc; | ||
2402 | #if defined(CONFIG_SOFTMMU) | ||
2403 | tcg_insn_unit *label_ptr[1]; | ||
2404 | #endif | ||
2405 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
2406 | } | ||
2407 | |||
2408 | static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
2409 | - TCGReg base, TCGMemOp opc) | ||
2410 | + TCGReg base, MemOp opc) | ||
2411 | { | ||
2412 | - const TCGMemOp bswap = opc & MO_BSWAP; | ||
2413 | + const MemOp bswap = opc & MO_BSWAP; | ||
2414 | |||
2415 | /* We don't yet handle byteswapping, assert */ | ||
2416 | g_assert(!bswap); | ||
2417 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
2418 | TCGReg addr_regl, addr_regh __attribute__((unused)); | ||
2419 | TCGReg data_regl, data_regh; | ||
2420 | TCGMemOpIdx oi; | ||
2421 | - TCGMemOp opc; | ||
2422 | + MemOp opc; | ||
2423 | #if defined(CONFIG_SOFTMMU) | ||
2424 | tcg_insn_unit *label_ptr[1]; | ||
2425 | #endif | ||
2426 | diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c | ||
2427 | index XXXXXXX..XXXXXXX 100644 | ||
2428 | --- a/tcg/s390/tcg-target.inc.c | ||
2429 | +++ b/tcg/s390/tcg-target.inc.c | ||
2430 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) | ||
2431 | } | ||
2432 | } | ||
2433 | |||
2434 | -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, | ||
2435 | +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, | ||
2436 | TCGReg base, TCGReg index, int disp) | ||
2437 | { | ||
2438 | switch (opc & (MO_SSIZE | MO_BSWAP)) { | ||
2439 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, | ||
2440 | } | ||
2441 | } | ||
2442 | |||
2443 | -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data, | ||
2444 | +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, | ||
2445 | TCGReg base, TCGReg index, int disp) | ||
2446 | { | ||
2447 | switch (opc & (MO_SIZE | MO_BSWAP)) { | ||
2448 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); | ||
2449 | |||
2450 | /* Load and compare a TLB entry, leaving the flags set. Loads the TLB | ||
2451 | addend into R2. Returns a register with the sanitized guest address. */ | ||
2452 | -static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc, | ||
2453 | +static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, | ||
2454 | int mem_index, bool is_ld) | ||
2455 | { | ||
2456 | unsigned s_bits = opc & MO_SIZE; | ||
2457 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2458 | TCGReg addr_reg = lb->addrlo_reg; | ||
2459 | TCGReg data_reg = lb->datalo_reg; | ||
2460 | TCGMemOpIdx oi = lb->oi; | ||
2461 | - TCGMemOp opc = get_memop(oi); | ||
2462 | + MemOp opc = get_memop(oi); | ||
2463 | |||
2464 | if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, | ||
2465 | (intptr_t)s->code_ptr, 2)) { | ||
2466 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2467 | TCGReg addr_reg = lb->addrlo_reg; | ||
2468 | TCGReg data_reg = lb->datalo_reg; | ||
2469 | TCGMemOpIdx oi = lb->oi; | ||
2470 | - TCGMemOp opc = get_memop(oi); | ||
2471 | + MemOp opc = get_memop(oi); | ||
2472 | |||
2473 | if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, | ||
2474 | (intptr_t)s->code_ptr, 2)) { | ||
2475 | @@ -XXX,XX +XXX,XX @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, | ||
2476 | static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
2477 | TCGMemOpIdx oi) | ||
2478 | { | ||
2479 | - TCGMemOp opc = get_memop(oi); | ||
2480 | + MemOp opc = get_memop(oi); | ||
2481 | #ifdef CONFIG_SOFTMMU | ||
2482 | unsigned mem_index = get_mmuidx(oi); | ||
2483 | tcg_insn_unit *label_ptr; | ||
2484 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
2485 | static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
2486 | TCGMemOpIdx oi) | ||
2487 | { | ||
2488 | - TCGMemOp opc = get_memop(oi); | ||
2489 | + MemOp opc = get_memop(oi); | ||
2490 | #ifdef CONFIG_SOFTMMU | ||
2491 | unsigned mem_index = get_mmuidx(oi); | ||
2492 | tcg_insn_unit *label_ptr; | ||
2493 | diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c | ||
2494 | index XXXXXXX..XXXXXXX 100644 | ||
2495 | --- a/tcg/sparc/tcg-target.inc.c | ||
2496 | +++ b/tcg/sparc/tcg-target.inc.c | ||
2497 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); | ||
2498 | is in the returned register, maybe %o0. The TLB addend is in %o1. */ | ||
2499 | |||
2500 | static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, | ||
2501 | - TCGMemOp opc, int which) | ||
2502 | + MemOp opc, int which) | ||
2503 | { | ||
2504 | int fast_off = TLB_MASK_TABLE_OFS(mem_index); | ||
2505 | int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); | ||
2506 | @@ -XXX,XX +XXX,XX @@ static const int qemu_st_opc[16] = { | ||
2507 | static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, | ||
2508 | TCGMemOpIdx oi, bool is_64) | ||
2509 | { | ||
2510 | - TCGMemOp memop = get_memop(oi); | ||
2511 | + MemOp memop = get_memop(oi); | ||
2512 | #ifdef CONFIG_SOFTMMU | ||
2513 | unsigned memi = get_mmuidx(oi); | ||
2514 | TCGReg addrz, param; | ||
2515 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, | ||
2516 | static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, | ||
2517 | TCGMemOpIdx oi) | ||
2518 | { | ||
2519 | - TCGMemOp memop = get_memop(oi); | ||
2520 | + MemOp memop = get_memop(oi); | ||
2521 | #ifdef CONFIG_SOFTMMU | ||
2522 | unsigned memi = get_mmuidx(oi); | ||
2523 | TCGReg addrz, param; | ||
2524 | diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c | ||
2525 | index XXXXXXX..XXXXXXX 100644 | ||
2526 | --- a/tcg/tcg-op.c | ||
2527 | +++ b/tcg/tcg-op.c | ||
2528 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void) | ||
2529 | } | ||
2530 | } | ||
2531 | |||
2532 | -static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) | ||
2533 | +static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) | ||
2534 | { | ||
2535 | /* Trigger the asserts within as early as possible. */ | ||
2536 | (void)get_alignment_bits(op); | ||
2537 | @@ -XXX,XX +XXX,XX @@ static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) | ||
2538 | } | ||
2539 | |||
2540 | static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, | ||
2541 | - TCGMemOp memop, TCGArg idx) | ||
2542 | + MemOp memop, TCGArg idx) | ||
2543 | { | ||
2544 | TCGMemOpIdx oi = make_memop_idx(memop, idx); | ||
2545 | #if TARGET_LONG_BITS == 32 | ||
2546 | @@ -XXX,XX +XXX,XX @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, | ||
2547 | } | ||
2548 | |||
2549 | static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, | ||
2550 | - TCGMemOp memop, TCGArg idx) | ||
2551 | + MemOp memop, TCGArg idx) | ||
2552 | { | ||
2553 | TCGMemOpIdx oi = make_memop_idx(memop, idx); | ||
2554 | #if TARGET_LONG_BITS == 32 | ||
2555 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_req_mo(TCGBar type) | ||
2556 | } | ||
2557 | } | ||
2558 | |||
2559 | -void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2560 | +void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) | ||
2561 | { | ||
2562 | - TCGMemOp orig_memop; | ||
2563 | + MemOp orig_memop; | ||
2564 | |||
2565 | tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); | ||
2566 | memop = tcg_canonicalize_memop(memop, 0, 0); | ||
2567 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2568 | } | ||
2569 | } | ||
2570 | |||
2571 | -void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2572 | +void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) | ||
2573 | { | ||
2574 | TCGv_i32 swap = NULL; | ||
2575 | |||
2576 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2577 | } | ||
2578 | } | ||
2579 | |||
2580 | -void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2581 | +void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) | ||
2582 | { | ||
2583 | - TCGMemOp orig_memop; | ||
2584 | + MemOp orig_memop; | ||
2585 | |||
2586 | if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { | ||
2587 | tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); | ||
2588 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2589 | } | ||
2590 | } | ||
2591 | |||
2592 | -void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2593 | +void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) | ||
2594 | { | ||
2595 | TCGv_i64 swap = NULL; | ||
2596 | |||
2597 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2598 | } | ||
2599 | } | ||
2600 | |||
2601 | -static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc) | ||
2602 | +static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) | ||
2603 | { | ||
2604 | switch (opc & MO_SSIZE) { | ||
2605 | case MO_SB: | ||
2606 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc) | ||
2607 | } | ||
2608 | } | ||
2609 | |||
2610 | -static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc) | ||
2611 | +static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) | ||
2612 | { | ||
2613 | switch (opc & MO_SSIZE) { | ||
2614 | case MO_SB: | ||
2615 | @@ -XXX,XX +XXX,XX @@ static void * const table_cmpxchg[16] = { | ||
2616 | }; | ||
2617 | |||
2618 | void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, | ||
2619 | - TCGv_i32 newv, TCGArg idx, TCGMemOp memop) | ||
2620 | + TCGv_i32 newv, TCGArg idx, MemOp memop) | ||
2621 | { | ||
2622 | memop = tcg_canonicalize_memop(memop, 0, 0); | ||
2623 | |||
2624 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, | ||
2625 | } | ||
2626 | |||
2627 | void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, | ||
2628 | - TCGv_i64 newv, TCGArg idx, TCGMemOp memop) | ||
2629 | + TCGv_i64 newv, TCGArg idx, MemOp memop) | ||
2630 | { | ||
2631 | memop = tcg_canonicalize_memop(memop, 1, 0); | ||
2632 | |||
2633 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, | ||
2634 | } | ||
2635 | |||
2636 | static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, | ||
2637 | - TCGArg idx, TCGMemOp memop, bool new_val, | ||
2638 | + TCGArg idx, MemOp memop, bool new_val, | ||
2639 | void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) | ||
2640 | { | ||
2641 | TCGv_i32 t1 = tcg_temp_new_i32(); | ||
2642 | @@ -XXX,XX +XXX,XX @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, | ||
2643 | } | ||
2644 | |||
2645 | static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, | ||
2646 | - TCGArg idx, TCGMemOp memop, void * const table[]) | ||
2647 | + TCGArg idx, MemOp memop, void * const table[]) | ||
2648 | { | ||
2649 | gen_atomic_op_i32 gen; | ||
2650 | |||
2651 | @@ -XXX,XX +XXX,XX @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, | ||
2652 | } | ||
2653 | |||
2654 | static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, | ||
2655 | - TCGArg idx, TCGMemOp memop, bool new_val, | ||
2656 | + TCGArg idx, MemOp memop, bool new_val, | ||
2657 | void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) | ||
2658 | { | ||
2659 | TCGv_i64 t1 = tcg_temp_new_i64(); | ||
2660 | @@ -XXX,XX +XXX,XX @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, | ||
2661 | } | ||
2662 | |||
2663 | static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, | ||
2664 | - TCGArg idx, TCGMemOp memop, void * const table[]) | ||
2665 | + TCGArg idx, MemOp memop, void * const table[]) | ||
2666 | { | ||
2667 | memop = tcg_canonicalize_memop(memop, 1, 0); | ||
2668 | |||
2669 | @@ -XXX,XX +XXX,XX @@ static void * const table_##NAME[16] = { \ | ||
2670 | WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ | ||
2671 | }; \ | ||
2672 | void tcg_gen_atomic_##NAME##_i32 \ | ||
2673 | - (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \ | ||
2674 | + (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ | ||
2675 | { \ | ||
2676 | if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ | ||
2677 | do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ | ||
2678 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_##NAME##_i32 \ | ||
2679 | } \ | ||
2680 | } \ | ||
2681 | void tcg_gen_atomic_##NAME##_i64 \ | ||
2682 | - (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \ | ||
2683 | + (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ | ||
2684 | { \ | ||
2685 | if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ | ||
2686 | do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ | ||
2687 | diff --git a/tcg/tcg.c b/tcg/tcg.c | 23 | diff --git a/tcg/tcg.c b/tcg/tcg.c |
2688 | index XXXXXXX..XXXXXXX 100644 | 24 | index XXXXXXX..XXXXXXX 100644 |
2689 | --- a/tcg/tcg.c | 25 | --- a/tcg/tcg.c |
2690 | +++ b/tcg/tcg.c | 26 | +++ b/tcg/tcg.c |
2691 | @@ -XXX,XX +XXX,XX @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) | 27 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, |
2692 | case INDEX_op_qemu_st_i64: | 28 | static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, |
2693 | { | 29 | TCGReg base, intptr_t ofs); |
2694 | TCGMemOpIdx oi = op->args[k++]; | 30 | static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target); |
2695 | - TCGMemOp op = get_memop(oi); | 31 | -static int tcg_target_const_match(tcg_target_long val, TCGType type, |
2696 | + MemOp op = get_memop(oi); | 32 | - const TCGArgConstraint *arg_ct); |
2697 | unsigned ix = get_mmuidx(oi); | 33 | +static bool tcg_target_const_match(int64_t val, TCGType type, int ct); |
2698 | 34 | #ifdef TCG_TARGET_NEED_LDST_LABELS | |
2699 | if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { | 35 | static int tcg_out_ldst_finalize(TCGContext *s); |
2700 | diff --git a/MAINTAINERS b/MAINTAINERS | 36 | #endif |
2701 | index XXXXXXX..XXXXXXX 100644 | 37 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) |
2702 | --- a/MAINTAINERS | 38 | ts = arg_temp(arg); |
2703 | +++ b/MAINTAINERS | 39 | |
2704 | @@ -XXX,XX +XXX,XX @@ M: Paolo Bonzini <pbonzini@redhat.com> | 40 | if (ts->val_type == TEMP_VAL_CONST |
2705 | S: Supported | 41 | - && tcg_target_const_match(ts->val, ts->type, arg_ct)) { |
2706 | F: include/exec/ioport.h | 42 | + && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) { |
2707 | F: ioport.c | 43 | /* constant is OK for instruction */ |
2708 | +F: include/exec/memop.h | 44 | const_args[i] = 1; |
2709 | F: include/exec/memory.h | 45 | new_args[i] = ts->val; |
2710 | F: include/exec/ram_addr.h | 46 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc |
2711 | F: memory.c | 47 | index XXXXXXX..XXXXXXX 100644 |
2712 | diff --git a/tcg/README b/tcg/README | 48 | --- a/tcg/aarch64/tcg-target.c.inc |
2713 | index XXXXXXX..XXXXXXX 100644 | 49 | +++ b/tcg/aarch64/tcg-target.c.inc |
2714 | --- a/tcg/README | 50 | @@ -XXX,XX +XXX,XX @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) |
2715 | +++ b/tcg/README | 51 | } |
2716 | @@ -XXX,XX +XXX,XX @@ Both t0 and t1 may be split into little-endian ordered pairs of registers | 52 | } |
2717 | if dealing with 64-bit quantities on a 32-bit host. | 53 | |
2718 | 54 | -static int tcg_target_const_match(tcg_target_long val, TCGType type, | |
2719 | The memidx selects the qemu tlb index to use (e.g. user or kernel access). | 55 | - const TCGArgConstraint *arg_ct) |
2720 | -The flags are the TCGMemOp bits, selecting the sign, width, and endianness | 56 | +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) |
2721 | +The flags are the MemOp bits, selecting the sign, width, and endianness | 57 | { |
2722 | of the memory access. | 58 | - int ct = arg_ct->ct; |
2723 | 59 | - | |
2724 | For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a | 60 | if (ct & TCG_CT_CONST) { |
61 | return 1; | ||
62 | } | ||
63 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
64 | index XXXXXXX..XXXXXXX 100644 | ||
65 | --- a/tcg/arm/tcg-target.c.inc | ||
66 | +++ b/tcg/arm/tcg-target.c.inc | ||
67 | @@ -XXX,XX +XXX,XX @@ static inline int check_fit_imm(uint32_t imm) | ||
68 | * mov operand2: values represented with x << (2 * y), x < 0x100 | ||
69 | * add, sub, eor...: ditto | ||
70 | */ | ||
71 | -static inline int tcg_target_const_match(tcg_target_long val, TCGType type, | ||
72 | - const TCGArgConstraint *arg_ct) | ||
73 | +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
74 | { | ||
75 | - int ct; | ||
76 | - ct = arg_ct->ct; | ||
77 | if (ct & TCG_CT_CONST) { | ||
78 | return 1; | ||
79 | } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) { | ||
80 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | ||
81 | index XXXXXXX..XXXXXXX 100644 | ||
82 | --- a/tcg/i386/tcg-target.c.inc | ||
83 | +++ b/tcg/i386/tcg-target.c.inc | ||
84 | @@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, | ||
85 | } | ||
86 | |||
87 | /* test if a constant matches the constraint */ | ||
88 | -static inline int tcg_target_const_match(tcg_target_long val, TCGType type, | ||
89 | - const TCGArgConstraint *arg_ct) | ||
90 | +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
91 | { | ||
92 | - int ct = arg_ct->ct; | ||
93 | if (ct & TCG_CT_CONST) { | ||
94 | return 1; | ||
95 | } | ||
96 | diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc | ||
97 | index XXXXXXX..XXXXXXX 100644 | ||
98 | --- a/tcg/mips/tcg-target.c.inc | ||
99 | +++ b/tcg/mips/tcg-target.c.inc | ||
100 | @@ -XXX,XX +XXX,XX @@ static inline bool is_p2m1(tcg_target_long val) | ||
101 | } | ||
102 | |||
103 | /* test if a constant matches the constraint */ | ||
104 | -static inline int tcg_target_const_match(tcg_target_long val, TCGType type, | ||
105 | - const TCGArgConstraint *arg_ct) | ||
106 | +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
107 | { | ||
108 | - int ct; | ||
109 | - ct = arg_ct->ct; | ||
110 | if (ct & TCG_CT_CONST) { | ||
111 | return 1; | ||
112 | } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { | ||
113 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc | ||
114 | index XXXXXXX..XXXXXXX 100644 | ||
115 | --- a/tcg/ppc/tcg-target.c.inc | ||
116 | +++ b/tcg/ppc/tcg-target.c.inc | ||
117 | @@ -XXX,XX +XXX,XX @@ static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target) | ||
118 | } | ||
119 | |||
120 | /* test if a constant matches the constraint */ | ||
121 | -static int tcg_target_const_match(tcg_target_long val, TCGType type, | ||
122 | - const TCGArgConstraint *arg_ct) | ||
123 | +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
124 | { | ||
125 | - int ct = arg_ct->ct; | ||
126 | if (ct & TCG_CT_CONST) { | ||
127 | return 1; | ||
128 | } | ||
129 | diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc | ||
130 | index XXXXXXX..XXXXXXX 100644 | ||
131 | --- a/tcg/riscv/tcg-target.c.inc | ||
132 | +++ b/tcg/riscv/tcg-target.c.inc | ||
133 | @@ -XXX,XX +XXX,XX @@ static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) | ||
134 | } | ||
135 | |||
136 | /* test if a constant matches the constraint */ | ||
137 | -static int tcg_target_const_match(tcg_target_long val, TCGType type, | ||
138 | - const TCGArgConstraint *arg_ct) | ||
139 | +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
140 | { | ||
141 | - int ct = arg_ct->ct; | ||
142 | if (ct & TCG_CT_CONST) { | ||
143 | return 1; | ||
144 | } | ||
145 | diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc | ||
146 | index XXXXXXX..XXXXXXX 100644 | ||
147 | --- a/tcg/s390/tcg-target.c.inc | ||
148 | +++ b/tcg/s390/tcg-target.c.inc | ||
149 | @@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type, | ||
150 | } | ||
151 | |||
152 | /* Test if a constant matches the constraint. */ | ||
153 | -static int tcg_target_const_match(tcg_target_long val, TCGType type, | ||
154 | - const TCGArgConstraint *arg_ct) | ||
155 | +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
156 | { | ||
157 | - int ct = arg_ct->ct; | ||
158 | - | ||
159 | if (ct & TCG_CT_CONST) { | ||
160 | return 1; | ||
161 | } | ||
162 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc | ||
163 | index XXXXXXX..XXXXXXX 100644 | ||
164 | --- a/tcg/sparc/tcg-target.c.inc | ||
165 | +++ b/tcg/sparc/tcg-target.c.inc | ||
166 | @@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type, | ||
167 | } | ||
168 | |||
169 | /* test if a constant matches the constraint */ | ||
170 | -static inline int tcg_target_const_match(tcg_target_long val, TCGType type, | ||
171 | - const TCGArgConstraint *arg_ct) | ||
172 | +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
173 | { | ||
174 | - int ct = arg_ct->ct; | ||
175 | - | ||
176 | if (ct & TCG_CT_CONST) { | ||
177 | return 1; | ||
178 | } | ||
179 | diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc | ||
180 | index XXXXXXX..XXXXXXX 100644 | ||
181 | --- a/tcg/tci/tcg-target.c.inc | ||
182 | +++ b/tcg/tci/tcg-target.c.inc | ||
183 | @@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, | ||
184 | } | ||
185 | |||
186 | /* Test if a constant matches the constraint. */ | ||
187 | -static int tcg_target_const_match(tcg_target_long val, TCGType type, | ||
188 | - const TCGArgConstraint *arg_ct) | ||
189 | +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
190 | { | ||
191 | - /* No need to return 0 or 1, 0 or != 0 is good enough. */ | ||
192 | - return arg_ct->ct & TCG_CT_CONST; | ||
193 | + return ct & TCG_CT_CONST; | ||
194 | } | ||
195 | |||
196 | static void tcg_target_init(TCGContext *s) | ||
2725 | -- | 197 | -- |
2726 | 2.17.1 | 198 | 2.25.1 |
2727 | 199 | ||
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Introduce no-op size_memop to aid preparatory conversion of | ||
7 | interfaces. | ||
8 | |||
9 | Once interfaces are converted, size_memop will be implemented to | ||
10 | return a MemOp from size in bytes. | ||
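
For illustration, a condensed sketch of the two stages: the interim
no-op introduced below, and the final form this series adopts later
(both drawn from the patches themselves):

    /* Interim: pass the byte count through unchanged (no-op). */
    static inline unsigned size_memop(unsigned size)
    {
        return size;
    }

    /* Final: map a power-of-2 byte count onto the MemOp encoding,
       so e.g. size_memop(8) == MO_64. */
    static inline MemOp size_memop(unsigned size)
    {
        return ctz32(size);
    }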
11 | |||
12 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
13 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | Message-Id: <35b8ee74020f67cf40848fb7d5f127cf96c851d6.1566466906.git.tony.nguyen@bt.com> | ||
15 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | --- | ||
17 | include/exec/memop.h | 10 ++++++++++ | ||
18 | 1 file changed, 10 insertions(+) | ||
19 | |||
20 | diff --git a/include/exec/memop.h b/include/exec/memop.h | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/include/exec/memop.h | ||
23 | +++ b/include/exec/memop.h | ||
24 | @@ -XXX,XX +XXX,XX @@ typedef enum MemOp { | ||
25 | MO_SSIZE = MO_SIZE | MO_SIGN, | ||
26 | } MemOp; | ||
27 | |||
28 | +/* Size in bytes to MemOp. */ | ||
29 | +static inline unsigned size_memop(unsigned size) | ||
30 | +{ | ||
31 | + /* | ||
32 | + * FIXME: No-op to aid conversion of memory_region_dispatch_{read|write} | ||
33 | + * "unsigned size" operand into a "MemOp op". | ||
34 | + */ | ||
35 | + return size; | ||
36 | +} | ||
37 | + | ||
38 | #endif | ||
39 | -- | ||
40 | 2.17.1 | ||
41 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
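
The rewrite at each call site is purely mechanical; a minimal sketch,
with placeholder mr/addr/val/attrs arguments:

    /* before: size passed as a plain byte count */
    memory_region_dispatch_write(mr, addr, val, 8, attrs);

    /* after: identical behaviour while size_memop is a no-op */
    memory_region_dispatch_write(mr, addr, val, size_memop(8), attrs);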
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
16 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
17 | Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com> | ||
18 | Message-Id: <af407f0a34dc95ef5aaf2c00dffda7c65df23c3a.1566466906.git.tony.nguyen@bt.com> | ||
19 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
20 | --- | ||
21 | target/mips/op_helper.c | 5 +++-- | ||
22 | 1 file changed, 3 insertions(+), 2 deletions(-) | ||
23 | |||
24 | diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c | ||
25 | index XXXXXXX..XXXXXXX 100644 | ||
26 | --- a/target/mips/op_helper.c | ||
27 | +++ b/target/mips/op_helper.c | ||
28 | @@ -XXX,XX +XXX,XX @@ | ||
29 | #include "exec/helper-proto.h" | ||
30 | #include "exec/exec-all.h" | ||
31 | #include "exec/cpu_ldst.h" | ||
32 | +#include "exec/memop.h" | ||
33 | #include "sysemu/kvm.h" | ||
34 | #include "fpu/softfloat.h" | ||
35 | |||
36 | @@ -XXX,XX +XXX,XX @@ void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op) | ||
37 | if (op == 9) { | ||
38 | /* Index Store Tag */ | ||
39 | memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo, | ||
40 | - 8, MEMTXATTRS_UNSPECIFIED); | ||
41 | + size_memop(8), MEMTXATTRS_UNSPECIFIED); | ||
42 | } else if (op == 5) { | ||
43 | /* Index Load Tag */ | ||
44 | memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo, | ||
45 | - 8, MEMTXATTRS_UNSPECIFIED); | ||
46 | + size_memop(8), MEMTXATTRS_UNSPECIFIED); | ||
47 | } | ||
48 | #endif | ||
49 | } | ||
50 | -- | ||
51 | 2.17.1 | ||
52 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
17 | Message-Id: <2f41da26201fb9b0339c2b7fde34df864f7f9ea8.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
19 | --- | ||
20 | hw/s390x/s390-pci-inst.c | 8 +++++--- | ||
21 | 1 file changed, 5 insertions(+), 3 deletions(-) | ||
22 | |||
23 | diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/hw/s390x/s390-pci-inst.c | ||
26 | +++ b/hw/s390x/s390-pci-inst.c | ||
27 | @@ -XXX,XX +XXX,XX @@ | ||
28 | #include "cpu.h" | ||
29 | #include "s390-pci-inst.h" | ||
30 | #include "s390-pci-bus.h" | ||
31 | +#include "exec/memop.h" | ||
32 | #include "exec/memory-internal.h" | ||
33 | #include "qemu/error-report.h" | ||
34 | #include "sysemu/hw_accel.h" | ||
35 | @@ -XXX,XX +XXX,XX @@ static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias, | ||
36 | mr = pbdev->pdev->io_regions[pcias].memory; | ||
37 | mr = s390_get_subregion(mr, offset, len); | ||
38 | offset -= mr->addr; | ||
39 | - return memory_region_dispatch_read(mr, offset, data, len, | ||
40 | + return memory_region_dispatch_read(mr, offset, data, size_memop(len), | ||
41 | MEMTXATTRS_UNSPECIFIED); | ||
42 | } | ||
43 | |||
44 | @@ -XXX,XX +XXX,XX @@ static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias, | ||
45 | mr = pbdev->pdev->io_regions[pcias].memory; | ||
46 | mr = s390_get_subregion(mr, offset, len); | ||
47 | offset -= mr->addr; | ||
48 | - return memory_region_dispatch_write(mr, offset, data, len, | ||
49 | + return memory_region_dispatch_write(mr, offset, data, size_memop(len), | ||
50 | MEMTXATTRS_UNSPECIFIED); | ||
51 | } | ||
52 | |||
53 | @@ -XXX,XX +XXX,XX @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, | ||
54 | |||
55 | for (i = 0; i < len / 8; i++) { | ||
56 | result = memory_region_dispatch_write(mr, offset + i * 8, | ||
57 | - ldq_p(buffer + i * 8), 8, | ||
58 | + ldq_p(buffer + i * 8), | ||
59 | + size_memop(8), | ||
60 | MEMTXATTRS_UNSPECIFIED); | ||
61 | if (result != MEMTX_OK) { | ||
62 | s390_program_interrupt(env, PGM_OPERAND, 6, ra); | ||
63 | -- | ||
64 | 2.17.1 | ||
65 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
16 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
17 | Message-Id: <21113bae2f54b45176701e0bf595937031368ae6.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
19 | --- | ||
20 | hw/intc/armv7m_nvic.c | 12 ++++++++---- | ||
21 | 1 file changed, 8 insertions(+), 4 deletions(-) | ||
22 | |||
23 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/hw/intc/armv7m_nvic.c | ||
26 | +++ b/hw/intc/armv7m_nvic.c | ||
27 | @@ -XXX,XX +XXX,XX @@ | ||
28 | #include "hw/qdev-properties.h" | ||
29 | #include "target/arm/cpu.h" | ||
30 | #include "exec/exec-all.h" | ||
31 | +#include "exec/memop.h" | ||
32 | #include "qemu/log.h" | ||
33 | #include "qemu/module.h" | ||
34 | #include "trace.h" | ||
35 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr, | ||
36 | if (attrs.secure) { | ||
37 | /* S accesses to the alias act like NS accesses to the real region */ | ||
38 | attrs.secure = 0; | ||
39 | - return memory_region_dispatch_write(mr, addr, value, size, attrs); | ||
40 | + return memory_region_dispatch_write(mr, addr, value, size_memop(size), | ||
41 | + attrs); | ||
42 | } else { | ||
43 | /* NS attrs are RAZ/WI for privileged, and BusFault for user */ | ||
44 | if (attrs.user) { | ||
45 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr, | ||
46 | if (attrs.secure) { | ||
47 | /* S accesses to the alias act like NS accesses to the real region */ | ||
48 | attrs.secure = 0; | ||
49 | - return memory_region_dispatch_read(mr, addr, data, size, attrs); | ||
50 | + return memory_region_dispatch_read(mr, addr, data, size_memop(size), | ||
51 | + attrs); | ||
52 | } else { | ||
53 | /* NS attrs are RAZ/WI for privileged, and BusFault for user */ | ||
54 | if (attrs.user) { | ||
55 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_systick_write(void *opaque, hwaddr addr, | ||
56 | |||
57 | /* Direct the access to the correct systick */ | ||
58 | mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); | ||
59 | - return memory_region_dispatch_write(mr, addr, value, size, attrs); | ||
60 | + return memory_region_dispatch_write(mr, addr, value, size_memop(size), | ||
61 | + attrs); | ||
62 | } | ||
63 | |||
64 | static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, | ||
65 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, | ||
66 | |||
67 | /* Direct the access to the correct systick */ | ||
68 | mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); | ||
69 | - return memory_region_dispatch_read(mr, addr, data, size, attrs); | ||
70 | + return memory_region_dispatch_read(mr, addr, data, size_memop(size), attrs); | ||
71 | } | ||
72 | |||
73 | static const MemoryRegionOps nvic_systick_ops = { | ||
74 | -- | ||
75 | 2.17.1 | ||
76 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
17 | Message-Id: <ebf1f78029d5ac1de1739a11d679740a87a1f02f.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
19 | --- | ||
20 | hw/virtio/virtio-pci.c | 7 +++++-- | ||
21 | 1 file changed, 5 insertions(+), 2 deletions(-) | ||
22 | |||
23 | diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/hw/virtio/virtio-pci.c | ||
26 | +++ b/hw/virtio/virtio-pci.c | ||
27 | @@ -XXX,XX +XXX,XX @@ | ||
28 | |||
29 | #include "qemu/osdep.h" | ||
30 | |||
31 | +#include "exec/memop.h" | ||
32 | #include "standard-headers/linux/virtio_pci.h" | ||
33 | #include "hw/virtio/virtio.h" | ||
34 | #include "migration/qemu-file-types.h" | ||
35 | @@ -XXX,XX +XXX,XX @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, | ||
36 | /* As length is under guest control, handle illegal values. */ | ||
37 | return; | ||
38 | } | ||
39 | - memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED); | ||
40 | + memory_region_dispatch_write(mr, addr, val, size_memop(len), | ||
41 | + MEMTXATTRS_UNSPECIFIED); | ||
42 | } | ||
43 | |||
44 | static void | ||
45 | @@ -XXX,XX +XXX,XX @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, | ||
46 | /* Make sure caller aligned buf properly */ | ||
47 | assert(!(((uintptr_t)buf) & (len - 1))); | ||
48 | |||
49 | - memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED); | ||
50 | + memory_region_dispatch_read(mr, addr, &val, size_memop(len), | ||
51 | + MEMTXATTRS_UNSPECIFIED); | ||
52 | switch (len) { | ||
53 | case 1: | ||
54 | pci_set_byte(buf, val); | ||
55 | -- | ||
56 | 2.17.1 | ||
57 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
17 | Message-Id: <e70ff5814ac3656974180db6375397c43b0bc8b8.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
19 | --- | ||
20 | hw/vfio/pci-quirks.c | 6 ++++-- | ||
21 | 1 file changed, 4 insertions(+), 2 deletions(-) | ||
22 | |||
23 | diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/hw/vfio/pci-quirks.c | ||
26 | +++ b/hw/vfio/pci-quirks.c | ||
27 | @@ -XXX,XX +XXX,XX @@ | ||
28 | */ | ||
29 | |||
30 | #include "qemu/osdep.h" | ||
31 | +#include "exec/memop.h" | ||
32 | #include "qemu/units.h" | ||
33 | #include "qemu/error-report.h" | ||
34 | #include "qemu/main-loop.h" | ||
35 | @@ -XXX,XX +XXX,XX @@ static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr, | ||
36 | |||
37 | /* Write to the proper guest MSI-X table instead */ | ||
38 | memory_region_dispatch_write(&vdev->pdev.msix_table_mmio, | ||
39 | - offset, val, size, | ||
40 | + offset, val, size_memop(size), | ||
41 | MEMTXATTRS_UNSPECIFIED); | ||
42 | } | ||
43 | return; /* Do not write guest MSI-X data to hardware */ | ||
44 | @@ -XXX,XX +XXX,XX @@ static uint64_t vfio_rtl8168_quirk_data_read(void *opaque, | ||
45 | if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) { | ||
46 | hwaddr offset = rtl->addr & 0xfff; | ||
47 | memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset, | ||
48 | - &data, size, MEMTXATTRS_UNSPECIFIED); | ||
49 | + &data, size_memop(size), | ||
50 | + MEMTXATTRS_UNSPECIFIED); | ||
51 | trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data); | ||
52 | } | ||
53 | |||
54 | -- | ||
55 | 2.17.1 | ||
56 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
16 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
17 | Message-Id: <3b042deef0a60dd49ae2320ece92120ba6027f2b.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
19 | --- | ||
20 | exec.c | 6 ++++-- | ||
21 | memory_ldst.inc.c | 18 +++++++++--------- | ||
22 | 2 files changed, 13 insertions(+), 11 deletions(-) | ||
23 | |||
24 | diff --git a/exec.c b/exec.c | ||
25 | index XXXXXXX..XXXXXXX 100644 | ||
26 | --- a/exec.c | ||
27 | +++ b/exec.c | ||
28 | @@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, | ||
29 | /* XXX: could force current_cpu to NULL to avoid | ||
30 | potential bugs */ | ||
31 | val = ldn_p(buf, l); | ||
32 | - result |= memory_region_dispatch_write(mr, addr1, val, l, attrs); | ||
33 | + result |= memory_region_dispatch_write(mr, addr1, val, | ||
34 | + size_memop(l), attrs); | ||
35 | } else { | ||
36 | /* RAM case */ | ||
37 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); | ||
38 | @@ -XXX,XX +XXX,XX @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, | ||
39 | /* I/O case */ | ||
40 | release_lock |= prepare_mmio_access(mr); | ||
41 | l = memory_access_size(mr, l, addr1); | ||
42 | - result |= memory_region_dispatch_read(mr, addr1, &val, l, attrs); | ||
43 | + result |= memory_region_dispatch_read(mr, addr1, &val, | ||
44 | + size_memop(l), attrs); | ||
45 | stn_p(buf, l, val); | ||
46 | } else { | ||
47 | /* RAM case */ | ||
48 | diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c | ||
49 | index XXXXXXX..XXXXXXX 100644 | ||
50 | --- a/memory_ldst.inc.c | ||
51 | +++ b/memory_ldst.inc.c | ||
52 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, | ||
53 | release_lock |= prepare_mmio_access(mr); | ||
54 | |||
55 | /* I/O case */ | ||
56 | - r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs); | ||
57 | + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(4), attrs); | ||
58 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
59 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
60 | val = bswap32(val); | ||
61 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, | ||
62 | release_lock |= prepare_mmio_access(mr); | ||
63 | |||
64 | /* I/O case */ | ||
65 | - r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs); | ||
66 | + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(8), attrs); | ||
67 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
68 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
69 | val = bswap64(val); | ||
70 | @@ -XXX,XX +XXX,XX @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, | ||
71 | release_lock |= prepare_mmio_access(mr); | ||
72 | |||
73 | /* I/O case */ | ||
74 | - r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs); | ||
75 | + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(1), attrs); | ||
76 | } else { | ||
77 | /* RAM case */ | ||
78 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
79 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, | ||
80 | release_lock |= prepare_mmio_access(mr); | ||
81 | |||
82 | /* I/O case */ | ||
83 | - r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs); | ||
84 | + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(2), attrs); | ||
85 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
86 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
87 | val = bswap16(val); | ||
88 | @@ -XXX,XX +XXX,XX @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, | ||
89 | if (l < 4 || !memory_access_is_direct(mr, true)) { | ||
90 | release_lock |= prepare_mmio_access(mr); | ||
91 | |||
92 | - r = memory_region_dispatch_write(mr, addr1, val, 4, attrs); | ||
93 | + r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); | ||
94 | } else { | ||
95 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
96 | stl_p(ptr, val); | ||
97 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, | ||
98 | val = bswap32(val); | ||
99 | } | ||
100 | #endif | ||
101 | - r = memory_region_dispatch_write(mr, addr1, val, 4, attrs); | ||
102 | + r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); | ||
103 | } else { | ||
104 | /* RAM case */ | ||
105 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
106 | @@ -XXX,XX +XXX,XX @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL, | ||
107 | mr = TRANSLATE(addr, &addr1, &l, true, attrs); | ||
108 | if (!memory_access_is_direct(mr, true)) { | ||
109 | release_lock |= prepare_mmio_access(mr); | ||
110 | - r = memory_region_dispatch_write(mr, addr1, val, 1, attrs); | ||
111 | + r = memory_region_dispatch_write(mr, addr1, val, size_memop(1), attrs); | ||
112 | } else { | ||
113 | /* RAM case */ | ||
114 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
115 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, | ||
116 | val = bswap16(val); | ||
117 | } | ||
118 | #endif | ||
119 | - r = memory_region_dispatch_write(mr, addr1, val, 2, attrs); | ||
120 | + r = memory_region_dispatch_write(mr, addr1, val, size_memop(2), attrs); | ||
121 | } else { | ||
122 | /* RAM case */ | ||
123 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
124 | @@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, | ||
125 | val = bswap64(val); | ||
126 | } | ||
127 | #endif | ||
128 | - r = memory_region_dispatch_write(mr, addr1, val, 8, attrs); | ||
129 | + r = memory_region_dispatch_write(mr, addr1, val, size_memop(8), attrs); | ||
130 | } else { | ||
131 | /* RAM case */ | ||
132 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
133 | -- | ||
134 | 2.17.1 | ||
135 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
16 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
17 | Message-Id: <c4571c76467ade83660970f7ef9d7292297f1908.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
19 | --- | ||
20 | accel/tcg/cputlb.c | 8 ++++---- | ||
21 | 1 file changed, 4 insertions(+), 4 deletions(-) | ||
22 | |||
23 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/accel/tcg/cputlb.c | ||
26 | +++ b/accel/tcg/cputlb.c | ||
27 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
28 | qemu_mutex_lock_iothread(); | ||
29 | locked = true; | ||
30 | } | ||
31 | - r = memory_region_dispatch_read(mr, mr_offset, | ||
32 | - &val, size, iotlbentry->attrs); | ||
33 | + r = memory_region_dispatch_read(mr, mr_offset, &val, size_memop(size), | ||
34 | + iotlbentry->attrs); | ||
35 | if (r != MEMTX_OK) { | ||
36 | hwaddr physaddr = mr_offset + | ||
37 | section->offset_within_address_space - | ||
38 | @@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
39 | qemu_mutex_lock_iothread(); | ||
40 | locked = true; | ||
41 | } | ||
42 | - r = memory_region_dispatch_write(mr, mr_offset, | ||
43 | - val, size, iotlbentry->attrs); | ||
44 | + r = memory_region_dispatch_write(mr, mr_offset, val, size_memop(size), | ||
45 | + iotlbentry->attrs); | ||
46 | if (r != MEMTX_OK) { | ||
47 | hwaddr physaddr = mr_offset + | ||
48 | section->offset_within_address_space - | ||
49 | -- | ||
50 | 2.17.1 | ||
51 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | Convert memory_region_dispatch_{read|write} operand "unsigned size" | ||
4 | into a "MemOp op". | ||
5 | |||
6 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <1dd82df5801866743f838f1d046475115a1d32da.1566466906.git.tony.nguyen@bt.com> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | --- | ||
11 | include/exec/memop.h | 22 +++++++++++++++------- | ||
12 | include/exec/memory.h | 9 +++++---- | ||
13 | memory.c | 7 +++++-- | ||
14 | 3 files changed, 25 insertions(+), 13 deletions(-) | ||
15 | |||
16 | diff --git a/include/exec/memop.h b/include/exec/memop.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/include/exec/memop.h | ||
19 | +++ b/include/exec/memop.h | ||
20 | @@ -XXX,XX +XXX,XX @@ | ||
21 | #ifndef MEMOP_H | ||
22 | #define MEMOP_H | ||
23 | |||
24 | +#include "qemu/host-utils.h" | ||
25 | + | ||
26 | typedef enum MemOp { | ||
27 | MO_8 = 0, | ||
28 | MO_16 = 1, | ||
29 | @@ -XXX,XX +XXX,XX @@ typedef enum MemOp { | ||
30 | MO_SSIZE = MO_SIZE | MO_SIGN, | ||
31 | } MemOp; | ||
32 | |||
33 | -/* Size in bytes to MemOp. */ | ||
34 | -static inline unsigned size_memop(unsigned size) | ||
35 | +/* MemOp to size in bytes. */ | ||
36 | +static inline unsigned memop_size(MemOp op) | ||
37 | { | ||
38 | - /* | ||
39 | - * FIXME: No-op to aid conversion of memory_region_dispatch_{read|write} | ||
40 | - * "unsigned size" operand into a "MemOp op". | ||
41 | - */ | ||
42 | - return size; | ||
43 | + return 1 << (op & MO_SIZE); | ||
44 | +} | ||
45 | + | ||
46 | +/* Size in bytes to MemOp. */ | ||
47 | +static inline MemOp size_memop(unsigned size) | ||
48 | +{ | ||
49 | +#ifdef CONFIG_DEBUG_TCG | ||
50 | + /* Power of 2 up to 8. */ | ||
51 | + assert((size & (size - 1)) == 0 && size >= 1 && size <= 8); | ||
52 | +#endif | ||
53 | + return ctz32(size); | ||
54 | } | ||
55 | |||
56 | #endif | ||
57 | diff --git a/include/exec/memory.h b/include/exec/memory.h | ||
58 | index XXXXXXX..XXXXXXX 100644 | ||
59 | --- a/include/exec/memory.h | ||
60 | +++ b/include/exec/memory.h | ||
61 | @@ -XXX,XX +XXX,XX @@ | ||
62 | #include "exec/cpu-common.h" | ||
63 | #include "exec/hwaddr.h" | ||
64 | #include "exec/memattrs.h" | ||
65 | +#include "exec/memop.h" | ||
66 | #include "exec/ramlist.h" | ||
67 | #include "qemu/bswap.h" | ||
68 | #include "qemu/queue.h" | ||
69 | @@ -XXX,XX +XXX,XX @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner); | ||
70 | * @mr: #MemoryRegion to access | ||
71 | * @addr: address within that region | ||
72 | * @pval: pointer to uint64_t which the data is written to | ||
73 | - * @size: size of the access in bytes | ||
74 | + * @op: size, sign, and endianness of the memory operation | ||
75 | * @attrs: memory transaction attributes to use for the access | ||
76 | */ | ||
77 | MemTxResult memory_region_dispatch_read(MemoryRegion *mr, | ||
78 | hwaddr addr, | ||
79 | uint64_t *pval, | ||
80 | - unsigned size, | ||
81 | + MemOp op, | ||
82 | MemTxAttrs attrs); | ||
83 | /** | ||
84 | * memory_region_dispatch_write: perform a write directly to the specified | ||
85 | @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, | ||
86 | * @mr: #MemoryRegion to access | ||
87 | * @addr: address within that region | ||
88 | * @data: data to write | ||
89 | - * @size: size of the access in bytes | ||
90 | + * @op: size, sign, and endianness of the memory operation | ||
91 | * @attrs: memory transaction attributes to use for the access | ||
92 | */ | ||
93 | MemTxResult memory_region_dispatch_write(MemoryRegion *mr, | ||
94 | hwaddr addr, | ||
95 | uint64_t data, | ||
96 | - unsigned size, | ||
97 | + MemOp op, | ||
98 | MemTxAttrs attrs); | ||
99 | |||
100 | /** | ||
101 | diff --git a/memory.c b/memory.c | ||
102 | index XXXXXXX..XXXXXXX 100644 | ||
103 | --- a/memory.c | ||
104 | +++ b/memory.c | ||
105 | @@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, | ||
106 | MemTxResult memory_region_dispatch_read(MemoryRegion *mr, | ||
107 | hwaddr addr, | ||
108 | uint64_t *pval, | ||
109 | - unsigned size, | ||
110 | + MemOp op, | ||
111 | MemTxAttrs attrs) | ||
112 | { | ||
113 | + unsigned size = memop_size(op); | ||
114 | MemTxResult r; | ||
115 | |||
116 | if (!memory_region_access_valid(mr, addr, size, false, attrs)) { | ||
117 | @@ -XXX,XX +XXX,XX @@ static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr, | ||
118 | MemTxResult memory_region_dispatch_write(MemoryRegion *mr, | ||
119 | hwaddr addr, | ||
120 | uint64_t data, | ||
121 | - unsigned size, | ||
122 | + MemOp op, | ||
123 | MemTxAttrs attrs) | ||
124 | { | ||
125 | + unsigned size = memop_size(op); | ||
126 | + | ||
127 | if (!memory_region_access_valid(mr, addr, size, true, attrs)) { | ||
128 | unassigned_mem_write(mr, addr, data, size); | ||
129 | return MEMTX_DECODE_ERROR; | ||
130 | -- | ||
131 | 2.17.1 | ||
132 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The temporarily no-op size_memop was introduced to aid the conversion of | ||
4 | memory_region_dispatch_{read|write} operand "unsigned size" into | ||
5 | "MemOp op". | ||
6 | |||
7 | Now that size_memop is implemented, hard-code the size again, but with | ||
8 | MO_{8|16|32|64}. This is more expressive and avoids size_memop calls. | ||
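
Note the hard-coding applies only where the size is a compile-time
literal; call sites with a runtime size, such as io_readx/io_writex
in accel/tcg/cputlb.c above, keep size_memop(size). A sketch of the
distinction, with arguments abbreviated:

    /* literal size 8: spell the MemOp directly */
    memory_region_dispatch_write(mr, off, val, MO_64, attrs);

    /* runtime size: still needs the conversion helper */
    memory_region_dispatch_read(mr, off, &val, size_memop(size), attrs);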
9 | |||
10 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
13 | Message-Id: <76dc97273a8eb5e10170ffc16526863df808f487.1566466906.git.tony.nguyen@bt.com> | ||
14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
15 | --- | ||
16 | hw/s390x/s390-pci-inst.c | 3 +-- | ||
17 | 1 file changed, 1 insertion(+), 2 deletions(-) | ||
18 | |||
19 | diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/hw/s390x/s390-pci-inst.c | ||
22 | +++ b/hw/s390x/s390-pci-inst.c | ||
23 | @@ -XXX,XX +XXX,XX @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, | ||
24 | for (i = 0; i < len / 8; i++) { | ||
25 | result = memory_region_dispatch_write(mr, offset + i * 8, | ||
26 | ldq_p(buffer + i * 8), | ||
27 | - size_memop(8), | ||
28 | - MEMTXATTRS_UNSPECIFIED); | ||
29 | + MO_64, MEMTXATTRS_UNSPECIFIED); | ||
30 | if (result != MEMTX_OK) { | ||
31 | s390_program_interrupt(env, PGM_OPERAND, 6, ra); | ||
32 | return 0; | ||
33 | -- | ||
34 | 2.17.1 | ||
35 | |||
36 | diff view generated by jsdifflib |
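The substitution in this and the two following deleted patches is a pure
rename: because the MemOp size field is log2 of the byte count, MO_64 is
exactly the value size_memop(8) computes. A compile-time sketch, with the
MO_* values assumed to mirror memop.h rather than quoted from it:

    /* The MO_* size codes are log2 of the byte count. */
    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3 };

    _Static_assert((1u << MO_8)  == 1, "MO_8 is a 1-byte access");
    _Static_assert((1u << MO_16) == 2, "MO_16 is a 2-byte access");
    _Static_assert((1u << MO_32) == 4, "MO_32 is a 4-byte access");
    _Static_assert((1u << MO_64) == 8, "MO_64 is an 8-byte access");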
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | A temporarily no-op size_memop was introduced to aid the conversion of | ||
4 | the memory_region_dispatch_{read|write} operand "unsigned size" into | ||
5 | "MemOp op". | ||
6 | |||
7 | Now that size_memop is implemented, hard-code the sizes again, but as | ||
8 | MO_{8|16|32|64}. This is more expressive and avoids the size_memop calls. | ||
9 | |||
10 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com> | ||
13 | Message-Id: <99c4459d5c1dc9013820be3dbda9798165c15b99.1566466906.git.tony.nguyen@bt.com> | ||
14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
15 | --- | ||
16 | target/mips/op_helper.c | 4 ++-- | ||
17 | 1 file changed, 2 insertions(+), 2 deletions(-) | ||
18 | |||
19 | diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/target/mips/op_helper.c | ||
22 | +++ b/target/mips/op_helper.c | ||
23 | @@ -XXX,XX +XXX,XX @@ void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op) | ||
24 | if (op == 9) { | ||
25 | /* Index Store Tag */ | ||
26 | memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo, | ||
27 | - size_memop(8), MEMTXATTRS_UNSPECIFIED); | ||
28 | + MO_64, MEMTXATTRS_UNSPECIFIED); | ||
29 | } else if (op == 5) { | ||
30 | /* Index Load Tag */ | ||
31 | memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo, | ||
32 | - size_memop(8), MEMTXATTRS_UNSPECIFIED); | ||
33 | + MO_64, MEMTXATTRS_UNSPECIFIED); | ||
34 | } | ||
35 | #endif | ||
36 | } | ||
37 | -- | ||
38 | 2.17.1 | ||
39 | |||
40 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | A temporarily no-op size_memop was introduced to aid the conversion of | ||
4 | the memory_region_dispatch_{read|write} operand "unsigned size" into | ||
5 | "MemOp op". | ||
6 | |||
7 | Now that size_memop is implemented, hard-code the sizes again, but as | ||
8 | MO_{8|16|32|64}. This is more expressive and avoids the size_memop calls. | ||
9 | |||
10 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Message-Id: <99f69701cad294db638f84abebc58115e1b9de9a.1566466906.git.tony.nguyen@bt.com> | ||
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | --- | ||
15 | memory_ldst.inc.c | 18 +++++++++--------- | ||
16 | 1 file changed, 9 insertions(+), 9 deletions(-) | ||
17 | |||
18 | diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/memory_ldst.inc.c | ||
21 | +++ b/memory_ldst.inc.c | ||
22 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, | ||
23 | release_lock |= prepare_mmio_access(mr); | ||
24 | |||
25 | /* I/O case */ | ||
26 | - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(4), attrs); | ||
27 | + r = memory_region_dispatch_read(mr, addr1, &val, MO_32, attrs); | ||
28 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
29 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
30 | val = bswap32(val); | ||
31 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, | ||
32 | release_lock |= prepare_mmio_access(mr); | ||
33 | |||
34 | /* I/O case */ | ||
35 | - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(8), attrs); | ||
36 | + r = memory_region_dispatch_read(mr, addr1, &val, MO_64, attrs); | ||
37 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
38 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
39 | val = bswap64(val); | ||
40 | @@ -XXX,XX +XXX,XX @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, | ||
41 | release_lock |= prepare_mmio_access(mr); | ||
42 | |||
43 | /* I/O case */ | ||
44 | - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(1), attrs); | ||
45 | + r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs); | ||
46 | } else { | ||
47 | /* RAM case */ | ||
48 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
49 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, | ||
50 | release_lock |= prepare_mmio_access(mr); | ||
51 | |||
52 | /* I/O case */ | ||
53 | - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(2), attrs); | ||
54 | + r = memory_region_dispatch_read(mr, addr1, &val, MO_16, attrs); | ||
55 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
56 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
57 | val = bswap16(val); | ||
58 | @@ -XXX,XX +XXX,XX @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, | ||
59 | if (l < 4 || !memory_access_is_direct(mr, true)) { | ||
60 | release_lock |= prepare_mmio_access(mr); | ||
61 | |||
62 | - r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); | ||
63 | + r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs); | ||
64 | } else { | ||
65 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
66 | stl_p(ptr, val); | ||
67 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, | ||
68 | val = bswap32(val); | ||
69 | } | ||
70 | #endif | ||
71 | - r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); | ||
72 | + r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs); | ||
73 | } else { | ||
74 | /* RAM case */ | ||
75 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
76 | @@ -XXX,XX +XXX,XX @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL, | ||
77 | mr = TRANSLATE(addr, &addr1, &l, true, attrs); | ||
78 | if (!memory_access_is_direct(mr, true)) { | ||
79 | release_lock |= prepare_mmio_access(mr); | ||
80 | - r = memory_region_dispatch_write(mr, addr1, val, size_memop(1), attrs); | ||
81 | + r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs); | ||
82 | } else { | ||
83 | /* RAM case */ | ||
84 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
85 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, | ||
86 | val = bswap16(val); | ||
87 | } | ||
88 | #endif | ||
89 | - r = memory_region_dispatch_write(mr, addr1, val, size_memop(2), attrs); | ||
90 | + r = memory_region_dispatch_write(mr, addr1, val, MO_16, attrs); | ||
91 | } else { | ||
92 | /* RAM case */ | ||
93 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
94 | @@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, | ||
95 | val = bswap64(val); | ||
96 | } | ||
97 | #endif | ||
98 | - r = memory_region_dispatch_write(mr, addr1, val, size_memop(8), attrs); | ||
99 | + r = memory_region_dispatch_write(mr, addr1, val, MO_64, attrs); | ||
100 | } else { | ||
101 | /* RAM case */ | ||
102 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
103 | -- | ||
104 | 2.17.1 | ||
105 | |||
106 | diff view generated by jsdifflib |
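The TARGET_WORDS_BIGENDIAN / DEVICE_LITTLE_ENDIAN pattern repeated in each
helper above reduces to one rule: swap the value whenever the device byte
order disagrees with the target byte order. An illustrative sketch of that
rule (the names here are ours, not QEMU's):

    #include <stdint.h>
    #include <stdio.h>

    /* Swap iff the device and the target disagree on byte order. */
    static uint32_t adjust_endianness32(uint32_t val, int device_is_le,
                                        int target_is_be)
    {
        if (device_is_le == target_is_be) {
            val = __builtin_bswap32(val);
        }
        return val;
    }

    int main(void)
    {
        /* An LE device read is swapped on a BE target, untouched on LE. */
        printf("%08x %08x\n",
               adjust_endianness32(0x11223344, 1, 1),
               adjust_endianness32(0x11223344, 1, 0));
        return 0;
    }

Encoding this choice into the MemOp itself, which later patches in the
series prepare for, lets the dispatcher perform the swap once instead of
at every call site.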
1 | From: David Hildenbrand <david@redhat.com> | 1 | Add registers and function stubs. The functionality |
---|---|---|---|
2 | 2 | is disabled via use_neon_instructions defined to 0. | |
3 | Let size > 0 indicate a promise to write to those bytes. | 3 | |
4 | Check for write watchpoints in the probed range. | 4 | We must still include results for the mandatory opcodes in |
5 | 5 | tcg_target_op_def, as all opcodes are checked during tcg init. | |
6 | Suggested-by: Richard Henderson <richard.henderson@linaro.org> | 6 | |
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
9 | Message-Id: <20190823100741.9621-10-david@redhat.com> | ||
10 | [rth: Recompute index after tlb_fill; check TLB_WATCHPOINT.] | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | --- | 9 | --- |
13 | accel/tcg/cputlb.c | 15 +++++++++++++-- | 10 | tcg/arm/tcg-target-con-set.h | 4 ++ |
14 | 1 file changed, 13 insertions(+), 2 deletions(-) | 11 | tcg/arm/tcg-target-con-str.h | 1 + |
15 | 12 | tcg/arm/tcg-target.h | 48 ++++++++++++-- | |
16 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 13 | tcg/arm/tcg-target.opc.h | 12 ++++ |
14 | tcg/arm/tcg-target.c.inc | 117 +++++++++++++++++++++++++++++------ | ||
15 | 5 files changed, 158 insertions(+), 24 deletions(-) | ||
16 | create mode 100644 tcg/arm/tcg-target.opc.h | ||
17 | |||
18 | diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/accel/tcg/cputlb.c | 20 | --- a/tcg/arm/tcg-target-con-set.h |
19 | +++ b/accel/tcg/cputlb.c | 21 | +++ b/tcg/arm/tcg-target-con-set.h |
20 | @@ -XXX,XX +XXX,XX @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 22 | @@ -XXX,XX +XXX,XX @@ C_O0_I1(r) |
23 | C_O0_I2(r, r) | ||
24 | C_O0_I2(r, rIN) | ||
25 | C_O0_I2(s, s) | ||
26 | +C_O0_I2(w, r) | ||
27 | C_O0_I3(s, s, s) | ||
28 | C_O0_I4(r, r, rI, rI) | ||
29 | C_O0_I4(s, s, s, s) | ||
30 | C_O1_I1(r, l) | ||
31 | C_O1_I1(r, r) | ||
32 | +C_O1_I1(w, r) | ||
33 | +C_O1_I1(w, wr) | ||
34 | C_O1_I2(r, 0, rZ) | ||
35 | C_O1_I2(r, l, l) | ||
36 | C_O1_I2(r, r, r) | ||
37 | @@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rIK) | ||
38 | C_O1_I2(r, r, rIN) | ||
39 | C_O1_I2(r, r, ri) | ||
40 | C_O1_I2(r, rZ, rZ) | ||
41 | +C_O1_I2(w, w, w) | ||
42 | C_O1_I4(r, r, r, rI, rI) | ||
43 | C_O1_I4(r, r, rIN, rIK, 0) | ||
44 | C_O2_I1(r, r, l) | ||
45 | diff --git a/tcg/arm/tcg-target-con-str.h b/tcg/arm/tcg-target-con-str.h | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/tcg/arm/tcg-target-con-str.h | ||
48 | +++ b/tcg/arm/tcg-target-con-str.h | ||
49 | @@ -XXX,XX +XXX,XX @@ | ||
50 | REGS('r', ALL_GENERAL_REGS) | ||
51 | REGS('l', ALL_QLOAD_REGS) | ||
52 | REGS('s', ALL_QSTORE_REGS) | ||
53 | +REGS('w', ALL_VECTOR_REGS) | ||
54 | |||
55 | /* | ||
56 | * Define constraint letters for constants: | ||
57 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h | ||
58 | index XXXXXXX..XXXXXXX 100644 | ||
59 | --- a/tcg/arm/tcg-target.h | ||
60 | +++ b/tcg/arm/tcg-target.h | ||
61 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
62 | TCG_REG_R13, | ||
63 | TCG_REG_R14, | ||
64 | TCG_REG_PC, | ||
65 | + | ||
66 | + TCG_REG_Q0, | ||
67 | + TCG_REG_Q1, | ||
68 | + TCG_REG_Q2, | ||
69 | + TCG_REG_Q3, | ||
70 | + TCG_REG_Q4, | ||
71 | + TCG_REG_Q5, | ||
72 | + TCG_REG_Q6, | ||
73 | + TCG_REG_Q7, | ||
74 | + TCG_REG_Q8, | ||
75 | + TCG_REG_Q9, | ||
76 | + TCG_REG_Q10, | ||
77 | + TCG_REG_Q11, | ||
78 | + TCG_REG_Q12, | ||
79 | + TCG_REG_Q13, | ||
80 | + TCG_REG_Q14, | ||
81 | + TCG_REG_Q15, | ||
82 | + | ||
83 | + TCG_AREG0 = TCG_REG_R6, | ||
84 | + TCG_REG_CALL_STACK = TCG_REG_R13, | ||
85 | } TCGReg; | ||
86 | |||
87 | -#define TCG_TARGET_NB_REGS 16 | ||
88 | +#define TCG_TARGET_NB_REGS 32 | ||
89 | |||
90 | #ifdef __ARM_ARCH_EXT_IDIV__ | ||
91 | #define use_idiv_instructions 1 | ||
92 | #else | ||
93 | extern bool use_idiv_instructions; | ||
94 | #endif | ||
95 | - | ||
96 | +#define use_neon_instructions 0 | ||
97 | |||
98 | /* used for function call generation */ | ||
99 | -#define TCG_REG_CALL_STACK TCG_REG_R13 | ||
100 | #define TCG_TARGET_STACK_ALIGN 8 | ||
101 | #define TCG_TARGET_CALL_ALIGN_ARGS 1 | ||
102 | #define TCG_TARGET_CALL_STACK_OFFSET 0 | ||
103 | @@ -XXX,XX +XXX,XX @@ extern bool use_idiv_instructions; | ||
104 | #define TCG_TARGET_HAS_direct_jump 0 | ||
105 | #define TCG_TARGET_HAS_qemu_st8_i32 0 | ||
106 | |||
107 | -enum { | ||
108 | - TCG_AREG0 = TCG_REG_R6, | ||
109 | -}; | ||
110 | +#define TCG_TARGET_HAS_v64 use_neon_instructions | ||
111 | +#define TCG_TARGET_HAS_v128 use_neon_instructions | ||
112 | +#define TCG_TARGET_HAS_v256 0 | ||
113 | + | ||
114 | +#define TCG_TARGET_HAS_andc_vec 0 | ||
115 | +#define TCG_TARGET_HAS_orc_vec 0 | ||
116 | +#define TCG_TARGET_HAS_not_vec 0 | ||
117 | +#define TCG_TARGET_HAS_neg_vec 0 | ||
118 | +#define TCG_TARGET_HAS_abs_vec 0 | ||
119 | +#define TCG_TARGET_HAS_roti_vec 0 | ||
120 | +#define TCG_TARGET_HAS_rots_vec 0 | ||
121 | +#define TCG_TARGET_HAS_rotv_vec 0 | ||
122 | +#define TCG_TARGET_HAS_shi_vec 0 | ||
123 | +#define TCG_TARGET_HAS_shs_vec 0 | ||
124 | +#define TCG_TARGET_HAS_shv_vec 0 | ||
125 | +#define TCG_TARGET_HAS_mul_vec 0 | ||
126 | +#define TCG_TARGET_HAS_sat_vec 0 | ||
127 | +#define TCG_TARGET_HAS_minmax_vec 0 | ||
128 | +#define TCG_TARGET_HAS_bitsel_vec 0 | ||
129 | +#define TCG_TARGET_HAS_cmpsel_vec 0 | ||
130 | |||
131 | #define TCG_TARGET_DEFAULT_MO (0) | ||
132 | #define TCG_TARGET_HAS_MEMORY_BSWAP 1 | ||
133 | diff --git a/tcg/arm/tcg-target.opc.h b/tcg/arm/tcg-target.opc.h | ||
134 | new file mode 100644 | ||
135 | index XXXXXXX..XXXXXXX | ||
136 | --- /dev/null | ||
137 | +++ b/tcg/arm/tcg-target.opc.h | ||
138 | @@ -XXX,XX +XXX,XX @@ | ||
139 | +/* | ||
140 | + * Copyright (c) 2019 Linaro | ||
141 | + * | ||
142 | + * This work is licensed under the terms of the GNU GPL, version 2 or | ||
143 | + * (at your option) any later version. | ||
144 | + * | ||
145 | + * See the COPYING file in the top-level directory for details. | ||
146 | + * | ||
147 | + * Target-specific opcodes for host vector expansion. These will be | ||
148 | + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, | ||
149 | + * consider these to be UNSPEC with names. | ||
150 | + */ | ||
151 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
152 | index XXXXXXX..XXXXXXX 100644 | ||
153 | --- a/tcg/arm/tcg-target.c.inc | ||
154 | +++ b/tcg/arm/tcg-target.c.inc | ||
155 | @@ -XXX,XX +XXX,XX @@ bool use_idiv_instructions; | ||
156 | |||
157 | #ifdef CONFIG_DEBUG_TCG | ||
158 | static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { | ||
159 | - "%r0", | ||
160 | - "%r1", | ||
161 | - "%r2", | ||
162 | - "%r3", | ||
163 | - "%r4", | ||
164 | - "%r5", | ||
165 | - "%r6", | ||
166 | - "%r7", | ||
167 | - "%r8", | ||
168 | - "%r9", | ||
169 | - "%r10", | ||
170 | - "%r11", | ||
171 | - "%r12", | ||
172 | - "%r13", | ||
173 | - "%r14", | ||
174 | - "%pc", | ||
175 | + "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", | ||
176 | + "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc", | ||
177 | + "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7", | ||
178 | + "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15", | ||
179 | }; | ||
180 | #endif | ||
181 | |||
182 | @@ -XXX,XX +XXX,XX @@ static const int tcg_target_reg_alloc_order[] = { | ||
183 | TCG_REG_R3, | ||
184 | TCG_REG_R12, | ||
185 | TCG_REG_R14, | ||
186 | + | ||
187 | + TCG_REG_Q0, | ||
188 | + TCG_REG_Q1, | ||
189 | + TCG_REG_Q2, | ||
190 | + TCG_REG_Q3, | ||
191 | + /* Q4 - Q7 are call-saved, and skipped. */ | ||
192 | + TCG_REG_Q8, | ||
193 | + TCG_REG_Q9, | ||
194 | + TCG_REG_Q10, | ||
195 | + TCG_REG_Q11, | ||
196 | + TCG_REG_Q12, | ||
197 | + TCG_REG_Q13, | ||
198 | + TCG_REG_Q14, | ||
199 | + TCG_REG_Q15, | ||
200 | }; | ||
201 | |||
202 | static const int tcg_target_call_iarg_regs[4] = { | ||
203 | @@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_oarg_regs[2] = { | ||
204 | }; | ||
205 | |||
206 | #define TCG_REG_TMP TCG_REG_R12 | ||
207 | +#define TCG_VEC_TMP TCG_REG_Q15 | ||
208 | |||
209 | enum arm_cond_code_e { | ||
210 | COND_EQ = 0x0, | ||
211 | @@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, | ||
212 | #define TCG_CT_CONST_ZERO 0x800 | ||
213 | |||
214 | #define ALL_GENERAL_REGS 0xffffu | ||
215 | +#define ALL_VECTOR_REGS 0xffff0000u | ||
216 | |||
217 | /* | ||
218 | * r0-r2 will be overwritten when reading the tlb entry (softmmu only) | ||
219 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) | ||
220 | case INDEX_op_qemu_st_i64: | ||
221 | return TARGET_LONG_BITS == 32 ? C_O0_I3(s, s, s) : C_O0_I4(s, s, s, s); | ||
222 | |||
223 | + case INDEX_op_st_vec: | ||
224 | + return C_O0_I2(w, r); | ||
225 | + case INDEX_op_ld_vec: | ||
226 | + case INDEX_op_dupm_vec: | ||
227 | + return C_O1_I1(w, r); | ||
228 | + case INDEX_op_dup_vec: | ||
229 | + return C_O1_I1(w, wr); | ||
230 | + case INDEX_op_dup2_vec: | ||
231 | + case INDEX_op_add_vec: | ||
232 | + case INDEX_op_sub_vec: | ||
233 | + case INDEX_op_xor_vec: | ||
234 | + case INDEX_op_or_vec: | ||
235 | + case INDEX_op_and_vec: | ||
236 | + case INDEX_op_cmp_vec: | ||
237 | + return C_O1_I2(w, w, w); | ||
238 | + | ||
239 | default: | ||
240 | g_assert_not_reached(); | ||
241 | } | ||
242 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s) | ||
21 | { | 243 | { |
22 | uintptr_t index = tlb_index(env, mmu_idx, addr); | 244 | /* Only probe for the platform and capabilities if we havn't already |
23 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | 245 | determined maximum values at compile time. */ |
24 | + target_ulong tlb_addr = tlb_addr_write(entry); | 246 | -#ifndef use_idiv_instructions |
25 | 247 | +#if !defined(use_idiv_instructions) || !defined(use_neon_instructions) | |
26 | - if (!tlb_hit(tlb_addr_write(entry), addr)) { | 248 | { |
27 | - /* TLB entry is for a different page */ | 249 | unsigned long hwcap = qemu_getauxval(AT_HWCAP); |
28 | + if (unlikely(!tlb_hit(tlb_addr, addr))) { | 250 | +#ifndef use_idiv_instructions |
29 | if (!VICTIM_TLB_HIT(addr_write, addr)) { | 251 | use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0; |
30 | tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, | 252 | +#endif |
31 | mmu_idx, retaddr); | 253 | +#ifndef use_neon_instructions |
32 | + /* TLB resize via tlb_fill may have moved the entry. */ | 254 | + use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0; |
33 | + index = tlb_index(env, mmu_idx, addr); | 255 | +#endif |
34 | + entry = tlb_entry(env, mmu_idx, addr); | 256 | } |
257 | #endif | ||
258 | + | ||
259 | if (__ARM_ARCH < 7) { | ||
260 | const char *pl = (const char *)qemu_getauxval(AT_PLATFORM); | ||
261 | if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { | ||
262 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s) | ||
35 | } | 263 | } |
36 | + tlb_addr = tlb_addr_write(entry); | 264 | } |
265 | |||
266 | - tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; | ||
267 | + tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; | ||
268 | |||
269 | tcg_target_call_clobber_regs = 0; | ||
270 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); | ||
271 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s) | ||
272 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); | ||
273 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); | ||
274 | |||
275 | + if (use_neon_instructions) { | ||
276 | + tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; | ||
277 | + tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; | ||
278 | + | ||
279 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0); | ||
280 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1); | ||
281 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2); | ||
282 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3); | ||
283 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8); | ||
284 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9); | ||
285 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10); | ||
286 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11); | ||
287 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12); | ||
288 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13); | ||
289 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14); | ||
290 | + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15); | ||
37 | + } | 291 | + } |
38 | + | 292 | + |
39 | + /* Handle watchpoints. */ | 293 | s->reserved_regs = 0; |
40 | + if ((tlb_addr & TLB_WATCHPOINT) && size > 0) { | 294 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); |
41 | + cpu_check_watchpoint(env_cpu(env), addr, size, | 295 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); |
42 | + env_tlb(env)->d[mmu_idx].iotlb[index].attrs, | 296 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); |
43 | + BP_MEM_WRITE, retaddr); | 297 | + tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); |
44 | } | ||
45 | } | 298 | } |
46 | 299 | ||
300 | static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, | ||
301 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_movi(TCGContext *s, TCGType type, | ||
302 | tcg_out_movi32(s, COND_AL, ret, arg); | ||
303 | } | ||
304 | |||
305 | +static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, | ||
306 | + TCGReg rd, TCGReg rs) | ||
307 | +{ | ||
308 | + g_assert_not_reached(); | ||
309 | +} | ||
310 | + | ||
311 | +static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, | ||
312 | + TCGReg rd, TCGReg base, intptr_t offset) | ||
313 | +{ | ||
314 | + g_assert_not_reached(); | ||
315 | +} | ||
316 | + | ||
317 | +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, | ||
318 | + TCGReg rd, int64_t v64) | ||
319 | +{ | ||
320 | + g_assert_not_reached(); | ||
321 | +} | ||
322 | + | ||
323 | +static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
324 | + unsigned vecl, unsigned vece, | ||
325 | + const TCGArg *args, const int *const_args) | ||
326 | +{ | ||
327 | + g_assert_not_reached(); | ||
328 | +} | ||
329 | + | ||
330 | +int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) | ||
331 | +{ | ||
332 | + return 0; | ||
333 | +} | ||
334 | + | ||
335 | +void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, | ||
336 | + TCGArg a0, ...) | ||
337 | +{ | ||
338 | + g_assert_not_reached(); | ||
339 | +} | ||
340 | + | ||
341 | static void tcg_out_nop_fill(tcg_insn_unit *p, int count) | ||
342 | { | ||
343 | int i; | ||
47 | -- | 344 | -- |
48 | 2.17.1 | 345 | 2.25.1 |
49 | 346 | ||
50 | 347 | diff view generated by jsdifflib |
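The runtime probe added to tcg_target_init() reads the ELF auxiliary
vector. Outside of QEMU the same check can be made with glibc's
getauxval(); a standalone sketch for a 32-bit Arm Linux host follows. The
HWCAP bit values are the kernel's, restated here as an assumption so the
snippet stands alone (real code would use HWCAP_IDIVA and HWCAP_NEON from
<asm/hwcap.h>):

    #include <stdio.h>
    #include <sys/auxv.h>

    #define SKETCH_HWCAP_NEON  (1UL << 12)   /* assumed = HWCAP_NEON */
    #define SKETCH_HWCAP_IDIVA (1UL << 17)   /* assumed = HWCAP_IDIVA */

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);
        printf("idiv: %d, neon: %d\n",
               (hwcap & SKETCH_HWCAP_IDIVA) != 0,
               (hwcap & SKETCH_HWCAP_NEON) != 0);
        return 0;
    }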
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
---|---|---|---|
2 | |||
3 | Preparation for collapsing the two byte swaps adjust_endianness and | ||
4 | handle_bswap into the former. | ||
5 | |||
6 | Call memory_region_dispatch_{read|write} with endianness encoded into | ||
7 | the "MemOp op" operand. | ||
8 | |||
9 | This patch does not change any behaviour, as | ||
10 | memory_region_dispatch_{read|write} does not yet handle the endianness. | ||
11 | |||
12 | Once it does handle endianness, callers with byte swaps can collapse | ||
13 | them into adjust_endianness. | ||
14 | |||
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
17 | Message-Id: <8066ab3eb037c0388dfadfe53c5118429dd1de3a.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
19 | --- | 3 | --- |
20 | include/exec/memory.h | 3 +++ | 4 | tcg/arm/tcg-target.c.inc | 70 ++++++++++++++++++++++++++++++++++++---- |
21 | accel/tcg/cputlb.c | 8 ++++++-- | 5 | 1 file changed, 64 insertions(+), 6 deletions(-) |
22 | exec.c | 13 +++++++++++-- | ||
23 | hw/intc/armv7m_nvic.c | 15 ++++++++------- | ||
24 | hw/s390x/s390-pci-inst.c | 6 ++++-- | ||
25 | hw/vfio/pci-quirks.c | 5 +++-- | ||
26 | hw/virtio/virtio-pci.c | 6 ++++-- | ||
27 | memory.c | 18 ++++++++++++++++++ | ||
28 | memory_ldst.inc.c | 24 ++++++++++++++++++------ | ||
29 | 9 files changed, 75 insertions(+), 23 deletions(-) | ||
30 | 6 | ||
31 | diff --git a/include/exec/memory.h b/include/exec/memory.h | 7 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
32 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
33 | --- a/include/exec/memory.h | 9 | --- a/tcg/arm/tcg-target.c.inc |
34 | +++ b/include/exec/memory.h | 10 | +++ b/tcg/arm/tcg-target.c.inc |
35 | @@ -XXX,XX +XXX,XX @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, | 11 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
12 | INSN_NOP_v6k = 0xe320f000, | ||
13 | /* Otherwise the assembler uses mov r0,r0 */ | ||
14 | INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV, | ||
15 | + | ||
16 | + INSN_VLD1 = 0xf4200000, /* VLD1 (multiple single elements) */ | ||
17 | + INSN_VST1 = 0xf4000000, /* VST1 (multiple single elements) */ | ||
18 | } ARMInsn; | ||
19 | |||
20 | #define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4) | ||
21 | @@ -XXX,XX +XXX,XX @@ static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args, | ||
36 | } | 22 | } |
37 | } | 23 | } |
38 | 24 | ||
39 | +/* enum device_endian to MemOp. */ | 25 | +/* |
40 | +MemOp devend_memop(enum device_endian end); | 26 | + * Note that TCGReg references Q-registers. |
27 | + * Q-regno = 2 * D-regno, so shift left by 1 whlie inserting. | ||
28 | + */ | ||
29 | +static uint32_t encode_vd(TCGReg rd) | ||
30 | +{ | ||
31 | + tcg_debug_assert(rd >= TCG_REG_Q0); | ||
32 | + return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13); | ||
33 | +} | ||
41 | + | 34 | + |
42 | #endif | 35 | +static void tcg_out_vldst(TCGContext *s, ARMInsn insn, |
43 | 36 | + TCGReg rd, TCGReg rn, int offset) | |
44 | #endif | 37 | +{ |
45 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 38 | + if (offset != 0) { |
46 | index XXXXXXX..XXXXXXX 100644 | 39 | + if (check_fit_imm(offset) || check_fit_imm(-offset)) { |
47 | --- a/accel/tcg/cputlb.c | 40 | + tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, |
48 | +++ b/accel/tcg/cputlb.c | 41 | + TCG_REG_TMP, rn, offset, true); |
49 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | 42 | + } else { |
50 | qemu_mutex_lock_iothread(); | 43 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset); |
51 | locked = true; | 44 | + tcg_out_dat_reg(s, COND_AL, ARITH_ADD, |
52 | } | 45 | + TCG_REG_TMP, TCG_REG_TMP, rn, 0); |
53 | - r = memory_region_dispatch_read(mr, mr_offset, &val, size_memop(size), | 46 | + } |
54 | + r = memory_region_dispatch_read(mr, mr_offset, &val, | 47 | + rn = TCG_REG_TMP; |
55 | + size_memop(size) | MO_TE, | 48 | + } |
56 | iotlbentry->attrs); | 49 | + tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf); |
57 | if (r != MEMTX_OK) { | 50 | +} |
58 | hwaddr physaddr = mr_offset + | 51 | + |
59 | @@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | 52 | #ifdef CONFIG_SOFTMMU |
60 | qemu_mutex_lock_iothread(); | 53 | #include "../tcg-ldst.c.inc" |
61 | locked = true; | 54 | |
62 | } | 55 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s) |
63 | - r = memory_region_dispatch_write(mr, mr_offset, val, size_memop(size), | 56 | tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); |
64 | + r = memory_region_dispatch_write(mr, mr_offset, val, | ||
65 | + size_memop(size) | MO_TE, | ||
66 | iotlbentry->attrs); | ||
67 | if (r != MEMTX_OK) { | ||
68 | hwaddr physaddr = mr_offset + | ||
69 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
70 | } | ||
71 | } | ||
72 | |||
73 | + /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */ | ||
74 | res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | ||
75 | mmu_idx, addr, retaddr, access_type, size); | ||
76 | return handle_bswap(res, size, big_endian); | ||
77 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
78 | } | ||
79 | } | ||
80 | |||
81 | + /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */ | ||
82 | io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, | ||
83 | handle_bswap(val, size, big_endian), | ||
84 | addr, retaddr, size); | ||
85 | diff --git a/exec.c b/exec.c | ||
86 | index XXXXXXX..XXXXXXX 100644 | ||
87 | --- a/exec.c | ||
88 | +++ b/exec.c | ||
89 | @@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, | ||
90 | /* XXX: could force current_cpu to NULL to avoid | ||
91 | potential bugs */ | ||
92 | val = ldn_p(buf, l); | ||
93 | + /* | ||
94 | + * TODO: Merge bswap from ldn_p into memory_region_dispatch_write | ||
95 | + * by using ldn_he_p and dropping MO_TE to get a host-endian value. | ||
96 | + */ | ||
97 | result |= memory_region_dispatch_write(mr, addr1, val, | ||
98 | - size_memop(l), attrs); | ||
99 | + size_memop(l) | MO_TE, | ||
100 | + attrs); | ||
101 | } else { | ||
102 | /* RAM case */ | ||
103 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); | ||
104 | @@ -XXX,XX +XXX,XX @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, | ||
105 | /* I/O case */ | ||
106 | release_lock |= prepare_mmio_access(mr); | ||
107 | l = memory_access_size(mr, l, addr1); | ||
108 | + /* | ||
109 | + * TODO: Merge bswap from stn_p into memory_region_dispatch_read | ||
110 | + * by using stn_he_p and dropping MO_TE to get a host-endian value. | ||
111 | + */ | ||
112 | result |= memory_region_dispatch_read(mr, addr1, &val, | ||
113 | - size_memop(l), attrs); | ||
114 | + size_memop(l) | MO_TE, attrs); | ||
115 | stn_p(buf, l, val); | ||
116 | } else { | ||
117 | /* RAM case */ | ||
118 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
119 | index XXXXXXX..XXXXXXX 100644 | ||
120 | --- a/hw/intc/armv7m_nvic.c | ||
121 | +++ b/hw/intc/armv7m_nvic.c | ||
122 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr, | ||
123 | if (attrs.secure) { | ||
124 | /* S accesses to the alias act like NS accesses to the real region */ | ||
125 | attrs.secure = 0; | ||
126 | - return memory_region_dispatch_write(mr, addr, value, size_memop(size), | ||
127 | - attrs); | ||
128 | + return memory_region_dispatch_write(mr, addr, value, | ||
129 | + size_memop(size) | MO_TE, attrs); | ||
130 | } else { | ||
131 | /* NS attrs are RAZ/WI for privileged, and BusFault for user */ | ||
132 | if (attrs.user) { | ||
133 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr, | ||
134 | if (attrs.secure) { | ||
135 | /* S accesses to the alias act like NS accesses to the real region */ | ||
136 | attrs.secure = 0; | ||
137 | - return memory_region_dispatch_read(mr, addr, data, size_memop(size), | ||
138 | - attrs); | ||
139 | + return memory_region_dispatch_read(mr, addr, data, | ||
140 | + size_memop(size) | MO_TE, attrs); | ||
141 | } else { | ||
142 | /* NS attrs are RAZ/WI for privileged, and BusFault for user */ | ||
143 | if (attrs.user) { | ||
144 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_systick_write(void *opaque, hwaddr addr, | ||
145 | |||
146 | /* Direct the access to the correct systick */ | ||
147 | mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); | ||
148 | - return memory_region_dispatch_write(mr, addr, value, size_memop(size), | ||
149 | - attrs); | ||
150 | + return memory_region_dispatch_write(mr, addr, value, | ||
151 | + size_memop(size) | MO_TE, attrs); | ||
152 | } | 57 | } |
153 | 58 | ||
154 | static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, | 59 | -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, |
155 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, | 60 | - TCGReg arg1, intptr_t arg2) |
156 | 61 | +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, | |
157 | /* Direct the access to the correct systick */ | 62 | + TCGReg arg1, intptr_t arg2) |
158 | mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); | 63 | { |
159 | - return memory_region_dispatch_read(mr, addr, data, size_memop(size), attrs); | 64 | - tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); |
160 | + return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE, | 65 | + switch (type) { |
161 | + attrs); | 66 | + case TCG_TYPE_I32: |
162 | } | 67 | + tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); |
163 | 68 | + return; | |
164 | static const MemoryRegionOps nvic_systick_ops = { | 69 | + case TCG_TYPE_V64: |
165 | diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c | 70 | + /* regs 1; size 8; align 8 */ |
166 | index XXXXXXX..XXXXXXX 100644 | 71 | + tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2); |
167 | --- a/hw/s390x/s390-pci-inst.c | 72 | + return; |
168 | +++ b/hw/s390x/s390-pci-inst.c | 73 | + case TCG_TYPE_V128: |
169 | @@ -XXX,XX +XXX,XX @@ static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias, | 74 | + /* regs 2; size 8; align 16 */ |
170 | mr = pbdev->pdev->io_regions[pcias].memory; | 75 | + tcg_out_vldst(s, INSN_VLD1 | 0xae0, arg, arg1, arg2); |
171 | mr = s390_get_subregion(mr, offset, len); | 76 | + return; |
172 | offset -= mr->addr; | ||
173 | - return memory_region_dispatch_read(mr, offset, data, size_memop(len), | ||
174 | + return memory_region_dispatch_read(mr, offset, data, | ||
175 | + size_memop(len) | MO_BE, | ||
176 | MEMTXATTRS_UNSPECIFIED); | ||
177 | } | ||
178 | |||
179 | @@ -XXX,XX +XXX,XX @@ static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias, | ||
180 | mr = pbdev->pdev->io_regions[pcias].memory; | ||
181 | mr = s390_get_subregion(mr, offset, len); | ||
182 | offset -= mr->addr; | ||
183 | - return memory_region_dispatch_write(mr, offset, data, size_memop(len), | ||
184 | + return memory_region_dispatch_write(mr, offset, data, | ||
185 | + size_memop(len) | MO_BE, | ||
186 | MEMTXATTRS_UNSPECIFIED); | ||
187 | } | ||
188 | |||
189 | diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c | ||
190 | index XXXXXXX..XXXXXXX 100644 | ||
191 | --- a/hw/vfio/pci-quirks.c | ||
192 | +++ b/hw/vfio/pci-quirks.c | ||
193 | @@ -XXX,XX +XXX,XX @@ static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr, | ||
194 | |||
195 | /* Write to the proper guest MSI-X table instead */ | ||
196 | memory_region_dispatch_write(&vdev->pdev.msix_table_mmio, | ||
197 | - offset, val, size_memop(size), | ||
198 | + offset, val, | ||
199 | + size_memop(size) | MO_LE, | ||
200 | MEMTXATTRS_UNSPECIFIED); | ||
201 | } | ||
202 | return; /* Do not write guest MSI-X data to hardware */ | ||
203 | @@ -XXX,XX +XXX,XX @@ static uint64_t vfio_rtl8168_quirk_data_read(void *opaque, | ||
204 | if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) { | ||
205 | hwaddr offset = rtl->addr & 0xfff; | ||
206 | memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset, | ||
207 | - &data, size_memop(size), | ||
208 | + &data, size_memop(size) | MO_LE, | ||
209 | MEMTXATTRS_UNSPECIFIED); | ||
210 | trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data); | ||
211 | } | ||
212 | diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c | ||
213 | index XXXXXXX..XXXXXXX 100644 | ||
214 | --- a/hw/virtio/virtio-pci.c | ||
215 | +++ b/hw/virtio/virtio-pci.c | ||
216 | @@ -XXX,XX +XXX,XX @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, | ||
217 | /* As length is under guest control, handle illegal values. */ | ||
218 | return; | ||
219 | } | ||
220 | - memory_region_dispatch_write(mr, addr, val, size_memop(len), | ||
221 | + /* TODO: Merge bswap from cpu_to_leXX into memory_region_dispatch_write. */ | ||
222 | + memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE, | ||
223 | MEMTXATTRS_UNSPECIFIED); | ||
224 | } | ||
225 | |||
226 | @@ -XXX,XX +XXX,XX @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, | ||
227 | /* Make sure caller aligned buf properly */ | ||
228 | assert(!(((uintptr_t)buf) & (len - 1))); | ||
229 | |||
230 | - memory_region_dispatch_read(mr, addr, &val, size_memop(len), | ||
231 | + /* TODO: Merge bswap from leXX_to_cpu into memory_region_dispatch_read. */ | ||
232 | + memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE, | ||
233 | MEMTXATTRS_UNSPECIFIED); | ||
234 | switch (len) { | ||
235 | case 1: | ||
236 | diff --git a/memory.c b/memory.c | ||
237 | index XXXXXXX..XXXXXXX 100644 | ||
238 | --- a/memory.c | ||
239 | +++ b/memory.c | ||
240 | @@ -XXX,XX +XXX,XX @@ static void memory_register_types(void) | ||
241 | } | ||
242 | |||
243 | type_init(memory_register_types) | ||
244 | + | ||
245 | +MemOp devend_memop(enum device_endian end) | ||
246 | +{ | ||
247 | + static MemOp conv[] = { | ||
248 | + [DEVICE_LITTLE_ENDIAN] = MO_LE, | ||
249 | + [DEVICE_BIG_ENDIAN] = MO_BE, | ||
250 | + [DEVICE_NATIVE_ENDIAN] = MO_TE, | ||
251 | + [DEVICE_HOST_ENDIAN] = 0, | ||
252 | + }; | ||
253 | + switch (end) { | ||
254 | + case DEVICE_LITTLE_ENDIAN: | ||
255 | + case DEVICE_BIG_ENDIAN: | ||
256 | + case DEVICE_NATIVE_ENDIAN: | ||
257 | + return conv[end]; | ||
258 | + default: | 77 | + default: |
259 | + g_assert_not_reached(); | 78 | + g_assert_not_reached(); |
260 | + } | 79 | + } |
261 | +} | 80 | } |
262 | diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c | 81 | |
263 | index XXXXXXX..XXXXXXX 100644 | 82 | -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, |
264 | --- a/memory_ldst.inc.c | 83 | - TCGReg arg1, intptr_t arg2) |
265 | +++ b/memory_ldst.inc.c | 84 | +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, |
266 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, | 85 | + TCGReg arg1, intptr_t arg2) |
267 | release_lock |= prepare_mmio_access(mr); | 86 | { |
268 | 87 | - tcg_out_st32(s, COND_AL, arg, arg1, arg2); | |
269 | /* I/O case */ | 88 | + switch (type) { |
270 | - r = memory_region_dispatch_read(mr, addr1, &val, MO_32, attrs); | 89 | + case TCG_TYPE_I32: |
271 | + /* TODO: Merge bswap32 into memory_region_dispatch_read. */ | 90 | + tcg_out_st32(s, COND_AL, arg, arg1, arg2); |
272 | + r = memory_region_dispatch_read(mr, addr1, &val, | 91 | + return; |
273 | + MO_32 | devend_memop(endian), attrs); | 92 | + case TCG_TYPE_V64: |
274 | #if defined(TARGET_WORDS_BIGENDIAN) | 93 | + /* regs 1; size 8; align 8 */ |
275 | if (endian == DEVICE_LITTLE_ENDIAN) { | 94 | + tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2); |
276 | val = bswap32(val); | 95 | + return; |
277 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, | 96 | + case TCG_TYPE_V128: |
278 | release_lock |= prepare_mmio_access(mr); | 97 | + /* regs 2; size 8; align 16 */ |
279 | 98 | + tcg_out_vldst(s, INSN_VST1 | 0xae0, arg, arg1, arg2); | |
280 | /* I/O case */ | 99 | + return; |
281 | - r = memory_region_dispatch_read(mr, addr1, &val, MO_64, attrs); | 100 | + default: |
282 | + /* TODO: Merge bswap64 into memory_region_dispatch_read. */ | 101 | + g_assert_not_reached(); |
283 | + r = memory_region_dispatch_read(mr, addr1, &val, | 102 | + } |
284 | + MO_64 | devend_memop(endian), attrs); | 103 | } |
285 | #if defined(TARGET_WORDS_BIGENDIAN) | 104 | |
286 | if (endian == DEVICE_LITTLE_ENDIAN) { | 105 | static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, |
287 | val = bswap64(val); | ||
288 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, | ||
289 | release_lock |= prepare_mmio_access(mr); | ||
290 | |||
291 | /* I/O case */ | ||
292 | - r = memory_region_dispatch_read(mr, addr1, &val, MO_16, attrs); | ||
293 | + /* TODO: Merge bswap16 into memory_region_dispatch_read. */ | ||
294 | + r = memory_region_dispatch_read(mr, addr1, &val, | ||
295 | + MO_16 | devend_memop(endian), attrs); | ||
296 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
297 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
298 | val = bswap16(val); | ||
299 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, | ||
300 | val = bswap32(val); | ||
301 | } | ||
302 | #endif | ||
303 | - r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs); | ||
304 | + /* TODO: Merge bswap32 into memory_region_dispatch_write. */ | ||
305 | + r = memory_region_dispatch_write(mr, addr1, val, | ||
306 | + MO_32 | devend_memop(endian), attrs); | ||
307 | } else { | ||
308 | /* RAM case */ | ||
309 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
310 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, | ||
311 | val = bswap16(val); | ||
312 | } | ||
313 | #endif | ||
314 | - r = memory_region_dispatch_write(mr, addr1, val, MO_16, attrs); | ||
315 | + /* TODO: Merge bswap16 into memory_region_dispatch_write. */ | ||
316 | + r = memory_region_dispatch_write(mr, addr1, val, | ||
317 | + MO_16 | devend_memop(endian), attrs); | ||
318 | } else { | ||
319 | /* RAM case */ | ||
320 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
321 | @@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, | ||
322 | val = bswap64(val); | ||
323 | } | ||
324 | #endif | ||
325 | - r = memory_region_dispatch_write(mr, addr1, val, MO_64, attrs); | ||
326 | + /* TODO: Merge bswap64 into memory_region_dispatch_write. */ | ||
327 | + r = memory_region_dispatch_write(mr, addr1, val, | ||
328 | + MO_64 | devend_memop(endian), attrs); | ||
329 | } else { | ||
330 | /* RAM case */ | ||
331 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
332 | -- | 106 | -- |
333 | 2.17.1 | 107 | 2.25.1 |
334 | 108 | ||
335 | 109 | diff view generated by jsdifflib |
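The "shift left by 1 while inserting" comment in encode_vd() is worth
unpacking: TCG numbers the vector registers as Q0-Q15, the instruction
fields name D-registers, and Qn aliases D(2n), whose 5-bit number is split
as {D: bit 22, Vd: bits 15..12}. A self-checking sketch (Q numbers are
0-based here, whereas QEMU's TCGReg values are offset by TCG_REG_Q0):

    #include <assert.h>
    #include <stdint.h>

    /* Placing the low Q bits at bit 13 rather than 12 bakes in the
     * doubling from Q-regno to D-regno. */
    static uint32_t encode_vd_sketch(unsigned q)      /* q = 0..15 */
    {
        return (((q >> 3) & 1) << 22) | ((q & 7) << 13);
    }

    int main(void)
    {
        for (unsigned q = 0; q < 16; q++) {
            unsigned d = 2 * q;                       /* Qn aliases D(2n) */
            uint32_t want = (((d >> 4) & 1) << 22) | ((d & 15) << 12);
            assert(encode_vd_sketch(q) == want);
        }
        return 0;
    }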
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
---|---|---|---|
2 | |||
3 | Append MemTxAttrs to interfaces so we can pass along the upcoming | ||
4 | Invert Endian TTE bit on SPARC64. | ||
5 | |||
6 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <f8fcc3138570c460ef289a6b34ba7715ba36f99e.1566466906.git.tony.nguyen@bt.com> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 3 | --- |
11 | target/sparc/mmu_helper.c | 32 ++++++++++++++++++-------------- | 4 | tcg/arm/tcg-target.c.inc | 52 +++++++++++++++++++++++++++++++++++----- |
12 | 1 file changed, 18 insertions(+), 14 deletions(-) | 5 | 1 file changed, 46 insertions(+), 6 deletions(-) |
13 | 6 | ||
14 | diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c | 7 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
15 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/sparc/mmu_helper.c | 9 | --- a/tcg/arm/tcg-target.c.inc |
17 | +++ b/target/sparc/mmu_helper.c | 10 | +++ b/tcg/arm/tcg-target.c.inc |
18 | @@ -XXX,XX +XXX,XX @@ static const int perm_table[2][8] = { | 11 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
19 | }; | 12 | /* Otherwise the assembler uses mov r0,r0 */ |
20 | 13 | INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV, | |
21 | static int get_physical_address(CPUSPARCState *env, hwaddr *physical, | 14 | |
22 | - int *prot, int *access_index, | 15 | + INSN_VORR = 0xf2200110, |
23 | + int *prot, int *access_index, MemTxAttrs *attrs, | 16 | + |
24 | target_ulong address, int rw, int mmu_idx, | 17 | INSN_VLD1 = 0xf4200000, /* VLD1 (multiple single elements) */ |
25 | target_ulong *page_size) | 18 | INSN_VST1 = 0xf4000000, /* VST1 (multiple single elements) */ |
19 | } ARMInsn; | ||
20 | @@ -XXX,XX +XXX,XX @@ static uint32_t encode_vd(TCGReg rd) | ||
21 | return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13); | ||
22 | } | ||
23 | |||
24 | +static uint32_t encode_vn(TCGReg rn) | ||
25 | +{ | ||
26 | + tcg_debug_assert(rn >= TCG_REG_Q0); | ||
27 | + return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17); | ||
28 | +} | ||
29 | + | ||
30 | +static uint32_t encode_vm(TCGReg rm) | ||
31 | +{ | ||
32 | + tcg_debug_assert(rm >= TCG_REG_Q0); | ||
33 | + return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1); | ||
34 | +} | ||
35 | + | ||
36 | +static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece, | ||
37 | + TCGReg d, TCGReg n, TCGReg m) | ||
38 | +{ | ||
39 | + tcg_out32(s, insn | (vece << 20) | (q << 6) | | ||
40 | + encode_vd(d) | encode_vn(n) | encode_vm(m)); | ||
41 | +} | ||
42 | + | ||
43 | static void tcg_out_vldst(TCGContext *s, ARMInsn insn, | ||
44 | TCGReg rd, TCGReg rn, int offset) | ||
26 | { | 45 | { |
27 | @@ -XXX,XX +XXX,XX @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, | 46 | @@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, |
28 | target_ulong vaddr; | 47 | return false; |
29 | target_ulong page_size; | ||
30 | int error_code = 0, prot, access_index; | ||
31 | + MemTxAttrs attrs = {}; | ||
32 | |||
33 | /* | ||
34 | * TODO: If we ever need tlb_vaddr_to_host for this target, | ||
35 | @@ -XXX,XX +XXX,XX @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, | ||
36 | assert(!probe); | ||
37 | |||
38 | address &= TARGET_PAGE_MASK; | ||
39 | - error_code = get_physical_address(env, &paddr, &prot, &access_index, | ||
40 | + error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, | ||
41 | address, access_type, | ||
42 | mmu_idx, &page_size); | ||
43 | vaddr = address; | ||
44 | @@ -XXX,XX +XXX,XX @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, | ||
45 | return 0; | ||
46 | } | 48 | } |
47 | 49 | ||
48 | -static int get_physical_address_data(CPUSPARCState *env, | 50 | -static inline bool tcg_out_mov(TCGContext *s, TCGType type, |
49 | - hwaddr *physical, int *prot, | 51 | - TCGReg ret, TCGReg arg) |
50 | +static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, | 52 | +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) |
51 | + int *prot, MemTxAttrs *attrs, | ||
52 | target_ulong address, int rw, int mmu_idx) | ||
53 | { | 53 | { |
54 | CPUState *cs = env_cpu(env); | 54 | - tcg_out_mov_reg(s, COND_AL, ret, arg); |
55 | @@ -XXX,XX +XXX,XX @@ static int get_physical_address_data(CPUSPARCState *env, | 55 | - return true; |
56 | return 1; | 56 | + if (ret == arg) { |
57 | + return true; | ||
58 | + } | ||
59 | + switch (type) { | ||
60 | + case TCG_TYPE_I32: | ||
61 | + if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) { | ||
62 | + tcg_out_mov_reg(s, COND_AL, ret, arg); | ||
63 | + return true; | ||
64 | + } | ||
65 | + return false; | ||
66 | + | ||
67 | + case TCG_TYPE_V64: | ||
68 | + case TCG_TYPE_V128: | ||
69 | + /* "VMOV D,N" is an alias for "VORR D,N,N". */ | ||
70 | + tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg); | ||
71 | + return true; | ||
72 | + | ||
73 | + default: | ||
74 | + g_assert_not_reached(); | ||
75 | + } | ||
57 | } | 76 | } |
58 | 77 | ||
59 | -static int get_physical_address_code(CPUSPARCState *env, | 78 | -static inline void tcg_out_movi(TCGContext *s, TCGType type, |
60 | - hwaddr *physical, int *prot, | 79 | - TCGReg ret, tcg_target_long arg) |
61 | +static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, | 80 | +static void tcg_out_movi(TCGContext *s, TCGType type, |
62 | + int *prot, MemTxAttrs *attrs, | 81 | + TCGReg ret, tcg_target_long arg) |
63 | target_ulong address, int mmu_idx) | ||
64 | { | 82 | { |
65 | CPUState *cs = env_cpu(env); | 83 | + tcg_debug_assert(type == TCG_TYPE_I32); |
66 | @@ -XXX,XX +XXX,XX @@ static int get_physical_address_code(CPUSPARCState *env, | 84 | + tcg_debug_assert(ret < TCG_REG_Q0); |
85 | tcg_out_movi32(s, COND_AL, ret, arg); | ||
67 | } | 86 | } |
68 | 87 | ||
69 | static int get_physical_address(CPUSPARCState *env, hwaddr *physical, | ||
70 | - int *prot, int *access_index, | ||
71 | + int *prot, int *access_index, MemTxAttrs *attrs, | ||
72 | target_ulong address, int rw, int mmu_idx, | ||
73 | target_ulong *page_size) | ||
74 | { | ||
75 | @@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, | ||
76 | } | ||
77 | |||
78 | if (rw == 2) { | ||
79 | - return get_physical_address_code(env, physical, prot, address, | ||
80 | + return get_physical_address_code(env, physical, prot, attrs, address, | ||
81 | mmu_idx); | ||
82 | } else { | ||
83 | - return get_physical_address_data(env, physical, prot, address, rw, | ||
84 | - mmu_idx); | ||
85 | + return get_physical_address_data(env, physical, prot, attrs, address, | ||
86 | + rw, mmu_idx); | ||
87 | } | ||
88 | } | ||
89 | |||
90 | @@ -XXX,XX +XXX,XX @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, | ||
91 | target_ulong vaddr; | ||
92 | hwaddr paddr; | ||
93 | target_ulong page_size; | ||
94 | + MemTxAttrs attrs = {}; | ||
95 | int error_code = 0, prot, access_index; | ||
96 | |||
97 | address &= TARGET_PAGE_MASK; | ||
98 | - error_code = get_physical_address(env, &paddr, &prot, &access_index, | ||
99 | + error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, | ||
100 | address, access_type, | ||
101 | mmu_idx, &page_size); | ||
102 | if (likely(error_code == 0)) { | ||
103 | @@ -XXX,XX +XXX,XX @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, | ||
104 | env->dmmu.mmu_primary_context, | ||
105 | env->dmmu.mmu_secondary_context); | ||
106 | |||
107 | - tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); | ||
108 | + tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx, | ||
109 | + page_size); | ||
110 | return true; | ||
111 | } | ||
112 | if (probe) { | ||
113 | @@ -XXX,XX +XXX,XX @@ static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, | ||
114 | { | ||
115 | target_ulong page_size; | ||
116 | int prot, access_index; | ||
117 | + MemTxAttrs attrs = {}; | ||
118 | |||
119 | - return get_physical_address(env, phys, &prot, &access_index, addr, rw, | ||
120 | - mmu_idx, &page_size); | ||
121 | + return get_physical_address(env, phys, &prot, &access_index, &attrs, addr, | ||
122 | + rw, mmu_idx, &page_size); | ||
123 | } | ||
124 | |||
125 | #if defined(TARGET_SPARC64) | ||
126 | -- | 88 | -- |
127 | 2.17.1 | 89 | 2.25.1 |
128 | 90 | ||
129 | 91 | diff view generated by jsdifflib |
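The VORR alias works because x | x == x, so no dedicated vector-move
opcode is needed: a move is VORR with both source operands equal. A
hedged sketch of assembling such a move; the field encoders restate the
ones unpacked earlier (0-based Q numbers) so the snippet stands alone:

    #include <stdint.h>

    #define INSN_VORR 0xf2200110u

    static uint32_t enc_vd(unsigned q)
    {
        return (((q >> 3) & 1) << 22) | ((q & 7) << 13);
    }
    static uint32_t enc_vn(unsigned q)
    {
        return (((q >> 3) & 1) << 7) | ((q & 7) << 17);
    }
    static uint32_t enc_vm(unsigned q)
    {
        return (((q >> 3) & 1) << 5) | ((q & 7) << 1);
    }

    /* "VMOV d, n" emitted as "VORR d, n, n"; q128 = 1 selects the
     * 128-bit (Q-register) form, 0 the 64-bit form. */
    static uint32_t emit_vmov_vec(unsigned d, unsigned n, unsigned q128)
    {
        return INSN_VORR | (q128 << 6) | enc_vd(d) | enc_vn(n) | enc_vm(n);
    }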
1 | The raising of exceptions from check_watchpoint, buried inside | 1 | Most of dupi is copied from tcg/aarch64, which has the same |
---|---|---|---|
2 | of the I/O subsystem, is fundamentally broken. We do not have | 2 | encoding for AdvSimdExpandImm. |
3 | the helper return address with which we can unwind guest state. | ||
4 | 3 | ||
5 | Replace PHYS_SECTION_WATCH and io_mem_watch with TLB_WATCHPOINT. | 4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Move the call to cpu_check_watchpoint into the cputlb helpers | ||
7 | where we do have the helper return address. | ||
8 | |||
9 | This allows watchpoints on RAM to bypass the full i/o access path. | ||
10 | |||
11 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
12 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
14 | --- | 6 | --- |
15 | include/exec/cpu-all.h | 5 +- | 7 | tcg/arm/tcg-target.c.inc | 283 +++++++++++++++++++++++++++++++++++++-- |
16 | accel/tcg/cputlb.c | 89 ++++++++++++++++++++++++++++---- | 8 | 1 file changed, 275 insertions(+), 8 deletions(-) |
17 | exec.c | 114 +++-------------------------------------- | ||
18 | 3 files changed, 90 insertions(+), 118 deletions(-) | ||
19 | 9 | ||
20 | diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h | 10 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
21 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/include/exec/cpu-all.h | 12 | --- a/tcg/arm/tcg-target.c.inc |
23 | +++ b/include/exec/cpu-all.h | 13 | +++ b/tcg/arm/tcg-target.c.inc |
24 | @@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env); | 14 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
25 | #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2)) | 15 | |
26 | /* Set if TLB entry is an IO callback. */ | 16 | INSN_VORR = 0xf2200110, |
27 | #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3)) | 17 | |
28 | +/* Set if TLB entry contains a watchpoint. */ | 18 | + INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */ |
29 | +#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4)) | 19 | + INSN_VDUP_S = 0xf3b00c00, /* VDUP (scalar) */ |
30 | 20 | + INSN_VLDR_D = 0xed100b00, /* VLDR.64 */ | |
31 | /* Use this mask to check interception with an alignment mask | 21 | INSN_VLD1 = 0xf4200000, /* VLD1 (multiple single elements) */ |
32 | * in a TCG backend. | 22 | + INSN_VLD1R = 0xf4a00c00, /* VLD1 (single element to all lanes) */ |
33 | */ | 23 | INSN_VST1 = 0xf4000000, /* VST1 (multiple single elements) */ |
34 | -#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO) | 24 | + INSN_VMOVI = 0xf2800010, /* VMOV (immediate) */ |
35 | +#define TLB_FLAGS_MASK \ | 25 | } ARMInsn; |
36 | + (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT) | 26 | |
37 | 27 | #define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4) | |
38 | /** | 28 | @@ -XXX,XX +XXX,XX @@ static const uint8_t tcg_cond_to_arm_cond[] = { |
39 | * tlb_hit_page: return true if page aligned @addr is a hit against the | 29 | [TCG_COND_GTU] = COND_HI, |
40 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 30 | }; |
41 | index XXXXXXX..XXXXXXX 100644 | 31 | |
42 | --- a/accel/tcg/cputlb.c | 32 | +static int encode_imm(uint32_t imm); |
43 | +++ b/accel/tcg/cputlb.c | 33 | + |
44 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | 34 | +/* TCG private relocation type: add with pc+imm8 */ |
45 | hwaddr iotlb, xlat, sz, paddr_page; | 35 | +#define R_ARM_PC8 11 |
46 | target_ulong vaddr_page; | 36 | + |
47 | int asidx = cpu_asidx_from_attrs(cpu, attrs); | 37 | +/* TCG private relocation type: vldr with imm8 << 2 */ |
48 | + int wp_flags; | 38 | +#define R_ARM_PC11 12 |
49 | 39 | + | |
50 | assert_cpu_is_self(cpu); | 40 | static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target) |
51 | 41 | { | |
52 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | 42 | const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); |
53 | code_address = address; | 43 | @@ -XXX,XX +XXX,XX @@ static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target) |
54 | iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page, | 44 | return false; |
55 | paddr_page, xlat, prot, &address); | 45 | } |
56 | + wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, | 46 | |
57 | + TARGET_PAGE_SIZE); | 47 | +static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target) |
58 | 48 | +{ | |
59 | index = tlb_index(env, mmu_idx, vaddr_page); | 49 | + const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); |
60 | te = tlb_entry(env, mmu_idx, vaddr_page); | 50 | + ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4; |
61 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | 51 | + |
62 | tn.addend = addend - vaddr_page; | 52 | + if (offset >= -0xff && offset <= 0xff) { |
63 | if (prot & PAGE_READ) { | 53 | + tcg_insn_unit insn = *src_rw; |
64 | tn.addr_read = address; | 54 | + bool u = (offset >= 0); |
65 | + if (wp_flags & BP_MEM_READ) { | 55 | + if (!u) { |
66 | + tn.addr_read |= TLB_WATCHPOINT; | 56 | + offset = -offset; |
67 | + } | 57 | + } |
68 | } else { | 58 | + insn = deposit32(insn, 23, 1, u); |
69 | tn.addr_read = -1; | 59 | + insn = deposit32(insn, 0, 8, offset); |
60 | + *src_rw = insn; | ||
61 | + return true; | ||
62 | + } | ||
63 | + return false; | ||
64 | +} | ||
65 | + | ||
66 | +static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target) | ||
67 | +{ | ||
68 | + const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); | ||
69 | + ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8; | ||
70 | + int rot = encode_imm(offset); | ||
71 | + | ||
72 | + if (rot >= 0) { | ||
73 | + *src_rw = deposit32(*src_rw, 0, 12, rol32(offset, rot) | (rot << 7)); | ||
74 | + return true; | ||
75 | + } | ||
76 | + return false; | ||
77 | +} | ||
78 | + | ||
79 | static bool patch_reloc(tcg_insn_unit *code_ptr, int type, | ||
80 | intptr_t value, intptr_t addend) | ||
81 | { | ||
82 | tcg_debug_assert(addend == 0); | ||
83 | - | ||
84 | - if (type == R_ARM_PC24) { | ||
85 | + switch (type) { | ||
86 | + case R_ARM_PC24: | ||
87 | return reloc_pc24(code_ptr, (const tcg_insn_unit *)value); | ||
88 | - } else if (type == R_ARM_PC13) { | ||
89 | + case R_ARM_PC13: | ||
90 | return reloc_pc13(code_ptr, (const tcg_insn_unit *)value); | ||
91 | - } else { | ||
92 | + case R_ARM_PC11: | ||
93 | + return reloc_pc11(code_ptr, (const tcg_insn_unit *)value); | ||
94 | + case R_ARM_PC8: | ||
95 | + return reloc_pc8(code_ptr, (const tcg_insn_unit *)value); | ||
96 | + default: | ||
97 | g_assert_not_reached(); | ||
70 | } | 98 | } |
71 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | 99 | } |
72 | if (prot & PAGE_WRITE_INV) { | 100 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t rotl(uint32_t val, int n) |
73 | tn.addr_write |= TLB_INVALID_MASK; | 101 | |
74 | } | 102 | /* ARM immediates for ALU instructions are made of an unsigned 8-bit |
75 | + if (wp_flags & BP_MEM_WRITE) { | 103 | right-rotated by an even amount between 0 and 30. */ |
76 | + tn.addr_write |= TLB_WATCHPOINT; | 104 | -static inline int encode_imm(uint32_t imm) |
77 | + } | 105 | +static int encode_imm(uint32_t imm) |
78 | } | 106 | { |
79 | 107 | int shift; | |
80 | copy_tlb_helper_locked(te, &tn); | 108 | |
81 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | 109 | @@ -XXX,XX +XXX,XX @@ static inline int check_fit_imm(uint32_t imm) |
82 | tlb_addr &= ~TLB_INVALID_MASK; | 110 | return encode_imm(imm) >= 0; |
83 | } | 111 | } |
84 | 112 | ||
85 | - /* Handle an IO access. */ | 113 | +/* Return true if v16 is a valid 16-bit shifted immediate. */ |
86 | + /* Handle anything that isn't just a straight memory access. */ | 114 | +static bool is_shimm16(uint16_t v16, int *cmode, int *imm8) |
87 | if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { | 115 | +{ |
88 | + CPUIOTLBEntry *iotlbentry; | 116 | + if (v16 == (v16 & 0xff)) { |
89 | + | 117 | + *cmode = 0x8; |
90 | + /* For anything that is unaligned, recurse through full_load. */ | 118 | + *imm8 = v16 & 0xff; |
91 | if ((addr & (size - 1)) != 0) { | 119 | + return true; |
92 | goto do_unaligned_access; | 120 | + } else if (v16 == (v16 & 0xff00)) { |
93 | } | 121 | + *cmode = 0xa; |
94 | - return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | 122 | + *imm8 = v16 >> 8; |
95 | - mmu_idx, addr, retaddr, access_type, op); | 123 | + return true; |
96 | + | 124 | + } |
97 | + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; | 125 | + return false; |
98 | + | 126 | +} |
99 | + /* Handle watchpoints. */ | 127 | + |
100 | + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { | 128 | +/* Return true if v32 is a valid 32-bit shifted immediate. */ |
101 | + /* On watchpoint hit, this will longjmp out. */ | 129 | +static bool is_shimm32(uint32_t v32, int *cmode, int *imm8) |
102 | + cpu_check_watchpoint(env_cpu(env), addr, size, | 130 | +{ |
103 | + iotlbentry->attrs, BP_MEM_READ, retaddr); | 131 | + if (v32 == (v32 & 0xff)) { |
104 | + | 132 | + *cmode = 0x0; |
105 | + /* The backing page may or may not require I/O. */ | 133 | + *imm8 = v32 & 0xff; |
106 | + tlb_addr &= ~TLB_WATCHPOINT; | 134 | + return true; |
107 | + if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) { | 135 | + } else if (v32 == (v32 & 0xff00)) { |
108 | + goto do_aligned_access; | 136 | + *cmode = 0x2; |
109 | + } | 137 | + *imm8 = (v32 >> 8) & 0xff; |
110 | + } | 138 | + return true; |
111 | + | 139 | + } else if (v32 == (v32 & 0xff0000)) { |
112 | + /* Handle I/O access. */ | 140 | + *cmode = 0x4; |
113 | + return io_readx(env, iotlbentry, mmu_idx, addr, | 141 | + *imm8 = (v32 >> 16) & 0xff; |
114 | + retaddr, access_type, op); | 142 | + return true; |
115 | } | 143 | + } else if (v32 == (v32 & 0xff000000)) { |
116 | 144 | + *cmode = 0x6; | |
117 | /* Handle slow unaligned access (it spans two pages or IO). */ | 145 | + *imm8 = v32 >> 24; |
118 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | 146 | + return true; |
119 | return res & MAKE_64BIT_MASK(0, size * 8); | 147 | + } |
120 | } | 148 | + return false; |
121 | 149 | +} | |
122 | + do_aligned_access: | 150 | + |
123 | haddr = (void *)((uintptr_t)addr + entry->addend); | 151 | +/* Return true if v32 is a valid 32-bit shifting ones immediate. */ |
124 | switch (op) { | 152 | +static bool is_soimm32(uint32_t v32, int *cmode, int *imm8) |
125 | case MO_UB: | 153 | +{ |
126 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | 154 | + if ((v32 & 0xffff00ff) == 0xff) { |
127 | tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; | 155 | + *cmode = 0xc; |
128 | } | 156 | + *imm8 = (v32 >> 8) & 0xff; |
129 | 157 | + return true; | |
130 | - /* Handle an IO access. */ | 158 | + } else if ((v32 & 0xff00ffff) == 0xffff) { |
131 | + /* Handle anything that isn't just a straight memory access. */ | 159 | + *cmode = 0xd; |
132 | if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { | 160 | + *imm8 = (v32 >> 16) & 0xff; |
133 | + CPUIOTLBEntry *iotlbentry; | 161 | + return true; |
134 | + | 162 | + } |
135 | + /* For anything that is unaligned, recurse through byte stores. */ | 163 | + return false; |
136 | if ((addr & (size - 1)) != 0) { | 164 | +} |
137 | goto do_unaligned_access; | 165 | + |
138 | } | 166 | +/* |
139 | - io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, | 167 | + * Return non-zero if v32 can be formed by MOVI+ORR. |
140 | - val, addr, retaddr, op); | 168 | + * Place the parameters for MOVI in (cmode, imm8). |
141 | + | 169 | + * Return the cmode for ORR; the imm8 can be had via extraction from v32. |
142 | + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; | 170 | + */ |
143 | + | 171 | +static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8) |
144 | + /* Handle watchpoints. */ | 172 | +{ |
145 | + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { | 173 | + int i; |
146 | + /* On watchpoint hit, this will longjmp out. */ | 174 | + |
147 | + cpu_check_watchpoint(env_cpu(env), addr, size, | 175 | + for (i = 6; i > 0; i -= 2) { |
148 | + iotlbentry->attrs, BP_MEM_WRITE, retaddr); | 176 | + /* Mask out one byte we can add with ORR. */ |
149 | + | 177 | + uint32_t tmp = v32 & ~(0xffu << (i * 4)); |
150 | + /* The backing page may or may not require I/O. */ | 178 | + if (is_shimm32(tmp, cmode, imm8) || |
151 | + tlb_addr &= ~TLB_WATCHPOINT; | 179 | + is_soimm32(tmp, cmode, imm8)) { |
152 | + if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) { | 180 | + break; |
153 | + goto do_aligned_access; | 181 | + } |
154 | + } | 182 | + } |
155 | + } | 183 | + return i; |
156 | + | 184 | +} |
157 | + /* Handle I/O access. */ | 185 | + |
158 | + io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op); | 186 | /* Test if a constant matches the constraint. |
159 | return; | 187 | * TODO: define constraints for: |
160 | } | 188 | * |
161 | 189 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece, | |
162 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | 190 | encode_vd(d) | encode_vn(n) | encode_vm(m)); |
163 | index2 = tlb_index(env, mmu_idx, page2); | 191 | } |
164 | entry2 = tlb_entry(env, mmu_idx, page2); | 192 | |
165 | tlb_addr2 = tlb_addr_write(entry2); | 193 | +static void tcg_out_vmovi(TCGContext *s, TCGReg rd, |
166 | - if (!tlb_hit_page(tlb_addr2, page2) | 194 | + int q, int op, int cmode, uint8_t imm8) |
167 | - && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { | 195 | +{ |
168 | - tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, | 196 | + tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5) |
169 | - mmu_idx, retaddr); | 197 | + | (cmode << 8) | extract32(imm8, 0, 4) |
170 | + if (!tlb_hit_page(tlb_addr2, page2)) { | 198 | + | (extract32(imm8, 4, 3) << 16) |
171 | + if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { | 199 | + | (extract32(imm8, 7, 1) << 24)); |
172 | + tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, | 200 | +} |
173 | + mmu_idx, retaddr); | 201 | + |
174 | + index2 = tlb_index(env, mmu_idx, page2); | 202 | static void tcg_out_vldst(TCGContext *s, ARMInsn insn, |
175 | + entry2 = tlb_entry(env, mmu_idx, page2); | 203 | TCGReg rd, TCGReg rn, int offset) |
176 | + } | 204 | { |
177 | + tlb_addr2 = tlb_addr_write(entry2); | 205 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, |
206 | tcg_out_movi32(s, COND_AL, ret, arg); | ||
207 | } | ||
208 | |||
209 | +/* Type is always V128, with I64 elements. */ | ||
210 | +static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh) | ||
211 | +{ | ||
212 | + /* Move high element into place first. */ | ||
213 | + /* VMOV Dd+1, Ds */ | ||
214 | + tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh); | ||
215 | + /* Move low element into place; tcg_out_mov will check for nop. */ | ||
216 | + tcg_out_mov(s, TCG_TYPE_V64, rd, rl); | ||
217 | +} | ||
218 | + | ||
219 | static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, | ||
220 | TCGReg rd, TCGReg rs) | ||
221 | { | ||
222 | - g_assert_not_reached(); | ||
223 | + int q = type - TCG_TYPE_V64; | ||
224 | + | ||
225 | + if (vece == MO_64) { | ||
226 | + if (type == TCG_TYPE_V128) { | ||
227 | + tcg_out_dup2_vec(s, rd, rs, rs); | ||
228 | + } else { | ||
229 | + tcg_out_mov(s, TCG_TYPE_V64, rd, rs); | ||
230 | + } | ||
231 | + } else if (rs < TCG_REG_Q0) { | ||
232 | + int b = (vece == MO_8); | ||
233 | + int e = (vece == MO_16); | ||
234 | + tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) | | ||
235 | + encode_vn(rd) | (rs << 12)); | ||
236 | + } else { | ||
237 | + int imm4 = 1 << vece; | ||
238 | + tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) | | ||
239 | + encode_vd(rd) | encode_vm(rs)); | ||
240 | + } | ||
241 | + return true; | ||
242 | } | ||
243 | |||
244 | static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, | ||
245 | TCGReg rd, TCGReg base, intptr_t offset) | ||
246 | { | ||
247 | - g_assert_not_reached(); | ||
248 | + if (vece == MO_64) { | ||
249 | + tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset); | ||
250 | + if (type == TCG_TYPE_V128) { | ||
251 | + tcg_out_dup2_vec(s, rd, rd, rd); | ||
252 | + } | ||
253 | + } else { | ||
254 | + int q = type - TCG_TYPE_V64; | ||
255 | + tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5), | ||
256 | + rd, base, offset); | ||
257 | + } | ||
258 | + return true; | ||
259 | } | ||
260 | |||
261 | static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, | ||
262 | TCGReg rd, int64_t v64) | ||
263 | { | ||
264 | - g_assert_not_reached(); | ||
265 | + int q = type - TCG_TYPE_V64; | ||
266 | + int cmode, imm8, i; | ||
267 | + | ||
268 | + /* Test all bytes equal first. */ | ||
269 | + if (vece == MO_8) { | ||
270 | + tcg_out_vmovi(s, rd, q, 0, 0xe, v64); | ||
271 | + return; | ||
272 | + } | ||
273 | + | ||
274 | + /* | ||
275 | + * Test all bytes 0x00 or 0xff second. This can match cases that | ||
276 | + * might otherwise take 2 or 3 insns for MO_16 or MO_32 below. | ||
277 | + */ | ||
278 | + for (i = imm8 = 0; i < 8; i++) { | ||
279 | + uint8_t byte = v64 >> (i * 8); | ||
280 | + if (byte == 0xff) { | ||
281 | + imm8 |= 1 << i; | ||
282 | + } else if (byte != 0) { | ||
283 | + goto fail_bytes; | ||
284 | + } | ||
285 | + } | ||
286 | + tcg_out_vmovi(s, rd, q, 1, 0xe, imm8); | ||
287 | + return; | ||
288 | + fail_bytes: | ||
289 | + | ||
290 | + /* | ||
291 | + * Tests for various replications. For each element width, if we | ||
292 | + * cannot find an expansion there's no point checking a larger | ||
293 | + * width because we already know by replication it cannot match. | ||
294 | + */ | ||
295 | + if (vece == MO_16) { | ||
296 | + uint16_t v16 = v64; | ||
297 | + | ||
298 | + if (is_shimm16(v16, &cmode, &imm8)) { | ||
299 | + tcg_out_vmovi(s, rd, q, 0, cmode, imm8); | ||
300 | + return; | ||
301 | + } | ||
302 | + if (is_shimm16(~v16, &cmode, &imm8)) { | ||
303 | + tcg_out_vmovi(s, rd, q, 1, cmode, imm8); | ||
304 | + return; | ||
178 | + } | 305 | + } |
179 | + | 306 | + |
180 | + /* | 307 | + /* |
181 | + * Handle watchpoints. Since this may trap, all checks | 308 | + * Otherwise, all remaining constants can be loaded in two insns: |
182 | + * must happen before any store. | 309 | + * rd = v16 & 0xff, rd |= v16 & 0xff00. |
183 | + */ | 310 | + */ |
184 | + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { | 311 | + tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff); |
185 | + cpu_check_watchpoint(env_cpu(env), addr, size - size2, | 312 | + tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8); /* VORRI */ |
186 | + env_tlb(env)->d[mmu_idx].iotlb[index].attrs, | 313 | + return; |
187 | + BP_MEM_WRITE, retaddr); | 314 | + } |
188 | + } | 315 | + |
189 | + if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { | 316 | + if (vece == MO_32) { |
190 | + cpu_check_watchpoint(env_cpu(env), page2, size2, | 317 | + uint32_t v32 = v64; |
191 | + env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, | 318 | + |
192 | + BP_MEM_WRITE, retaddr); | 319 | + if (is_shimm32(v32, &cmode, &imm8) || |
193 | } | 320 | + is_soimm32(v32, &cmode, &imm8)) { |
194 | 321 | + tcg_out_vmovi(s, rd, q, 0, cmode, imm8); | |
195 | /* | 322 | + return; |
196 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | 323 | + } |
197 | return; | 324 | + if (is_shimm32(~v32, &cmode, &imm8) || |
198 | } | 325 | + is_soimm32(~v32, &cmode, &imm8)) { |
199 | 326 | + tcg_out_vmovi(s, rd, q, 1, cmode, imm8); | |
200 | + do_aligned_access: | 327 | + return; |
201 | haddr = (void *)((uintptr_t)addr + entry->addend); | 328 | + } |
202 | switch (op) { | 329 | + |
203 | case MO_UB: | ||
204 | diff --git a/exec.c b/exec.c | ||
205 | index XXXXXXX..XXXXXXX 100644 | ||
206 | --- a/exec.c | ||
207 | +++ b/exec.c | ||
208 | @@ -XXX,XX +XXX,XX @@ typedef struct subpage_t { | ||
209 | #define PHYS_SECTION_UNASSIGNED 0 | ||
210 | #define PHYS_SECTION_NOTDIRTY 1 | ||
211 | #define PHYS_SECTION_ROM 2 | ||
212 | -#define PHYS_SECTION_WATCH 3 | ||
213 | |||
214 | static void io_mem_init(void); | ||
215 | static void memory_map_init(void); | ||
216 | static void tcg_log_global_after_sync(MemoryListener *listener); | ||
217 | static void tcg_commit(MemoryListener *listener); | ||
218 | |||
219 | -static MemoryRegion io_mem_watch; | ||
220 | - | ||
221 | /** | ||
222 | * CPUAddressSpace: all the information a CPU needs about an AddressSpace | ||
223 | * @cpu: the CPU whose AddressSpace this is | ||
224 | @@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, | ||
225 | target_ulong *address) | ||
226 | { | ||
227 | hwaddr iotlb; | ||
228 | - int flags, match; | ||
229 | |||
230 | if (memory_region_is_ram(section->mr)) { | ||
231 | /* Normal RAM. */ | ||
232 | @@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, | ||
233 | iotlb += xlat; | ||
234 | } | ||
235 | |||
236 | - /* Avoid trapping reads of pages with a write breakpoint. */ | ||
237 | - match = (prot & PAGE_READ ? BP_MEM_READ : 0) | ||
238 | - | (prot & PAGE_WRITE ? BP_MEM_WRITE : 0); | ||
239 | - flags = cpu_watchpoint_address_matches(cpu, vaddr, TARGET_PAGE_SIZE); | ||
240 | - if (flags & match) { | ||
241 | - /* | ||
242 | - * Make accesses to pages with watchpoints go via the | ||
243 | - * watchpoint trap routines. | ||
244 | - */ | ||
245 | - iotlb = PHYS_SECTION_WATCH + paddr; | ||
246 | - *address |= TLB_MMIO; | ||
247 | - } | ||
248 | - | ||
249 | return iotlb; | ||
250 | } | ||
251 | #endif /* defined(CONFIG_USER_ONLY) */ | ||
252 | @@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
253 | |||
254 | assert(tcg_enabled()); | ||
255 | if (cpu->watchpoint_hit) { | ||
256 | - /* We re-entered the check after replacing the TB. Now raise | ||
257 | - * the debug interrupt so that is will trigger after the | ||
258 | - * current instruction. */ | ||
259 | + /* | 330 | + /* |
260 | + * We re-entered the check after replacing the TB. | 331 | + * Restrict the set of constants to those we can load with |
261 | + * Now raise the debug interrupt so that it will | 332 | + * two instructions. Others we load from the pool. |
262 | + * trigger after the current instruction. | ||
263 | + */ | 333 | + */ |
264 | + qemu_mutex_lock_iothread(); | 334 | + i = is_shimm32_pair(v32, &cmode, &imm8); |
265 | cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); | 335 | + if (i) { |
266 | + qemu_mutex_unlock_iothread(); | 336 | + tcg_out_vmovi(s, rd, q, 0, cmode, imm8); |
267 | return; | 337 | + tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8)); |
268 | } | 338 | + return; |
269 | 339 | + } | |
270 | @@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | 340 | + i = is_shimm32_pair(~v32, &cmode, &imm8); |
271 | } | 341 | + if (i) { |
272 | } | 342 | + tcg_out_vmovi(s, rd, q, 1, cmode, imm8); |
273 | 343 | + tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8)); | |
274 | -static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | 344 | + return; |
275 | -{ | 345 | + } |
276 | - CPUState *cpu = current_cpu; | 346 | + } |
277 | - vaddr addr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; | 347 | + |
278 | - | 348 | + /* |
279 | - cpu_check_watchpoint(cpu, addr, len, attrs, flags, 0); | 349 | + * As a last resort, load from the constant pool. |
280 | -} | 350 | + */ |
281 | - | 351 | + if (!q || vece == MO_64) { |
282 | -/* Watchpoint access routines. Watchpoints are inserted using TLB tricks, | 352 | + new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32); |
283 | - so these check for a hit then pass through to the normal out-of-line | 353 | + /* VLDR Dd, [pc + offset] */ |
284 | - phys routines. */ | 354 | + tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16)); |
285 | -static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata, | 355 | + if (q) { |
286 | - unsigned size, MemTxAttrs attrs) | 356 | + tcg_out_dup2_vec(s, rd, rd, rd); |
287 | -{ | 357 | + } |
288 | - MemTxResult res; | 358 | + } else { |
289 | - uint64_t data; | 359 | + new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0); |
290 | - int asidx = cpu_asidx_from_attrs(current_cpu, attrs); | 360 | + /* add tmp, pc, offset */ |
291 | - AddressSpace *as = current_cpu->cpu_ases[asidx].as; | 361 | + tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0); |
292 | - | 362 | + tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0); |
293 | - check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ); | 363 | + } |
294 | - switch (size) { | 364 | } |
295 | - case 1: | 365 | |
296 | - data = address_space_ldub(as, addr, attrs, &res); | 366 | static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, |
297 | - break; | ||
298 | - case 2: | ||
299 | - data = address_space_lduw(as, addr, attrs, &res); | ||
300 | - break; | ||
301 | - case 4: | ||
302 | - data = address_space_ldl(as, addr, attrs, &res); | ||
303 | - break; | ||
304 | - case 8: | ||
305 | - data = address_space_ldq(as, addr, attrs, &res); | ||
306 | - break; | ||
307 | - default: abort(); | ||
308 | - } | ||
309 | - *pdata = data; | ||
310 | - return res; | ||
311 | -} | ||
312 | - | ||
313 | -static MemTxResult watch_mem_write(void *opaque, hwaddr addr, | ||
314 | - uint64_t val, unsigned size, | ||
315 | - MemTxAttrs attrs) | ||
316 | -{ | ||
317 | - MemTxResult res; | ||
318 | - int asidx = cpu_asidx_from_attrs(current_cpu, attrs); | ||
319 | - AddressSpace *as = current_cpu->cpu_ases[asidx].as; | ||
320 | - | ||
321 | - check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE); | ||
322 | - switch (size) { | ||
323 | - case 1: | ||
324 | - address_space_stb(as, addr, val, attrs, &res); | ||
325 | - break; | ||
326 | - case 2: | ||
327 | - address_space_stw(as, addr, val, attrs, &res); | ||
328 | - break; | ||
329 | - case 4: | ||
330 | - address_space_stl(as, addr, val, attrs, &res); | ||
331 | - break; | ||
332 | - case 8: | ||
333 | - address_space_stq(as, addr, val, attrs, &res); | ||
334 | - break; | ||
335 | - default: abort(); | ||
336 | - } | ||
337 | - return res; | ||
338 | -} | ||
339 | - | ||
340 | -static const MemoryRegionOps watch_mem_ops = { | ||
341 | - .read_with_attrs = watch_mem_read, | ||
342 | - .write_with_attrs = watch_mem_write, | ||
343 | - .endianness = DEVICE_NATIVE_ENDIAN, | ||
344 | - .valid = { | ||
345 | - .min_access_size = 1, | ||
346 | - .max_access_size = 8, | ||
347 | - .unaligned = false, | ||
348 | - }, | ||
349 | - .impl = { | ||
350 | - .min_access_size = 1, | ||
351 | - .max_access_size = 8, | ||
352 | - .unaligned = false, | ||
353 | - }, | ||
354 | -}; | ||
355 | - | ||
356 | static MemTxResult flatview_read(FlatView *fv, hwaddr addr, | ||
357 | MemTxAttrs attrs, uint8_t *buf, hwaddr len); | ||
358 | static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, | ||
359 | @@ -XXX,XX +XXX,XX @@ static void io_mem_init(void) | ||
360 | memory_region_init_io(&io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL, | ||
361 | NULL, UINT64_MAX); | ||
362 | memory_region_clear_global_locking(&io_mem_notdirty); | ||
363 | - | ||
364 | - memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL, | ||
365 | - NULL, UINT64_MAX); | ||
366 | } | ||
367 | |||
368 | AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) | ||
369 | @@ -XXX,XX +XXX,XX @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) | ||
370 | assert(n == PHYS_SECTION_NOTDIRTY); | ||
371 | n = dummy_section(&d->map, fv, &io_mem_rom); | ||
372 | assert(n == PHYS_SECTION_ROM); | ||
373 | - n = dummy_section(&d->map, fv, &io_mem_watch); | ||
374 | - assert(n == PHYS_SECTION_WATCH); | ||
375 | |||
376 | d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; | ||
377 | |||
378 | -- | 367 | -- |
379 | 2.17.1 | 368 | 2.25.1 |
380 | 369 | ||
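The reloc_pc8() and reloc_pc11() helpers added in the right-hand series above both fold the ARM pipeline bias (PC reads as the instruction address plus 8) into a pc-relative immediate. Below is a standalone sketch of the reloc_pc11() arithmetic; the helper name and the assert-based error handling are illustrative only, not part of the patch.

    /*
     * Patch a VLDR literal offset: an unsigned imm8 scaled by 4, plus an
     * up/down bit at bit 23, measured from the instruction address + 8.
     * Assumes the distance is 4-byte aligned, as it is for TCG output.
     */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t patch_vldr_offset(uint32_t insn, intptr_t target,
                                      intptr_t src)
    {
        intptr_t offset = (target - src - 8) / 4;
        unsigned u = offset >= 0;              /* 1 = add, 0 = subtract */

        if (!u) {
            offset = -offset;
        }
        assert(offset <= 0xff);                /* must fit in imm8 */
        insn &= ~((1u << 23) | 0xffu);         /* clear U bit and imm8 */
        return insn | (u << 23) | (uint32_t)offset;
    }

The /4 scaling is what the R_ARM_PC11 comment above means by "vldr with imm8 << 2"; reloc_pc8() applies the same 8-byte bias but feeds the byte offset through encode_imm() to obtain a rotate-encoded ALU immediate instead.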
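The is_shimm16()/is_shimm32()/is_soimm32() classifiers in the same series decide whether a constant fits one of VMOVI's cmode encodings. A minimal standalone sketch of the 32-bit shifted-immediate case, with a worked value; the function and driver names are illustrative, not part of the patch.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * A 32-bit constant is a "shifted immediate" when its only non-zero
     * byte sits in one of the four byte lanes; cmode encodes the lane
     * (0x0/0x2/0x4/0x6) and imm8 carries the byte itself.
     */
    static bool sketch_is_shimm32(uint32_t v32, int *cmode, int *imm8)
    {
        for (int i = 0; i < 4; i++) {
            if (v32 == (v32 & (0xffu << (i * 8)))) {
                *cmode = i * 2;
                *imm8 = (v32 >> (i * 8)) & 0xff;
                return true;
            }
        }
        return false;
    }

    int main(void)
    {
        int cmode, imm8;

        /* 0x00ab0000 has its byte in lane 2: expect cmode=0x4, imm8=0xab. */
        if (sketch_is_shimm32(0x00ab0000u, &cmode, &imm8)) {
            printf("cmode=%#x imm8=%#x\n", cmode, imm8);
        }
        return 0;
    }

This is also why tcg_out_dupi_vec() above can stop early: as its comment notes, if no expansion exists at a narrow element width, replication guarantees none exists at a wider one, and only the constant pool remains.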
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Implementing dup2, add, sub, and, or, xor as the minimal set. |
---|---|---|---|
2 | This allows us to actually enable neon in the header file. | ||
2 | 3 | ||
3 | Preparation for collapsing the two byte swaps, adjust_endianness and | 4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | handle_bswap into the former. | ||
5 | |||
6 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <755b7104410956b743e1f1e9c34ab87db113360f.1566466906.git.tony.nguyen@bt.com> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 6 | --- |
11 | include/exec/memop.h | 6 ++ | 7 | tcg/arm/tcg-target-con-set.h | 3 + |
12 | accel/tcg/cputlb.c | 170 +++++++++++++++++++++---------------------- | 8 | tcg/arm/tcg-target-con-str.h | 2 + |
13 | 2 files changed, 87 insertions(+), 89 deletions(-) | 9 | tcg/arm/tcg-target.h | 6 +- |
10 | tcg/arm/tcg-target.c.inc | 201 +++++++++++++++++++++++++++++++++-- | ||
11 | 4 files changed, 204 insertions(+), 8 deletions(-) | ||
14 | 12 | ||
15 | diff --git a/include/exec/memop.h b/include/exec/memop.h | 13 | diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h |
16 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/include/exec/memop.h | 15 | --- a/tcg/arm/tcg-target-con-set.h |
18 | +++ b/include/exec/memop.h | 16 | +++ b/tcg/arm/tcg-target-con-set.h |
19 | @@ -XXX,XX +XXX,XX @@ static inline MemOp size_memop(unsigned size) | 17 | @@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rIN) |
20 | return ctz32(size); | 18 | C_O1_I2(r, r, ri) |
21 | } | 19 | C_O1_I2(r, rZ, rZ) |
22 | 20 | C_O1_I2(w, w, w) | |
23 | +/* Big endianness from MemOp. */ | 21 | +C_O1_I2(w, w, wO) |
24 | +static inline bool memop_big_endian(MemOp op) | 22 | +C_O1_I2(w, w, wV) |
23 | +C_O1_I2(w, w, wZ) | ||
24 | C_O1_I4(r, r, r, rI, rI) | ||
25 | C_O1_I4(r, r, rIN, rIK, 0) | ||
26 | C_O2_I1(r, r, l) | ||
27 | diff --git a/tcg/arm/tcg-target-con-str.h b/tcg/arm/tcg-target-con-str.h | ||
28 | index XXXXXXX..XXXXXXX 100644 | ||
29 | --- a/tcg/arm/tcg-target-con-str.h | ||
30 | +++ b/tcg/arm/tcg-target-con-str.h | ||
31 | @@ -XXX,XX +XXX,XX @@ REGS('w', ALL_VECTOR_REGS) | ||
32 | CONST('I', TCG_CT_CONST_ARM) | ||
33 | CONST('K', TCG_CT_CONST_INV) | ||
34 | CONST('N', TCG_CT_CONST_NEG) | ||
35 | +CONST('O', TCG_CT_CONST_ORRI) | ||
36 | +CONST('V', TCG_CT_CONST_ANDI) | ||
37 | CONST('Z', TCG_CT_CONST_ZERO) | ||
38 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h | ||
39 | index XXXXXXX..XXXXXXX 100644 | ||
40 | --- a/tcg/arm/tcg-target.h | ||
41 | +++ b/tcg/arm/tcg-target.h | ||
42 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
43 | #else | ||
44 | extern bool use_idiv_instructions; | ||
45 | #endif | ||
46 | -#define use_neon_instructions 0 | ||
47 | +#ifdef __ARM_NEON__ | ||
48 | +#define use_neon_instructions 1 | ||
49 | +#else | ||
50 | +extern bool use_neon_instructions; | ||
51 | +#endif | ||
52 | |||
53 | /* used for function call generation */ | ||
54 | #define TCG_TARGET_STACK_ALIGN 8 | ||
55 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
56 | index XXXXXXX..XXXXXXX 100644 | ||
57 | --- a/tcg/arm/tcg-target.c.inc | ||
58 | +++ b/tcg/arm/tcg-target.c.inc | ||
59 | @@ -XXX,XX +XXX,XX @@ int arm_arch = __ARM_ARCH; | ||
60 | #ifndef use_idiv_instructions | ||
61 | bool use_idiv_instructions; | ||
62 | #endif | ||
63 | +#ifndef use_neon_instructions | ||
64 | +bool use_neon_instructions; | ||
65 | +#endif | ||
66 | |||
67 | /* ??? Ought to think about changing CONFIG_SOFTMMU to always defined. */ | ||
68 | #ifdef CONFIG_SOFTMMU | ||
69 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
70 | /* Otherwise the assembler uses mov r0,r0 */ | ||
71 | INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV, | ||
72 | |||
73 | + INSN_VADD = 0xf2000800, | ||
74 | + INSN_VAND = 0xf2000110, | ||
75 | + INSN_VEOR = 0xf3000110, | ||
76 | INSN_VORR = 0xf2200110, | ||
77 | + INSN_VSUB = 0xf3000800, | ||
78 | + | ||
79 | + INSN_VMVN = 0xf3b00580, | ||
80 | + | ||
81 | + INSN_VCEQ0 = 0xf3b10100, | ||
82 | + INSN_VCGT0 = 0xf3b10000, | ||
83 | + INSN_VCGE0 = 0xf3b10080, | ||
84 | + INSN_VCLE0 = 0xf3b10180, | ||
85 | + INSN_VCLT0 = 0xf3b10200, | ||
86 | + | ||
87 | + INSN_VCEQ = 0xf3000810, | ||
88 | + INSN_VCGE = 0xf2000310, | ||
89 | + INSN_VCGT = 0xf2000300, | ||
90 | + INSN_VCGE_U = 0xf3000310, | ||
91 | + INSN_VCGT_U = 0xf3000300, | ||
92 | + | ||
93 | + INSN_VTST = 0xf2000810, | ||
94 | |||
95 | INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */ | ||
96 | INSN_VDUP_S = 0xf3b00c00, /* VDUP (scalar) */ | ||
97 | @@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, | ||
98 | #define TCG_CT_CONST_INV 0x200 | ||
99 | #define TCG_CT_CONST_NEG 0x400 | ||
100 | #define TCG_CT_CONST_ZERO 0x800 | ||
101 | +#define TCG_CT_CONST_ORRI 0x1000 | ||
102 | +#define TCG_CT_CONST_ANDI 0x2000 | ||
103 | |||
104 | #define ALL_GENERAL_REGS 0xffffu | ||
105 | #define ALL_VECTOR_REGS 0xffff0000u | ||
106 | @@ -XXX,XX +XXX,XX @@ static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8) | ||
107 | return i; | ||
108 | } | ||
109 | |||
110 | +/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */ | ||
111 | +static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) | ||
25 | +{ | 112 | +{ |
26 | + return (op & MO_BSWAP) == MO_BE; | 113 | + if (v32 == deposit32(v32, 16, 16, v32)) { |
114 | + return is_shimm16(v32, cmode, imm8); | ||
115 | + } else { | ||
116 | + return is_shimm32(v32, cmode, imm8); | ||
117 | + } | ||
27 | +} | 118 | +} |
28 | + | 119 | + |
29 | #endif | 120 | /* Test if a constant matches the constraint. |
30 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 121 | * TODO: define constraints for: |
31 | index XXXXXXX..XXXXXXX 100644 | 122 | * |
32 | --- a/accel/tcg/cputlb.c | 123 | @@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) |
33 | +++ b/accel/tcg/cputlb.c | 124 | return 1; |
34 | @@ -XXX,XX +XXX,XX @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size, | 125 | } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { |
35 | 126 | return 1; | |
36 | static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | 127 | - } else { |
37 | int mmu_idx, target_ulong addr, uintptr_t retaddr, | 128 | - return 0; |
38 | - MMUAccessType access_type, int size) | 129 | } |
39 | + MMUAccessType access_type, MemOp op) | 130 | + |
131 | + switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) { | ||
132 | + case 0: | ||
133 | + break; | ||
134 | + case TCG_CT_CONST_ANDI: | ||
135 | + val = ~val; | ||
136 | + /* fallthru */ | ||
137 | + case TCG_CT_CONST_ORRI: | ||
138 | + if (val == deposit64(val, 32, 32, val)) { | ||
139 | + int cmode, imm8; | ||
140 | + return is_shimm1632(val, &cmode, &imm8); | ||
141 | + } | ||
142 | + break; | ||
143 | + default: | ||
144 | + /* Both bits should not be set for the same insn. */ | ||
145 | + g_assert_not_reached(); | ||
146 | + } | ||
147 | + | ||
148 | + return 0; | ||
149 | } | ||
150 | |||
151 | static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset) | ||
152 | @@ -XXX,XX +XXX,XX @@ static uint32_t encode_vm(TCGReg rm) | ||
153 | return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1); | ||
154 | } | ||
155 | |||
156 | +static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece, | ||
157 | + TCGReg d, TCGReg m) | ||
158 | +{ | ||
159 | + tcg_out32(s, insn | (vece << 18) | (q << 6) | | ||
160 | + encode_vd(d) | encode_vm(m)); | ||
161 | +} | ||
162 | + | ||
163 | static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece, | ||
164 | TCGReg d, TCGReg n, TCGReg m) | ||
40 | { | 165 | { |
41 | CPUState *cpu = env_cpu(env); | 166 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) |
42 | hwaddr mr_offset; | 167 | case INDEX_op_add_vec: |
43 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | 168 | case INDEX_op_sub_vec: |
44 | qemu_mutex_lock_iothread(); | 169 | case INDEX_op_xor_vec: |
45 | locked = true; | 170 | - case INDEX_op_or_vec: |
46 | } | 171 | - case INDEX_op_and_vec: |
47 | - r = memory_region_dispatch_read(mr, mr_offset, &val, | 172 | - case INDEX_op_cmp_vec: |
48 | - size_memop(size) | MO_TE, | 173 | return C_O1_I2(w, w, w); |
49 | - iotlbentry->attrs); | 174 | + case INDEX_op_or_vec: |
50 | + r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); | 175 | + return C_O1_I2(w, w, wO); |
51 | if (r != MEMTX_OK) { | 176 | + case INDEX_op_and_vec: |
52 | hwaddr physaddr = mr_offset + | 177 | + return C_O1_I2(w, w, wV); |
53 | section->offset_within_address_space - | 178 | + case INDEX_op_cmp_vec: |
54 | section->offset_within_region; | 179 | + return C_O1_I2(w, w, wZ); |
55 | 180 | ||
56 | - cpu_transaction_failed(cpu, physaddr, addr, size, access_type, | ||
57 | + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, | ||
58 | mmu_idx, iotlbentry->attrs, r, retaddr); | ||
59 | } | ||
60 | if (locked) { | ||
61 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
62 | |||
63 | static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
64 | int mmu_idx, uint64_t val, target_ulong addr, | ||
65 | - uintptr_t retaddr, int size) | ||
66 | + uintptr_t retaddr, MemOp op) | ||
67 | { | ||
68 | CPUState *cpu = env_cpu(env); | ||
69 | hwaddr mr_offset; | ||
70 | @@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
71 | qemu_mutex_lock_iothread(); | ||
72 | locked = true; | ||
73 | } | ||
74 | - r = memory_region_dispatch_write(mr, mr_offset, val, | ||
75 | - size_memop(size) | MO_TE, | ||
76 | - iotlbentry->attrs); | ||
77 | + r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); | ||
78 | if (r != MEMTX_OK) { | ||
79 | hwaddr physaddr = mr_offset + | ||
80 | section->offset_within_address_space - | ||
81 | section->offset_within_region; | ||
82 | |||
83 | - cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE, | ||
84 | - mmu_idx, iotlbentry->attrs, r, retaddr); | ||
85 | + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), | ||
86 | + MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, | ||
87 | + retaddr); | ||
88 | } | ||
89 | if (locked) { | ||
90 | qemu_mutex_unlock_iothread(); | ||
91 | @@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, | ||
92 | * access type. | ||
93 | */ | ||
94 | |||
95 | -static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian) | ||
96 | +static inline uint64_t handle_bswap(uint64_t val, MemOp op) | ||
97 | { | ||
98 | - if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) { | ||
99 | - switch (size) { | ||
100 | - case 1: return val; | ||
101 | - case 2: return bswap16(val); | ||
102 | - case 4: return bswap32(val); | ||
103 | - case 8: return bswap64(val); | ||
104 | + if ((memop_big_endian(op) && NEED_BE_BSWAP) || | ||
105 | + (!memop_big_endian(op) && NEED_LE_BSWAP)) { | ||
106 | + switch (op & MO_SIZE) { | ||
107 | + case MO_8: return val; | ||
108 | + case MO_16: return bswap16(val); | ||
109 | + case MO_32: return bswap32(val); | ||
110 | + case MO_64: return bswap64(val); | ||
111 | default: | ||
112 | g_assert_not_reached(); | ||
113 | } | ||
114 | @@ -XXX,XX +XXX,XX @@ typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, | ||
115 | |||
116 | static inline uint64_t __attribute__((always_inline)) | ||
117 | load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
118 | - uintptr_t retaddr, size_t size, bool big_endian, bool code_read, | ||
119 | + uintptr_t retaddr, MemOp op, bool code_read, | ||
120 | FullLoadHelper *full_load) | ||
121 | { | ||
122 | uintptr_t mmu_idx = get_mmuidx(oi); | ||
123 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
124 | unsigned a_bits = get_alignment_bits(get_memop(oi)); | ||
125 | void *haddr; | ||
126 | uint64_t res; | ||
127 | + size_t size = memop_size(op); | ||
128 | |||
129 | /* Handle CPU specific unaligned behaviour */ | ||
130 | if (addr & ((1 << a_bits) - 1)) { | ||
131 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
132 | |||
133 | /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */ | ||
134 | res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | ||
135 | - mmu_idx, addr, retaddr, access_type, size); | ||
136 | - return handle_bswap(res, size, big_endian); | ||
137 | + mmu_idx, addr, retaddr, access_type, op); | ||
138 | + return handle_bswap(res, op); | ||
139 | } | ||
140 | |||
141 | /* Handle slow unaligned access (it spans two pages or IO). */ | ||
142 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
143 | r2 = full_load(env, addr2, oi, retaddr); | ||
144 | shift = (addr & (size - 1)) * 8; | ||
145 | |||
146 | - if (big_endian) { | ||
147 | + if (memop_big_endian(op)) { | ||
148 | /* Big-endian combine. */ | ||
149 | res = (r1 << shift) | (r2 >> ((size * 8) - shift)); | ||
150 | } else { | ||
151 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
152 | |||
153 | do_aligned_access: | ||
154 | haddr = (void *)((uintptr_t)addr + entry->addend); | ||
155 | - switch (size) { | ||
156 | - case 1: | ||
157 | + switch (op) { | ||
158 | + case MO_UB: | ||
159 | res = ldub_p(haddr); | ||
160 | break; | ||
161 | - case 2: | ||
162 | - if (big_endian) { | ||
163 | - res = lduw_be_p(haddr); | ||
164 | - } else { | ||
165 | - res = lduw_le_p(haddr); | ||
166 | - } | ||
167 | + case MO_BEUW: | ||
168 | + res = lduw_be_p(haddr); | ||
169 | break; | ||
170 | - case 4: | ||
171 | - if (big_endian) { | ||
172 | - res = (uint32_t)ldl_be_p(haddr); | ||
173 | - } else { | ||
174 | - res = (uint32_t)ldl_le_p(haddr); | ||
175 | - } | ||
176 | + case MO_LEUW: | ||
177 | + res = lduw_le_p(haddr); | ||
178 | break; | ||
179 | - case 8: | ||
180 | - if (big_endian) { | ||
181 | - res = ldq_be_p(haddr); | ||
182 | - } else { | ||
183 | - res = ldq_le_p(haddr); | ||
184 | - } | ||
185 | + case MO_BEUL: | ||
186 | + res = (uint32_t)ldl_be_p(haddr); | ||
187 | + break; | ||
188 | + case MO_LEUL: | ||
189 | + res = (uint32_t)ldl_le_p(haddr); | ||
190 | + break; | ||
191 | + case MO_BEQ: | ||
192 | + res = ldq_be_p(haddr); | ||
193 | + break; | ||
194 | + case MO_LEQ: | ||
195 | + res = ldq_le_p(haddr); | ||
196 | break; | ||
197 | default: | 181 | default: |
198 | g_assert_not_reached(); | 182 | g_assert_not_reached(); |
199 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | 183 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, |
200 | static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, | 184 | } |
201 | TCGMemOpIdx oi, uintptr_t retaddr) | 185 | } |
186 | |||
187 | +static const ARMInsn vec_cmp_insn[16] = { | ||
188 | + [TCG_COND_EQ] = INSN_VCEQ, | ||
189 | + [TCG_COND_GT] = INSN_VCGT, | ||
190 | + [TCG_COND_GE] = INSN_VCGE, | ||
191 | + [TCG_COND_GTU] = INSN_VCGT_U, | ||
192 | + [TCG_COND_GEU] = INSN_VCGE_U, | ||
193 | +}; | ||
194 | + | ||
195 | +static const ARMInsn vec_cmp0_insn[16] = { | ||
196 | + [TCG_COND_EQ] = INSN_VCEQ0, | ||
197 | + [TCG_COND_GT] = INSN_VCGT0, | ||
198 | + [TCG_COND_GE] = INSN_VCGE0, | ||
199 | + [TCG_COND_LT] = INSN_VCLT0, | ||
200 | + [TCG_COND_LE] = INSN_VCLE0, | ||
201 | +}; | ||
202 | + | ||
203 | static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
204 | unsigned vecl, unsigned vece, | ||
205 | const TCGArg *args, const int *const_args) | ||
202 | { | 206 | { |
203 | - return load_helper(env, addr, oi, retaddr, 1, false, false, | 207 | - g_assert_not_reached(); |
204 | - full_ldub_mmu); | 208 | + TCGType type = vecl + TCG_TYPE_V64; |
205 | + return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); | 209 | + unsigned q = vecl; |
206 | } | 210 | + TCGArg a0, a1, a2; |
207 | 211 | + int cmode, imm8; | |
208 | tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, | 212 | + |
209 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, | 213 | + a0 = args[0]; |
210 | static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, | 214 | + a1 = args[1]; |
211 | TCGMemOpIdx oi, uintptr_t retaddr) | 215 | + a2 = args[2]; |
216 | + | ||
217 | + switch (opc) { | ||
218 | + case INDEX_op_ld_vec: | ||
219 | + tcg_out_ld(s, type, a0, a1, a2); | ||
220 | + return; | ||
221 | + case INDEX_op_st_vec: | ||
222 | + tcg_out_st(s, type, a0, a1, a2); | ||
223 | + return; | ||
224 | + case INDEX_op_dupm_vec: | ||
225 | + tcg_out_dupm_vec(s, type, vece, a0, a1, a2); | ||
226 | + return; | ||
227 | + case INDEX_op_dup2_vec: | ||
228 | + tcg_out_dup2_vec(s, a0, a1, a2); | ||
229 | + return; | ||
230 | + case INDEX_op_add_vec: | ||
231 | + tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2); | ||
232 | + return; | ||
233 | + case INDEX_op_sub_vec: | ||
234 | + tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2); | ||
235 | + return; | ||
236 | + case INDEX_op_xor_vec: | ||
237 | + tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2); | ||
238 | + return; | ||
239 | + | ||
240 | + case INDEX_op_and_vec: | ||
241 | + if (const_args[2]) { | ||
242 | + is_shimm1632(~a2, &cmode, &imm8); | ||
243 | + if (a0 == a1) { | ||
244 | + tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */ | ||
245 | + return; | ||
246 | + } | ||
247 | + tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */ | ||
248 | + a2 = a0; | ||
249 | + } | ||
250 | + tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2); | ||
251 | + return; | ||
252 | + | ||
253 | + case INDEX_op_or_vec: | ||
254 | + if (const_args[2]) { | ||
255 | + is_shimm1632(a2, &cmode, &imm8); | ||
256 | + if (a0 == a1) { | ||
257 | + tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */ | ||
258 | + return; | ||
259 | + } | ||
260 | + tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */ | ||
261 | + a2 = a0; | ||
262 | + } | ||
263 | + tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2); | ||
264 | + return; | ||
265 | + | ||
266 | + case INDEX_op_cmp_vec: | ||
267 | + { | ||
268 | + TCGCond cond = args[3]; | ||
269 | + | ||
270 | + if (cond == TCG_COND_NE) { | ||
271 | + if (const_args[2]) { | ||
272 | + tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1); | ||
273 | + } else { | ||
274 | + tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2); | ||
275 | + tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0); | ||
276 | + } | ||
277 | + } else { | ||
278 | + ARMInsn insn; | ||
279 | + | ||
280 | + if (const_args[2]) { | ||
281 | + insn = vec_cmp0_insn[cond]; | ||
282 | + if (insn) { | ||
283 | + tcg_out_vreg2(s, insn, q, vece, a0, a1); | ||
284 | + return; | ||
285 | + } | ||
286 | + tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0); | ||
287 | + a2 = TCG_VEC_TMP; | ||
288 | + } | ||
289 | + insn = vec_cmp_insn[cond]; | ||
290 | + if (insn == 0) { | ||
291 | + TCGArg t; | ||
292 | + t = a1, a1 = a2, a2 = t; | ||
293 | + cond = tcg_swap_cond(cond); | ||
294 | + insn = vec_cmp_insn[cond]; | ||
295 | + tcg_debug_assert(insn != 0); | ||
296 | + } | ||
297 | + tcg_out_vreg3(s, insn, q, vece, a0, a1, a2); | ||
298 | + } | ||
299 | + } | ||
300 | + return; | ||
301 | + | ||
302 | + case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ | ||
303 | + case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ | ||
304 | + default: | ||
305 | + g_assert_not_reached(); | ||
306 | + } | ||
307 | } | ||
308 | |||
309 | int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) | ||
212 | { | 310 | { |
213 | - return load_helper(env, addr, oi, retaddr, 2, false, false, | 311 | - return 0; |
214 | + return load_helper(env, addr, oi, retaddr, MO_LEUW, false, | 312 | + switch (opc) { |
215 | full_le_lduw_mmu); | 313 | + case INDEX_op_add_vec: |
216 | } | 314 | + case INDEX_op_sub_vec: |
217 | 315 | + case INDEX_op_and_vec: | |
218 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, | 316 | + case INDEX_op_or_vec: |
219 | static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, | 317 | + case INDEX_op_xor_vec: |
220 | TCGMemOpIdx oi, uintptr_t retaddr) | 318 | + return 1; |
221 | { | 319 | + case INDEX_op_cmp_vec: |
222 | - return load_helper(env, addr, oi, retaddr, 2, true, false, | 320 | + return vece < MO_64; |
223 | + return load_helper(env, addr, oi, retaddr, MO_BEUW, false, | 321 | + default: |
224 | full_be_lduw_mmu); | 322 | + return 0; |
225 | } | 323 | + } |
226 | 324 | } | |
227 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, | 325 | |
228 | static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, | 326 | void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, |
229 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
230 | { | ||
231 | - return load_helper(env, addr, oi, retaddr, 4, false, false, | ||
232 | + return load_helper(env, addr, oi, retaddr, MO_LEUL, false, | ||
233 | full_le_ldul_mmu); | ||
234 | } | ||
235 | |||
236 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, | ||
237 | static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, | ||
238 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
239 | { | ||
240 | - return load_helper(env, addr, oi, retaddr, 4, true, false, | ||
241 | + return load_helper(env, addr, oi, retaddr, MO_BEUL, false, | ||
242 | full_be_ldul_mmu); | ||
243 | } | ||
244 | |||
245 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, | ||
246 | uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, | ||
247 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
248 | { | ||
249 | - return load_helper(env, addr, oi, retaddr, 8, false, false, | ||
250 | + return load_helper(env, addr, oi, retaddr, MO_LEQ, false, | ||
251 | helper_le_ldq_mmu); | ||
252 | } | ||
253 | |||
254 | uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, | ||
255 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
256 | { | ||
257 | - return load_helper(env, addr, oi, retaddr, 8, true, false, | ||
258 | + return load_helper(env, addr, oi, retaddr, MO_BEQ, false, | ||
259 | helper_be_ldq_mmu); | ||
260 | } | ||
261 | |||
262 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, | ||
263 | |||
264 | static inline void __attribute__((always_inline)) | ||
265 | store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
266 | - TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian) | ||
267 | + TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) | ||
268 | { | ||
269 | uintptr_t mmu_idx = get_mmuidx(oi); | ||
270 | uintptr_t index = tlb_index(env, mmu_idx, addr); | ||
271 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
272 | const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); | ||
273 | unsigned a_bits = get_alignment_bits(get_memop(oi)); | ||
274 | void *haddr; | ||
275 | + size_t size = memop_size(op); | ||
276 | |||
277 | /* Handle CPU specific unaligned behaviour */ | ||
278 | if (addr & ((1 << a_bits) - 1)) { | ||
279 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
280 | |||
281 | /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */ | ||
282 | io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, | ||
283 | - handle_bswap(val, size, big_endian), | ||
284 | - addr, retaddr, size); | ||
285 | + handle_bswap(val, op), | ||
286 | + addr, retaddr, op); | ||
287 | return; | ||
288 | } | ||
289 | |||
290 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
291 | */ | ||
292 | for (i = 0; i < size; ++i) { | ||
293 | uint8_t val8; | ||
294 | - if (big_endian) { | ||
295 | + if (memop_big_endian(op)) { | ||
296 | /* Big-endian extract. */ | ||
297 | val8 = val >> (((size - 1) * 8) - (i * 8)); | ||
298 | } else { | ||
299 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
300 | |||
301 | do_aligned_access: | ||
302 | haddr = (void *)((uintptr_t)addr + entry->addend); | ||
303 | - switch (size) { | ||
304 | - case 1: | ||
305 | + switch (op) { | ||
306 | + case MO_UB: | ||
307 | stb_p(haddr, val); | ||
308 | break; | ||
309 | - case 2: | ||
310 | - if (big_endian) { | ||
311 | - stw_be_p(haddr, val); | ||
312 | - } else { | ||
313 | - stw_le_p(haddr, val); | ||
314 | - } | ||
315 | + case MO_BEUW: | ||
316 | + stw_be_p(haddr, val); | ||
317 | break; | ||
318 | - case 4: | ||
319 | - if (big_endian) { | ||
320 | - stl_be_p(haddr, val); | ||
321 | - } else { | ||
322 | - stl_le_p(haddr, val); | ||
323 | - } | ||
324 | + case MO_LEUW: | ||
325 | + stw_le_p(haddr, val); | ||
326 | break; | ||
327 | - case 8: | ||
328 | - if (big_endian) { | ||
329 | - stq_be_p(haddr, val); | ||
330 | - } else { | ||
331 | - stq_le_p(haddr, val); | ||
332 | - } | ||
333 | + case MO_BEUL: | ||
334 | + stl_be_p(haddr, val); | ||
335 | + break; | ||
336 | + case MO_LEUL: | ||
337 | + stl_le_p(haddr, val); | ||
338 | + break; | ||
339 | + case MO_BEQ: | ||
340 | + stq_be_p(haddr, val); | ||
341 | + break; | ||
342 | + case MO_LEQ: | ||
343 | + stq_le_p(haddr, val); | ||
344 | break; | ||
345 | default: | ||
346 | g_assert_not_reached(); | ||
347 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
348 | void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, | ||
349 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
350 | { | ||
351 | - store_helper(env, addr, val, oi, retaddr, 1, false); | ||
352 | + store_helper(env, addr, val, oi, retaddr, MO_UB); | ||
353 | } | ||
354 | |||
355 | void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, | ||
356 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
357 | { | ||
358 | - store_helper(env, addr, val, oi, retaddr, 2, false); | ||
359 | + store_helper(env, addr, val, oi, retaddr, MO_LEUW); | ||
360 | } | ||
361 | |||
362 | void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, | ||
363 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
364 | { | ||
365 | - store_helper(env, addr, val, oi, retaddr, 2, true); | ||
366 | + store_helper(env, addr, val, oi, retaddr, MO_BEUW); | ||
367 | } | ||
368 | |||
369 | void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, | ||
370 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
371 | { | ||
372 | - store_helper(env, addr, val, oi, retaddr, 4, false); | ||
373 | + store_helper(env, addr, val, oi, retaddr, MO_LEUL); | ||
374 | } | ||
375 | |||
376 | void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, | ||
377 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
378 | { | ||
379 | - store_helper(env, addr, val, oi, retaddr, 4, true); | ||
380 | + store_helper(env, addr, val, oi, retaddr, MO_BEUL); | ||
381 | } | ||
382 | |||
383 | void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | ||
384 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
385 | { | ||
386 | - store_helper(env, addr, val, oi, retaddr, 8, false); | ||
387 | + store_helper(env, addr, val, oi, retaddr, MO_LEQ); | ||
388 | } | ||
389 | |||
390 | void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | ||
391 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
392 | { | ||
393 | - store_helper(env, addr, val, oi, retaddr, 8, true); | ||
394 | + store_helper(env, addr, val, oi, retaddr, MO_BEQ); | ||
395 | } | ||
396 | |||
397 | /* First set of helpers allows passing in of OI and RETADDR. This makes | ||
398 | @@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | ||
399 | static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr, | ||
400 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
401 | { | ||
402 | - return load_helper(env, addr, oi, retaddr, 1, false, true, | ||
403 | - full_ldub_cmmu); | ||
404 | + return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu); | ||
405 | } | ||
406 | |||
407 | uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, | ||
408 | @@ -XXX,XX +XXX,XX @@ uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, | ||
409 | static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr, | ||
410 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
411 | { | ||
412 | - return load_helper(env, addr, oi, retaddr, 2, false, true, | ||
413 | + return load_helper(env, addr, oi, retaddr, MO_LEUW, true, | ||
414 | full_le_lduw_cmmu); | ||
415 | } | ||
416 | |||
417 | @@ -XXX,XX +XXX,XX @@ uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr, | ||
418 | static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr, | ||
419 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
420 | { | ||
421 | - return load_helper(env, addr, oi, retaddr, 2, true, true, | ||
422 | + return load_helper(env, addr, oi, retaddr, MO_BEUW, true, | ||
423 | full_be_lduw_cmmu); | ||
424 | } | ||
425 | |||
426 | @@ -XXX,XX +XXX,XX @@ uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr, | ||
427 | static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr, | ||
428 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
429 | { | ||
430 | - return load_helper(env, addr, oi, retaddr, 4, false, true, | ||
431 | + return load_helper(env, addr, oi, retaddr, MO_LEUL, true, | ||
432 | full_le_ldul_cmmu); | ||
433 | } | ||
434 | |||
435 | @@ -XXX,XX +XXX,XX @@ uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr, | ||
436 | static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr, | ||
437 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
438 | { | ||
439 | - return load_helper(env, addr, oi, retaddr, 4, true, true, | ||
440 | + return load_helper(env, addr, oi, retaddr, MO_BEUL, true, | ||
441 | full_be_ldul_cmmu); | ||
442 | } | ||
443 | |||
444 | @@ -XXX,XX +XXX,XX @@ uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr, | ||
445 | uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr, | ||
446 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
447 | { | ||
448 | - return load_helper(env, addr, oi, retaddr, 8, false, true, | ||
449 | + return load_helper(env, addr, oi, retaddr, MO_LEQ, true, | ||
450 | helper_le_ldq_cmmu); | ||
451 | } | ||
452 | |||
453 | uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, | ||
454 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
455 | { | ||
456 | - return load_helper(env, addr, oi, retaddr, 8, true, true, | ||
457 | + return load_helper(env, addr, oi, retaddr, MO_BEQ, true, | ||
458 | helper_be_ldq_cmmu); | ||
459 | } | ||
460 | -- | 327 | -- |
461 | 2.17.1 | 328 | 2.25.1 |
462 | 329 | ||
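The left-hand series above rewrites handle_bswap() to key the swap off a MemOp rather than a (size, big_endian) pair. Below is a compressed standalone sketch assuming a little-endian host; the MO_* values are stand-ins chosen for illustration, not QEMU's actual MemOp encoding.

    #include <stdint.h>

    enum {
        MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3, /* log2 of access size */
        MO_SIZE = 3,
        MO_BE = 4,                                 /* big-endian access */
    };

    static uint64_t sketch_bswap(uint64_t val, int op)
    {
        /* On a little-endian host only big-endian accesses need a swap. */
        if (!(op & MO_BE)) {
            return val;
        }
        switch (op & MO_SIZE) {
        case MO_16: return __builtin_bswap16((uint16_t)val);
        case MO_32: return __builtin_bswap32((uint32_t)val);
        case MO_64: return __builtin_bswap64(val);
        default:    return val;                    /* MO_8: nothing to do */
        }
    }

    /* Example: sketch_bswap(0x11223344, MO_32 | MO_BE) == 0x44332211. */

Once every caller carries a MemOp this far, the swap can be done once in adjust_endianness, which is what lets the dropped follow-up below delete this helper and have inverted-endian cases (e.g. the SPARC64 Invert Endian TTE bit it mentions) reduce to two swaps that cancel.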
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | Now that MemOp has been pushed down into the memory API, and | ||
4 | callers are encoding endianness, we can collapse byte swaps | ||
5 | along the I/O path into the accelerator- and target-independent | ||
6 | adjust_endianness. | ||
7 | |||
8 | Collapsing byte swaps along the I/O path enables additional endian | ||
9 | inversion logic, e.g. the SPARC64 Invert Endian TTE bit, with redundant | ||
10 | byte swaps cancelling out. | ||
11 | |||
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | Suggested-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Message-Id: <911ff31af11922a9afba9b7ce128af8b8b80f316.1566466906.git.tony.nguyen@bt.com> | ||
16 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
17 | --- | ||
18 | accel/tcg/cputlb.c | 42 ++-------------------------- | ||
19 | exec.c | 17 +++--------- | ||
20 | hw/virtio/virtio-pci.c | 10 +++---- | ||
21 | memory.c | 33 ++++++++-------------- | ||
22 | memory_ldst.inc.c | 63 ------------------------------------------ | ||
23 | 5 files changed, 23 insertions(+), 142 deletions(-) | ||
24 | |||
25 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
26 | index XXXXXXX..XXXXXXX 100644 | ||
27 | --- a/accel/tcg/cputlb.c | ||
28 | +++ b/accel/tcg/cputlb.c | ||
29 | @@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, | ||
30 | cpu_loop_exit_atomic(env_cpu(env), retaddr); | ||
31 | } | ||
32 | |||
33 | -#ifdef TARGET_WORDS_BIGENDIAN | ||
34 | -#define NEED_BE_BSWAP 0 | ||
35 | -#define NEED_LE_BSWAP 1 | ||
36 | -#else | ||
37 | -#define NEED_BE_BSWAP 1 | ||
38 | -#define NEED_LE_BSWAP 0 | ||
39 | -#endif | ||
40 | - | ||
41 | -/* | ||
42 | - * Byte Swap Helper | ||
43 | - * | ||
44 | - * This should all dead code away depending on the build host and | ||
45 | - * access type. | ||
46 | - */ | ||
47 | - | ||
48 | -static inline uint64_t handle_bswap(uint64_t val, MemOp op) | ||
49 | -{ | ||
50 | - if ((memop_big_endian(op) && NEED_BE_BSWAP) || | ||
51 | - (!memop_big_endian(op) && NEED_LE_BSWAP)) { | ||
52 | - switch (op & MO_SIZE) { | ||
53 | - case MO_8: return val; | ||
54 | - case MO_16: return bswap16(val); | ||
55 | - case MO_32: return bswap32(val); | ||
56 | - case MO_64: return bswap64(val); | ||
57 | - default: | ||
58 | - g_assert_not_reached(); | ||
59 | - } | ||
60 | - } else { | ||
61 | - return val; | ||
62 | - } | ||
63 | -} | ||
64 | - | ||
65 | /* | ||
66 | * Load Helpers | ||
67 | * | ||
68 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
69 | } | ||
70 | } | ||
71 | |||
72 | - /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */ | ||
73 | - res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | ||
74 | - mmu_idx, addr, retaddr, access_type, op); | ||
75 | - return handle_bswap(res, op); | ||
76 | + return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | ||
77 | + mmu_idx, addr, retaddr, access_type, op); | ||
78 | } | ||
79 | |||
80 | /* Handle slow unaligned access (it spans two pages or IO). */ | ||
81 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
82 | } | ||
83 | } | ||
84 | |||
85 | - /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */ | ||
86 | io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, | ||
87 | - handle_bswap(val, op), | ||
88 | - addr, retaddr, op); | ||
89 | + val, addr, retaddr, op); | ||
90 | return; | ||
91 | } | ||
92 | |||
93 | diff --git a/exec.c b/exec.c | ||
94 | index XXXXXXX..XXXXXXX 100644 | ||
95 | --- a/exec.c | ||
96 | +++ b/exec.c | ||
97 | @@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, | ||
98 | l = memory_access_size(mr, l, addr1); | ||
99 | /* XXX: could force current_cpu to NULL to avoid | ||
100 | potential bugs */ | ||
101 | - val = ldn_p(buf, l); | ||
102 | - /* | ||
103 | - * TODO: Merge bswap from ldn_p into memory_region_dispatch_write | ||
104 | - * by using ldn_he_p and dropping MO_TE to get a host-endian value. | ||
105 | - */ | ||
106 | + val = ldn_he_p(buf, l); | ||
107 | result |= memory_region_dispatch_write(mr, addr1, val, | ||
108 | - size_memop(l) | MO_TE, | ||
109 | - attrs); | ||
110 | + size_memop(l), attrs); | ||
111 | } else { | ||
112 | /* RAM case */ | ||
113 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); | ||
114 | @@ -XXX,XX +XXX,XX @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, | ||
115 | /* I/O case */ | ||
116 | release_lock |= prepare_mmio_access(mr); | ||
117 | l = memory_access_size(mr, l, addr1); | ||
118 | - /* | ||
119 | - * TODO: Merge bswap from stn_p into memory_region_dispatch_read | ||
120 | - * by using stn_he_p and dropping MO_TE to get a host-endian value. | ||
121 | - */ | ||
122 | result |= memory_region_dispatch_read(mr, addr1, &val, | ||
123 | - size_memop(l) | MO_TE, attrs); | ||
124 | - stn_p(buf, l, val); | ||
125 | + size_memop(l), attrs); | ||
126 | + stn_he_p(buf, l, val); | ||
127 | } else { | ||
128 | /* RAM case */ | ||
129 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); | ||
130 | diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c | ||
131 | index XXXXXXX..XXXXXXX 100644 | ||
132 | --- a/hw/virtio/virtio-pci.c | ||
133 | +++ b/hw/virtio/virtio-pci.c | ||
134 | @@ -XXX,XX +XXX,XX @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, | ||
135 | val = pci_get_byte(buf); | ||
136 | break; | ||
137 | case 2: | ||
138 | - val = cpu_to_le16(pci_get_word(buf)); | ||
139 | + val = pci_get_word(buf); | ||
140 | break; | ||
141 | case 4: | ||
142 | - val = cpu_to_le32(pci_get_long(buf)); | ||
143 | + val = pci_get_long(buf); | ||
144 | break; | ||
145 | default: | ||
146 | /* As length is under guest control, handle illegal values. */ | ||
147 | return; | ||
148 | } | ||
149 | - /* TODO: Merge bswap from cpu_to_leXX into memory_region_dispatch_write. */ | ||
150 | memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE, | ||
151 | MEMTXATTRS_UNSPECIFIED); | ||
152 | } | ||
153 | @@ -XXX,XX +XXX,XX @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, | ||
154 | /* Make sure caller aligned buf properly */ | ||
155 | assert(!(((uintptr_t)buf) & (len - 1))); | ||
156 | |||
157 | - /* TODO: Merge bswap from leXX_to_cpu into memory_region_dispatch_read. */ | ||
158 | memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE, | ||
159 | MEMTXATTRS_UNSPECIFIED); | ||
160 | switch (len) { | ||
161 | @@ -XXX,XX +XXX,XX @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, | ||
162 | pci_set_byte(buf, val); | ||
163 | break; | ||
164 | case 2: | ||
165 | - pci_set_word(buf, le16_to_cpu(val)); | ||
166 | + pci_set_word(buf, val); | ||
167 | break; | ||
168 | case 4: | ||
169 | - pci_set_long(buf, le32_to_cpu(val)); | ||
170 | + pci_set_long(buf, val); | ||
171 | break; | ||
172 | default: | ||
173 | /* As length is under guest control, handle illegal values. */ | ||
174 | diff --git a/memory.c b/memory.c | ||
175 | index XXXXXXX..XXXXXXX 100644 | ||
176 | --- a/memory.c | ||
177 | +++ b/memory.c | ||
178 | @@ -XXX,XX +XXX,XX @@ static bool memory_region_big_endian(MemoryRegion *mr) | ||
179 | #endif | ||
180 | } | ||
181 | |||
182 | -static bool memory_region_wrong_endianness(MemoryRegion *mr) | ||
183 | +static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op) | ||
184 | { | ||
185 | -#ifdef TARGET_WORDS_BIGENDIAN | ||
186 | - return mr->ops->endianness == DEVICE_LITTLE_ENDIAN; | ||
187 | -#else | ||
188 | - return mr->ops->endianness == DEVICE_BIG_ENDIAN; | ||
189 | -#endif | ||
190 | -} | ||
191 | - | ||
192 | -static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size) | ||
193 | -{ | ||
194 | - if (memory_region_wrong_endianness(mr)) { | ||
195 | - switch (size) { | ||
196 | - case 1: | ||
197 | + if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) { | ||
198 | + switch (op & MO_SIZE) { | ||
199 | + case MO_8: | ||
200 | break; | ||
201 | - case 2: | ||
202 | + case MO_16: | ||
203 | *data = bswap16(*data); | ||
204 | break; | ||
205 | - case 4: | ||
206 | + case MO_32: | ||
207 | *data = bswap32(*data); | ||
208 | break; | ||
209 | - case 8: | ||
210 | + case MO_64: | ||
211 | *data = bswap64(*data); | ||
212 | break; | ||
213 | default: | ||
214 | - abort(); | ||
215 | + g_assert_not_reached(); | ||
216 | } | ||
217 | } | ||
218 | } | ||
219 | @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, | ||
220 | } | ||
221 | |||
222 | r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); | ||
223 | - adjust_endianness(mr, pval, size); | ||
224 | + adjust_endianness(mr, pval, op); | ||
225 | return r; | ||
226 | } | ||
227 | |||
228 | @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, | ||
229 | return MEMTX_DECODE_ERROR; | ||
230 | } | ||
231 | |||
232 | - adjust_endianness(mr, &data, size); | ||
233 | + adjust_endianness(mr, &data, op); | ||
234 | |||
235 | if ((!kvm_eventfds_enabled()) && | ||
236 | memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { | ||
237 | @@ -XXX,XX +XXX,XX @@ void memory_region_add_eventfd(MemoryRegion *mr, | ||
238 | } | ||
239 | |||
240 | if (size) { | ||
241 | - adjust_endianness(mr, &mrfd.data, size); | ||
242 | + adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); | ||
243 | } | ||
244 | memory_region_transaction_begin(); | ||
245 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | ||
246 | @@ -XXX,XX +XXX,XX @@ void memory_region_del_eventfd(MemoryRegion *mr, | ||
247 | unsigned i; | ||
248 | |||
249 | if (size) { | ||
250 | - adjust_endianness(mr, &mrfd.data, size); | ||
251 | + adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); | ||
252 | } | ||
253 | memory_region_transaction_begin(); | ||
254 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | ||
255 | diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c | ||
256 | index XXXXXXX..XXXXXXX 100644 | ||
257 | --- a/memory_ldst.inc.c | ||
258 | +++ b/memory_ldst.inc.c | ||
259 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, | ||
260 | release_lock |= prepare_mmio_access(mr); | ||
261 | |||
262 | /* I/O case */ | ||
263 | - /* TODO: Merge bswap32 into memory_region_dispatch_read. */ | ||
264 | r = memory_region_dispatch_read(mr, addr1, &val, | ||
265 | MO_32 | devend_memop(endian), attrs); | ||
266 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
267 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
268 | - val = bswap32(val); | ||
269 | - } | ||
270 | -#else | ||
271 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
272 | - val = bswap32(val); | ||
273 | - } | ||
274 | -#endif | ||
275 | } else { | ||
276 | /* RAM case */ | ||
277 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
278 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, | ||
279 | release_lock |= prepare_mmio_access(mr); | ||
280 | |||
281 | /* I/O case */ | ||
282 | - /* TODO: Merge bswap64 into memory_region_dispatch_read. */ | ||
283 | r = memory_region_dispatch_read(mr, addr1, &val, | ||
284 | MO_64 | devend_memop(endian), attrs); | ||
285 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
286 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
287 | - val = bswap64(val); | ||
288 | - } | ||
289 | -#else | ||
290 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
291 | - val = bswap64(val); | ||
292 | - } | ||
293 | -#endif | ||
294 | } else { | ||
295 | /* RAM case */ | ||
296 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
297 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, | ||
298 | release_lock |= prepare_mmio_access(mr); | ||
299 | |||
300 | /* I/O case */ | ||
301 | - /* TODO: Merge bswap16 into memory_region_dispatch_read. */ | ||
302 | r = memory_region_dispatch_read(mr, addr1, &val, | ||
303 | MO_16 | devend_memop(endian), attrs); | ||
304 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
305 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
306 | - val = bswap16(val); | ||
307 | - } | ||
308 | -#else | ||
309 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
310 | - val = bswap16(val); | ||
311 | - } | ||
312 | -#endif | ||
313 | } else { | ||
314 | /* RAM case */ | ||
315 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
316 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, | ||
317 | mr = TRANSLATE(addr, &addr1, &l, true, attrs); | ||
318 | if (l < 4 || !memory_access_is_direct(mr, true)) { | ||
319 | release_lock |= prepare_mmio_access(mr); | ||
320 | - | ||
321 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
322 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
323 | - val = bswap32(val); | ||
324 | - } | ||
325 | -#else | ||
326 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
327 | - val = bswap32(val); | ||
328 | - } | ||
329 | -#endif | ||
330 | - /* TODO: Merge bswap32 into memory_region_dispatch_write. */ | ||
331 | r = memory_region_dispatch_write(mr, addr1, val, | ||
332 | MO_32 | devend_memop(endian), attrs); | ||
333 | } else { | ||
334 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, | ||
335 | mr = TRANSLATE(addr, &addr1, &l, true, attrs); | ||
336 | if (l < 2 || !memory_access_is_direct(mr, true)) { | ||
337 | release_lock |= prepare_mmio_access(mr); | ||
338 | - | ||
339 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
340 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
341 | - val = bswap16(val); | ||
342 | - } | ||
343 | -#else | ||
344 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
345 | - val = bswap16(val); | ||
346 | - } | ||
347 | -#endif | ||
348 | - /* TODO: Merge bswap16 into memory_region_dispatch_write. */ | ||
349 | r = memory_region_dispatch_write(mr, addr1, val, | ||
350 | MO_16 | devend_memop(endian), attrs); | ||
351 | } else { | ||
352 | @@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, | ||
353 | mr = TRANSLATE(addr, &addr1, &l, true, attrs); | ||
354 | if (l < 8 || !memory_access_is_direct(mr, true)) { | ||
355 | release_lock |= prepare_mmio_access(mr); | ||
356 | - | ||
357 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
358 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
359 | - val = bswap64(val); | ||
360 | - } | ||
361 | -#else | ||
362 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
363 | - val = bswap64(val); | ||
364 | - } | ||
365 | -#endif | ||
366 | - /* TODO: Merge bswap64 into memory_region_dispatch_write. */ | ||
367 | r = memory_region_dispatch_write(mr, addr1, val, | ||
368 | MO_64 | devend_memop(endian), attrs); | ||
369 | } else { | ||
370 | -- | ||
371 | 2.17.1 | ||
372 | |||
373 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | Notice the new attribute, byte_swap, and force the transaction through the | ||
4 | memory slow path. | ||
5 | |||
6 | Required by architectures that can invert endianness of a memory | ||
7 | transaction, e.g. SPARC64 has the Invert Endian TTE bit. | ||
8 | |||
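A hypothetical illustration of what the attribute buys (plain C, not QEMU code): a 32-bit value stored through a page whose translation entry inverts endianness reads back byte-reversed.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00u)
             | ((v << 8) & 0x00ff0000u) | (v << 24);
    }

    int main(void)
    {
        uint32_t stored = 0x11223344u;
        printf("normal read:        %08x\n", stored);           /* 11223344 */
        printf("invert-endian read: %08x\n", bswap32(stored));  /* 44332211 */
        return 0;
    }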
9 | Suggested-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Message-Id: <2a10a1f1c00a894af1212c8f68ef09c2966023c1.1566466906.git.tony.nguyen@bt.com> | ||
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | --- | ||
15 | include/exec/memattrs.h | 2 ++ | ||
16 | accel/tcg/cputlb.c | 12 ++++++++++++ | ||
17 | 2 files changed, 14 insertions(+) | ||
18 | |||
19 | diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/exec/memattrs.h | ||
22 | +++ b/include/exec/memattrs.h | ||
23 | @@ -XXX,XX +XXX,XX @@ typedef struct MemTxAttrs { | ||
24 | unsigned int user:1; | ||
25 | /* Requester ID (for MSI for example) */ | ||
26 | unsigned int requester_id:16; | ||
27 | + /* Invert endianness for this page */ | ||
28 | + unsigned int byte_swap:1; | ||
29 | /* | ||
30 | * The following are target-specific page-table bits. These are not | ||
31 | * related to actual memory transactions at all. However, this structure | ||
32 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
33 | index XXXXXXX..XXXXXXX 100644 | ||
34 | --- a/accel/tcg/cputlb.c | ||
35 | +++ b/accel/tcg/cputlb.c | ||
36 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | ||
37 | */ | ||
38 | address |= TLB_RECHECK; | ||
39 | } | ||
40 | + if (attrs.byte_swap) { | ||
41 | + /* Force the access through the I/O slow path. */ | ||
42 | + address |= TLB_MMIO; | ||
43 | + } | ||
44 | if (!memory_region_is_ram(section->mr) && | ||
45 | !memory_region_is_romd(section->mr)) { | ||
46 | /* IO memory case */ | ||
47 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
48 | bool locked = false; | ||
49 | MemTxResult r; | ||
50 | |||
51 | + if (iotlbentry->attrs.byte_swap) { | ||
52 | + op ^= MO_BSWAP; | ||
53 | + } | ||
54 | + | ||
55 | section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); | ||
56 | mr = section->mr; | ||
57 | mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; | ||
58 | @@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
59 | bool locked = false; | ||
60 | MemTxResult r; | ||
61 | |||
62 | + if (iotlbentry->attrs.byte_swap) { | ||
63 | + op ^= MO_BSWAP; | ||
64 | + } | ||
65 | + | ||
66 | section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); | ||
67 | mr = section->mr; | ||
68 | mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; | ||
69 | -- | ||
70 | 2.17.1 | ||
71 | |||
72 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | This bit configures the endianness of PCI MMIO devices. It is used by | ||
4 | Solaris and OpenBSD sunhme drivers. | ||
5 | |||
6 | Tested working on OpenBSD. | ||
7 | |||
8 | Unfortunately, Solaris 10 had an unrelated keyboard issue blocking | ||
9 | testing... another inch towards Solaris 10 on SPARC64 =) | ||
10 | |||
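A worked example of the check this patch adds (the bit definitions are copied from the diff below; the surrounding scaffolding is a hypothetical stand-in):

    #include <stdint.h>

    #define TTE_VALID_BIT (1ULL << 63)
    #define TTE_IE_BIT    (1ULL << 59)
    #define TTE_IS_IE(tte) ((tte) & TTE_IE_BIT)

    int main(void)
    {
        uint64_t tte = TTE_VALID_BIT | TTE_IE_BIT;  /* valid, invert-endian */
        /* Returns 0: attrs->byte_swap would be set for this mapping. */
        return TTE_IS_IE(tte) ? 0 : 1;
    }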
11 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> | ||
14 | Message-Id: <3c8d5181a584f1b3712d3d8d66801b13cecb4b88.1566466906.git.tony.nguyen@bt.com> | ||
15 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | --- | ||
17 | target/sparc/cpu.h | 2 ++ | ||
18 | target/sparc/mmu_helper.c | 8 +++++++- | ||
19 | 2 files changed, 9 insertions(+), 1 deletion(-) | ||
20 | |||
21 | diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h | ||
22 | index XXXXXXX..XXXXXXX 100644 | ||
23 | --- a/target/sparc/cpu.h | ||
24 | +++ b/target/sparc/cpu.h | ||
25 | @@ -XXX,XX +XXX,XX @@ enum { | ||
26 | |||
27 | #define TTE_VALID_BIT (1ULL << 63) | ||
28 | #define TTE_NFO_BIT (1ULL << 60) | ||
29 | +#define TTE_IE_BIT (1ULL << 59) | ||
30 | #define TTE_USED_BIT (1ULL << 41) | ||
31 | #define TTE_LOCKED_BIT (1ULL << 6) | ||
32 | #define TTE_SIDEEFFECT_BIT (1ULL << 3) | ||
33 | @@ -XXX,XX +XXX,XX @@ enum { | ||
34 | |||
35 | #define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT) | ||
36 | #define TTE_IS_NFO(tte) ((tte) & TTE_NFO_BIT) | ||
37 | +#define TTE_IS_IE(tte) ((tte) & TTE_IE_BIT) | ||
38 | #define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT) | ||
39 | #define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT) | ||
40 | #define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT) | ||
41 | diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c | ||
42 | index XXXXXXX..XXXXXXX 100644 | ||
43 | --- a/target/sparc/mmu_helper.c | ||
44 | +++ b/target/sparc/mmu_helper.c | ||
45 | @@ -XXX,XX +XXX,XX @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, | ||
46 | if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { | ||
47 | int do_fault = 0; | ||
48 | |||
49 | + if (TTE_IS_IE(env->dtlb[i].tte)) { | ||
50 | + attrs->byte_swap = true; | ||
51 | + } | ||
52 | + | ||
53 | /* access ok? */ | ||
54 | /* multiple bits in SFSR.FT may be set on TT_DFAULT */ | ||
55 | if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { | ||
56 | @@ -XXX,XX +XXX,XX @@ void dump_mmu(CPUSPARCState *env) | ||
57 | } | ||
58 | if (TTE_IS_VALID(env->dtlb[i].tte)) { | ||
59 | qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" | ||
60 | - ", %s, %s, %s, %s, ctx %" PRId64 " %s\n", | ||
61 | + ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n", | ||
62 | i, | ||
63 | env->dtlb[i].tag & (uint64_t)~0x1fffULL, | ||
64 | TTE_PA(env->dtlb[i].tte), | ||
65 | @@ -XXX,XX +XXX,XX @@ void dump_mmu(CPUSPARCState *env) | ||
66 | TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", | ||
67 | TTE_IS_LOCKED(env->dtlb[i].tte) ? | ||
68 | "locked" : "unlocked", | ||
69 | + TTE_IS_IE(env->dtlb[i].tte) ? | ||
70 | + "yes" : "no", | ||
71 | env->dtlb[i].tag & (uint64_t)0x1fffULL, | ||
72 | TTE_IS_GLOBAL(env->dtlb[i].tte) ? | ||
73 | "global" : "local"); | ||
74 | -- | ||
75 | 2.17.1 | ||
76 | |||
77 | diff view generated by jsdifflib |
1 | From: David Hildenbrand <david@redhat.com> | 1 | These logical and arithmetic operations are optional, but are |
---|---|---|---|
2 | trivial to accomplish with the existing infrastructure. | ||
2 | 3 | ||
3 | ... and also call it for CONFIG_USER_ONLY. This function probably will | 4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | also need some refactoring with regard to probing; however, we'll have to | ||
5 | come back to that later, once we clean up the other mem helpers. | ||
6 | |||
7 | The alignment check always makes sure that the write access falls into a | ||
8 | single page. | ||
9 | |||
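For concreteness (an illustrative program, not part of the patch): sc is the size control from do_csst, so the probed length 1 << sc now covers the actual operand size rather than zero bytes.

    #include <stdio.h>

    int main(void)
    {
        for (int sc = 0; sc <= 4; sc++) {
            printf("sc=%d -> probe %d byte(s)\n", sc, 1 << sc);
        }
        return 0;   /* prints 1, 2, 4, 8, 16 */
    }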
10 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
12 | Message-Id: <20190826075112.25637-8-david@redhat.com> | ||
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
14 | --- | 6 | --- |
15 | target/s390x/mem_helper.c | 4 +--- | 7 | tcg/arm/tcg-target-con-set.h | 1 + |
16 | 1 file changed, 1 insertion(+), 3 deletions(-) | 8 | tcg/arm/tcg-target.h | 10 +++++----- |
9 | tcg/arm/tcg-target.c.inc | 38 ++++++++++++++++++++++++++++++++++++ | ||
10 | 3 files changed, 44 insertions(+), 5 deletions(-) | ||
17 | 11 | ||
18 | diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c | 12 | diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h |
19 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/target/s390x/mem_helper.c | 14 | --- a/tcg/arm/tcg-target-con-set.h |
21 | +++ b/target/s390x/mem_helper.c | 15 | +++ b/tcg/arm/tcg-target-con-set.h |
22 | @@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, | 16 | @@ -XXX,XX +XXX,XX @@ C_O0_I4(s, s, s, s) |
23 | } | 17 | C_O1_I1(r, l) |
24 | 18 | C_O1_I1(r, r) | |
25 | /* Sanity check writability of the store address. */ | 19 | C_O1_I1(w, r) |
26 | -#ifndef CONFIG_USER_ONLY | 20 | +C_O1_I1(w, w) |
27 | - probe_write(env, a2, 0, mem_idx, ra); | 21 | C_O1_I1(w, wr) |
28 | -#endif | 22 | C_O1_I2(r, 0, rZ) |
29 | + probe_write(env, a2, 1 << sc, mem_idx, ra); | 23 | C_O1_I2(r, l, l) |
30 | 24 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h | |
31 | /* | 25 | index XXXXXXX..XXXXXXX 100644 |
32 | * Note that the compare-and-swap is atomic, and the store is atomic, | 26 | --- a/tcg/arm/tcg-target.h |
27 | +++ b/tcg/arm/tcg-target.h | ||
28 | @@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions; | ||
29 | #define TCG_TARGET_HAS_v128 use_neon_instructions | ||
30 | #define TCG_TARGET_HAS_v256 0 | ||
31 | |||
32 | -#define TCG_TARGET_HAS_andc_vec 0 | ||
33 | -#define TCG_TARGET_HAS_orc_vec 0 | ||
34 | -#define TCG_TARGET_HAS_not_vec 0 | ||
35 | -#define TCG_TARGET_HAS_neg_vec 0 | ||
36 | -#define TCG_TARGET_HAS_abs_vec 0 | ||
37 | +#define TCG_TARGET_HAS_andc_vec 1 | ||
38 | +#define TCG_TARGET_HAS_orc_vec 1 | ||
39 | +#define TCG_TARGET_HAS_not_vec 1 | ||
40 | +#define TCG_TARGET_HAS_neg_vec 1 | ||
41 | +#define TCG_TARGET_HAS_abs_vec 1 | ||
42 | #define TCG_TARGET_HAS_roti_vec 0 | ||
43 | #define TCG_TARGET_HAS_rots_vec 0 | ||
44 | #define TCG_TARGET_HAS_rotv_vec 0 | ||
45 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/tcg/arm/tcg-target.c.inc | ||
48 | +++ b/tcg/arm/tcg-target.c.inc | ||
49 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
50 | |||
51 | INSN_VADD = 0xf2000800, | ||
52 | INSN_VAND = 0xf2000110, | ||
53 | + INSN_VBIC = 0xf2100110, | ||
54 | INSN_VEOR = 0xf3000110, | ||
55 | + INSN_VORN = 0xf2300110, | ||
56 | INSN_VORR = 0xf2200110, | ||
57 | INSN_VSUB = 0xf3000800, | ||
58 | |||
59 | + INSN_VABS = 0xf3b10300, | ||
60 | INSN_VMVN = 0xf3b00580, | ||
61 | + INSN_VNEG = 0xf3b10380, | ||
62 | |||
63 | INSN_VCEQ0 = 0xf3b10100, | ||
64 | INSN_VCGT0 = 0xf3b10000, | ||
65 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) | ||
66 | return C_O1_I1(w, r); | ||
67 | case INDEX_op_dup_vec: | ||
68 | return C_O1_I1(w, wr); | ||
69 | + case INDEX_op_abs_vec: | ||
70 | + case INDEX_op_neg_vec: | ||
71 | + case INDEX_op_not_vec: | ||
72 | + return C_O1_I1(w, w); | ||
73 | case INDEX_op_dup2_vec: | ||
74 | case INDEX_op_add_vec: | ||
75 | case INDEX_op_sub_vec: | ||
76 | case INDEX_op_xor_vec: | ||
77 | return C_O1_I2(w, w, w); | ||
78 | case INDEX_op_or_vec: | ||
79 | + case INDEX_op_andc_vec: | ||
80 | return C_O1_I2(w, w, wO); | ||
81 | case INDEX_op_and_vec: | ||
82 | + case INDEX_op_orc_vec: | ||
83 | return C_O1_I2(w, w, wV); | ||
84 | case INDEX_op_cmp_vec: | ||
85 | return C_O1_I2(w, w, wZ); | ||
86 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
87 | case INDEX_op_dup2_vec: | ||
88 | tcg_out_dup2_vec(s, a0, a1, a2); | ||
89 | return; | ||
90 | + case INDEX_op_abs_vec: | ||
91 | + tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1); | ||
92 | + return; | ||
93 | + case INDEX_op_neg_vec: | ||
94 | + tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1); | ||
95 | + return; | ||
96 | + case INDEX_op_not_vec: | ||
97 | + tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1); | ||
98 | + return; | ||
99 | case INDEX_op_add_vec: | ||
100 | tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2); | ||
101 | return; | ||
102 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
103 | tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2); | ||
104 | return; | ||
105 | |||
106 | + case INDEX_op_andc_vec: | ||
107 | + if (!const_args[2]) { | ||
108 | + tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2); | ||
109 | + return; | ||
110 | + } | ||
111 | + a2 = ~a2; | ||
112 | + /* fall through */ | ||
113 | case INDEX_op_and_vec: | ||
114 | if (const_args[2]) { | ||
115 | is_shimm1632(~a2, &cmode, &imm8); | ||
116 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
117 | tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2); | ||
118 | return; | ||
119 | |||
120 | + case INDEX_op_orc_vec: | ||
121 | + if (!const_args[2]) { | ||
122 | + tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2); | ||
123 | + return; | ||
124 | + } | ||
125 | + a2 = ~a2; | ||
126 | + /* fall through */ | ||
127 | case INDEX_op_or_vec: | ||
128 | if (const_args[2]) { | ||
129 | is_shimm1632(a2, &cmode, &imm8); | ||
130 | @@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) | ||
131 | case INDEX_op_add_vec: | ||
132 | case INDEX_op_sub_vec: | ||
133 | case INDEX_op_and_vec: | ||
134 | + case INDEX_op_andc_vec: | ||
135 | case INDEX_op_or_vec: | ||
136 | + case INDEX_op_orc_vec: | ||
137 | case INDEX_op_xor_vec: | ||
138 | + case INDEX_op_not_vec: | ||
139 | return 1; | ||
140 | + case INDEX_op_abs_vec: | ||
141 | case INDEX_op_cmp_vec: | ||
142 | + case INDEX_op_neg_vec: | ||
143 | return vece < MO_64; | ||
144 | default: | ||
145 | return 0; | ||
33 | -- | 146 | -- |
34 | 2.17.1 | 147 | 2.25.1 |
35 | 148 | ||
36 | 149 | diff view generated by jsdifflib |
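On the new-series side, the andc/orc handling above leans on a simple bitwise identity; a standalone sketch (plain C, hypothetical helper names):

    #include <stdint.h>

    /* andc(a, c) == a & ~c and orc(a, c) == a | ~c, so complementing a
     * constant operand lets the AND/OR immediate encodings serve both,
     * which is why the backend does a2 = ~a2 and falls through. */
    static uint32_t andc32(uint32_t a, uint32_t c) { return a & ~c; }
    static uint32_t orc32(uint32_t a, uint32_t c)  { return a | ~c; }

    int main(void)
    {
        return (andc32(0xf0f0f0f0u, 0x0000ffffu) == 0xf0f00000u
                && orc32(0xf0f0f0f0u, 0x0000ffffu) == 0xfffff0f0u) ? 0 : 1;
    }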
1 | Let the user-only watchpoint stubs resolve to empty inline functions. | 1 | This consists of the three immediate shifts: shli, shri, sari. |
---|---|---|---|
2 | 2 | ||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 5 | --- |
7 | include/hw/core/cpu.h | 23 +++++++++++++++++++++++ | 6 | tcg/arm/tcg-target.h | 2 +- |
8 | exec.c | 26 ++------------------------ | 7 | tcg/arm/tcg-target.c.inc | 27 +++++++++++++++++++++++++++ |
9 | 2 files changed, 25 insertions(+), 24 deletions(-) | 8 | 2 files changed, 28 insertions(+), 1 deletion(-) |
10 | 9 | ||
11 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | 10 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h |
12 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/include/hw/core/cpu.h | 12 | --- a/tcg/arm/tcg-target.h |
14 | +++ b/include/hw/core/cpu.h | 13 | +++ b/tcg/arm/tcg-target.h |
15 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask) | 14 | @@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions; |
16 | return false; | 15 | #define TCG_TARGET_HAS_roti_vec 0 |
16 | #define TCG_TARGET_HAS_rots_vec 0 | ||
17 | #define TCG_TARGET_HAS_rotv_vec 0 | ||
18 | -#define TCG_TARGET_HAS_shi_vec 0 | ||
19 | +#define TCG_TARGET_HAS_shi_vec 1 | ||
20 | #define TCG_TARGET_HAS_shs_vec 0 | ||
21 | #define TCG_TARGET_HAS_shv_vec 0 | ||
22 | #define TCG_TARGET_HAS_mul_vec 0 | ||
23 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/tcg/arm/tcg-target.c.inc | ||
26 | +++ b/tcg/arm/tcg-target.c.inc | ||
27 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
28 | INSN_VCGE_U = 0xf3000310, | ||
29 | INSN_VCGT_U = 0xf3000300, | ||
30 | |||
31 | + INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */ | ||
32 | + INSN_VSARI = 0xf2800010, /* VSHR.S */ | ||
33 | + INSN_VSHRI = 0xf3800010, /* VSHR.U */ | ||
34 | + | ||
35 | INSN_VTST = 0xf2000810, | ||
36 | |||
37 | INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */ | ||
38 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vmovi(TCGContext *s, TCGReg rd, | ||
39 | | (extract32(imm8, 7, 1) << 24)); | ||
17 | } | 40 | } |
18 | 41 | ||
19 | +#ifdef CONFIG_USER_ONLY | 42 | +static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q, |
20 | +static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | 43 | + TCGReg rd, TCGReg rm, int l_imm6) |
21 | + int flags, CPUWatchpoint **watchpoint) | ||
22 | +{ | 44 | +{ |
23 | + return -ENOSYS; | 45 | + tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) | |
46 | + (extract32(l_imm6, 6, 1) << 7) | | ||
47 | + (extract32(l_imm6, 0, 6) << 16)); | ||
24 | +} | 48 | +} |
25 | + | 49 | + |
26 | +static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, | 50 | static void tcg_out_vldst(TCGContext *s, ARMInsn insn, |
27 | + vaddr len, int flags) | 51 | TCGReg rd, TCGReg rn, int offset) |
28 | +{ | 52 | { |
29 | + return -ENOSYS; | 53 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) |
30 | +} | 54 | case INDEX_op_abs_vec: |
31 | + | 55 | case INDEX_op_neg_vec: |
32 | +static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, | 56 | case INDEX_op_not_vec: |
33 | + CPUWatchpoint *wp) | 57 | + case INDEX_op_shli_vec: |
34 | +{ | 58 | + case INDEX_op_shri_vec: |
35 | +} | 59 | + case INDEX_op_sari_vec: |
36 | + | 60 | return C_O1_I1(w, w); |
37 | +static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) | 61 | case INDEX_op_dup2_vec: |
38 | +{ | 62 | case INDEX_op_add_vec: |
39 | +} | 63 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, |
40 | +#else | 64 | case INDEX_op_xor_vec: |
41 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | 65 | tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2); |
42 | int flags, CPUWatchpoint **watchpoint); | 66 | return; |
43 | int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, | 67 | + case INDEX_op_shli_vec: |
44 | vaddr len, int flags); | 68 | + tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece)); |
45 | void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); | 69 | + return; |
46 | void cpu_watchpoint_remove_all(CPUState *cpu, int mask); | 70 | + case INDEX_op_shri_vec: |
47 | +#endif | 71 | + tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2); |
48 | 72 | + return; | |
49 | /** | 73 | + case INDEX_op_sari_vec: |
50 | * cpu_get_address_space: | 74 | + tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2); |
51 | diff --git a/exec.c b/exec.c | 75 | + return; |
52 | index XXXXXXX..XXXXXXX 100644 | 76 | |
53 | --- a/exec.c | 77 | case INDEX_op_andc_vec: |
54 | +++ b/exec.c | 78 | if (!const_args[2]) { |
55 | @@ -XXX,XX +XXX,XX @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) | 79 | @@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) |
56 | } | 80 | case INDEX_op_orc_vec: |
57 | #endif | 81 | case INDEX_op_xor_vec: |
58 | 82 | case INDEX_op_not_vec: | |
59 | -#if defined(CONFIG_USER_ONLY) | 83 | + case INDEX_op_shli_vec: |
60 | -void cpu_watchpoint_remove_all(CPUState *cpu, int mask) | 84 | + case INDEX_op_shri_vec: |
61 | - | 85 | + case INDEX_op_sari_vec: |
62 | -{ | 86 | return 1; |
63 | -} | 87 | case INDEX_op_abs_vec: |
64 | - | 88 | case INDEX_op_cmp_vec: |
65 | -int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, | ||
66 | - int flags) | ||
67 | -{ | ||
68 | - return -ENOSYS; | ||
69 | -} | ||
70 | - | ||
71 | -void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) | ||
72 | -{ | ||
73 | -} | ||
74 | - | ||
75 | -int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | ||
76 | - int flags, CPUWatchpoint **watchpoint) | ||
77 | -{ | ||
78 | - return -ENOSYS; | ||
79 | -} | ||
80 | -#else | ||
81 | +#ifndef CONFIG_USER_ONLY | ||
82 | /* Add a watchpoint. */ | ||
83 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | ||
84 | int flags, CPUWatchpoint **watchpoint) | ||
85 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, | ||
86 | |||
87 | return !(addr > wpend || wp->vaddr > addrend); | ||
88 | } | ||
89 | - | ||
90 | -#endif | ||
91 | +#endif /* !CONFIG_USER_ONLY */ | ||
92 | |||
93 | /* Add a breakpoint. */ | ||
94 | int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, | ||
95 | -- | 89 | -- |
96 | 2.17.1 | 90 | 2.25.1 |
97 | 91 | ||
98 | 92 | diff view generated by jsdifflib |
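The immediate-shift emission in the patch above packs the element size and shift count into a single imm6 field; a worked example of the arithmetic (assuming the encodings named in the patch):

    #include <stdio.h>

    /* For VSHL (immediate), imm6 = (8 << vece) + shift; for VSHR,
     * imm6 = (16 << vece) - shift, mirroring the tcg_out_vshifti calls. */
    int main(void)
    {
        int vece = 1;   /* 16-bit elements */
        int shift = 3;
        printf("shli imm6 = %d\n", (8 << vece) + shift);   /* 19 */
        printf("shri imm6 = %d\n", (16 << vece) - shift);  /* 29 */
        return 0;
    }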
Deleted patch | |||
---|---|---|---|
1 | From: David Hildenbrand <david@redhat.com> | ||
2 | 1 | ||
3 | We want to perform the same checks in probe_write() to trigger a cpu | ||
4 | exit before doing any modifications. We'll have to pass a PC. | ||
5 | |||
6 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <20190823100741.9621-9-david@redhat.com> | ||
9 | [rth: Use vaddr for len, like other watchpoint functions; | ||
10 | Move user-only stub to static inline.] | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | --- | ||
13 | include/hw/core/cpu.h | 7 +++++++ | ||
14 | exec.c | 26 ++++++++++++++++++-------- | ||
15 | 2 files changed, 25 insertions(+), 8 deletions(-) | ||
16 | |||
17 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/include/hw/core/cpu.h | ||
20 | +++ b/include/hw/core/cpu.h | ||
21 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, | ||
22 | static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) | ||
23 | { | ||
24 | } | ||
25 | + | ||
26 | +static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
27 | + MemTxAttrs atr, int fl, uintptr_t ra) | ||
28 | +{ | ||
29 | +} | ||
30 | #else | ||
31 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | ||
32 | int flags, CPUWatchpoint **watchpoint); | ||
33 | @@ -XXX,XX +XXX,XX @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, | ||
34 | vaddr len, int flags); | ||
35 | void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); | ||
36 | void cpu_watchpoint_remove_all(CPUState *cpu, int mask); | ||
37 | +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
38 | + MemTxAttrs attrs, int flags, uintptr_t ra); | ||
39 | #endif | ||
40 | |||
41 | /** | ||
42 | diff --git a/exec.c b/exec.c | ||
43 | index XXXXXXX..XXXXXXX 100644 | ||
44 | --- a/exec.c | ||
45 | +++ b/exec.c | ||
46 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps notdirty_mem_ops = { | ||
47 | }; | ||
48 | |||
49 | /* Generate a debug exception if a watchpoint has been hit. */ | ||
50 | -static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | ||
51 | +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
52 | + MemTxAttrs attrs, int flags, uintptr_t ra) | ||
53 | { | ||
54 | - CPUState *cpu = current_cpu; | ||
55 | CPUClass *cc = CPU_GET_CLASS(cpu); | ||
56 | - target_ulong vaddr; | ||
57 | CPUWatchpoint *wp; | ||
58 | |||
59 | assert(tcg_enabled()); | ||
60 | @@ -XXX,XX +XXX,XX @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | ||
61 | cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); | ||
62 | return; | ||
63 | } | ||
64 | - vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; | ||
65 | - vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len); | ||
66 | + | ||
67 | + addr = cc->adjust_watchpoint_address(cpu, addr, len); | ||
68 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { | ||
69 | - if (cpu_watchpoint_address_matches(wp, vaddr, len) | ||
70 | + if (cpu_watchpoint_address_matches(wp, addr, len) | ||
71 | && (wp->flags & flags)) { | ||
72 | if (flags == BP_MEM_READ) { | ||
73 | wp->flags |= BP_WATCHPOINT_HIT_READ; | ||
74 | } else { | ||
75 | wp->flags |= BP_WATCHPOINT_HIT_WRITE; | ||
76 | } | ||
77 | - wp->hitaddr = vaddr; | ||
78 | + wp->hitaddr = MAX(addr, wp->vaddr); | ||
79 | wp->hitattrs = attrs; | ||
80 | if (!cpu->watchpoint_hit) { | ||
81 | if (wp->flags & BP_CPU && | ||
82 | @@ -XXX,XX +XXX,XX @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | ||
83 | if (wp->flags & BP_STOP_BEFORE_ACCESS) { | ||
84 | cpu->exception_index = EXCP_DEBUG; | ||
85 | mmap_unlock(); | ||
86 | - cpu_loop_exit(cpu); | ||
87 | + cpu_loop_exit_restore(cpu, ra); | ||
88 | } else { | ||
89 | /* Force execution of one insn next time. */ | ||
90 | cpu->cflags_next_tb = 1 | curr_cflags(); | ||
91 | mmap_unlock(); | ||
92 | + if (ra) { | ||
93 | + cpu_restore_state(cpu, ra, true); | ||
94 | + } | ||
95 | cpu_loop_exit_noexc(cpu); | ||
96 | } | ||
97 | } | ||
98 | @@ -XXX,XX +XXX,XX @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | ||
99 | } | ||
100 | } | ||
101 | |||
102 | +static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | ||
103 | +{ | ||
104 | + CPUState *cpu = current_cpu; | ||
105 | + vaddr addr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; | ||
106 | + | ||
107 | + cpu_check_watchpoint(cpu, addr, len, attrs, flags, 0); | ||
108 | +} | ||
109 | + | ||
110 | /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, | ||
111 | so these check for a hit then pass through to the normal out-of-line | ||
112 | phys routines. */ | ||
113 | -- | ||
114 | 2.17.1 | ||
115 | |||
116 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | We had two different mechanisms to force a recheck of the tlb. | ||
2 | 1 | ||
3 | Before TLB_RECHECK was introduced, we had a PAGE_WRITE_INV bit | ||
4 | that would immediately set TLB_INVALID_MASK, which automatically | ||
5 | means that a second check of the tlb entry fails. | ||
6 | |||
7 | We can use the same mechanism to handle small pages. | ||
8 | Conserve TLB_* bits by removing TLB_RECHECK. | ||
9 | |||
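A minimal sketch of why this works (the constants here are illustrative, assuming 4 KiB pages, not QEMU's exact definitions): the hit comparison keeps TLB_INVALID_MASK in its mask, and the compared address is page-aligned, so a set invalid bit can never match.

    #include <stdint.h>

    #define TARGET_PAGE_MASK (~(uint64_t)0xfff)
    #define TLB_INVALID_MASK (1ULL << 11)

    static int tlb_hit_page(uint64_t tlb_addr, uint64_t page)
    {
        return page == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
    }

    int main(void)
    {
        uint64_t page = 0x1000;
        /* Returns 0: the invalid bit forces a miss and thus a re-fill. */
        return tlb_hit_page(page | TLB_INVALID_MASK, page);
    }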
10 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | --- | ||
13 | include/exec/cpu-all.h | 5 +-- | ||
14 | accel/tcg/cputlb.c | 86 +++++++++++------------------------------- | ||
15 | 2 files changed, 24 insertions(+), 67 deletions(-) | ||
16 | |||
17 | diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/include/exec/cpu-all.h | ||
20 | +++ b/include/exec/cpu-all.h | ||
21 | @@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env); | ||
22 | #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2)) | ||
23 | /* Set if TLB entry is an IO callback. */ | ||
24 | #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3)) | ||
25 | -/* Set if TLB entry must have MMU lookup repeated for every access */ | ||
26 | -#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4)) | ||
27 | |||
28 | /* Use this mask to check interception with an alignment mask | ||
29 | * in a TCG backend. | ||
30 | */ | ||
31 | -#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \ | ||
32 | - | TLB_RECHECK) | ||
33 | +#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO) | ||
34 | |||
35 | /** | ||
36 | * tlb_hit_page: return true if page aligned @addr is a hit against the | ||
37 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
38 | index XXXXXXX..XXXXXXX 100644 | ||
39 | --- a/accel/tcg/cputlb.c | ||
40 | +++ b/accel/tcg/cputlb.c | ||
41 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | ||
42 | |||
43 | address = vaddr_page; | ||
44 | if (size < TARGET_PAGE_SIZE) { | ||
45 | - /* | ||
46 | - * Slow-path the TLB entries; we will repeat the MMU check and TLB | ||
47 | - * fill on every access. | ||
48 | - */ | ||
49 | - address |= TLB_RECHECK; | ||
50 | + /* Repeat the MMU check and TLB fill on every access. */ | ||
51 | + address |= TLB_INVALID_MASK; | ||
52 | } | ||
53 | if (attrs.byte_swap) { | ||
54 | /* Force the access through the I/O slow path. */ | ||
55 | @@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, | ||
56 | victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ | ||
57 | (ADDR) & TARGET_PAGE_MASK) | ||
58 | |||
59 | -/* NOTE: this function can trigger an exception */ | ||
60 | -/* NOTE2: the returned address is not exactly the physical address: it | ||
61 | - * is actually a ram_addr_t (in system mode; the user mode emulation | ||
62 | - * version of this function returns a guest virtual address). | ||
63 | +/* | ||
64 | + * Return a ram_addr_t for the virtual address for execution. | ||
65 | + * | ||
66 | + * Return -1 if we can't translate and execute from an entire page | ||
67 | + * of RAM. This will force us to execute by loading and translating | ||
68 | + * one insn at a time, without caching. | ||
69 | + * | ||
70 | + * NOTE: This function will trigger an exception if the page is | ||
71 | + * not executable. | ||
72 | */ | ||
73 | tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) | ||
74 | { | ||
75 | @@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) | ||
76 | tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); | ||
77 | index = tlb_index(env, mmu_idx, addr); | ||
78 | entry = tlb_entry(env, mmu_idx, addr); | ||
79 | + | ||
80 | + if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { | ||
81 | + /* | ||
82 | + * The MMU protection covers a smaller range than a target | ||
83 | + * page, so we must redo the MMU check for every insn. | ||
84 | + */ | ||
85 | + return -1; | ||
86 | + } | ||
87 | } | ||
88 | assert(tlb_hit(entry->addr_code, addr)); | ||
89 | } | ||
90 | |||
91 | - if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) { | ||
92 | - /* | ||
93 | - * Return -1 if we can't translate and execute from an entire | ||
94 | - * page of RAM here, which will cause us to execute by loading | ||
95 | - * and translating one insn at a time, without caching: | ||
96 | - * - TLB_RECHECK: means the MMU protection covers a smaller range | ||
97 | - * than a target page, so we must redo the MMU check every insn | ||
98 | - * - TLB_MMIO: region is not backed by RAM | ||
99 | - */ | ||
100 | + if (unlikely(entry->addr_code & TLB_MMIO)) { | ||
101 | + /* The region is not backed by RAM. */ | ||
102 | return -1; | ||
103 | } | ||
104 | |||
105 | @@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, | ||
106 | } | ||
107 | |||
108 | /* Notice an IO access or a needs-MMU-lookup access */ | ||
109 | - if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) { | ||
110 | + if (unlikely(tlb_addr & TLB_MMIO)) { | ||
111 | /* There's really nothing that can be done to | ||
112 | support this apart from stop-the-world. */ | ||
113 | goto stop_the_world; | ||
114 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
115 | entry = tlb_entry(env, mmu_idx, addr); | ||
116 | } | ||
117 | tlb_addr = code_read ? entry->addr_code : entry->addr_read; | ||
118 | + tlb_addr &= ~TLB_INVALID_MASK; | ||
119 | } | ||
120 | |||
121 | /* Handle an IO access. */ | ||
122 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
123 | if ((addr & (size - 1)) != 0) { | ||
124 | goto do_unaligned_access; | ||
125 | } | ||
126 | - | ||
127 | - if (tlb_addr & TLB_RECHECK) { | ||
128 | - /* | ||
129 | - * This is a TLB_RECHECK access, where the MMU protection | ||
130 | - * covers a smaller range than a target page, and we must | ||
131 | - * repeat the MMU check here. This tlb_fill() call might | ||
132 | - * longjump out if this access should cause a guest exception. | ||
133 | - */ | ||
134 | - tlb_fill(env_cpu(env), addr, size, | ||
135 | - access_type, mmu_idx, retaddr); | ||
136 | - index = tlb_index(env, mmu_idx, addr); | ||
137 | - entry = tlb_entry(env, mmu_idx, addr); | ||
138 | - | ||
139 | - tlb_addr = code_read ? entry->addr_code : entry->addr_read; | ||
140 | - tlb_addr &= ~TLB_RECHECK; | ||
141 | - if (!(tlb_addr & ~TARGET_PAGE_MASK)) { | ||
142 | - /* RAM access */ | ||
143 | - goto do_aligned_access; | ||
144 | - } | ||
145 | - } | ||
146 | - | ||
147 | return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | ||
148 | mmu_idx, addr, retaddr, access_type, op); | ||
149 | } | ||
150 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
151 | return res & MAKE_64BIT_MASK(0, size * 8); | ||
152 | } | ||
153 | |||
154 | - do_aligned_access: | ||
155 | haddr = (void *)((uintptr_t)addr + entry->addend); | ||
156 | switch (op) { | ||
157 | case MO_UB: | ||
158 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
159 | if ((addr & (size - 1)) != 0) { | ||
160 | goto do_unaligned_access; | ||
161 | } | ||
162 | - | ||
163 | - if (tlb_addr & TLB_RECHECK) { | ||
164 | - /* | ||
165 | - * This is a TLB_RECHECK access, where the MMU protection | ||
166 | - * covers a smaller range than a target page, and we must | ||
167 | - * repeat the MMU check here. This tlb_fill() call might | ||
168 | - * longjump out if this access should cause a guest exception. | ||
169 | - */ | ||
170 | - tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, | ||
171 | - mmu_idx, retaddr); | ||
172 | - index = tlb_index(env, mmu_idx, addr); | ||
173 | - entry = tlb_entry(env, mmu_idx, addr); | ||
174 | - | ||
175 | - tlb_addr = tlb_addr_write(entry); | ||
176 | - tlb_addr &= ~TLB_RECHECK; | ||
177 | - if (!(tlb_addr & ~TARGET_PAGE_MASK)) { | ||
178 | - /* RAM access */ | ||
179 | - goto do_aligned_access; | ||
180 | - } | ||
181 | - } | ||
182 | - | ||
183 | io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, | ||
184 | val, addr, retaddr, op); | ||
185 | return; | ||
186 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
187 | return; | ||
188 | } | ||
189 | |||
190 | - do_aligned_access: | ||
191 | haddr = (void *)((uintptr_t)addr + entry->addend); | ||
192 | switch (op) { | ||
193 | case MO_UB: | ||
194 | -- | ||
195 | 2.17.1 | ||
196 | |||
197 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | We want to move the check for watchpoints from | ||
2 | memory_region_section_get_iotlb to tlb_set_page_with_attrs. | ||
3 | Isolate the loop over watchpoints to an exported function. | ||
4 | 1 | ||
5 | Rename the existing cpu_watchpoint_address_matches to | ||
6 | watchpoint_address_matches, since it doesn't actually | ||
7 | have a cpu argument. | ||
8 | |||
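The predicate being isolated uses inclusive endpoints so a range ending at the top of the address space does not wrap; a standalone rendering with a worked case (not the QEMU function itself):

    #include <stdint.h>

    typedef uint64_t vaddr;

    static int overlaps(vaddr wp_addr, vaddr wp_len, vaddr addr, vaddr len)
    {
        vaddr wpend = wp_addr + wp_len - 1;   /* last watched byte */
        vaddr addrend = addr + len - 1;       /* last accessed byte */
        return !(addr > wpend || wp_addr > addrend);
    }

    int main(void)
    {
        /* Watchpoint covers 0x1000..0x1003: a 2-byte access at 0x1002
         * hits, a 4-byte access at 0x1004 misses. */
        return (overlaps(0x1000, 4, 0x1002, 2)
                && !overlaps(0x1000, 4, 0x1004, 4)) ? 0 : 1;
    }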
9 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | --- | ||
12 | include/hw/core/cpu.h | 7 +++++++ | ||
13 | exec.c | 45 ++++++++++++++++++++++++++++--------------- | ||
14 | 2 files changed, 36 insertions(+), 16 deletions(-) | ||
15 | |||
16 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/include/hw/core/cpu.h | ||
19 | +++ b/include/hw/core/cpu.h | ||
20 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
21 | MemTxAttrs atr, int fl, uintptr_t ra) | ||
22 | { | ||
23 | } | ||
24 | + | ||
25 | +static inline int cpu_watchpoint_address_matches(CPUState *cpu, | ||
26 | + vaddr addr, vaddr len) | ||
27 | +{ | ||
28 | + return 0; | ||
29 | +} | ||
30 | #else | ||
31 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | ||
32 | int flags, CPUWatchpoint **watchpoint); | ||
33 | @@ -XXX,XX +XXX,XX @@ void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); | ||
34 | void cpu_watchpoint_remove_all(CPUState *cpu, int mask); | ||
35 | void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
36 | MemTxAttrs attrs, int flags, uintptr_t ra); | ||
37 | +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); | ||
38 | #endif | ||
39 | |||
40 | /** | ||
41 | diff --git a/exec.c b/exec.c | ||
42 | index XXXXXXX..XXXXXXX 100644 | ||
43 | --- a/exec.c | ||
44 | +++ b/exec.c | ||
45 | @@ -XXX,XX +XXX,XX @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask) | ||
46 | * partially or completely with the address range covered by the | ||
47 | * access). | ||
48 | */ | ||
49 | -static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, | ||
50 | - vaddr addr, | ||
51 | - vaddr len) | ||
52 | +static inline bool watchpoint_address_matches(CPUWatchpoint *wp, | ||
53 | + vaddr addr, vaddr len) | ||
54 | { | ||
55 | /* We know the lengths are non-zero, but a little caution is | ||
56 | * required to avoid errors in the case where the range ends | ||
57 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, | ||
58 | |||
59 | return !(addr > wpend || wp->vaddr > addrend); | ||
60 | } | ||
61 | + | ||
62 | +/* Return flags for watchpoints that match addr + prot. */ | ||
63 | +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) | ||
64 | +{ | ||
65 | + CPUWatchpoint *wp; | ||
66 | + int ret = 0; | ||
67 | + | ||
68 | + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { | ||
69 | + if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) { | ||
70 | + ret |= wp->flags; | ||
71 | + } | ||
72 | + } | ||
73 | + return ret; | ||
74 | +} | ||
75 | #endif /* !CONFIG_USER_ONLY */ | ||
76 | |||
77 | /* Add a breakpoint. */ | ||
78 | @@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, | ||
79 | target_ulong *address) | ||
80 | { | ||
81 | hwaddr iotlb; | ||
82 | - CPUWatchpoint *wp; | ||
83 | + int flags, match; | ||
84 | |||
85 | if (memory_region_is_ram(section->mr)) { | ||
86 | /* Normal RAM. */ | ||
87 | @@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, | ||
88 | iotlb += xlat; | ||
89 | } | ||
90 | |||
91 | - /* Make accesses to pages with watchpoints go via the | ||
92 | - watchpoint trap routines. */ | ||
93 | - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { | ||
94 | - if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) { | ||
95 | - /* Avoid trapping reads of pages with a write breakpoint. */ | ||
96 | - if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { | ||
97 | - iotlb = PHYS_SECTION_WATCH + paddr; | ||
98 | - *address |= TLB_MMIO; | ||
99 | - break; | ||
100 | - } | ||
101 | - } | ||
102 | + /* Avoid trapping reads of pages with a write breakpoint. */ | ||
103 | + match = (prot & PAGE_READ ? BP_MEM_READ : 0) | ||
104 | + | (prot & PAGE_WRITE ? BP_MEM_WRITE : 0); | ||
105 | + flags = cpu_watchpoint_address_matches(cpu, vaddr, TARGET_PAGE_SIZE); | ||
106 | + if (flags & match) { | ||
107 | + /* | ||
108 | + * Make accesses to pages with watchpoints go via the | ||
109 | + * watchpoint trap routines. | ||
110 | + */ | ||
111 | + iotlb = PHYS_SECTION_WATCH + paddr; | ||
112 | + *address |= TLB_MMIO; | ||
113 | } | ||
114 | |||
115 | return iotlb; | ||
116 | @@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
117 | |||
118 | addr = cc->adjust_watchpoint_address(cpu, addr, len); | ||
119 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { | ||
120 | - if (cpu_watchpoint_address_matches(wp, addr, len) | ||
121 | + if (watchpoint_address_matches(wp, addr, len) | ||
122 | && (wp->flags & flags)) { | ||
123 | if (flags == BP_MEM_READ) { | ||
124 | wp->flags |= BP_WATCHPOINT_HIT_READ; | ||
125 | -- | ||
126 | 2.17.1 | ||
127 | |||
128 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | We are currently passing the size of the full write to | ||
2 | tlb_fill for the second page. Instead pass the real | ||
3 | size of the write to that page. | ||
4 | 1 | ||
5 | This argument is unused within all tlb_fill, except to be | ||
6 | logged via tracing, so in practice this makes no difference. | ||
7 | |||
8 | But in a moment we'll need the value of size2 for watchpoints, | ||
9 | and if we've computed the value we might as well use it. | ||
10 | |||
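The arithmetic, worked through with hypothetical numbers (assuming 4 KiB pages):

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_MASK (~(uint64_t)0xfff)   /* assuming 4 KiB pages */

    int main(void)
    {
        uint64_t addr = 0xffe, size = 4;   /* store spans the boundary */
        uint64_t size2 = (addr + size) & ~TARGET_PAGE_MASK;
        /* 0x1002 & 0xfff == 2: only 2 of the 4 bytes hit the second page,
         * so that is the size tlb_fill is now given for page2. */
        printf("size2 = %llu\n", (unsigned long long)size2);
        return 0;
    }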
11 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
12 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | --- | ||
15 | accel/tcg/cputlb.c | 5 ++++- | ||
16 | 1 file changed, 4 insertions(+), 1 deletion(-) | ||
17 | |||
18 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/accel/tcg/cputlb.c | ||
21 | +++ b/accel/tcg/cputlb.c | ||
22 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
23 | uintptr_t index2; | ||
24 | CPUTLBEntry *entry2; | ||
25 | target_ulong page2, tlb_addr2; | ||
26 | + size_t size2; | ||
27 | + | ||
28 | do_unaligned_access: | ||
29 | /* | ||
30 | * Ensure the second page is in the TLB. Note that the first page | ||
31 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
32 | * cannot evict the first. | ||
33 | */ | ||
34 | page2 = (addr + size) & TARGET_PAGE_MASK; | ||
35 | + size2 = (addr + size) & ~TARGET_PAGE_MASK; | ||
36 | index2 = tlb_index(env, mmu_idx, page2); | ||
37 | entry2 = tlb_entry(env, mmu_idx, page2); | ||
38 | tlb_addr2 = tlb_addr_write(entry2); | ||
39 | if (!tlb_hit_page(tlb_addr2, page2) | ||
40 | && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, | ||
41 | page2 & TARGET_PAGE_MASK)) { | ||
42 | - tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE, | ||
43 | + tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, | ||
44 | mmu_idx, retaddr); | ||
45 | } | ||
46 | |||
47 | -- | ||
48 | 2.17.1 | ||
49 | |||
50 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | We have already aligned page2 to the start of the next page. | ||
2 | There is no reason to do that a second time. | ||
3 | 1 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
5 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | --- | ||
8 | accel/tcg/cputlb.c | 3 +-- | ||
9 | 1 file changed, 1 insertion(+), 2 deletions(-) | ||
10 | |||
11 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/accel/tcg/cputlb.c | ||
14 | +++ b/accel/tcg/cputlb.c | ||
15 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
16 | entry2 = tlb_entry(env, mmu_idx, page2); | ||
17 | tlb_addr2 = tlb_addr_write(entry2); | ||
18 | if (!tlb_hit_page(tlb_addr2, page2) | ||
19 | - && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, | ||
20 | - page2 & TARGET_PAGE_MASK)) { | ||
21 | + && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { | ||
22 | tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, | ||
23 | mmu_idx, retaddr); | ||
24 | } | ||
25 | -- | ||
26 | 2.17.1 | ||
27 | |||
28 | diff view generated by jsdifflib |
1 | From: David Hildenbrand <david@redhat.com> | 1 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
---|---|---|---|
2 | |||
3 | We now have a variant for CONFIG_USER_ONLY as well. | ||
4 | |||
5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
7 | Message-Id: <20190826075112.25637-7-david@redhat.com> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | --- | 3 | --- |
10 | target/hppa/op_helper.c | 2 -- | 4 | tcg/arm/tcg-target.h | 2 +- |
11 | 1 file changed, 2 deletions(-) | 5 | tcg/arm/tcg-target.c.inc | 6 ++++++ |
6 | 2 files changed, 7 insertions(+), 1 deletion(-) | ||
12 | 7 | ||
13 | diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c | 8 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h |
14 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/target/hppa/op_helper.c | 10 | --- a/tcg/arm/tcg-target.h |
16 | +++ b/target/hppa/op_helper.c | 11 | +++ b/tcg/arm/tcg-target.h |
17 | @@ -XXX,XX +XXX,XX @@ static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ureg val, | 12 | @@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions; |
13 | #define TCG_TARGET_HAS_shi_vec 1 | ||
14 | #define TCG_TARGET_HAS_shs_vec 0 | ||
15 | #define TCG_TARGET_HAS_shv_vec 0 | ||
16 | -#define TCG_TARGET_HAS_mul_vec 0 | ||
17 | +#define TCG_TARGET_HAS_mul_vec 1 | ||
18 | #define TCG_TARGET_HAS_sat_vec 0 | ||
19 | #define TCG_TARGET_HAS_minmax_vec 0 | ||
20 | #define TCG_TARGET_HAS_bitsel_vec 0 | ||
21 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
22 | index XXXXXXX..XXXXXXX 100644 | ||
23 | --- a/tcg/arm/tcg-target.c.inc | ||
24 | +++ b/tcg/arm/tcg-target.c.inc | ||
25 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
26 | INSN_VORN = 0xf2300110, | ||
27 | INSN_VORR = 0xf2200110, | ||
28 | INSN_VSUB = 0xf3000800, | ||
29 | + INSN_VMUL = 0xf2000910, | ||
30 | |||
31 | INSN_VABS = 0xf3b10300, | ||
32 | INSN_VMVN = 0xf3b00580, | ||
33 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) | ||
34 | return C_O1_I1(w, w); | ||
35 | case INDEX_op_dup2_vec: | ||
36 | case INDEX_op_add_vec: | ||
37 | + case INDEX_op_mul_vec: | ||
38 | case INDEX_op_sub_vec: | ||
39 | case INDEX_op_xor_vec: | ||
40 | return C_O1_I2(w, w, w); | ||
41 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
42 | case INDEX_op_add_vec: | ||
43 | tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2); | ||
44 | return; | ||
45 | + case INDEX_op_mul_vec: | ||
46 | + tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2); | ||
47 | + return; | ||
48 | case INDEX_op_sub_vec: | ||
49 | tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2); | ||
50 | return; | ||
51 | @@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) | ||
52 | return 1; | ||
53 | case INDEX_op_abs_vec: | ||
54 | case INDEX_op_cmp_vec: | ||
55 | + case INDEX_op_mul_vec: | ||
56 | case INDEX_op_neg_vec: | ||
57 | return vece < MO_64; | ||
18 | default: | 58 | default: |
19 | /* Nothing is stored, but protection is checked and the | ||
20 | cacheline is marked dirty. */ | ||
21 | -#ifndef CONFIG_USER_ONLY | ||
22 | probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra); | ||
23 | -#endif | ||
24 | break; | ||
25 | } | ||
26 | } | ||
27 | -- | 59 | -- |
28 | 2.17.1 | 60 | 2.25.1 |
29 | 61 | ||
30 | 62 | diff view generated by jsdifflib |
1 | From: David Hildenbrand <david@redhat.com> | 1 | This is saturating add and subtract, signed and unsigned. |
---|---|---|---|
2 | 2 | ||
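As a reminder of the semantics being mapped onto VQADD/VQSUB in the right-hand series, a scalar model of one unsigned 8-bit lane (a sketch of the NEON behavior, not code from the tree):

    #include <stdint.h>

    /* Saturating arithmetic clamps to the representable range instead of
     * wrapping; this is what VQADD.U8/VQSUB.U8 do per element. */
    static uint8_t uqadd8(uint8_t a, uint8_t b)
    {
        unsigned sum = a + b;
        return sum > UINT8_MAX ? UINT8_MAX : sum;
    }

    static uint8_t uqsub8(uint8_t a, uint8_t b)
    {
        return a > b ? a - b : 0;
    }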
3 | Let's call it for CONFIG_USER_ONLY as well. While at it, add a FIXME and | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | get rid of one local variable. | ||
5 | |||
6 | The MIPS code probably needs a bigger refactoring of | ||
7 | ensure_writable_pages(), similar to s390x, so that, for example, | ||
8 | watchpoints can be handled reliably later. Only the actually accessed | ||
9 | addresses should be probed, not full pages. | ||
10 | |||
11 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
12 | Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com> | ||
13 | Message-Id: <20190826075112.25637-6-david@redhat.com> | ||
14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
15 | --- | 5 | --- |
16 | target/mips/op_helper.c | 8 +++----- | 6 | tcg/arm/tcg-target.h | 2 +- |
17 | 1 file changed, 3 insertions(+), 5 deletions(-) | 7 | tcg/arm/tcg-target.c.inc | 24 ++++++++++++++++++++++++ |
8 | 2 files changed, 25 insertions(+), 1 deletion(-) | ||
18 | 9 | ||
19 | diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c | 10 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h |
20 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/mips/op_helper.c | 12 | --- a/tcg/arm/tcg-target.h |
22 | +++ b/target/mips/op_helper.c | 13 | +++ b/tcg/arm/tcg-target.h |
23 | @@ -XXX,XX +XXX,XX @@ static inline void ensure_writable_pages(CPUMIPSState *env, | 14 | @@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions; |
24 | int mmu_idx, | 15 | #define TCG_TARGET_HAS_shs_vec 0 |
25 | uintptr_t retaddr) | 16 | #define TCG_TARGET_HAS_shv_vec 0 |
26 | { | 17 | #define TCG_TARGET_HAS_mul_vec 1 |
27 | -#if !defined(CONFIG_USER_ONLY) | 18 | -#define TCG_TARGET_HAS_sat_vec 0 |
28 | - target_ulong page_addr; | 19 | +#define TCG_TARGET_HAS_sat_vec 1 |
29 | + /* FIXME: Probe the actual accesses (pass and use a size) */ | 20 | #define TCG_TARGET_HAS_minmax_vec 0 |
30 | if (unlikely(MSA_PAGESPAN(addr))) { | 21 | #define TCG_TARGET_HAS_bitsel_vec 0 |
31 | /* first page */ | 22 | #define TCG_TARGET_HAS_cmpsel_vec 0 |
32 | probe_write(env, addr, 0, mmu_idx, retaddr); | 23 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
33 | /* second page */ | 24 | index XXXXXXX..XXXXXXX 100644 |
34 | - page_addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; | 25 | --- a/tcg/arm/tcg-target.c.inc |
35 | - probe_write(env, page_addr, 0, mmu_idx, retaddr); | 26 | +++ b/tcg/arm/tcg-target.c.inc |
36 | + addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; | 27 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
37 | + probe_write(env, addr, 0, mmu_idx, retaddr); | 28 | INSN_VORR = 0xf2200110, |
38 | } | 29 | INSN_VSUB = 0xf3000800, |
39 | -#endif | 30 | INSN_VMUL = 0xf2000910, |
40 | } | 31 | + INSN_VQADD = 0xf2000010, |
41 | 32 | + INSN_VQADD_U = 0xf3000010, | |
42 | void helper_msa_st_b(CPUMIPSState *env, uint32_t wd, | 33 | + INSN_VQSUB = 0xf2000210, |
34 | + INSN_VQSUB_U = 0xf3000210, | ||
35 | |||
36 | INSN_VABS = 0xf3b10300, | ||
37 | INSN_VMVN = 0xf3b00580, | ||
38 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) | ||
39 | case INDEX_op_dup2_vec: | ||
40 | case INDEX_op_add_vec: | ||
41 | case INDEX_op_mul_vec: | ||
42 | + case INDEX_op_ssadd_vec: | ||
43 | + case INDEX_op_sssub_vec: | ||
44 | case INDEX_op_sub_vec: | ||
45 | + case INDEX_op_usadd_vec: | ||
46 | + case INDEX_op_ussub_vec: | ||
47 | case INDEX_op_xor_vec: | ||
48 | return C_O1_I2(w, w, w); | ||
49 | case INDEX_op_or_vec: | ||
50 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
51 | case INDEX_op_sub_vec: | ||
52 | tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2); | ||
53 | return; | ||
54 | + case INDEX_op_ssadd_vec: | ||
55 | + tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2); | ||
56 | + return; | ||
57 | + case INDEX_op_sssub_vec: | ||
58 | + tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2); | ||
59 | + return; | ||
60 | + case INDEX_op_usadd_vec: | ||
61 | + tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2); | ||
62 | + return; | ||
63 | + case INDEX_op_ussub_vec: | ||
64 | + tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2); | ||
65 | + return; | ||
66 | case INDEX_op_xor_vec: | ||
67 | tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2); | ||
68 | return; | ||
69 | @@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) | ||
70 | case INDEX_op_shli_vec: | ||
71 | case INDEX_op_shri_vec: | ||
72 | case INDEX_op_sari_vec: | ||
73 | + case INDEX_op_ssadd_vec: | ||
74 | + case INDEX_op_sssub_vec: | ||
75 | + case INDEX_op_usadd_vec: | ||
76 | + case INDEX_op_ussub_vec: | ||
77 | return 1; | ||
78 | case INDEX_op_abs_vec: | ||
79 | case INDEX_op_cmp_vec: | ||
43 | -- | 80 | -- |
44 | 2.17.1 | 81 | 2.25.1 |
45 | 82 | ||
46 | 83 | diff view generated by jsdifflib |
1 | From: David Hildenbrand <david@redhat.com> | 1 | This is minimum and maximum, signed and unsigned. |
---|---|---|---|
2 | 2 | ||
3 | Let's enforce the interface restriction. | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | |||
5 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-Id: <20190826075112.25637-5-david@redhat.com> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | --- | 5 | --- |
10 | accel/tcg/cputlb.c | 2 ++ | 6 | tcg/arm/tcg-target.h | 2 +- |
11 | accel/tcg/user-exec.c | 2 ++ | 7 | tcg/arm/tcg-target.c.inc | 24 ++++++++++++++++++++++++ |
12 | 2 files changed, 4 insertions(+) | 8 | 2 files changed, 25 insertions(+), 1 deletion(-) |
13 | 9 | ||
14 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 10 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h |
15 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/accel/tcg/cputlb.c | 12 | --- a/tcg/arm/tcg-target.h |
17 | +++ b/accel/tcg/cputlb.c | 13 | +++ b/tcg/arm/tcg-target.h |
18 | @@ -XXX,XX +XXX,XX @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 14 | @@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions; |
19 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | 15 | #define TCG_TARGET_HAS_shv_vec 0 |
20 | target_ulong tlb_addr = tlb_addr_write(entry); | 16 | #define TCG_TARGET_HAS_mul_vec 1 |
21 | 17 | #define TCG_TARGET_HAS_sat_vec 1 | |
22 | + g_assert(-(addr | TARGET_PAGE_MASK) >= size); | 18 | -#define TCG_TARGET_HAS_minmax_vec 0 |
23 | + | 19 | +#define TCG_TARGET_HAS_minmax_vec 1 |
24 | if (unlikely(!tlb_hit(tlb_addr, addr))) { | 20 | #define TCG_TARGET_HAS_bitsel_vec 0 |
25 | if (!VICTIM_TLB_HIT(addr_write, addr)) { | 21 | #define TCG_TARGET_HAS_cmpsel_vec 0 |
26 | tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, | 22 | |
27 | diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c | 23 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
28 | index XXXXXXX..XXXXXXX 100644 | 24 | index XXXXXXX..XXXXXXX 100644 |
29 | --- a/accel/tcg/user-exec.c | 25 | --- a/tcg/arm/tcg-target.c.inc |
30 | +++ b/accel/tcg/user-exec.c | 26 | +++ b/tcg/arm/tcg-target.c.inc |
31 | @@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, | 27 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
32 | void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 28 | INSN_VQADD_U = 0xf3000010, |
33 | uintptr_t retaddr) | 29 | INSN_VQSUB = 0xf2000210, |
34 | { | 30 | INSN_VQSUB_U = 0xf3000210, |
35 | + g_assert(-(addr | TARGET_PAGE_MASK) >= size); | 31 | + INSN_VMAX = 0xf2000600, |
36 | + | 32 | + INSN_VMAX_U = 0xf3000600, |
37 | if (!guest_addr_valid(addr) || | 33 | + INSN_VMIN = 0xf2000610, |
38 | page_check_range(addr, size, PAGE_WRITE) < 0) { | 34 | + INSN_VMIN_U = 0xf3000610, |
39 | CPUState *cpu = env_cpu(env); | 35 | |
36 | INSN_VABS = 0xf3b10300, | ||
37 | INSN_VMVN = 0xf3b00580, | ||
38 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) | ||
39 | case INDEX_op_dup2_vec: | ||
40 | case INDEX_op_add_vec: | ||
41 | case INDEX_op_mul_vec: | ||
42 | + case INDEX_op_smax_vec: | ||
43 | + case INDEX_op_smin_vec: | ||
44 | case INDEX_op_ssadd_vec: | ||
45 | case INDEX_op_sssub_vec: | ||
46 | case INDEX_op_sub_vec: | ||
47 | + case INDEX_op_umax_vec: | ||
48 | + case INDEX_op_umin_vec: | ||
49 | case INDEX_op_usadd_vec: | ||
50 | case INDEX_op_ussub_vec: | ||
51 | case INDEX_op_xor_vec: | ||
52 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
53 | case INDEX_op_mul_vec: | ||
54 | tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2); | ||
55 | return; | ||
56 | + case INDEX_op_smax_vec: | ||
57 | + tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2); | ||
58 | + return; | ||
59 | + case INDEX_op_smin_vec: | ||
60 | + tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2); | ||
61 | + return; | ||
62 | case INDEX_op_sub_vec: | ||
63 | tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2); | ||
64 | return; | ||
65 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
66 | case INDEX_op_sssub_vec: | ||
67 | tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2); | ||
68 | return; | ||
69 | + case INDEX_op_umax_vec: | ||
70 | + tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2); | ||
71 | + return; | ||
72 | + case INDEX_op_umin_vec: | ||
73 | + tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2); | ||
74 | + return; | ||
75 | case INDEX_op_usadd_vec: | ||
76 | tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2); | ||
77 | return; | ||
78 | @@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) | ||
79 | case INDEX_op_cmp_vec: | ||
80 | case INDEX_op_mul_vec: | ||
81 | case INDEX_op_neg_vec: | ||
82 | + case INDEX_op_smax_vec: | ||
83 | + case INDEX_op_smin_vec: | ||
84 | + case INDEX_op_umax_vec: | ||
85 | + case INDEX_op_umin_vec: | ||
86 | return vece < MO_64; | ||
87 | default: | ||
88 | return 0; | ||
40 | -- | 89 | -- |
41 | 2.17.1 | 90 | 2.25.1 |
42 | 91 | ||
43 | 92 | diff view generated by jsdifflib |
1 | From: David Hildenbrand <david@redhat.com> | 1 | NEON has 3 instructions implementing this 4-argument operation, |
---|---|---|---|
2 | with each insn overlapping a different logical input onto the | ||
3 | destination register. | ||
2 | 4 | ||
3 | ... similar to tlb_vaddr_to_host(); however, allow access to the host | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | page except when TLB_NOTDIRTY or TLB_MMIO is set. | ||
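Why three instructions for one TCG op: each NEON form computes the same bitwise select but keeps a different input in the destination register, which is what the register-coincidence cases in tcg_out_vec_op() below exploit. A scalar model (operand roles as I read the ARM ARM; treat as a sketch):

    #include <stdint.h>

    /* TCG's bitsel_vec(d, sel, b, c): per bit, d = sel ? b : c, i.e.
     *     d = (b & sel) | (c & ~sel)
     * The three NEON encodings compute the same function with a
     * different operand already living in Vd:
     *     VBSL d,n,m:  d = (n & d) | (m & ~d)    selector in d
     *     VBIT d,n,m:  d = (n & m) | (d & ~m)    "false" input c in d
     *     VBIF d,n,m:  d = (d & m) | (n & ~m)    "true" input b in d
     */
    static uint64_t bitsel(uint64_t sel, uint64_t b, uint64_t c)
    {
        return (b & sel) | (c & ~sel);
    }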
5 | |||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
8 | Message-Id: <20190830100959.26615-2-david@redhat.com> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 7 | --- |
11 | include/exec/exec-all.h | 4 ++-- | 8 | tcg/arm/tcg-target-con-set.h | 1 + |
12 | accel/tcg/cputlb.c | 21 ++++++++++++++++----- | 9 | tcg/arm/tcg-target.h | 2 +- |
13 | accel/tcg/user-exec.c | 6 ++++-- | 10 | tcg/arm/tcg-target.c.inc | 22 ++++++++++++++++++++-- |
14 | 3 files changed, 22 insertions(+), 9 deletions(-) | 11 | 3 files changed, 22 insertions(+), 3 deletions(-) |
15 | 12 | ||
16 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | 13 | diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h |
17 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/include/exec/exec-all.h | 15 | --- a/tcg/arm/tcg-target-con-set.h |
19 | +++ b/include/exec/exec-all.h | 16 | +++ b/tcg/arm/tcg-target-con-set.h |
20 | @@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, | 17 | @@ -XXX,XX +XXX,XX @@ C_O1_I2(w, w, w) |
21 | { | 18 | C_O1_I2(w, w, wO) |
22 | } | 19 | C_O1_I2(w, w, wV) |
23 | #endif | 20 | C_O1_I2(w, w, wZ) |
24 | -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 21 | +C_O1_I3(w, w, w, w) |
25 | - uintptr_t retaddr); | 22 | C_O1_I4(r, r, r, rI, rI) |
26 | +void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 23 | C_O1_I4(r, r, rIN, rIK, 0) |
27 | + uintptr_t retaddr); | 24 | C_O2_I1(r, r, l) |
28 | 25 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h | |
29 | #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ | ||
30 | |||
31 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
32 | index XXXXXXX..XXXXXXX 100644 | 26 | index XXXXXXX..XXXXXXX 100644 |
33 | --- a/accel/tcg/cputlb.c | 27 | --- a/tcg/arm/tcg-target.h |
34 | +++ b/accel/tcg/cputlb.c | 28 | +++ b/tcg/arm/tcg-target.h |
35 | @@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) | 29 | @@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions; |
36 | /* Probe for whether the specified guest write access is permitted. | 30 | #define TCG_TARGET_HAS_mul_vec 1 |
37 | * If it is not permitted then an exception will be taken in the same | 31 | #define TCG_TARGET_HAS_sat_vec 1 |
38 | * way as if this were a real write access (and we will not return). | 32 | #define TCG_TARGET_HAS_minmax_vec 1 |
39 | - * Otherwise the function will return, and there will be a valid | 33 | -#define TCG_TARGET_HAS_bitsel_vec 0 |
40 | - * entry in the TLB for this access. | 34 | +#define TCG_TARGET_HAS_bitsel_vec 1 |
41 | + * If the size is 0 or the page requires I/O access, returns NULL; otherwise, | 35 | #define TCG_TARGET_HAS_cmpsel_vec 0 |
42 | + * returns the address of the host page similar to tlb_vaddr_to_host(). | 36 | |
43 | */ | 37 | #define TCG_TARGET_DEFAULT_MO (0) |
44 | -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 38 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
45 | - uintptr_t retaddr) | 39 | index XXXXXXX..XXXXXXX 100644 |
46 | +void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 40 | --- a/tcg/arm/tcg-target.c.inc |
47 | + uintptr_t retaddr) | 41 | +++ b/tcg/arm/tcg-target.c.inc |
48 | { | 42 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
49 | uintptr_t index = tlb_index(env, mmu_idx, addr); | 43 | INSN_VSARI = 0xf2800010, /* VSHR.S */ |
50 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | 44 | INSN_VSHRI = 0xf3800010, /* VSHR.U */ |
51 | @@ -XXX,XX +XXX,XX @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 45 | |
52 | tlb_addr = tlb_addr_write(entry); | 46 | + INSN_VBSL = 0xf3100110, |
53 | } | 47 | + INSN_VBIT = 0xf3200110, |
54 | 48 | + INSN_VBIF = 0xf3300110, | |
55 | + if (!size) { | ||
56 | + return NULL; | ||
57 | + } | ||
58 | + | 49 | + |
59 | /* Handle watchpoints. */ | 50 | INSN_VTST = 0xf2000810, |
60 | - if ((tlb_addr & TLB_WATCHPOINT) && size > 0) { | 51 | |
61 | + if (tlb_addr & TLB_WATCHPOINT) { | 52 | INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */ |
62 | cpu_check_watchpoint(env_cpu(env), addr, size, | 53 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) |
63 | env_tlb(env)->d[mmu_idx].iotlb[index].attrs, | 54 | return C_O1_I2(w, w, wV); |
64 | BP_MEM_WRITE, retaddr); | 55 | case INDEX_op_cmp_vec: |
65 | } | 56 | return C_O1_I2(w, w, wZ); |
66 | + | 57 | - |
67 | + if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) { | 58 | + case INDEX_op_bitsel_vec: |
68 | + /* I/O access */ | 59 | + return C_O1_I3(w, w, w, w); |
69 | + return NULL; | 60 | default: |
70 | + } | ||
71 | + | ||
72 | + return (void *)((uintptr_t)addr + entry->addend); | ||
73 | } | ||
74 | |||
75 | void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, | ||
76 | diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c | ||
77 | index XXXXXXX..XXXXXXX 100644 | ||
78 | --- a/accel/tcg/user-exec.c | ||
79 | +++ b/accel/tcg/user-exec.c | ||
80 | @@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, | ||
81 | g_assert_not_reached(); | ||
82 | } | ||
83 | |||
84 | -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
85 | - uintptr_t retaddr) | ||
86 | +void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
87 | + uintptr_t retaddr) | ||
88 | { | ||
89 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); | ||
90 | |||
91 | @@ -XXX,XX +XXX,XX @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
92 | retaddr); | ||
93 | g_assert_not_reached(); | 61 | g_assert_not_reached(); |
94 | } | 62 | } |
63 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
64 | { | ||
65 | TCGType type = vecl + TCG_TYPE_V64; | ||
66 | unsigned q = vecl; | ||
67 | - TCGArg a0, a1, a2; | ||
68 | + TCGArg a0, a1, a2, a3; | ||
69 | int cmode, imm8; | ||
70 | |||
71 | a0 = args[0]; | ||
72 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
73 | } | ||
74 | return; | ||
75 | |||
76 | + case INDEX_op_bitsel_vec: | ||
77 | + a3 = args[3]; | ||
78 | + if (a0 == a3) { | ||
79 | + tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1); | ||
80 | + } else if (a0 == a2) { | ||
81 | + tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1); | ||
82 | + } else { | ||
83 | + tcg_out_mov(s, type, a0, a1); | ||
84 | + tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3); | ||
85 | + } | ||
86 | + return; | ||
95 | + | 87 | + |
96 | + return size ? g2h(addr) : NULL; | 88 | case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ |
97 | } | 89 | case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ |
98 | 90 | default: | |
99 | #if defined(__i386__) | 91 | @@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) |
92 | case INDEX_op_sssub_vec: | ||
93 | case INDEX_op_usadd_vec: | ||
94 | case INDEX_op_ussub_vec: | ||
95 | + case INDEX_op_bitsel_vec: | ||
96 | return 1; | ||
97 | case INDEX_op_abs_vec: | ||
98 | case INDEX_op_cmp_vec: | ||
100 | -- | 99 | -- |
101 | 2.17.1 | 100 | 2.25.1 |
102 | 101 | ||
103 | 102 | diff view generated by jsdifflib |
1 | From: David Hildenbrand <david@redhat.com> | 1 | The three vector shift by vector operations are all implemented via |
---|---|---|---|
2 | expansion. Therefore do not actually set TCG_TARGET_HAS_shv_vec, | ||
3 | as none of shlv_vec, shrv_vec, sarv_vec can actually appear in the |
4 | instruction stream, and they therefore need no entry in tcg_target_op_def. |
2 | 5 | ||
3 | Factor it out into common code. Similar to the !CONFIG_USER_ONLY variant, | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | let's not allow crossing page boundaries. | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
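The expansion in the right-hand series relies on a NEON property worth spelling out: VSHL (register) takes a signed per-element count, so negating the count turns a left shift into a right shift. A scalar sketch of one unsigned 8-bit lane (my model of the semantics, not tree code):

    #include <stdint.h>

    /* One unsigned lane of VSHL.U8 (register form): the count is signed,
     * negative counts shift right, and a magnitude of 8 or more shifts
     * every bit out. */
    static uint8_t neon_ushl8(uint8_t x, int8_t n)
    {
        if (n >= 8 || n <= -8) {
            return 0;
        }
        return n >= 0 ? (uint8_t)(x << n) : (uint8_t)(x >> -n);
    }

shrv_vec thus becomes arm_ushl_vec with a negated count, which is exactly what tcg_expand_vec_op() does in the hunk below.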
5 | |||
6 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <20190826075112.25637-4-david@redhat.com> | ||
9 | [rth: Move cpu & cc variables inside if block.] | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 8 | --- |
12 | include/exec/exec-all.h | 4 ++-- | 9 | tcg/arm/tcg-target.opc.h | 3 ++ |
13 | accel/tcg/user-exec.c | 14 ++++++++++++++ | 10 | tcg/arm/tcg-target.c.inc | 61 +++++++++++++++++++++++++++++++++++++++- |
14 | target/s390x/mem_helper.c | 7 ------- | 11 | 2 files changed, 63 insertions(+), 1 deletion(-) |
15 | 3 files changed, 16 insertions(+), 9 deletions(-) | ||
16 | 12 | ||
17 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | 13 | diff --git a/tcg/arm/tcg-target.opc.h b/tcg/arm/tcg-target.opc.h |
18 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/include/exec/exec-all.h | 15 | --- a/tcg/arm/tcg-target.opc.h |
20 | +++ b/include/exec/exec-all.h | 16 | +++ b/tcg/arm/tcg-target.opc.h |
21 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | 17 | @@ -XXX,XX +XXX,XX @@ |
22 | void tlb_set_page(CPUState *cpu, target_ulong vaddr, | 18 | * emitted by tcg_expand_vec_op. For those familiar with GCC internals, |
23 | hwaddr paddr, int prot, | 19 | * consider these to be UNSPEC with names. |
24 | int mmu_idx, target_ulong size); | 20 | */ |
25 | -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 21 | + |
26 | - uintptr_t retaddr); | 22 | +DEF(arm_sshl_vec, 1, 2, 0, IMPLVEC) |
27 | #else | 23 | +DEF(arm_ushl_vec, 1, 2, 0, IMPLVEC) |
28 | static inline void tlb_init(CPUState *cpu) | 24 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
25 | index XXXXXXX..XXXXXXX 100644 | ||
26 | --- a/tcg/arm/tcg-target.c.inc | ||
27 | +++ b/tcg/arm/tcg-target.c.inc | ||
28 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
29 | INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */ | ||
30 | INSN_VSARI = 0xf2800010, /* VSHR.S */ | ||
31 | INSN_VSHRI = 0xf3800010, /* VSHR.U */ | ||
32 | + INSN_VSHL_S = 0xf2000400, /* VSHL.S (register) */ | ||
33 | + INSN_VSHL_U = 0xf3000400, /* VSHL.U (register) */ | ||
34 | |||
35 | INSN_VBSL = 0xf3100110, | ||
36 | INSN_VBIT = 0xf3200110, | ||
37 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) | ||
38 | case INDEX_op_usadd_vec: | ||
39 | case INDEX_op_ussub_vec: | ||
40 | case INDEX_op_xor_vec: | ||
41 | + case INDEX_op_arm_sshl_vec: | ||
42 | + case INDEX_op_arm_ushl_vec: | ||
43 | return C_O1_I2(w, w, w); | ||
44 | case INDEX_op_or_vec: | ||
45 | case INDEX_op_andc_vec: | ||
46 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
47 | case INDEX_op_xor_vec: | ||
48 | tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2); | ||
49 | return; | ||
50 | + case INDEX_op_arm_sshl_vec: | ||
51 | + /* | ||
52 | + * Note that Vm is the data and Vn is the shift count, | ||
53 | + * therefore the arguments appear reversed. | ||
54 | + */ | ||
55 | + tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1); | ||
56 | + return; | ||
57 | + case INDEX_op_arm_ushl_vec: | ||
58 | + /* See above. */ | ||
59 | + tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1); | ||
60 | + return; | ||
61 | case INDEX_op_shli_vec: | ||
62 | tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece)); | ||
63 | return; | ||
64 | @@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) | ||
65 | case INDEX_op_umax_vec: | ||
66 | case INDEX_op_umin_vec: | ||
67 | return vece < MO_64; | ||
68 | + case INDEX_op_shlv_vec: | ||
69 | + case INDEX_op_shrv_vec: | ||
70 | + case INDEX_op_sarv_vec: | ||
71 | + return -1; | ||
72 | default: | ||
73 | return 0; | ||
74 | } | ||
75 | @@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) | ||
76 | void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, | ||
77 | TCGArg a0, ...) | ||
29 | { | 78 | { |
30 | @@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, | 79 | - g_assert_not_reached(); |
31 | { | 80 | + va_list va; |
32 | } | 81 | + TCGv_vec v0, v1, v2, t1; |
33 | #endif | 82 | + TCGArg a2; |
34 | +void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
35 | + uintptr_t retaddr); | ||
36 | |||
37 | #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ | ||
38 | |||
39 | diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/accel/tcg/user-exec.c | ||
42 | +++ b/accel/tcg/user-exec.c | ||
43 | @@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, | ||
44 | g_assert_not_reached(); | ||
45 | } | ||
46 | |||
47 | +void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
48 | + uintptr_t retaddr) | ||
49 | +{ | ||
50 | + if (!guest_addr_valid(addr) || | ||
51 | + page_check_range(addr, size, PAGE_WRITE) < 0) { | ||
52 | + CPUState *cpu = env_cpu(env); | ||
53 | + CPUClass *cc = CPU_GET_CLASS(cpu); | ||
54 | + | 83 | + |
55 | + cc->tlb_fill(cpu, addr, size, MMU_DATA_STORE, MMU_USER_IDX, false, | 84 | + va_start(va, a0); |
56 | + retaddr); | 85 | + v0 = temp_tcgv_vec(arg_temp(a0)); |
86 | + v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); | ||
87 | + a2 = va_arg(va, TCGArg); | ||
88 | + va_end(va); | ||
89 | + | ||
90 | + switch (opc) { | ||
91 | + case INDEX_op_shlv_vec: | ||
92 | + /* | ||
93 | + * Merely propagate shlv_vec to arm_ushl_vec. | ||
94 | + * In this way we don't set TCG_TARGET_HAS_shv_vec | ||
95 | + * because everything is done via expansion. | ||
96 | + */ | ||
97 | + v2 = temp_tcgv_vec(arg_temp(a2)); | ||
98 | + vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), | ||
99 | + tcgv_vec_arg(v1), tcgv_vec_arg(v2)); | ||
100 | + break; | ||
101 | + | ||
102 | + case INDEX_op_shrv_vec: | ||
103 | + case INDEX_op_sarv_vec: | ||
104 | + /* Right shifts are negative left shifts for NEON. */ | ||
105 | + v2 = temp_tcgv_vec(arg_temp(a2)); | ||
106 | + t1 = tcg_temp_new_vec(type); | ||
107 | + tcg_gen_neg_vec(vece, t1, v2); | ||
108 | + if (opc == INDEX_op_shrv_vec) { | ||
109 | + opc = INDEX_op_arm_ushl_vec; | ||
110 | + } else { | ||
111 | + opc = INDEX_op_arm_sshl_vec; | ||
112 | + } | ||
113 | + vec_gen_3(opc, type, vece, tcgv_vec_arg(v0), | ||
114 | + tcgv_vec_arg(v1), tcgv_vec_arg(t1)); | ||
115 | + tcg_temp_free_vec(t1); | ||
116 | + break; | ||
117 | + | ||
118 | + default: | ||
57 | + g_assert_not_reached(); | 119 | + g_assert_not_reached(); |
58 | + } | 120 | + } |
59 | +} | ||
60 | + | ||
61 | #if defined(__i386__) | ||
62 | |||
63 | #if defined(__NetBSD__) | ||
64 | diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c | ||
65 | index XXXXXXX..XXXXXXX 100644 | ||
66 | --- a/target/s390x/mem_helper.c | ||
67 | +++ b/target/s390x/mem_helper.c | ||
68 | @@ -XXX,XX +XXX,XX @@ uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) | ||
69 | void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, | ||
70 | uintptr_t ra) | ||
71 | { | ||
72 | -#ifdef CONFIG_USER_ONLY | ||
73 | - if (!guest_addr_valid(addr) || !guest_addr_valid(addr + len - 1) || | ||
74 | - page_check_range(addr, len, PAGE_WRITE) < 0) { | ||
75 | - s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra); | ||
76 | - } | ||
77 | -#else | ||
78 | /* test the actual access, not just any access to the page due to LAP */ | ||
79 | while (len) { | ||
80 | const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); | ||
81 | @@ -XXX,XX +XXX,XX @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, | ||
82 | addr = wrap_address(env, addr + curlen); | ||
83 | len -= curlen; | ||
84 | } | ||
85 | -#endif | ||
86 | } | 121 | } |
87 | 122 | ||
88 | void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len) | 123 | static void tcg_out_nop_fill(tcg_insn_unit *p, int count) |
89 | -- | 124 | -- |
90 | 2.17.1 | 125 | 2.25.1 |
91 | 126 | ||
92 | 127 | diff view generated by jsdifflib |
1 | From: David Hildenbrand <david@redhat.com> | 1 | Implement via expansion, so don't actually set TCG_TARGET_HAS_roti_vec. |
---|---|---|---|
2 | For NEON, this is shift-right followed by shift-left-and-insert. | ||
2 | 3 | ||
3 | If I'm not completely wrong, we are dealing with guest addresses here | 4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | and not with host addresses. Use the right check. | ||
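A scalar sketch of that roti expansion, assuming VSLI shifts the source left and inserts it while preserving only the low shift-count bits of the destination:

    #include <stdint.h>

    /* rotl(x, n) for one 8-bit lane, 0 < n < 8, the NEON way: first
     * shift right to position the wrapped-around bits, then the VSLI
     * step overwrites everything above the low n bits with x << n. */
    static uint8_t rotl8(uint8_t x, unsigned n)
    {
        uint8_t lo = x >> (8 - n);                /* wrapped-around bits */
        uint8_t mask = (uint8_t)((1u << n) - 1);
        return (uint8_t)(x << n) | (lo & mask);   /* the VSLI step */
    }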
5 | |||
6 | Fixes: c5a7392cfb96 ("s390x/tcg: Provide probe_write_access helper") | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
9 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
10 | Message-Id: <20190826075112.25637-2-david@redhat.com> | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | --- | 6 | --- |
13 | target/s390x/mem_helper.c | 2 +- | 7 | tcg/arm/tcg-target-con-set.h | 1 + |
14 | 1 file changed, 1 insertion(+), 1 deletion(-) | 8 | tcg/arm/tcg-target.opc.h | 1 + |
9 | tcg/arm/tcg-target.c.inc | 15 +++++++++++++++ | ||
10 | 3 files changed, 17 insertions(+) | ||
15 | 11 | ||
16 | diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c | 12 | diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h |
17 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/s390x/mem_helper.c | 14 | --- a/tcg/arm/tcg-target-con-set.h |
19 | +++ b/target/s390x/mem_helper.c | 15 | +++ b/tcg/arm/tcg-target-con-set.h |
20 | @@ -XXX,XX +XXX,XX @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, | 16 | @@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rIK) |
21 | uintptr_t ra) | 17 | C_O1_I2(r, r, rIN) |
22 | { | 18 | C_O1_I2(r, r, ri) |
23 | #ifdef CONFIG_USER_ONLY | 19 | C_O1_I2(r, rZ, rZ) |
24 | - if (!h2g_valid(addr) || !h2g_valid(addr + len - 1) || | 20 | +C_O1_I2(w, 0, w) |
25 | + if (!guest_addr_valid(addr) || !guest_addr_valid(addr + len - 1) || | 21 | C_O1_I2(w, w, w) |
26 | page_check_range(addr, len, PAGE_WRITE) < 0) { | 22 | C_O1_I2(w, w, wO) |
27 | s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra); | 23 | C_O1_I2(w, w, wV) |
24 | diff --git a/tcg/arm/tcg-target.opc.h b/tcg/arm/tcg-target.opc.h | ||
25 | index XXXXXXX..XXXXXXX 100644 | ||
26 | --- a/tcg/arm/tcg-target.opc.h | ||
27 | +++ b/tcg/arm/tcg-target.opc.h | ||
28 | @@ -XXX,XX +XXX,XX @@ | ||
29 | * consider these to be UNSPEC with names. | ||
30 | */ | ||
31 | |||
32 | +DEF(arm_sli_vec, 1, 2, 1, IMPLVEC) | ||
33 | DEF(arm_sshl_vec, 1, 2, 0, IMPLVEC) | ||
34 | DEF(arm_ushl_vec, 1, 2, 0, IMPLVEC) | ||
35 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
36 | index XXXXXXX..XXXXXXX 100644 | ||
37 | --- a/tcg/arm/tcg-target.c.inc | ||
38 | +++ b/tcg/arm/tcg-target.c.inc | ||
39 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
40 | INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */ | ||
41 | INSN_VSARI = 0xf2800010, /* VSHR.S */ | ||
42 | INSN_VSHRI = 0xf3800010, /* VSHR.U */ | ||
43 | + INSN_VSLI = 0xf3800510, | ||
44 | INSN_VSHL_S = 0xf2000400, /* VSHL.S (register) */ | ||
45 | INSN_VSHL_U = 0xf3000400, /* VSHL.U (register) */ | ||
46 | |||
47 | @@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) | ||
48 | case INDEX_op_arm_sshl_vec: | ||
49 | case INDEX_op_arm_ushl_vec: | ||
50 | return C_O1_I2(w, w, w); | ||
51 | + case INDEX_op_arm_sli_vec: | ||
52 | + return C_O1_I2(w, 0, w); | ||
53 | case INDEX_op_or_vec: | ||
54 | case INDEX_op_andc_vec: | ||
55 | return C_O1_I2(w, w, wO); | ||
56 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, | ||
57 | case INDEX_op_sari_vec: | ||
58 | tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2); | ||
59 | return; | ||
60 | + case INDEX_op_arm_sli_vec: | ||
61 | + tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece)); | ||
62 | + return; | ||
63 | |||
64 | case INDEX_op_andc_vec: | ||
65 | if (!const_args[2]) { | ||
66 | @@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) | ||
67 | case INDEX_op_shlv_vec: | ||
68 | case INDEX_op_shrv_vec: | ||
69 | case INDEX_op_sarv_vec: | ||
70 | + case INDEX_op_rotli_vec: | ||
71 | return -1; | ||
72 | default: | ||
73 | return 0; | ||
74 | @@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, | ||
75 | tcg_temp_free_vec(t1); | ||
76 | break; | ||
77 | |||
78 | + case INDEX_op_rotli_vec: | ||
79 | + t1 = tcg_temp_new_vec(type); | ||
80 | + tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1)); | ||
81 | + vec_gen_4(INDEX_op_arm_sli_vec, type, vece, | ||
82 | + tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2); | ||
83 | + tcg_temp_free_vec(t1); | ||
84 | + break; | ||
85 | + | ||
86 | default: | ||
87 | g_assert_not_reached(); | ||
28 | } | 88 | } |
29 | -- | 89 | -- |
30 | 2.17.1 | 90 | 2.25.1 |
31 | 91 | ||
32 | 92 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: David Hildenbrand <david@redhat.com> | ||
2 | 1 | ||
3 | Hm... how did that "-" slip in (-TARGET_PAGE_SIZE would be correct). This | ||
4 | currently makes us exceed one page in a single probe_write() call, | ||
5 | essentially leaving some memory unchecked. | ||
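A quick standalone check of the two expressions, assuming 4 KiB pages (the constants below are mine, not from the tree). As the message notes, keeping the "-" but using TARGET_PAGE_SIZE would also have been correct, since -TARGET_PAGE_SIZE == TARGET_PAGE_MASK:

    #include <stdio.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK ((uint64_t)-1 << TARGET_PAGE_BITS)  /* ...f000 */

    int main(void)
    {
        uint64_t addr = 0x1234;
        /* Fixed: bytes from addr to the end of its page, 0x1000 - 0x234. */
        uint64_t good = -(addr | TARGET_PAGE_MASK);               /* 0xdcc */
        /* Buggy: -TARGET_PAGE_MASK == 0x1000, so the OR barely changes
         * addr and the negation yields an enormous "page length" that
         * lets one probe_write() span past the page boundary. */
        uint64_t bad = -(addr | -TARGET_PAGE_MASK);
        printf("good=%#llx bad=%#llx\n",
               (unsigned long long)good, (unsigned long long)bad);
        return 0;
    }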
6 | |||
7 | Fixes: c5a7392cfb96 ("s390x/tcg: Provide probe_write_access helper") | ||
8 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
9 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
10 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
11 | Message-Id: <20190826075112.25637-3-david@redhat.com> | ||
12 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | --- | ||
14 | target/s390x/mem_helper.c | 2 +- | ||
15 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
16 | |||
17 | diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/target/s390x/mem_helper.c | ||
20 | +++ b/target/s390x/mem_helper.c | ||
21 | @@ -XXX,XX +XXX,XX @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, | ||
22 | #else | ||
23 | /* test the actual access, not just any access to the page due to LAP */ | ||
24 | while (len) { | ||
25 | - const uint64_t pagelen = -(addr | -TARGET_PAGE_MASK); | ||
26 | + const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); | ||
27 | const uint64_t curlen = MIN(pagelen, len); | ||
28 | |||
29 | probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra); | ||
30 | -- | ||
31 | 2.17.1 | ||
32 | |||
33 | diff view generated by jsdifflib |
1 | From: David Hildenbrand <david@redhat.com> | 1 | Implement via expansion, so don't actually set TCG_TARGET_HAS_rotv_vec. |
---|---|---|---|
2 | 2 | ||
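For the rotate-by-variable expansion in the right-hand series below: it ORs two VSHLs, reusing the signed-count behavior sketched earlier. A scalar model, assuming the count is already in [0, 7] when the op is emitted:

    #include <stdint.h>

    /* VSHL.U8 model again: signed count, negative shifts right,
     * magnitude >= 8 clears the lane. */
    static uint8_t neon_ushl8(uint8_t x, int8_t n)
    {
        if (n >= 8 || n <= -8) {
            return 0;
        }
        return n >= 0 ? (uint8_t)(x << n) : (uint8_t)(x >> -n);
    }

    /* Rotate left by a variable count: a left shift by n OR'd with a
     * right shift by 8 - n, the latter done as a shift by n - 8.  For
     * n == 0 the second term shifts by -8 and contributes nothing. */
    static uint8_t rotlv8(uint8_t x, uint8_t n)   /* assumes 0 <= n < 8 */
    {
        return neon_ushl8(x, (int8_t)n) | neon_ushl8(x, (int8_t)(n - 8));
    }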
3 | Let's also allow to probe other access types. | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
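With the access type parameterized, loads and instruction fetches can be probed the same way stores already were. A hypothetical caller (check_readable() is an invented name; only probe_access() and cpu_mmu_index() come from the tree, and this compiles only inside QEMU):

    /* Probe a read before emulating it: faults exactly as the real load
     * would, or returns the host address when the page is plain RAM. */
    static void check_readable(CPUArchState *env, target_ulong addr,
                               int size, uintptr_t ra)
    {
        void *host = probe_access(env, addr, size, MMU_DATA_LOAD,
                                  cpu_mmu_index(env, false), ra);
        if (host == NULL) {
            /* zero size or an I/O page: no direct host mapping */
        }
    }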
4 | |||
5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
7 | Message-Id: <20190830100959.26615-3-david@redhat.com> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | --- | 5 | --- |
10 | include/exec/exec-all.h | 10 ++++++++-- | 6 | tcg/arm/tcg-target.c.inc | 35 ++++++++++++++++++++++++++++++++++- |
11 | accel/tcg/cputlb.c | 43 ++++++++++++++++++++++++++++++----------- | 7 | 1 file changed, 34 insertions(+), 1 deletion(-) |
12 | accel/tcg/user-exec.c | 26 +++++++++++++++++++------ | ||
13 | 3 files changed, 60 insertions(+), 19 deletions(-) | ||
14 | 8 | ||
15 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | 9 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
16 | index XXXXXXX..XXXXXXX 100644 | 10 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/include/exec/exec-all.h | 11 | --- a/tcg/arm/tcg-target.c.inc |
18 | +++ b/include/exec/exec-all.h | 12 | +++ b/tcg/arm/tcg-target.c.inc |
19 | @@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, | 13 | @@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) |
14 | case INDEX_op_shrv_vec: | ||
15 | case INDEX_op_sarv_vec: | ||
16 | case INDEX_op_rotli_vec: | ||
17 | + case INDEX_op_rotlv_vec: | ||
18 | + case INDEX_op_rotrv_vec: | ||
19 | return -1; | ||
20 | default: | ||
21 | return 0; | ||
22 | @@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, | ||
23 | TCGArg a0, ...) | ||
20 | { | 24 | { |
21 | } | 25 | va_list va; |
22 | #endif | 26 | - TCGv_vec v0, v1, v2, t1; |
23 | -void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 27 | + TCGv_vec v0, v1, v2, t1, t2, c1; |
24 | - uintptr_t retaddr); | 28 | TCGArg a2; |
25 | +void *probe_access(CPUArchState *env, target_ulong addr, int size, | 29 | |
26 | + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); | 30 | va_start(va, a0); |
31 | @@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, | ||
32 | tcg_temp_free_vec(t1); | ||
33 | break; | ||
34 | |||
35 | + case INDEX_op_rotlv_vec: | ||
36 | + v2 = temp_tcgv_vec(arg_temp(a2)); | ||
37 | + t1 = tcg_temp_new_vec(type); | ||
38 | + c1 = tcg_constant_vec(type, vece, 8 << vece); | ||
39 | + tcg_gen_sub_vec(vece, t1, v2, c1); | ||
40 | + /* Right shifts are negative left shifts for NEON. */ | ||
41 | + vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), | ||
42 | + tcgv_vec_arg(v1), tcgv_vec_arg(t1)); | ||
43 | + vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0), | ||
44 | + tcgv_vec_arg(v1), tcgv_vec_arg(v2)); | ||
45 | + tcg_gen_or_vec(vece, v0, v0, t1); | ||
46 | + tcg_temp_free_vec(t1); | ||
47 | + break; | ||
27 | + | 48 | + |
28 | +static inline void *probe_write(CPUArchState *env, target_ulong addr, int size, | 49 | + case INDEX_op_rotrv_vec: |
29 | + int mmu_idx, uintptr_t retaddr) | 50 | + v2 = temp_tcgv_vec(arg_temp(a2)); |
30 | +{ | 51 | + t1 = tcg_temp_new_vec(type); |
31 | + return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); | 52 | + t2 = tcg_temp_new_vec(type); |
32 | +} | 53 | + c1 = tcg_constant_vec(type, vece, 8 << vece); |
33 | 54 | + tcg_gen_neg_vec(vece, t1, v2); | |
34 | #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ | 55 | + tcg_gen_sub_vec(vece, t2, c1, v2); |
35 | 56 | + /* Right shifts are negative left shifts for NEON. */ | |
36 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 57 | + vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1), |
37 | index XXXXXXX..XXXXXXX 100644 | 58 | + tcgv_vec_arg(v1), tcgv_vec_arg(t1)); |
38 | --- a/accel/tcg/cputlb.c | 59 | + vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2), |
39 | +++ b/accel/tcg/cputlb.c | 60 | + tcgv_vec_arg(v1), tcgv_vec_arg(t2)); |
40 | @@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) | 61 | + tcg_gen_or_vec(vece, v0, t1, t2); |
41 | return qemu_ram_addr_from_host_nofail(p); | 62 | + tcg_temp_free_vec(t1); |
42 | } | 63 | + tcg_temp_free_vec(t2); |
43 | |||
44 | -/* Probe for whether the specified guest write access is permitted. | ||
45 | - * If it is not permitted then an exception will be taken in the same | ||
46 | - * way as if this were a real write access (and we will not return). | ||
47 | +/* | ||
48 | + * Probe for whether the specified guest access is permitted. If it is not | ||
49 | + * permitted then an exception will be taken in the same way as if this | ||
50 | + * were a real access (and we will not return). | ||
51 | * If the size is 0 or the page requires I/O access, returns NULL; otherwise, | ||
52 | * returns the address of the host page similar to tlb_vaddr_to_host(). | ||
53 | */ | ||
54 | -void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
55 | - uintptr_t retaddr) | ||
56 | +void *probe_access(CPUArchState *env, target_ulong addr, int size, | ||
57 | + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) | ||
58 | { | ||
59 | uintptr_t index = tlb_index(env, mmu_idx, addr); | ||
60 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | ||
61 | - target_ulong tlb_addr = tlb_addr_write(entry); | ||
62 | + target_ulong tlb_addr; | ||
63 | + size_t elt_ofs; | ||
64 | + int wp_access; | ||
65 | |||
66 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); | ||
67 | |||
68 | + switch (access_type) { | ||
69 | + case MMU_DATA_LOAD: | ||
70 | + elt_ofs = offsetof(CPUTLBEntry, addr_read); | ||
71 | + wp_access = BP_MEM_READ; | ||
72 | + break; | 64 | + break; |
73 | + case MMU_DATA_STORE: | ||
74 | + elt_ofs = offsetof(CPUTLBEntry, addr_write); | ||
75 | + wp_access = BP_MEM_WRITE; | ||
76 | + break; | ||
77 | + case MMU_INST_FETCH: | ||
78 | + elt_ofs = offsetof(CPUTLBEntry, addr_code); | ||
79 | + wp_access = BP_MEM_READ; | ||
80 | + break; | ||
81 | + default: | ||
82 | + g_assert_not_reached(); | ||
83 | + } | ||
84 | + tlb_addr = tlb_read_ofs(entry, elt_ofs); | ||
85 | + | 65 | + |
86 | if (unlikely(!tlb_hit(tlb_addr, addr))) { | 66 | default: |
87 | - if (!VICTIM_TLB_HIT(addr_write, addr)) { | ||
88 | - tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, | ||
89 | - mmu_idx, retaddr); | ||
90 | + if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, | ||
91 | + addr & TARGET_PAGE_MASK)) { | ||
92 | + tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr); | ||
93 | /* TLB resize via tlb_fill may have moved the entry. */ | ||
94 | index = tlb_index(env, mmu_idx, addr); | ||
95 | entry = tlb_entry(env, mmu_idx, addr); | ||
96 | } | ||
97 | - tlb_addr = tlb_addr_write(entry); | ||
98 | + tlb_addr = tlb_read_ofs(entry, elt_ofs); | ||
99 | } | ||
100 | |||
101 | if (!size) { | ||
102 | @@ -XXX,XX +XXX,XX @@ void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
103 | if (tlb_addr & TLB_WATCHPOINT) { | ||
104 | cpu_check_watchpoint(env_cpu(env), addr, size, | ||
105 | env_tlb(env)->d[mmu_idx].iotlb[index].attrs, | ||
106 | - BP_MEM_WRITE, retaddr); | ||
107 | + wp_access, retaddr); | ||
108 | } | ||
109 | |||
110 | if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) { | ||
111 | diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c | ||
112 | index XXXXXXX..XXXXXXX 100644 | ||
113 | --- a/accel/tcg/user-exec.c | ||
114 | +++ b/accel/tcg/user-exec.c | ||
115 | @@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, | ||
116 | g_assert_not_reached(); | ||
117 | } | ||
118 | |||
119 | -void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
120 | - uintptr_t retaddr) | ||
121 | +void *probe_access(CPUArchState *env, target_ulong addr, int size, | ||
122 | + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) | ||
123 | { | ||
124 | + int flags; | ||
125 | + | ||
126 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); | ||
127 | |||
128 | - if (!guest_addr_valid(addr) || | ||
129 | - page_check_range(addr, size, PAGE_WRITE) < 0) { | ||
130 | + switch (access_type) { | ||
131 | + case MMU_DATA_STORE: | ||
132 | + flags = PAGE_WRITE; | ||
133 | + break; | ||
134 | + case MMU_DATA_LOAD: | ||
135 | + flags = PAGE_READ; | ||
136 | + break; | ||
137 | + case MMU_INST_FETCH: | ||
138 | + flags = PAGE_EXEC; | ||
139 | + break; | ||
140 | + default: | ||
141 | + g_assert_not_reached(); | ||
142 | + } | ||
143 | + | ||
144 | + if (!guest_addr_valid(addr) || page_check_range(addr, size, flags) < 0) { | ||
145 | CPUState *cpu = env_cpu(env); | ||
146 | CPUClass *cc = CPU_GET_CLASS(cpu); | ||
147 | - | ||
148 | - cc->tlb_fill(cpu, addr, size, MMU_DATA_STORE, MMU_USER_IDX, false, | ||
149 | + cc->tlb_fill(cpu, addr, size, access_type, MMU_USER_IDX, false, | ||
150 | retaddr); | ||
151 | g_assert_not_reached(); | 67 | g_assert_not_reached(); |
152 | } | 68 | } |
153 | -- | 69 | -- |
154 | 2.17.1 | 70 | 2.25.1 |
155 | 71 | ||
156 | 72 | diff view generated by jsdifflib |