The following changes since commit 0a301624c2f4ced3331ffd5bce85b4274fe132af:

  Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20220208' into staging (2022-02-08 11:40:08 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20220211

for you to fetch changes up to 5c1a101ef6b85537a4ade93c39ea81cadd5c246e:

  tests/tcg/multiarch: Add sigbus.c (2022-02-09 09:00:01 +1100)

----------------------------------------------------------------
Fix safe_syscall_base for sparc64.
Fix host signal handling for sparc64-linux.
Speedups for jump cache and work list probing.
Fix for exception replays.
Raise guest SIGBUS for user-only misaligned accesses.

----------------------------------------------------------------
Idan Horowitz (2):
      accel/tcg: Optimize jump cache flush during tlb range flush
      softmmu/cpus: Check if the cpu work list is empty atomically

Pavel Dovgalyuk (1):
      replay: use CF_NOIRQ for special exception-replaying TB

Richard Henderson (29):
      common-user/host/sparc64: Fix safe_syscall_base
      linux-user: Introduce host_signal_mask
      linux-user: Introduce host_sigcontext
      linux-user: Move sparc/host-signal.h to sparc64/host-signal.h
      linux-user/include/host/sparc64: Fix host_sigcontext
      tcg/i386: Support raising sigbus for user-only
      tcg/aarch64: Support raising sigbus for user-only
      tcg/ppc: Support raising sigbus for user-only
      tcg/riscv: Support raising sigbus for user-only
      tcg/s390x: Support raising sigbus for user-only
      tcg/tci: Support raising sigbus for user-only
      tcg/arm: Drop support for armv4 and armv5 hosts
      tcg/arm: Remove use_armv5t_instructions
      tcg/arm: Remove use_armv6_instructions
      tcg/arm: Check alignment for ldrd and strd
      tcg/arm: Support unaligned access for softmmu
      tcg/arm: Reserve a register for guest_base
      tcg/arm: Support raising sigbus for user-only
      tcg/mips: Support unaligned access for user-only
      tcg/mips: Support unaligned access for softmmu
      tcg/sparc: Use tcg_out_movi_imm13 in tcg_out_addsub2_i64
      tcg/sparc: Split out tcg_out_movi_imm32
      tcg/sparc: Add scratch argument to tcg_out_movi_int
      tcg/sparc: Improve code gen for shifted 32-bit constants
      tcg/sparc: Convert patch_reloc to return bool
      tcg/sparc: Use the constant pool for 64-bit constants
      tcg/sparc: Add tcg_out_jmpl_const for better tail calls
      tcg/sparc: Support unaligned access for user-only
      tests/tcg/multiarch: Add sigbus.c

WANG Xuerui (2):
      tcg/loongarch64: Fix fallout from recent MO_Q renaming
      tcg/loongarch64: Support raising sigbus for user-only

 linux-user/include/host/aarch64/host-signal.h     |  16 +-
 linux-user/include/host/alpha/host-signal.h       |  14 +-
 linux-user/include/host/arm/host-signal.h         |  14 +-
 linux-user/include/host/i386/host-signal.h        |  14 +-
 linux-user/include/host/loongarch64/host-signal.h |  14 +-
 linux-user/include/host/mips/host-signal.h        |  14 +-
 linux-user/include/host/ppc/host-signal.h         |  14 +-
 linux-user/include/host/riscv/host-signal.h       |  14 +-
 linux-user/include/host/s390/host-signal.h        |  14 +-
 linux-user/include/host/sparc/host-signal.h       |  63 ----
 linux-user/include/host/sparc64/host-signal.h     |  65 +++-
 linux-user/include/host/x86_64/host-signal.h      |  14 +-
 tcg/aarch64/tcg-target.h                          |   2 -
 tcg/arm/tcg-target.h                              |   6 +-
 tcg/i386/tcg-target.h                             |   2 -
 tcg/loongarch64/tcg-target.h                      |   2 -
 tcg/mips/tcg-target.h                             |   2 -
 tcg/ppc/tcg-target.h                              |   2 -
 tcg/riscv/tcg-target.h                            |   2 -
 tcg/s390x/tcg-target.h                            |   2 -
 accel/tcg/cpu-exec.c                              |   3 +-
 accel/tcg/cputlb.c                                |   9 +
 linux-user/signal.c                               |  22 +-
 softmmu/cpus.c                                    |   7 +-
 tcg/tci.c                                         |  20 +-
 tests/tcg/multiarch/sigbus.c                      |  68 ++++
 tcg/aarch64/tcg-target.c.inc                      |  91 ++++-
 tcg/arm/tcg-target.c.inc                          | 410 +++++++++-------------
 tcg/i386/tcg-target.c.inc                         | 103 +++++-
 tcg/loongarch64/tcg-target.c.inc                  |  73 +++-
 tcg/mips/tcg-target.c.inc                         | 387 ++++++++++++++++++--
 tcg/ppc/tcg-target.c.inc                          |  98 +++++-
 tcg/riscv/tcg-target.c.inc                        |  63 +++-
 tcg/s390x/tcg-target.c.inc                        |  59 +++-
 tcg/sparc/tcg-target.c.inc                        | 348 +++++++++++++++---
 common-user/host/sparc64/safe-syscall.inc.S       |   5 +-
 36 files changed, 1561 insertions(+), 495 deletions(-)
 delete mode 100644 linux-user/include/host/sparc/host-signal.h
 create mode 100644 tests/tcg/multiarch/sigbus.c
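The last two summary items above are the bulk of the series: every TCG backend learns to raise SIGBUS for misaligned user-only accesses, and a new test exercises the behaviour. As a rough illustration of what the guest-visible contract looks like, here is a minimal sketch in the spirit of, but not identical to, the tests/tcg/multiarch/sigbus.c added by the final patch:

    /*
     * Illustrative sketch only.  A deliberately misaligned atomic update
     * must either complete normally or be reported as SIGBUS with
     * si_code == BUS_ADRALN, depending on whether the target enforces
     * alignment for the operation.
     */
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void sigbus_handler(int sig, siginfo_t *info, void *ctx)
    {
        /* BUS_ADRALN is the "invalid address alignment" code. */
        exit(info->si_code == BUS_ADRALN ? EXIT_SUCCESS : EXIT_FAILURE);
    }

    int main(void)
    {
        struct sigaction sa = {
            .sa_sigaction = sigbus_handler,
            .sa_flags = SA_SIGINFO,
        };
        static char buf[16] __attribute__((aligned(8)));

        sigaction(SIGBUS, &sa, NULL);
        /* Misaligned atomic: targets that enforce alignment trap here. */
        __atomic_fetch_add((int *)(buf + 1), 1, __ATOMIC_SEQ_CST);
        printf("misaligned access completed without a trap\n");
        return EXIT_SUCCESS;
    }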
common-user/host/sparc64: Fix safe_syscall_base

Use the "retl" instead of "ret" instruction alias, since we
do not allocate a register window in this function.

Fix the offset to the first stacked parameter, which lies
beyond the register window save area.

Fixes: 95c021dac835 ("linux-user/host/sparc64: Add safe-syscall.inc.S")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 common-user/host/sparc64/safe-syscall.inc.S | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/common-user/host/sparc64/safe-syscall.inc.S b/common-user/host/sparc64/safe-syscall.inc.S
index XXXXXXX..XXXXXXX 100644
--- a/common-user/host/sparc64/safe-syscall.inc.S
+++ b/common-user/host/sparc64/safe-syscall.inc.S
@@ -XXX,XX +XXX,XX @@
         .type   safe_syscall_end, @function
 
 #define STACK_BIAS  2047
-#define PARAM(N)    STACK_BIAS + N*8
+#define WINDOW_SIZE 16 * 8
+#define PARAM(N)    STACK_BIAS + WINDOW_SIZE + N * 8
 
 /*
  * This is the entry point for making a system call. The calling
@@ -XXX,XX +XXX,XX @@ safe_syscall_end:
         /* code path for having successfully executed the syscall */
         bcs,pn  %xcc, 1f
          nop
-        ret
+        retl
         nop
 
         /* code path when we didn't execute the syscall */
-- 
2.25.1
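The offset fix above is pure stack-frame arithmetic and can be sanity-checked in isolation. On sparc64 the stack pointer is biased by 2047, and the ABI reserves a 16-slot by 8-byte register window save area at the top of every frame, so stacked parameters can only begin 128 bytes past the bias. A self-contained check of the corrected macro (illustrative only, not part of the patch):

    #include <assert.h>

    #define STACK_BIAS  2047
    #define WINDOW_SIZE (16 * 8)   /* save slots for the %l and %i registers */
    #define PARAM(N)    (STACK_BIAS + WINDOW_SIZE + (N) * 8)

    int main(void)
    {
        /*
         * The old macro, STACK_BIAS + N*8, computed offsets inside the
         * 128-byte save area [2047, 2175) for every N < 16, so it read
         * saved window registers rather than the caller's stacked
         * arguments.
         */
        assert(PARAM(0) == 2175);   /* first slot past the save area */
        assert(PARAM(6) == 2223);
        return 0;
    }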
linux-user: Introduce host_signal_mask

Do not directly access the uc_sigmask member.
This is preparation for a sparc64 fix.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/include/host/aarch64/host-signal.h |  5 +++++
 linux-user/include/host/alpha/host-signal.h   |  5 +++++
 linux-user/include/host/arm/host-signal.h     |  5 +++++
 linux-user/include/host/i386/host-signal.h    |  5 +++++
 .../include/host/loongarch64/host-signal.h    |  5 +++++
 linux-user/include/host/mips/host-signal.h    |  5 +++++
 linux-user/include/host/ppc/host-signal.h     |  5 +++++
 linux-user/include/host/riscv/host-signal.h   |  5 +++++
 linux-user/include/host/s390/host-signal.h    |  5 +++++
 linux-user/include/host/sparc/host-signal.h   |  5 +++++
 linux-user/include/host/x86_64/host-signal.h  |  5 +++++
 linux-user/signal.c                           | 18 ++++++++----------
 12 files changed, 63 insertions(+), 10 deletions(-)

diff --git a/linux-user/include/host/aarch64/host-signal.h b/linux-user/include/host/aarch64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/aarch64/host-signal.h
+++ b/linux-user/include/host/aarch64/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.pc = pc;
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     struct _aarch64_ctx *hdr;
diff --git a/linux-user/include/host/alpha/host-signal.h b/linux-user/include/host/alpha/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/alpha/host-signal.h
+++ b/linux-user/include/host/alpha/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.sc_pc = pc;
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     uint32_t *pc = (uint32_t *)host_signal_pc(uc);
diff --git a/linux-user/include/host/arm/host-signal.h b/linux-user/include/host/arm/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/arm/host-signal.h
+++ b/linux-user/include/host/arm/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.arm_pc = pc;
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     /*
diff --git a/linux-user/include/host/i386/host-signal.h b/linux-user/include/host/i386/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/i386/host-signal.h
+++ b/linux-user/include/host/i386/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.gregs[REG_EIP] = pc;
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
diff --git a/linux-user/include/host/loongarch64/host-signal.h b/linux-user/include/host/loongarch64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/loongarch64/host-signal.h
+++ b/linux-user/include/host/loongarch64/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.__pc = pc;
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     const uint32_t *pinsn = (const uint32_t *)host_signal_pc(uc);
diff --git a/linux-user/include/host/mips/host-signal.h b/linux-user/include/host/mips/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/mips/host-signal.h
+++ b/linux-user/include/host/mips/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.pc = pc;
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 #if defined(__misp16) || defined(__mips_micromips)
 #error "Unsupported encoding"
 #endif
diff --git a/linux-user/include/host/ppc/host-signal.h b/linux-user/include/host/ppc/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/ppc/host-signal.h
+++ b/linux-user/include/host/ppc/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.regs->nip = pc;
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     return uc->uc_mcontext.regs->trap != 0x400
diff --git a/linux-user/include/host/riscv/host-signal.h b/linux-user/include/host/riscv/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/riscv/host-signal.h
+++ b/linux-user/include/host/riscv/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.__gregs[REG_PC] = pc;
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     /*
diff --git a/linux-user/include/host/s390/host-signal.h b/linux-user/include/host/s390/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/s390/host-signal.h
+++ b/linux-user/include/host/s390/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.psw.addr = pc;
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     uint16_t *pinsn = (uint16_t *)host_signal_pc(uc);
diff --git a/linux-user/include/host/sparc/host-signal.h b/linux-user/include/host/sparc/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/sparc/host-signal.h
+++ b/linux-user/include/host/sparc/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
 #endif
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     uint32_t insn = *(uint32_t *)host_signal_pc(uc);
diff --git a/linux-user/include/host/x86_64/host-signal.h b/linux-user/include/host/x86_64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/x86_64/host-signal.h
+++ b/linux-user/include/host/x86_64/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.gregs[REG_RIP] = pc;
 }
 
+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
diff --git a/linux-user/signal.c b/linux-user/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
     int guest_sig;
     uintptr_t pc = 0;
     bool sync_sig = false;
+    void *sigmask = host_signal_mask(uc);
 
     /*
      * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
         if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
             /* If this was a write to a TB protected page, restart. */
             if (is_write &&
-                handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
-                                            pc, guest_addr)) {
+                handle_sigsegv_accerr_write(cpu, sigmask, pc, guest_addr)) {
                 return;
             }
 
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
             }
         }
 
-        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+        sigprocmask(SIG_SETMASK, sigmask, NULL);
         cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
     } else {
-        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+        sigprocmask(SIG_SETMASK, sigmask, NULL);
         if (info->si_code == BUS_ADRALN) {
             cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
         }
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
      * now and it getting out to the main loop. Signals will be
      * unblocked again in process_pending_signals().
      *
-     * WARNING: we cannot use sigfillset() here because the uc_sigmask
+     * WARNING: we cannot use sigfillset() here because the sigmask
      * field is a kernel sigset_t, which is much smaller than the
      * libc sigset_t which sigfillset() operates on. Using sigfillset()
      * would write 0xff bytes off the end of the structure and trash
      * data on the struct.
-     * We can't use sizeof(uc->uc_sigmask) either, because the libc
-     * headers define the struct field with the wrong (too large) type.
      */
-    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
-    sigdelset(&uc->uc_sigmask, SIGSEGV);
-    sigdelset(&uc->uc_sigmask, SIGBUS);
+    memset(sigmask, 0xff, SIGSET_T_SIZE);
+    sigdelset(sigmask, SIGSEGV);
+    sigdelset(sigmask, SIGBUS);
 
     /* interrupt the virtual CPU as soon as possible */
     cpu_exit(thread_cpu);
-- 
2.25.1
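The new hook means generic signal code no longer has to know where the host keeps the blocked-signal mask; every host in this patch still returns &uc->uc_sigmask, and the sparc64 fix later in the series is what needs to return something else. A minimal sketch of the consuming side, assuming an ordinary glibc host (illustrative, not the actual linux-user/signal.c):

    #include <signal.h>
    #include <ucontext.h>

    static inline void *host_signal_mask(ucontext_t *uc)
    {
        return &uc->uc_sigmask;   /* all hosts in this patch do this */
    }

    static void host_signal_handler(int sig, siginfo_t *info, void *puc)
    {
        ucontext_t *uc = puc;
        void *sigmask = host_signal_mask(uc);

        /*
         * Generic code edits the mask through the opaque pointer; the
         * kernel re-applies whatever is stored there when the handler
         * returns.
         */
        sigdelset(sigmask, SIGSEGV);
    }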
linux-user: Introduce host_sigcontext

Do not directly access ucontext_t as the third signal parameter.
This is preparation for a sparc64 fix.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/include/host/aarch64/host-signal.h     | 13 ++++++++-----
 linux-user/include/host/alpha/host-signal.h       | 11 +++++++----
 linux-user/include/host/arm/host-signal.h         | 11 +++++++----
 linux-user/include/host/i386/host-signal.h        | 11 +++++++----
 linux-user/include/host/loongarch64/host-signal.h | 11 +++++++----
 linux-user/include/host/mips/host-signal.h        | 11 +++++++----
 linux-user/include/host/ppc/host-signal.h         | 11 +++++++----
 linux-user/include/host/riscv/host-signal.h       | 11 +++++++----
 linux-user/include/host/s390/host-signal.h        | 11 +++++++----
 linux-user/include/host/sparc/host-signal.h       | 11 +++++++----
 linux-user/include/host/x86_64/host-signal.h      | 11 +++++++----
 linux-user/signal.c                               |  4 ++--
 12 files changed, 80 insertions(+), 47 deletions(-)

diff --git a/linux-user/include/host/aarch64/host-signal.h b/linux-user/include/host/aarch64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/aarch64/host-signal.h
+++ b/linux-user/include/host/aarch64/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef AARCH64_HOST_SIGNAL_H
 #define AARCH64_HOST_SIGNAL_H
 
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
 /* Pre-3.16 kernel headers don't have these, so provide fallback definitions */
 #ifndef ESR_MAGIC
 #define ESR_MAGIC 0x45535201
@@ -XXX,XX +XXX,XX @@ struct esr_context {
 };
 #endif
 
-static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc)
+static inline struct _aarch64_ctx *first_ctx(host_sigcontext *uc)
 {
     return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
 }
@@ -XXX,XX +XXX,XX @@ static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr)
     return (struct _aarch64_ctx *)((char *)hdr + hdr->size);
 }
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.pc;
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.pc = pc;
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     struct _aarch64_ctx *hdr;
     uint32_t insn;
diff --git a/linux-user/include/host/alpha/host-signal.h b/linux-user/include/host/alpha/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/alpha/host-signal.h
+++ b/linux-user/include/host/alpha/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef ALPHA_HOST_SIGNAL_H
 #define ALPHA_HOST_SIGNAL_H
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.sc_pc;
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.sc_pc = pc;
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     uint32_t *pc = (uint32_t *)host_signal_pc(uc);
     uint32_t insn = *pc;
diff --git a/linux-user/include/host/arm/host-signal.h b/linux-user/include/host/arm/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/arm/host-signal.h
+++ b/linux-user/include/host/arm/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef ARM_HOST_SIGNAL_H
 #define ARM_HOST_SIGNAL_H
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.arm_pc;
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.arm_pc = pc;
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     /*
      * In the FSR, bit 11 is WnR, assuming a v6 or
diff --git a/linux-user/include/host/i386/host-signal.h b/linux-user/include/host/i386/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/i386/host-signal.h
+++ b/linux-user/include/host/i386/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef I386_HOST_SIGNAL_H
 #define I386_HOST_SIGNAL_H
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.gregs[REG_EIP];
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.gregs[REG_EIP] = pc;
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
         && (uc->uc_mcontext.gregs[REG_ERR] & 0x2);
diff --git a/linux-user/include/host/loongarch64/host-signal.h b/linux-user/include/host/loongarch64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/loongarch64/host-signal.h
+++ b/linux-user/include/host/loongarch64/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef LOONGARCH64_HOST_SIGNAL_H
 #define LOONGARCH64_HOST_SIGNAL_H
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.__pc;
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.__pc = pc;
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     const uint32_t *pinsn = (const uint32_t *)host_signal_pc(uc);
     uint32_t insn = pinsn[0];
diff --git a/linux-user/include/host/mips/host-signal.h b/linux-user/include/host/mips/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/mips/host-signal.h
+++ b/linux-user/include/host/mips/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef MIPS_HOST_SIGNAL_H
 #define MIPS_HOST_SIGNAL_H
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.pc;
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.pc = pc;
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
@@ -XXX,XX +XXX,XX @@ static inline void *host_signal_mask(ucontext_t *uc)
 #error "Unsupported encoding"
 #endif
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     uint32_t insn = *(uint32_t *)host_signal_pc(uc);
 
diff --git a/linux-user/include/host/ppc/host-signal.h b/linux-user/include/host/ppc/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/ppc/host-signal.h
+++ b/linux-user/include/host/ppc/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef PPC_HOST_SIGNAL_H
 #define PPC_HOST_SIGNAL_H
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.regs->nip;
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.regs->nip = pc;
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     return uc->uc_mcontext.regs->trap != 0x400
         && (uc->uc_mcontext.regs->dsisr & 0x02000000);
diff --git a/linux-user/include/host/riscv/host-signal.h b/linux-user/include/host/riscv/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/riscv/host-signal.h
+++ b/linux-user/include/host/riscv/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef RISCV_HOST_SIGNAL_H
 #define RISCV_HOST_SIGNAL_H
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.__gregs[REG_PC];
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.__gregs[REG_PC] = pc;
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     /*
      * Detect store by reading the instruction at the program counter.
diff --git a/linux-user/include/host/s390/host-signal.h b/linux-user/include/host/s390/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/s390/host-signal.h
+++ b/linux-user/include/host/s390/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef S390_HOST_SIGNAL_H
 #define S390_HOST_SIGNAL_H
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.psw.addr;
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.psw.addr = pc;
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     uint16_t *pinsn = (uint16_t *)host_signal_pc(uc);
 
diff --git a/linux-user/include/host/sparc/host-signal.h b/linux-user/include/host/sparc/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/sparc/host-signal.h
+++ b/linux-user/include/host/sparc/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef SPARC_HOST_SIGNAL_H
 #define SPARC_HOST_SIGNAL_H
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
 #ifdef __arch64__
     return uc->uc_mcontext.mc_gregs[MC_PC];
@@ -XXX,XX +XXX,XX @@ static inline uintptr_t host_signal_pc(ucontext_t *uc)
 #endif
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
 #ifdef __arch64__
     uc->uc_mcontext.mc_gregs[MC_PC] = pc;
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
 #endif
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     uint32_t insn = *(uint32_t *)host_signal_pc(uc);
 
diff --git a/linux-user/include/host/x86_64/host-signal.h b/linux-user/include/host/x86_64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/x86_64/host-signal.h
+++ b/linux-user/include/host/x86_64/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef X86_64_HOST_SIGNAL_H
 #define X86_64_HOST_SIGNAL_H
 
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.gregs[REG_RIP];
 }
 
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.gregs[REG_RIP] = pc;
 }
 
-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }
 
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
         && (uc->uc_mcontext.gregs[REG_ERR] & 0x2);
diff --git a/linux-user/signal.c b/linux-user/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -XXX,XX +XXX,XX @@ void queue_signal(CPUArchState *env, int sig, int si_type,
 /* Adjust the signal context to rewind out of safe-syscall if we're in it */
 static inline void rewind_if_in_safe_syscall(void *puc)
 {
-    ucontext_t *uc = (ucontext_t *)puc;
+    host_sigcontext *uc = (host_sigcontext *)puc;
     uintptr_t pcreg = host_signal_pc(uc);
 
     if (pcreg > (uintptr_t)safe_syscall_start
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
     CPUState *cpu = env_cpu(env);
     TaskState *ts = cpu->opaque;
     target_siginfo_t tinfo;
-    ucontext_t *uc = puc;
+    host_sigcontext *uc = puc;
     struct emulated_sigtable *k;
     int guest_sig;
     uintptr_t pc = 0;
-- 
2.25.1
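On most Linux hosts the third argument of an SA_SIGINFO handler really is a ucontext_t, so the typedef above is a plain alias; the point is that a host whose kernel passes something else can now substitute its own type without touching generic code. A hypothetical host header illustrating the shape of the interface (the struct below is invented for the example and is not any real QEMU port):

    #include <stdint.h>

    /* Pretend this host's kernel hands the handler a raw sigframe. */
    struct example_kernel_sigframe {
        unsigned long regs[32];
        unsigned long pc;
        unsigned long sigmask;
    };
    typedef struct example_kernel_sigframe host_sigcontext;

    static inline uintptr_t host_signal_pc(host_sigcontext *sc)
    {
        return sc->pc;
    }

    static inline void host_signal_set_pc(host_sigcontext *sc, uintptr_t pc)
    {
        sc->pc = pc;
    }

    static inline void *host_signal_mask(host_sigcontext *sc)
    {
        return &sc->sigmask;
    }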
linux-user: Move sparc/host-signal.h to sparc64/host-signal.h

We do not support sparc32 as a host, so there's no point in
sparc64 redirecting to sparc.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/include/host/sparc/host-signal.h   | 71 -------------------
 linux-user/include/host/sparc64/host-signal.h | 64 ++++++++++++++++-
 2 files changed, 63 insertions(+), 72 deletions(-)
 delete mode 100644 linux-user/include/host/sparc/host-signal.h

diff --git a/linux-user/include/host/sparc/host-signal.h b/linux-user/include/host/sparc/host-signal.h
deleted file mode 100644
index XXXXXXX..XXXXXXX
--- a/linux-user/include/host/sparc/host-signal.h
+++ /dev/null
@@ -XXX,XX +XXX,XX @@
-/*
- * host-signal.h: signal info dependent on the host architecture
- *
- * Copyright (c) 2003-2005 Fabrice Bellard
- * Copyright (c) 2021 Linaro Limited
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef SPARC_HOST_SIGNAL_H
-#define SPARC_HOST_SIGNAL_H
-
-/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */
-typedef ucontext_t host_sigcontext;
-
-static inline uintptr_t host_signal_pc(host_sigcontext *uc)
255 | if (cpu->watchpoint_hit) { | ||
256 | - /* We re-entered the check after replacing the TB. Now raise | ||
257 | - * the debug interrupt so that is will trigger after the | ||
258 | - * current instruction. */ | ||
259 | + /* | ||
260 | + * We re-entered the check after replacing the TB. | ||
261 | + * Now raise the debug interrupt so that it will | ||
262 | + * trigger after the current instruction. | ||
263 | + */ | ||
264 | + qemu_mutex_lock_iothread(); | ||
265 | cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); | ||
266 | + qemu_mutex_unlock_iothread(); | ||
267 | return; | ||
268 | } | ||
269 | |||
270 | @@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
271 | } | ||
272 | } | ||
273 | |||
274 | -static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | ||
275 | -{ | 35 | -{ |
276 | - CPUState *cpu = current_cpu; | 36 | -#ifdef __arch64__ |
277 | - vaddr addr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; | 37 | - return uc->uc_mcontext.mc_gregs[MC_PC]; |
278 | - | 38 | -#else |
279 | - cpu_check_watchpoint(cpu, addr, len, attrs, flags, 0); | 39 | - return uc->uc_mcontext.gregs[REG_PC]; |
40 | -#endif | ||
280 | -} | 41 | -} |
281 | - | 42 | - |
282 | -/* Watchpoint access routines. Watchpoints are inserted using TLB tricks, | 43 | -static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) |
283 | - so these check for a hit then pass through to the normal out-of-line | ||
284 | - phys routines. */ | ||
285 | -static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata, | ||
286 | - unsigned size, MemTxAttrs attrs) | ||
287 | -{ | 44 | -{ |
288 | - MemTxResult res; | 45 | -#ifdef __arch64__ |
289 | - uint64_t data; | 46 | - uc->uc_mcontext.mc_gregs[MC_PC] = pc; |
290 | - int asidx = cpu_asidx_from_attrs(current_cpu, attrs); | 47 | -#else |
291 | - AddressSpace *as = current_cpu->cpu_ases[asidx].as; | 48 | - uc->uc_mcontext.gregs[REG_PC] = pc; |
292 | - | 49 | -#endif |
293 | - check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ); | ||
294 | - switch (size) { | ||
295 | - case 1: | ||
296 | - data = address_space_ldub(as, addr, attrs, &res); | ||
297 | - break; | ||
298 | - case 2: | ||
299 | - data = address_space_lduw(as, addr, attrs, &res); | ||
300 | - break; | ||
301 | - case 4: | ||
302 | - data = address_space_ldl(as, addr, attrs, &res); | ||
303 | - break; | ||
304 | - case 8: | ||
305 | - data = address_space_ldq(as, addr, attrs, &res); | ||
306 | - break; | ||
307 | - default: abort(); | ||
308 | - } | ||
309 | - *pdata = data; | ||
310 | - return res; | ||
311 | -} | 50 | -} |
312 | - | 51 | - |
313 | -static MemTxResult watch_mem_write(void *opaque, hwaddr addr, | 52 | -static inline void *host_signal_mask(host_sigcontext *uc) |
314 | - uint64_t val, unsigned size, | ||
315 | - MemTxAttrs attrs) | ||
316 | -{ | 53 | -{ |
317 | - MemTxResult res; | 54 | - return &uc->uc_sigmask; |
318 | - int asidx = cpu_asidx_from_attrs(current_cpu, attrs); | ||
319 | - AddressSpace *as = current_cpu->cpu_ases[asidx].as; | ||
320 | - | ||
321 | - check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE); | ||
322 | - switch (size) { | ||
323 | - case 1: | ||
324 | - address_space_stb(as, addr, val, attrs, &res); | ||
325 | - break; | ||
326 | - case 2: | ||
327 | - address_space_stw(as, addr, val, attrs, &res); | ||
328 | - break; | ||
329 | - case 4: | ||
330 | - address_space_stl(as, addr, val, attrs, &res); | ||
331 | - break; | ||
332 | - case 8: | ||
333 | - address_space_stq(as, addr, val, attrs, &res); | ||
334 | - break; | ||
335 | - default: abort(); | ||
336 | - } | ||
337 | - return res; | ||
338 | -} | 55 | -} |
339 | - | 56 | - |
340 | -static const MemoryRegionOps watch_mem_ops = { | 57 | -static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) |
341 | - .read_with_attrs = watch_mem_read, | 58 | -{ |
342 | - .write_with_attrs = watch_mem_write, | 59 | - uint32_t insn = *(uint32_t *)host_signal_pc(uc); |
343 | - .endianness = DEVICE_NATIVE_ENDIAN, | ||
344 | - .valid = { | ||
345 | - .min_access_size = 1, | ||
346 | - .max_access_size = 8, | ||
347 | - .unaligned = false, | ||
348 | - }, | ||
349 | - .impl = { | ||
350 | - .min_access_size = 1, | ||
351 | - .max_access_size = 8, | ||
352 | - .unaligned = false, | ||
353 | - }, | ||
354 | -}; | ||
355 | - | 60 | - |
356 | static MemTxResult flatview_read(FlatView *fv, hwaddr addr, | 61 | - if ((insn >> 30) == 3) { |
357 | MemTxAttrs attrs, uint8_t *buf, hwaddr len); | 62 | - switch ((insn >> 19) & 0x3f) { |
358 | static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, | 63 | - case 0x05: /* stb */ |
359 | @@ -XXX,XX +XXX,XX @@ static void io_mem_init(void) | 64 | - case 0x15: /* stba */ |
360 | memory_region_init_io(&io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL, | 65 | - case 0x06: /* sth */ |
361 | NULL, UINT64_MAX); | 66 | - case 0x16: /* stha */ |
362 | memory_region_clear_global_locking(&io_mem_notdirty); | 67 | - case 0x04: /* st */ |
68 | - case 0x14: /* sta */ | ||
69 | - case 0x07: /* std */ | ||
70 | - case 0x17: /* stda */ | ||
71 | - case 0x0e: /* stx */ | ||
72 | - case 0x1e: /* stxa */ | ||
73 | - case 0x24: /* stf */ | ||
74 | - case 0x34: /* stfa */ | ||
75 | - case 0x27: /* stdf */ | ||
76 | - case 0x37: /* stdfa */ | ||
77 | - case 0x26: /* stqf */ | ||
78 | - case 0x36: /* stqfa */ | ||
79 | - case 0x25: /* stfsr */ | ||
80 | - case 0x3c: /* casa */ | ||
81 | - case 0x3e: /* casxa */ | ||
82 | - return true; | ||
83 | - } | ||
84 | - } | ||
85 | - return false; | ||
86 | -} | ||
363 | - | 87 | - |
364 | - memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL, | 88 | -#endif |
365 | - NULL, UINT64_MAX); | 89 | diff --git a/linux-user/include/host/sparc64/host-signal.h b/linux-user/include/host/sparc64/host-signal.h |
366 | } | 90 | index XXXXXXX..XXXXXXX 100644 |
367 | 91 | --- a/linux-user/include/host/sparc64/host-signal.h | |
368 | AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) | 92 | +++ b/linux-user/include/host/sparc64/host-signal.h |
369 | @@ -XXX,XX +XXX,XX @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) | 93 | @@ -1 +1,63 @@ |
370 | assert(n == PHYS_SECTION_NOTDIRTY); | 94 | -#include "../sparc/host-signal.h" |
371 | n = dummy_section(&d->map, fv, &io_mem_rom); | 95 | +/* |
372 | assert(n == PHYS_SECTION_ROM); | 96 | + * host-signal.h: signal info dependent on the host architecture |
373 | - n = dummy_section(&d->map, fv, &io_mem_watch); | 97 | + * |
374 | - assert(n == PHYS_SECTION_WATCH); | 98 | + * Copyright (c) 2003-2005 Fabrice Bellard |
375 | 99 | + * Copyright (c) 2021 Linaro Limited | |
376 | d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; | 100 | + * |
377 | 101 | + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. | |
102 | + * See the COPYING file in the top-level directory. | ||
103 | + */ | ||
104 | + | ||
105 | +#ifndef SPARC64_HOST_SIGNAL_H | ||
106 | +#define SPARC64_HOST_SIGNAL_H | ||
107 | + | ||
108 | +/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */ | ||
109 | +typedef ucontext_t host_sigcontext; | ||
110 | + | ||
111 | +static inline uintptr_t host_signal_pc(host_sigcontext *uc) | ||
112 | +{ | ||
113 | + return uc->uc_mcontext.mc_gregs[MC_PC]; | ||
114 | +} | ||
115 | + | ||
116 | +static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) | ||
117 | +{ | ||
118 | + uc->uc_mcontext.mc_gregs[MC_PC] = pc; | ||
119 | +} | ||
120 | + | ||
121 | +static inline void *host_signal_mask(host_sigcontext *uc) | ||
122 | +{ | ||
123 | + return &uc->uc_sigmask; | ||
124 | +} | ||
125 | + | ||
126 | +static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) | ||
127 | +{ | ||
128 | + uint32_t insn = *(uint32_t *)host_signal_pc(uc); | ||
129 | + | ||
130 | + if ((insn >> 30) == 3) { | ||
131 | + switch ((insn >> 19) & 0x3f) { | ||
132 | + case 0x05: /* stb */ | ||
133 | + case 0x15: /* stba */ | ||
134 | + case 0x06: /* sth */ | ||
135 | + case 0x16: /* stha */ | ||
136 | + case 0x04: /* st */ | ||
137 | + case 0x14: /* sta */ | ||
138 | + case 0x07: /* std */ | ||
139 | + case 0x17: /* stda */ | ||
140 | + case 0x0e: /* stx */ | ||
141 | + case 0x1e: /* stxa */ | ||
142 | + case 0x24: /* stf */ | ||
143 | + case 0x34: /* stfa */ | ||
144 | + case 0x27: /* stdf */ | ||
145 | + case 0x37: /* stdfa */ | ||
146 | + case 0x26: /* stqf */ | ||
147 | + case 0x36: /* stqfa */ | ||
148 | + case 0x25: /* stfsr */ | ||
149 | + case 0x3c: /* casa */ | ||
150 | + case 0x3e: /* casxa */ | ||
151 | + return true; | ||
152 | + } | ||
153 | + } | ||
154 | + return false; | ||
155 | +} | ||
156 | + | ||
157 | +#endif | ||
378 | -- | 158 | -- |
379 | 2.17.1 | 159 | 2.25.1 |
380 | 160 | ||
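A note on the control flow in the cputlb changes above: the watchpoint check piggybacks on the existing "any flag bit set above the page mask" test, so the fast path is untouched. The sketch below is a standalone re-creation of that dispatch, not the QEMU code; `PAGE_MASK` and the `F_*` bits are stand-ins for `TARGET_PAGE_MASK`, `TLB_MMIO` and `TLB_WATCHPOINT`.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK    ((uintptr_t)-4096)  /* assumed 4 KiB pages */
#define F_MMIO       (1u << 0)           /* stand-in for TLB_MMIO */
#define F_WATCHPOINT (1u << 1)           /* stand-in for TLB_WATCHPOINT */

static const char *classify(uintptr_t tlb_addr)
{
    if (tlb_addr & ~PAGE_MASK) {
        if (tlb_addr & F_WATCHPOINT) {
            /* The real code calls cpu_check_watchpoint() here,
             * which may longjmp out and never return. */
            tlb_addr &= ~F_WATCHPOINT;
            if ((tlb_addr & ~PAGE_MASK) == 0) {
                return "RAM (watchpoint only)";
            }
        }
        return "I/O";
    }
    return "RAM (fast path)";
}

int main(void)
{
    printf("%s\n", classify(0x1000));                         /* fast path */
    printf("%s\n", classify(0x1000 | F_WATCHPOINT));          /* RAM after check */
    printf("%s\n", classify(0x1000 | F_WATCHPOINT | F_MMIO)); /* still I/O */
    return 0;
}
```

Clearing the watchpoint bit and re-testing the remaining flags is what lets a watched RAM page fall back to the normal do_aligned_access path instead of being forced through the I/O machinery.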
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Sparc64 is unique on Linux in *not* passing ucontext_t as |
---|---|---|---|
2 | the third argument to a SA_SIGINFO handler. It passes the | ||
3 | old struct sigcontext instead. | ||
2 | 4 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | 5 | Set both pc and npc in host_signal_set_pc. |
4 | being converted into a "MemOp op". | ||
5 | 6 | ||
6 | Convert interfaces by using no-op size_memop. | 7 | Fixes: 8b5bd461935b ("linux-user/host/sparc: Populate host_signal.h") |
7 | 8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | |
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
16 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
17 | Message-Id: <21113bae2f54b45176701e0bf595937031368ae6.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
19 | --- | 10 | --- |
20 | hw/intc/armv7m_nvic.c | 12 ++++++++---- | 11 | linux-user/include/host/sparc64/host-signal.h | 17 +++++++++-------- |
21 | 1 file changed, 8 insertions(+), 4 deletions(-) | 12 | 1 file changed, 9 insertions(+), 8 deletions(-) |
22 | 13 | ||
23 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | 14 | diff --git a/linux-user/include/host/sparc64/host-signal.h b/linux-user/include/host/sparc64/host-signal.h |
24 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/hw/intc/armv7m_nvic.c | 16 | --- a/linux-user/include/host/sparc64/host-signal.h |
26 | +++ b/hw/intc/armv7m_nvic.c | 17 | +++ b/linux-user/include/host/sparc64/host-signal.h |
27 | @@ -XXX,XX +XXX,XX @@ | 18 | @@ -XXX,XX +XXX,XX @@ |
28 | #include "hw/qdev-properties.h" | 19 | #ifndef SPARC64_HOST_SIGNAL_H |
29 | #include "target/arm/cpu.h" | 20 | #define SPARC64_HOST_SIGNAL_H |
30 | #include "exec/exec-all.h" | 21 | |
31 | +#include "exec/memop.h" | 22 | -/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */ |
32 | #include "qemu/log.h" | 23 | -typedef ucontext_t host_sigcontext; |
33 | #include "qemu/module.h" | 24 | +/* The third argument to a SA_SIGINFO handler is struct sigcontext. */ |
34 | #include "trace.h" | 25 | +typedef struct sigcontext host_sigcontext; |
35 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr, | 26 | |
36 | if (attrs.secure) { | 27 | -static inline uintptr_t host_signal_pc(host_sigcontext *uc) |
37 | /* S accesses to the alias act like NS accesses to the real region */ | 28 | +static inline uintptr_t host_signal_pc(host_sigcontext *sc) |
38 | attrs.secure = 0; | 29 | { |
39 | - return memory_region_dispatch_write(mr, addr, value, size, attrs); | 30 | - return uc->uc_mcontext.mc_gregs[MC_PC]; |
40 | + return memory_region_dispatch_write(mr, addr, value, size_memop(size), | 31 | + return sc->sigc_regs.tpc; |
41 | + attrs); | ||
42 | } else { | ||
43 | /* NS attrs are RAZ/WI for privileged, and BusFault for user */ | ||
44 | if (attrs.user) { | ||
45 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr, | ||
46 | if (attrs.secure) { | ||
47 | /* S accesses to the alias act like NS accesses to the real region */ | ||
48 | attrs.secure = 0; | ||
49 | - return memory_region_dispatch_read(mr, addr, data, size, attrs); | ||
50 | + return memory_region_dispatch_read(mr, addr, data, size_memop(size), | ||
51 | + attrs); | ||
52 | } else { | ||
53 | /* NS attrs are RAZ/WI for privileged, and BusFault for user */ | ||
54 | if (attrs.user) { | ||
55 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_systick_write(void *opaque, hwaddr addr, | ||
56 | |||
57 | /* Direct the access to the correct systick */ | ||
58 | mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); | ||
59 | - return memory_region_dispatch_write(mr, addr, value, size, attrs); | ||
60 | + return memory_region_dispatch_write(mr, addr, value, size_memop(size), | ||
61 | + attrs); | ||
62 | } | 32 | } |
63 | 33 | ||
64 | static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, | 34 | -static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc) |
65 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, | 35 | +static inline void host_signal_set_pc(host_sigcontext *sc, uintptr_t pc) |
66 | 36 | { | |
67 | /* Direct the access to the correct systick */ | 37 | - uc->uc_mcontext.mc_gregs[MC_PC] = pc; |
68 | mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); | 38 | + sc->sigc_regs.tpc = pc; |
69 | - return memory_region_dispatch_read(mr, addr, data, size, attrs); | 39 | + sc->sigc_regs.tnpc = pc + 4; |
70 | + return memory_region_dispatch_read(mr, addr, data, size_memop(size), attrs); | ||
71 | } | 40 | } |
72 | 41 | ||
73 | static const MemoryRegionOps nvic_systick_ops = { | 42 | -static inline void *host_signal_mask(host_sigcontext *uc) |
43 | +static inline void *host_signal_mask(host_sigcontext *sc) | ||
44 | { | ||
45 | - return &uc->uc_sigmask; | ||
46 | + return &sc->sigc_mask; | ||
47 | } | ||
48 | |||
49 | static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc) | ||
74 | -- | 50 | -- |
75 | 2.17.1 | 51 | 2.25.1 |
76 | 52 | ||
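The size_memop conversion above happens in two stages, which is easy to lose across the per-file patches: first a no-op shim keeps behaviour identical while every caller is touched, then the shim becomes a real size-to-MemOp mapping. The sketch below is an illustrative standalone rendering of both stages; the final QEMU implementation is close to the log2 mapping shown, but treat the details here as assumptions.

```c
#include <assert.h>
#include <stdio.h>

typedef enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3 } MemOp;

/* Stage 1: a no-op, so "unsigned size" callers compile and behave
 * identically while the operand type is migrated. */
static inline unsigned size_memop_noop(unsigned size) { return size; }

/* Stage 2 (sketch): the real mapping from byte count to MO_*.
 * Access sizes are powers of two, so this is just log2. */
static inline MemOp size_memop(unsigned size)
{
    assert(size == 1 || size == 2 || size == 4 || size == 8);
    return (MemOp)__builtin_ctz(size);
}

int main(void)
{
    assert(size_memop_noop(4) == 4);        /* stage 1: unchanged */
    assert(size_memop(1) == MO_8);
    assert(size_memop(4) == MO_32);
    assert(size_memop(8) == MO_64);
    printf("size 4 -> MemOp %d (MO_32)\n", size_memop(4));
    return 0;
}
```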
1 | From: David Hildenbrand <david@redhat.com> | 1 | From: Idan Horowitz <idan.horowitz@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | Let size > 0 indicate a promise to write to those bytes. | 3 | When the length of the range is large enough, clearing the whole cache is |
4 | Check for write watchpoints in the probed range. | 4 | faster than iterating over the (possibly extremely large) set of pages |
5 | contained in the range. | ||
5 | 6 | ||
6 | Suggested-by: Richard Henderson <richard.henderson@linaro.org> | 7 | This mimics the pre-existing similar optimization done on the flush of the |
8 | tlb itself. | ||
9 | |||
10 | Signed-off-by: Idan Horowitz <idan.horowitz@gmail.com> | ||
11 | Message-Id: <20220110164754.1066025-1-idan.horowitz@gmail.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
8 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
9 | Message-Id: <20190823100741.9621-10-david@redhat.com> | ||
10 | [rth: Recompute index after tlb_fill; check TLB_WATCHPOINT.] | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | --- | 14 | --- |
13 | accel/tcg/cputlb.c | 15 +++++++++++++-- | 15 | accel/tcg/cputlb.c | 9 +++++++++ |
14 | 1 file changed, 13 insertions(+), 2 deletions(-) | 16 | 1 file changed, 9 insertions(+) |
15 | 17 | ||
16 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 18 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c |
17 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/accel/tcg/cputlb.c | 20 | --- a/accel/tcg/cputlb.c |
19 | +++ b/accel/tcg/cputlb.c | 21 | +++ b/accel/tcg/cputlb.c |
20 | @@ -XXX,XX +XXX,XX @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 22 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, |
21 | { | 23 | } |
22 | uintptr_t index = tlb_index(env, mmu_idx, addr); | 24 | qemu_spin_unlock(&env_tlb(env)->c.lock); |
23 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | 25 | |
24 | + target_ulong tlb_addr = tlb_addr_write(entry); | 26 | + /* |
25 | 27 | + * If the length is larger than the jump cache size, then it will take | |
26 | - if (!tlb_hit(tlb_addr_write(entry), addr)) { | 28 | + * longer to clear each entry individually than it will to clear it all. |
27 | - /* TLB entry is for a different page */ | 29 | + */ |
28 | + if (unlikely(!tlb_hit(tlb_addr, addr))) { | 30 | + if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) { |
29 | if (!VICTIM_TLB_HIT(addr_write, addr)) { | 31 | + cpu_tb_jmp_cache_clear(cpu); |
30 | tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, | 32 | + return; |
31 | mmu_idx, retaddr); | ||
32 | + /* TLB resize via tlb_fill may have moved the entry. */ | ||
33 | + index = tlb_index(env, mmu_idx, addr); | ||
34 | + entry = tlb_entry(env, mmu_idx, addr); | ||
35 | } | ||
36 | + tlb_addr = tlb_addr_write(entry); | ||
37 | + } | 33 | + } |
38 | + | 34 | + |
39 | + /* Handle watchpoints. */ | 35 | for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) { |
40 | + if ((tlb_addr & TLB_WATCHPOINT) && size > 0) { | 36 | tb_flush_jmp_cache(cpu, d.addr + i); |
41 | + cpu_check_watchpoint(env_cpu(env), addr, size, | ||
42 | + env_tlb(env)->d[mmu_idx].iotlb[index].attrs, | ||
43 | + BP_MEM_WRITE, retaddr); | ||
44 | } | 37 | } |
45 | } | ||
46 | |||
47 | -- | 38 | -- |
48 | 2.17.1 | 39 | 2.25.1 |
49 | 40 | ||
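The jump-cache patch above is a straight break-even calculation: per-page flushing costs one cache pass per page in the range, so once the range spans at least TB_JMP_CACHE_SIZE pages, a single full clear is cheaper. A minimal sketch of the threshold, assuming 4 KiB pages and QEMU's 4096-entry jump cache:

```c
#include <stdbool.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE   4096UL        /* assumed */
#define TB_JMP_CACHE_SIZE  (1UL << 12)   /* 4096 entries */

/* Same break-even test as the patch: clearing one entry per page
 * stops paying off once the range covers at least as many pages
 * as the cache has entries. */
static bool flush_whole_cache(unsigned long len)
{
    return len >= TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE;
}

int main(void)
{
    /* 4096 entries * 4 KiB pages = 16 MiB break-even point */
    printf("1 MiB range : %s\n", flush_whole_cache(1UL << 20) ? "full" : "per page");
    printf("16 MiB range: %s\n", flush_whole_cache(1UL << 24) ? "full" : "per page");
    return 0;
}
```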
1 | We have already aligned page2 to the start of the next page. | 1 | From: Idan Horowitz <idan.horowitz@gmail.com> |
---|---|---|---|
2 | There is no reason to do that a second time. | ||
3 | 2 | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | 3 | Instead of taking the lock of the cpu work list in order to check if it's |
5 | Reviewed-by: David Hildenbrand <david@redhat.com> | 4 | empty, we can just read the head pointer atomically. This decreases |
5 | cpu_work_list_empty's share from 5% to 1.3% in a profile of icount-enabled | ||
6 | aarch64-softmmu. | ||
7 | |||
8 | Signed-off-by: Idan Horowitz <idan.horowitz@gmail.com> | ||
9 | Message-Id: <20220114004358.299534-1-idan.horowitz@gmail.com> | ||
10 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
7 | --- | 12 | --- |
8 | accel/tcg/cputlb.c | 3 +-- | 13 | softmmu/cpus.c | 7 +------ |
9 | 1 file changed, 1 insertion(+), 2 deletions(-) | 14 | 1 file changed, 1 insertion(+), 6 deletions(-) |
10 | 15 | ||
11 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 16 | diff --git a/softmmu/cpus.c b/softmmu/cpus.c |
12 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/accel/tcg/cputlb.c | 18 | --- a/softmmu/cpus.c |
14 | +++ b/accel/tcg/cputlb.c | 19 | +++ b/softmmu/cpus.c |
15 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | 20 | @@ -XXX,XX +XXX,XX @@ bool cpu_is_stopped(CPUState *cpu) |
16 | entry2 = tlb_entry(env, mmu_idx, page2); | 21 | |
17 | tlb_addr2 = tlb_addr_write(entry2); | 22 | bool cpu_work_list_empty(CPUState *cpu) |
18 | if (!tlb_hit_page(tlb_addr2, page2) | 23 | { |
19 | - && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, | 24 | - bool ret; |
20 | - page2 & TARGET_PAGE_MASK)) { | 25 | - |
21 | + && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { | 26 | - qemu_mutex_lock(&cpu->work_mutex); |
22 | tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, | 27 | - ret = QSIMPLEQ_EMPTY(&cpu->work_list); |
23 | mmu_idx, retaddr); | 28 | - qemu_mutex_unlock(&cpu->work_mutex); |
24 | } | 29 | - return ret; |
30 | + return QSIMPLEQ_EMPTY_ATOMIC(&cpu->work_list); | ||
31 | } | ||
32 | |||
33 | bool cpu_thread_is_idle(CPUState *cpu) | ||
25 | -- | 34 | -- |
26 | 2.17.1 | 35 | 2.25.1 |
27 | 36 | ||
28 | 37 | diff view generated by jsdifflib |
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | From: Pavel Dovgalyuk <pavel.dovgalyuk@ispras.ru> |
---|---|---|---|
2 | 2 | ||
3 | Temporarily no-op size_memop was introduced to aid the conversion of | 3 | Commit aff0e204cb1f1c036a496c94c15f5dfafcd9b4b4 introduced CF_NOIRQ usage, |
4 | memory_region_dispatch_{read|write} operand "unsigned size" into | 4 | but one case was forgotten. Record/replay uses one special TB which is not |
5 | "MemOp op". | 5 | really executed, but used to cause a correct exception in replay mode. |
6 | This patch adds CF_NOIRQ flag for such block. | ||
6 | 7 | ||
7 | Now size_memop is implemented, again hard coded size but with | 8 | Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru> |
8 | MO_{8|16|32|64}. This is more expressive and avoids size_memop calls. | ||
9 | |||
10 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
12 | Message-Id: <99f69701cad294db638f84abebc58115e1b9de9a.1566466906.git.tony.nguyen@bt.com> | 10 | Message-Id: <164362834054.1754532.7678416881159817273.stgit@pasha-ThinkPad-X280> |
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
14 | --- | 12 | --- |
15 | memory_ldst.inc.c | 18 +++++++++--------- | 13 | accel/tcg/cpu-exec.c | 3 ++- |
16 | 1 file changed, 9 insertions(+), 9 deletions(-) | 14 | 1 file changed, 2 insertions(+), 1 deletion(-) |
17 | 15 | ||
18 | diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c | 16 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c |
19 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/memory_ldst.inc.c | 18 | --- a/accel/tcg/cpu-exec.c |
21 | +++ b/memory_ldst.inc.c | 19 | +++ b/accel/tcg/cpu-exec.c |
22 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, | 20 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) |
23 | release_lock |= prepare_mmio_access(mr); | 21 | if (replay_has_exception() |
24 | 22 | && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) { | |
25 | /* I/O case */ | 23 | /* Execute just one insn to trigger exception pending in the log */ |
26 | - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(4), attrs); | 24 | - cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1; |
27 | + r = memory_region_dispatch_read(mr, addr1, &val, MO_32, attrs); | 25 | + cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) |
28 | #if defined(TARGET_WORDS_BIGENDIAN) | 26 | + | CF_NOIRQ | 1; |
29 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
30 | val = bswap32(val); | ||
31 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, | ||
32 | release_lock |= prepare_mmio_access(mr); | ||
33 | |||
34 | /* I/O case */ | ||
35 | - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(8), attrs); | ||
36 | + r = memory_region_dispatch_read(mr, addr1, &val, MO_64, attrs); | ||
37 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
38 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
39 | val = bswap64(val); | ||
40 | @@ -XXX,XX +XXX,XX @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, | ||
41 | release_lock |= prepare_mmio_access(mr); | ||
42 | |||
43 | /* I/O case */ | ||
44 | - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(1), attrs); | ||
45 | + r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs); | ||
46 | } else { | ||
47 | /* RAM case */ | ||
48 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
49 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, | ||
50 | release_lock |= prepare_mmio_access(mr); | ||
51 | |||
52 | /* I/O case */ | ||
53 | - r = memory_region_dispatch_read(mr, addr1, &val, size_memop(2), attrs); | ||
54 | + r = memory_region_dispatch_read(mr, addr1, &val, MO_16, attrs); | ||
55 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
56 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
57 | val = bswap16(val); | ||
58 | @@ -XXX,XX +XXX,XX @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, | ||
59 | if (l < 4 || !memory_access_is_direct(mr, true)) { | ||
60 | release_lock |= prepare_mmio_access(mr); | ||
61 | |||
62 | - r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); | ||
63 | + r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs); | ||
64 | } else { | ||
65 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
66 | stl_p(ptr, val); | ||
67 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, | ||
68 | val = bswap32(val); | ||
69 | } | 27 | } |
70 | #endif | 28 | #endif |
71 | - r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); | 29 | return false; |
72 | + r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs); | ||
73 | } else { | ||
74 | /* RAM case */ | ||
75 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
76 | @@ -XXX,XX +XXX,XX @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL, | ||
77 | mr = TRANSLATE(addr, &addr1, &l, true, attrs); | ||
78 | if (!memory_access_is_direct(mr, true)) { | ||
79 | release_lock |= prepare_mmio_access(mr); | ||
80 | - r = memory_region_dispatch_write(mr, addr1, val, size_memop(1), attrs); | ||
81 | + r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs); | ||
82 | } else { | ||
83 | /* RAM case */ | ||
84 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
85 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, | ||
86 | val = bswap16(val); | ||
87 | } | ||
88 | #endif | ||
89 | - r = memory_region_dispatch_write(mr, addr1, val, size_memop(2), attrs); | ||
90 | + r = memory_region_dispatch_write(mr, addr1, val, MO_16, attrs); | ||
91 | } else { | ||
92 | /* RAM case */ | ||
93 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
94 | @@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, | ||
95 | val = bswap64(val); | ||
96 | } | ||
97 | #endif | ||
98 | - r = memory_region_dispatch_write(mr, addr1, val, size_memop(8), attrs); | ||
99 | + r = memory_region_dispatch_write(mr, addr1, val, MO_64, attrs); | ||
100 | } else { | ||
101 | /* RAM case */ | ||
102 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
103 | -- | 30 | -- |
104 | 2.17.1 | 31 | 2.25.1 |
105 | 32 | ||
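The cflags composition in the replay fix above packs three things into one word: a one-instruction budget in the low bits, icount accounting stripped, and CF_NOIRQ set so no interrupt can preempt the special TB before the recorded exception fires. The flag values below are illustrative stand-ins, not the real exec-all.h constants.

```c
#include <stdint.h>
#include <stdio.h>

#define CF_COUNT_MASK  0x000001ffu   /* illustrative: low bits = insn budget */
#define CF_USE_ICOUNT  0x00020000u   /* illustrative */
#define CF_NOIRQ       0x00100000u   /* illustrative */

int main(void)
{
    uint32_t cur = CF_USE_ICOUNT;    /* pretend curr_cflags() result */

    /* Same shape as the patch: drop icount, mask IRQs, budget = 1. */
    uint32_t next = (cur & ~CF_USE_ICOUNT) | CF_NOIRQ | 1;

    printf("budget=%u noirq=%s icount=%s\n",
           next & CF_COUNT_MASK,
           (next & CF_NOIRQ) ? "yes" : "no",
           (next & CF_USE_ICOUNT) ? "yes" : "no");
    return 0;
}
```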
1 | From: David Hildenbrand <david@redhat.com> | 1 | From: WANG Xuerui <git@xen0n.name> |
---|---|---|---|
2 | 2 | ||
3 | Hm... how did that "-" slip in (-TARGET_PAGE_SIZE would be correct). This | 3 | Apparently we were left behind; just renaming MO_Q to MO_UQ is enough. |
4 | currently makes us exceed one page in a single probe_write() call, | ||
5 | essentially leaving some memory unchecked. | ||
6 | 4 | ||
7 | Fixes: c5a7392cfb96 ("s390x/tcg: Provide probe_write_access helper") | 5 | Fixes: fc313c64345453c7 ("exec/memop: Adding signedness to quad definitions") |
8 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: WANG Xuerui <git@xen0n.name> |
9 | Signed-off-by: David Hildenbrand <david@redhat.com> | 7 | Message-Id: <20220206162106.1092364-1-i.qemu@xen0n.name> |
10 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
11 | Message-Id: <20190826075112.25637-3-david@redhat.com> | ||
12 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
13 | --- | 9 | --- |
14 | target/s390x/mem_helper.c | 2 +- | 10 | tcg/loongarch64/tcg-target.c.inc | 2 +- |
15 | 1 file changed, 1 insertion(+), 1 deletion(-) | 11 | 1 file changed, 1 insertion(+), 1 deletion(-) |
16 | 12 | ||
17 | diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c | 13 | diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc |
18 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/s390x/mem_helper.c | 15 | --- a/tcg/loongarch64/tcg-target.c.inc |
20 | +++ b/target/s390x/mem_helper.c | 16 | +++ b/tcg/loongarch64/tcg-target.c.inc |
21 | @@ -XXX,XX +XXX,XX @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, | 17 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj, |
22 | #else | 18 | case MO_SL: |
23 | /* test the actual access, not just any access to the page due to LAP */ | 19 | tcg_out_opc_ldx_w(s, rd, rj, rk); |
24 | while (len) { | 20 | break; |
25 | - const uint64_t pagelen = -(addr | -TARGET_PAGE_MASK); | 21 | - case MO_Q: |
26 | + const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); | 22 | + case MO_UQ: |
27 | const uint64_t curlen = MIN(pagelen, len); | 23 | tcg_out_opc_ldx_d(s, rd, rj, rk); |
28 | 24 | break; | |
29 | probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra); | 25 | default: |
30 | -- | 26 | -- |
31 | 2.17.1 | 27 | 2.25.1 |
32 | 28 | ||
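The one-character s390x fix above is worth unpacking. With TARGET_PAGE_MASK being all-ones above the page offset, -(addr | TARGET_PAGE_MASK) is exactly the number of bytes from addr to the end of its page, while the spurious extra minus turned the mask into +TARGET_PAGE_SIZE and broke the clamp. A self-contained check, assuming 4 KiB pages:

```c
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096u
#define TARGET_PAGE_MASK (~(uint64_t)(TARGET_PAGE_SIZE - 1))

int main(void)
{
    uint64_t addr = 0x1234;

    /* OR-ing in the mask sets every bit above the in-page offset,
     * so two's-complement negation leaves "bytes left in page". */
    uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
    assert(pagelen == TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK));
    printf("pagelen = 0x%" PRIx64 "\n", pagelen);   /* 0xdcc */

    /* With the spurious minus, the "mask" is just +0x1000 and the
     * result no longer clamps the access to one page. */
    uint64_t buggy = -(addr | -TARGET_PAGE_MASK);
    printf("buggy   = 0x%" PRIx64 "\n", buggy);     /* 0xffffffffffffedcc */
    return 0;
}
```

With the buggy value, curlen = MIN(pagelen, len) degenerates to len, which is how a single probe_write() call could silently span past the probed page.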
1 | From: David Hildenbrand <david@redhat.com> | 1 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
---|---|---|---|
2 | |||
3 | Let's call it also for CONFIG_USER_ONLY. While at it, add a FIXME and get | ||
4 | rid of one local variable. | ||
5 | |||
6 | MIPS code probably needs a bigger refactoring in regards of | ||
7 | ensure_writable_pages(), similar to s390x, so for example, watchpoints | ||
8 | can be handled reliably later. The actually accessed addresses should | ||
9 | be probed only, not full pages. | ||
10 | |||
11 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
12 | Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com> | ||
13 | Message-Id: <20190826075112.25637-6-david@redhat.com> | ||
14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
15 | --- | 3 | --- |
16 | target/mips/op_helper.c | 8 +++----- | 4 | tcg/i386/tcg-target.h | 2 - |
17 | 1 file changed, 3 insertions(+), 5 deletions(-) | 5 | tcg/i386/tcg-target.c.inc | 103 ++++++++++++++++++++++++++++++++++++-- |
6 | 2 files changed, 98 insertions(+), 7 deletions(-) | ||
18 | 7 | ||
19 | diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c | 8 | diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h |
20 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/mips/op_helper.c | 10 | --- a/tcg/i386/tcg-target.h |
22 | +++ b/target/mips/op_helper.c | 11 | +++ b/tcg/i386/tcg-target.h |
23 | @@ -XXX,XX +XXX,XX @@ static inline void ensure_writable_pages(CPUMIPSState *env, | 12 | @@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, |
24 | int mmu_idx, | 13 | |
25 | uintptr_t retaddr) | 14 | #define TCG_TARGET_HAS_MEMORY_BSWAP have_movbe |
26 | { | 15 | |
27 | -#if !defined(CONFIG_USER_ONLY) | 16 | -#ifdef CONFIG_SOFTMMU |
28 | - target_ulong page_addr; | 17 | #define TCG_TARGET_NEED_LDST_LABELS |
29 | + /* FIXME: Probe the actual accesses (pass and use a size) */ | ||
30 | if (unlikely(MSA_PAGESPAN(addr))) { | ||
31 | /* first page */ | ||
32 | probe_write(env, addr, 0, mmu_idx, retaddr); | ||
33 | /* second page */ | ||
34 | - page_addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; | ||
35 | - probe_write(env, page_addr, 0, mmu_idx, retaddr); | ||
36 | + addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; | ||
37 | + probe_write(env, addr, 0, mmu_idx, retaddr); | ||
38 | } | ||
39 | -#endif | 18 | -#endif |
19 | #define TCG_TARGET_NEED_POOL_LABELS | ||
20 | |||
21 | #endif | ||
22 | diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/tcg/i386/tcg-target.c.inc | ||
25 | +++ b/tcg/i386/tcg-target.c.inc | ||
26 | @@ -XXX,XX +XXX,XX @@ | ||
27 | * THE SOFTWARE. | ||
28 | */ | ||
29 | |||
30 | +#include "../tcg-ldst.c.inc" | ||
31 | #include "../tcg-pool.c.inc" | ||
32 | |||
33 | #ifdef CONFIG_DEBUG_TCG | ||
34 | @@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
35 | #define OPC_VZEROUPPER (0x77 | P_EXT) | ||
36 | #define OPC_XCHG_ax_r32 (0x90) | ||
37 | |||
38 | -#define OPC_GRP3_Ev (0xf7) | ||
39 | -#define OPC_GRP5 (0xff) | ||
40 | +#define OPC_GRP3_Eb (0xf6) | ||
41 | +#define OPC_GRP3_Ev (0xf7) | ||
42 | +#define OPC_GRP5 (0xff) | ||
43 | #define OPC_GRP14 (0x73 | P_EXT | P_DATA16) | ||
44 | |||
45 | /* Group 1 opcode extensions for 0x80-0x83. | ||
46 | @@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) | ||
47 | #define SHIFT_SAR 7 | ||
48 | |||
49 | /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */ | ||
50 | +#define EXT3_TESTi 0 | ||
51 | #define EXT3_NOT 2 | ||
52 | #define EXT3_NEG 3 | ||
53 | #define EXT3_MUL 4 | ||
54 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n) | ||
40 | } | 55 | } |
41 | 56 | ||
42 | void helper_msa_st_b(CPUMIPSState *env, uint32_t wd, | 57 | #if defined(CONFIG_SOFTMMU) |
58 | -#include "../tcg-ldst.c.inc" | ||
59 | - | ||
60 | /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, | ||
61 | * int mmu_idx, uintptr_t ra) | ||
62 | */ | ||
63 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
64 | tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); | ||
65 | return true; | ||
66 | } | ||
67 | -#elif TCG_TARGET_REG_BITS == 32 | ||
68 | +#else | ||
69 | + | ||
70 | +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, | ||
71 | + TCGReg addrhi, unsigned a_bits) | ||
72 | +{ | ||
73 | + unsigned a_mask = (1 << a_bits) - 1; | ||
74 | + TCGLabelQemuLdst *label; | ||
75 | + | ||
76 | + /* | ||
77 | + * We are expecting a_bits to max out at 7, so we can usually use testb. | ||
78 | + * For i686, we have to use testl for %esi/%edi. | ||
79 | + */ | ||
80 | + if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) { | ||
81 | + tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo); | ||
82 | + tcg_out8(s, a_mask); | ||
83 | + } else { | ||
84 | + tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo); | ||
85 | + tcg_out32(s, a_mask); | ||
86 | + } | ||
87 | + | ||
88 | + /* jne slow_path */ | ||
89 | + tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); | ||
90 | + | ||
91 | + label = new_ldst_label(s); | ||
92 | + label->is_ld = is_ld; | ||
93 | + label->addrlo_reg = addrlo; | ||
94 | + label->addrhi_reg = addrhi; | ||
95 | + label->raddr = tcg_splitwx_to_rx(s->code_ptr + 4); | ||
96 | + label->label_ptr[0] = s->code_ptr; | ||
97 | + | ||
98 | + s->code_ptr += 4; | ||
99 | +} | ||
100 | + | ||
101 | +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) | ||
102 | +{ | ||
103 | + /* resolve label address */ | ||
104 | + tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4); | ||
105 | + | ||
106 | + if (TCG_TARGET_REG_BITS == 32) { | ||
107 | + int ofs = 0; | ||
108 | + | ||
109 | + tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); | ||
110 | + ofs += 4; | ||
111 | + | ||
112 | + tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); | ||
113 | + ofs += 4; | ||
114 | + if (TARGET_LONG_BITS == 64) { | ||
115 | + tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); | ||
116 | + ofs += 4; | ||
117 | + } | ||
118 | + | ||
119 | + tcg_out_pushi(s, (uintptr_t)l->raddr); | ||
120 | + } else { | ||
121 | + tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1], | ||
122 | + l->addrlo_reg); | ||
123 | + tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); | ||
124 | + | ||
125 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr); | ||
126 | + tcg_out_push(s, TCG_REG_RAX); | ||
127 | + } | ||
128 | + | ||
129 | + /* "Tail call" to the helper, with the return address back inline. */ | ||
130 | + tcg_out_jmp(s, (const void *)(l->is_ld ? helper_unaligned_ld | ||
131 | + : helper_unaligned_st)); | ||
132 | + return true; | ||
133 | +} | ||
134 | + | ||
135 | +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
136 | +{ | ||
137 | + return tcg_out_fail_alignment(s, l); | ||
138 | +} | ||
139 | + | ||
140 | +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
141 | +{ | ||
142 | + return tcg_out_fail_alignment(s, l); | ||
143 | +} | ||
144 | + | ||
145 | +#if TCG_TARGET_REG_BITS == 32 | ||
146 | # define x86_guest_base_seg 0 | ||
147 | # define x86_guest_base_index -1 | ||
148 | # define x86_guest_base_offset guest_base | ||
149 | @@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void) | ||
150 | return 0; | ||
151 | } | ||
152 | # endif | ||
153 | +#endif | ||
154 | #endif /* SOFTMMU */ | ||
155 | |||
156 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
157 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
158 | #if defined(CONFIG_SOFTMMU) | ||
159 | int mem_index; | ||
160 | tcg_insn_unit *label_ptr[2]; | ||
161 | +#else | ||
162 | + unsigned a_bits; | ||
163 | #endif | ||
164 | |||
165 | datalo = *args++; | ||
166 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
167 | add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi, | ||
168 | s->code_ptr, label_ptr); | ||
169 | #else | ||
170 | + a_bits = get_alignment_bits(opc); | ||
171 | + if (a_bits) { | ||
172 | + tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
173 | + } | ||
174 | + | ||
175 | tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index, | ||
176 | x86_guest_base_offset, x86_guest_base_seg, | ||
177 | is64, opc); | ||
178 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
179 | #if defined(CONFIG_SOFTMMU) | ||
180 | int mem_index; | ||
181 | tcg_insn_unit *label_ptr[2]; | ||
182 | +#else | ||
183 | + unsigned a_bits; | ||
184 | #endif | ||
185 | |||
186 | datalo = *args++; | ||
187 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
188 | add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi, | ||
189 | s->code_ptr, label_ptr); | ||
190 | #else | ||
191 | + a_bits = get_alignment_bits(opc); | ||
192 | + if (a_bits) { | ||
193 | + tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); | ||
194 | + } | ||
195 | + | ||
196 | tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index, | ||
197 | x86_guest_base_offset, x86_guest_base_seg, opc); | ||
198 | #endif | ||
43 | -- | 199 | -- |
44 | 2.17.1 | 200 | 2.25.1 |
45 | 201 | ||
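For the user-only path added in the i386 patch above, the generated fast path is just "test $a_mask, addr; jne slow_path", with the slow path tail-calling helper_unaligned_ld/st to raise the guest SIGBUS. A C-level equivalent of the emitted check (function and variable names here are for illustration only):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the JIT'd fast-path test: a_bits is log2 of the required
 * alignment, so the mask is the low bits that must all be zero. */
static bool misaligned(uint64_t addr, unsigned a_bits)
{
    uint64_t a_mask = (1ull << a_bits) - 1;
    return (addr & a_mask) != 0;    /* any low bit set -> slow path */
}

int main(void)
{
    printf("%d\n", misaligned(0x1000, 3));  /* 0: 8-byte aligned */
    printf("%d\n", misaligned(0x1004, 3));  /* 1: slow path, SIGBUS */
    printf("%d\n", misaligned(0x1004, 2));  /* 0: 4-byte aligned */
    return 0;
}
```

The testb-versus-testl distinction in tcg_out_test_alignment exists because 32-bit x86 cannot address the low byte of %esi/%edi without a REX prefix, so those registers fall back to a 32-bit test with a wider immediate.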
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
---|---|---|---|
2 | |||
3 | Preparation for collapsing the two byte swaps, adjust_endianness and | ||
4 | handle_bswap, along the I/O path. | ||
5 | |||
6 | Target dependant attributes are conditionalized upon NEED_CPU_H. | ||
7 | |||
8 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
9 | Acked-by: David Gibson <david@gibson.dropbear.id.au> | ||
10 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | Acked-by: Cornelia Huck <cohuck@redhat.com> | ||
12 | Message-Id: <81d9cd7d7f5aaadfa772d6c48ecee834e9cf7882.1566466906.git.tony.nguyen@bt.com> | ||
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
14 | --- | 3 | --- |
15 | include/exec/memop.h | 110 ++++++++++++++++++++ | 4 | tcg/aarch64/tcg-target.h | 2 - |
16 | target/arm/translate-a64.h | 2 +- | 5 | tcg/aarch64/tcg-target.c.inc | 91 +++++++++++++++++++++++++++++------- |
17 | target/arm/translate.h | 2 +- | 6 | 2 files changed, 74 insertions(+), 19 deletions(-) |
18 | tcg/tcg-op.h | 80 +++++++------- | ||
19 | tcg/tcg.h | 101 ++---------------- | ||
20 | trace/mem-internal.h | 4 +- | ||
21 | trace/mem.h | 4 +- | ||
22 | accel/tcg/cputlb.c | 2 +- | ||
23 | target/alpha/translate.c | 2 +- | ||
24 | target/arm/translate-a64.c | 48 ++++----- | ||
25 | target/arm/translate-sve.c | 2 +- | ||
26 | target/arm/translate.c | 32 +++--- | ||
27 | target/hppa/translate.c | 14 +-- | ||
28 | target/i386/translate.c | 132 ++++++++++++------------ | ||
29 | target/m68k/translate.c | 2 +- | ||
30 | target/microblaze/translate.c | 4 +- | ||
31 | target/mips/translate.c | 8 +- | ||
32 | target/openrisc/translate.c | 4 +- | ||
33 | target/ppc/translate.c | 12 +-- | ||
34 | target/riscv/insn_trans/trans_rva.inc.c | 8 +- | ||
35 | target/riscv/insn_trans/trans_rvi.inc.c | 4 +- | ||
36 | target/s390x/translate.c | 6 +- | ||
37 | target/s390x/translate_vx.inc.c | 10 +- | ||
38 | target/sparc/translate.c | 14 +-- | ||
39 | target/tilegx/translate.c | 10 +- | ||
40 | target/tricore/translate.c | 8 +- | ||
41 | tcg/aarch64/tcg-target.inc.c | 26 ++--- | ||
42 | tcg/arm/tcg-target.inc.c | 26 ++--- | ||
43 | tcg/i386/tcg-target.inc.c | 24 ++--- | ||
44 | tcg/mips/tcg-target.inc.c | 16 +-- | ||
45 | tcg/optimize.c | 2 +- | ||
46 | tcg/ppc/tcg-target.inc.c | 12 +-- | ||
47 | tcg/riscv/tcg-target.inc.c | 20 ++-- | ||
48 | tcg/s390/tcg-target.inc.c | 14 +-- | ||
49 | tcg/sparc/tcg-target.inc.c | 6 +- | ||
50 | tcg/tcg-op.c | 38 +++---- | ||
51 | tcg/tcg.c | 2 +- | ||
52 | MAINTAINERS | 1 + | ||
53 | tcg/README | 2 +- | ||
54 | 39 files changed, 418 insertions(+), 396 deletions(-) | ||
55 | create mode 100644 include/exec/memop.h | ||
56 | 7 | ||
57 | diff --git a/include/exec/memop.h b/include/exec/memop.h | 8 | diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h |
58 | new file mode 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
59 | index XXXXXXX..XXXXXXX | 10 | --- a/tcg/aarch64/tcg-target.h |
60 | --- /dev/null | 11 | +++ b/tcg/aarch64/tcg-target.h |
61 | +++ b/include/exec/memop.h | 12 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
13 | |||
14 | void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); | ||
15 | |||
16 | -#ifdef CONFIG_SOFTMMU | ||
17 | #define TCG_TARGET_NEED_LDST_LABELS | ||
18 | -#endif | ||
19 | #define TCG_TARGET_NEED_POOL_LABELS | ||
20 | |||
21 | #endif /* AARCH64_TCG_TARGET_H */ | ||
22 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/tcg/aarch64/tcg-target.c.inc | ||
25 | +++ b/tcg/aarch64/tcg-target.c.inc | ||
62 | @@ -XXX,XX +XXX,XX @@ | 26 | @@ -XXX,XX +XXX,XX @@ |
63 | +/* | 27 | * See the COPYING file in the top-level directory for details. |
64 | + * Constants for memory operations | ||
65 | + * | ||
66 | + * Authors: | ||
67 | + * Richard Henderson <rth@twiddle.net> | ||
68 | + * | ||
69 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
70 | + * See the COPYING file in the top-level directory. | ||
71 | + * | ||
72 | + */ | ||
73 | + | ||
74 | +#ifndef MEMOP_H | ||
75 | +#define MEMOP_H | ||
76 | + | ||
77 | +typedef enum MemOp { | ||
78 | + MO_8 = 0, | ||
79 | + MO_16 = 1, | ||
80 | + MO_32 = 2, | ||
81 | + MO_64 = 3, | ||
82 | + MO_SIZE = 3, /* Mask for the above. */ | ||
83 | + | ||
84 | + MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ | ||
85 | + | ||
86 | + MO_BSWAP = 8, /* Host reverse endian. */ | ||
87 | +#ifdef HOST_WORDS_BIGENDIAN | ||
88 | + MO_LE = MO_BSWAP, | ||
89 | + MO_BE = 0, | ||
90 | +#else | ||
91 | + MO_LE = 0, | ||
92 | + MO_BE = MO_BSWAP, | ||
93 | +#endif | ||
94 | +#ifdef NEED_CPU_H | ||
95 | +#ifdef TARGET_WORDS_BIGENDIAN | ||
96 | + MO_TE = MO_BE, | ||
97 | +#else | ||
98 | + MO_TE = MO_LE, | ||
99 | +#endif | ||
100 | +#endif | ||
101 | + | ||
102 | + /* | ||
103 | + * MO_UNALN accesses are never checked for alignment. | ||
104 | + * MO_ALIGN accesses will result in a call to the CPU's | ||
105 | + * do_unaligned_access hook if the guest address is not aligned. | ||
106 | + * The default depends on whether the target CPU defines | ||
107 | + * TARGET_ALIGNED_ONLY. | ||
108 | + * | ||
109 | + * Some architectures (e.g. ARMv8) need the address which is aligned | ||
110 | + * to a size more than the size of the memory access. | ||
111 | + * Some architectures (e.g. SPARCv9) need an address which is aligned, | ||
112 | + * but less strictly than the natural alignment. | ||
113 | + * | ||
114 | + * MO_ALIGN supposes the alignment size is the size of a memory access. | ||
115 | + * | ||
116 | + * There are three options: | ||
117 | + * - unaligned access permitted (MO_UNALN). | ||
118 | + * - an alignment to the size of an access (MO_ALIGN); | ||
119 | + * - an alignment to a specified size, which may be more or less than | ||
120 | + * the access size (MO_ALIGN_x where 'x' is a size in bytes); | ||
121 | + */ | ||
122 | + MO_ASHIFT = 4, | ||
123 | + MO_AMASK = 7 << MO_ASHIFT, | ||
124 | +#ifdef NEED_CPU_H | ||
125 | +#ifdef TARGET_ALIGNED_ONLY | ||
126 | + MO_ALIGN = 0, | ||
127 | + MO_UNALN = MO_AMASK, | ||
128 | +#else | ||
129 | + MO_ALIGN = MO_AMASK, | ||
130 | + MO_UNALN = 0, | ||
131 | +#endif | ||
132 | +#endif | ||
133 | + MO_ALIGN_2 = 1 << MO_ASHIFT, | ||
134 | + MO_ALIGN_4 = 2 << MO_ASHIFT, | ||
135 | + MO_ALIGN_8 = 3 << MO_ASHIFT, | ||
136 | + MO_ALIGN_16 = 4 << MO_ASHIFT, | ||
137 | + MO_ALIGN_32 = 5 << MO_ASHIFT, | ||
138 | + MO_ALIGN_64 = 6 << MO_ASHIFT, | ||
139 | + | ||
140 | + /* Combinations of the above, for ease of use. */ | ||
141 | + MO_UB = MO_8, | ||
142 | + MO_UW = MO_16, | ||
143 | + MO_UL = MO_32, | ||
144 | + MO_SB = MO_SIGN | MO_8, | ||
145 | + MO_SW = MO_SIGN | MO_16, | ||
146 | + MO_SL = MO_SIGN | MO_32, | ||
147 | + MO_Q = MO_64, | ||
148 | + | ||
149 | + MO_LEUW = MO_LE | MO_UW, | ||
150 | + MO_LEUL = MO_LE | MO_UL, | ||
151 | + MO_LESW = MO_LE | MO_SW, | ||
152 | + MO_LESL = MO_LE | MO_SL, | ||
153 | + MO_LEQ = MO_LE | MO_Q, | ||
154 | + | ||
155 | + MO_BEUW = MO_BE | MO_UW, | ||
156 | + MO_BEUL = MO_BE | MO_UL, | ||
157 | + MO_BESW = MO_BE | MO_SW, | ||
158 | + MO_BESL = MO_BE | MO_SL, | ||
159 | + MO_BEQ = MO_BE | MO_Q, | ||
160 | + | ||
161 | +#ifdef NEED_CPU_H | ||
162 | + MO_TEUW = MO_TE | MO_UW, | ||
163 | + MO_TEUL = MO_TE | MO_UL, | ||
164 | + MO_TESW = MO_TE | MO_SW, | ||
165 | + MO_TESL = MO_TE | MO_SL, | ||
166 | + MO_TEQ = MO_TE | MO_Q, | ||
167 | +#endif | ||
168 | + | ||
169 | + MO_SSIZE = MO_SIZE | MO_SIGN, | ||
170 | +} MemOp; | ||
171 | + | ||
172 | +#endif | ||
173 | diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h | ||
174 | index XXXXXXX..XXXXXXX 100644 | ||
175 | --- a/target/arm/translate-a64.h | ||
176 | +++ b/target/arm/translate-a64.h | ||
177 | @@ -XXX,XX +XXX,XX @@ static inline void assert_fp_access_checked(DisasContext *s) | ||
178 | * the FP/vector register Qn. | ||
179 | */ | 28 | */ |
180 | static inline int vec_reg_offset(DisasContext *s, int regno, | 29 | |
181 | - int element, TCGMemOp size) | 30 | +#include "../tcg-ldst.c.inc" |
182 | + int element, MemOp size) | 31 | #include "../tcg-pool.c.inc" |
183 | { | ||
184 | int element_size = 1 << size; | ||
185 | int offs = element * element_size; | ||
186 | diff --git a/target/arm/translate.h b/target/arm/translate.h | ||
187 | index XXXXXXX..XXXXXXX 100644 | ||
188 | --- a/target/arm/translate.h | ||
189 | +++ b/target/arm/translate.h | ||
190 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { | ||
191 | int condexec_cond; | ||
192 | int thumb; | ||
193 | int sctlr_b; | ||
194 | - TCGMemOp be_data; | ||
195 | + MemOp be_data; | ||
196 | #if !defined(CONFIG_USER_ONLY) | ||
197 | int user; | ||
198 | #endif | ||
199 | diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h | ||
200 | index XXXXXXX..XXXXXXX 100644 | ||
201 | --- a/tcg/tcg-op.h | ||
202 | +++ b/tcg/tcg-op.h | ||
203 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void); | ||
204 | #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 | ||
205 | #endif | ||
206 | |||
207 | -void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp); | ||
208 | -void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp); | ||
209 | -void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp); | ||
210 | -void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp); | ||
211 | +void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, MemOp); | ||
212 | +void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, MemOp); | ||
213 | +void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, MemOp); | ||
214 | +void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp); | ||
215 | |||
216 | static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index) | ||
217 | { | ||
218 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index) | ||
219 | } | ||
220 | |||
221 | void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32, | ||
222 | - TCGArg, TCGMemOp); | ||
223 | + TCGArg, MemOp); | ||
224 | void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64, | ||
225 | - TCGArg, TCGMemOp); | ||
226 | + TCGArg, MemOp); | ||
227 | |||
228 | -void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
229 | -void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
230 | +void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
231 | +void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
232 | |||
233 | -void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
234 | -void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
235 | -void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
236 | -void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
237 | -void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
238 | -void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
239 | -void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
240 | -void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
241 | -void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
242 | -void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
243 | -void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
244 | -void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
245 | -void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
246 | -void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
247 | -void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
248 | -void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
249 | +void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
250 | +void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
251 | +void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
252 | +void tcg_gen_atomic_fetch_and_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
253 | +void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
254 | +void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
255 | +void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
256 | +void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
257 | +void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
258 | +void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
259 | +void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
260 | +void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
261 | +void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
262 | +void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
263 | +void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
264 | +void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
265 | |||
266 | -void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
267 | -void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
268 | -void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
269 | -void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
270 | -void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
271 | -void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
272 | -void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
273 | -void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
274 | -void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
275 | -void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
276 | -void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
277 | -void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
278 | -void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
279 | -void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
280 | -void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp); | ||
281 | -void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp); | ||
282 | +void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
283 | +void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
284 | +void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
285 | +void tcg_gen_atomic_and_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
286 | +void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
287 | +void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
288 | +void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
289 | +void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
290 | +void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
291 | +void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
292 | +void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
293 | +void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
294 | +void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
295 | +void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
296 | +void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); | ||
297 | +void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); | ||
298 | |||
299 | void tcg_gen_mov_vec(TCGv_vec, TCGv_vec); | ||
300 | void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32); | ||
301 | diff --git a/tcg/tcg.h b/tcg/tcg.h | ||
302 | index XXXXXXX..XXXXXXX 100644 | ||
303 | --- a/tcg/tcg.h | ||
304 | +++ b/tcg/tcg.h | ||
305 | @@ -XXX,XX +XXX,XX @@ | ||
306 | #define TCG_H | ||
307 | |||
308 | #include "cpu.h" | ||
309 | +#include "exec/memop.h" | ||
310 | #include "exec/tb-context.h" | ||
311 | #include "qemu/bitops.h" | 32 | #include "qemu/bitops.h" |
312 | #include "qemu/queue.h" | 33 | |
313 | @@ -XXX,XX +XXX,XX @@ typedef enum TCGType { | 34 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
314 | #endif | 35 | I3404_ANDI = 0x12000000, |
315 | } TCGType; | 36 | I3404_ORRI = 0x32000000, |
316 | 37 | I3404_EORI = 0x52000000, | |
317 | -/* Constants for qemu_ld and qemu_st for the Memory Operation field. */ | 38 | + I3404_ANDSI = 0x72000000, |
318 | -typedef enum TCGMemOp { | 39 | |
319 | - MO_8 = 0, | 40 | /* Move wide immediate instructions. */ |
320 | - MO_16 = 1, | 41 | I3405_MOVN = 0x12800000, |
321 | - MO_32 = 2, | 42 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target) |
322 | - MO_64 = 3, | 43 | if (offset == sextract64(offset, 0, 26)) { |
323 | - MO_SIZE = 3, /* Mask for the above. */ | 44 | tcg_out_insn(s, 3206, B, offset); |
324 | - | ||
325 | - MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ | ||
326 | - | ||
327 | - MO_BSWAP = 8, /* Host reverse endian. */ | ||
328 | -#ifdef HOST_WORDS_BIGENDIAN | ||
329 | - MO_LE = MO_BSWAP, | ||
330 | - MO_BE = 0, | ||
331 | -#else | ||
332 | - MO_LE = 0, | ||
333 | - MO_BE = MO_BSWAP, | ||
334 | -#endif | ||
335 | -#ifdef TARGET_WORDS_BIGENDIAN | ||
336 | - MO_TE = MO_BE, | ||
337 | -#else | ||
338 | - MO_TE = MO_LE, | ||
339 | -#endif | ||
340 | - | ||
341 | - /* | ||
342 | - * MO_UNALN accesses are never checked for alignment. | ||
343 | - * MO_ALIGN accesses will result in a call to the CPU's | ||
344 | - * do_unaligned_access hook if the guest address is not aligned. | ||
345 | - * The default depends on whether the target CPU defines | ||
346 | - * TARGET_ALIGNED_ONLY. | ||
347 | - * | ||
348 | - * Some architectures (e.g. ARMv8) need the address which is aligned | ||
349 | - * to a size more than the size of the memory access. | ||
350 | - * Some architectures (e.g. SPARCv9) need an address which is aligned, | ||
351 | - * but less strictly than the natural alignment. | ||
352 | - * | ||
353 | - * MO_ALIGN supposes the alignment size is the size of a memory access. | ||
354 | - * | ||
355 | - * There are three options: | ||
356 | - * - unaligned access permitted (MO_UNALN). | ||
357 | - * - an alignment to the size of an access (MO_ALIGN); | ||
358 | - * - an alignment to a specified size, which may be more or less than | ||
359 | - * the access size (MO_ALIGN_x where 'x' is a size in bytes); | ||
360 | - */ | ||
361 | - MO_ASHIFT = 4, | ||
362 | - MO_AMASK = 7 << MO_ASHIFT, | ||
363 | -#ifdef TARGET_ALIGNED_ONLY | ||
364 | - MO_ALIGN = 0, | ||
365 | - MO_UNALN = MO_AMASK, | ||
366 | -#else | ||
367 | - MO_ALIGN = MO_AMASK, | ||
368 | - MO_UNALN = 0, | ||
369 | -#endif | ||
370 | - MO_ALIGN_2 = 1 << MO_ASHIFT, | ||
371 | - MO_ALIGN_4 = 2 << MO_ASHIFT, | ||
372 | - MO_ALIGN_8 = 3 << MO_ASHIFT, | ||
373 | - MO_ALIGN_16 = 4 << MO_ASHIFT, | ||
374 | - MO_ALIGN_32 = 5 << MO_ASHIFT, | ||
375 | - MO_ALIGN_64 = 6 << MO_ASHIFT, | ||
376 | - | ||
377 | - /* Combinations of the above, for ease of use. */ | ||
378 | - MO_UB = MO_8, | ||
379 | - MO_UW = MO_16, | ||
380 | - MO_UL = MO_32, | ||
381 | - MO_SB = MO_SIGN | MO_8, | ||
382 | - MO_SW = MO_SIGN | MO_16, | ||
383 | - MO_SL = MO_SIGN | MO_32, | ||
384 | - MO_Q = MO_64, | ||
385 | - | ||
386 | - MO_LEUW = MO_LE | MO_UW, | ||
387 | - MO_LEUL = MO_LE | MO_UL, | ||
388 | - MO_LESW = MO_LE | MO_SW, | ||
389 | - MO_LESL = MO_LE | MO_SL, | ||
390 | - MO_LEQ = MO_LE | MO_Q, | ||
391 | - | ||
392 | - MO_BEUW = MO_BE | MO_UW, | ||
393 | - MO_BEUL = MO_BE | MO_UL, | ||
394 | - MO_BESW = MO_BE | MO_SW, | ||
395 | - MO_BESL = MO_BE | MO_SL, | ||
396 | - MO_BEQ = MO_BE | MO_Q, | ||
397 | - | ||
398 | - MO_TEUW = MO_TE | MO_UW, | ||
399 | - MO_TEUL = MO_TE | MO_UL, | ||
400 | - MO_TESW = MO_TE | MO_SW, | ||
401 | - MO_TESL = MO_TE | MO_SL, | ||
402 | - MO_TEQ = MO_TE | MO_Q, | ||
403 | - | ||
404 | - MO_SSIZE = MO_SIZE | MO_SIGN, | ||
405 | -} TCGMemOp; | ||
406 | - | ||
407 | /** | ||
408 | * get_alignment_bits | ||
409 | - * @memop: TCGMemOp value | ||
410 | + * @memop: MemOp value | ||
411 | * | ||
412 | * Extract the alignment size from the memop. | ||
413 | */ | ||
414 | -static inline unsigned get_alignment_bits(TCGMemOp memop) | ||
415 | +static inline unsigned get_alignment_bits(MemOp memop) | ||
416 | { | ||
417 | unsigned a = memop & MO_AMASK; | ||
418 | |||
419 | @@ -XXX,XX +XXX,XX @@ static inline size_t tcg_current_code_size(TCGContext *s) | ||
420 | return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); | ||
421 | } | ||
422 | |||
423 | -/* Combine the TCGMemOp and mmu_idx parameters into a single value. */ | ||
424 | +/* Combine the MemOp and mmu_idx parameters into a single value. */ | ||
425 | typedef uint32_t TCGMemOpIdx; | ||
426 | |||
427 | /** | ||
428 | @@ -XXX,XX +XXX,XX @@ typedef uint32_t TCGMemOpIdx; | ||
429 | * | ||
430 | * Encode these values into a single parameter. | ||
431 | */ | ||
432 | -static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx) | ||
433 | +static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx) | ||
434 | { | ||
435 | tcg_debug_assert(idx <= 15); | ||
436 | return (op << 4) | idx; | ||
437 | @@ -XXX,XX +XXX,XX @@ static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx) | ||
438 | * | ||
439 | * Extract the memory operation from the combined value. | ||
440 | */ | ||
441 | -static inline TCGMemOp get_memop(TCGMemOpIdx oi) | ||
442 | +static inline MemOp get_memop(TCGMemOpIdx oi) | ||
443 | { | ||
444 | return oi >> 4; | ||
445 | } | ||
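
For review context: a minimal, self-contained C sketch of the packing that the tcg/tcg.h hunk above leaves intact while renaming the type. Alignment is encoded in bits 4-6 of a MemOp (MO_ASHIFT/MO_AMASK), and make_memop_idx()/get_memop() combine a MemOp with a 4-bit mmu_idx. This is not QEMU code: get_alignment_bits() below skips the MO_ALIGN/MO_UNALN special cases, whose handling is elided in the hunk.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Constant values taken from the enum block deleted above. */
    enum {
        MO_32      = 2,
        MO_ASHIFT  = 4,
        MO_AMASK   = 7 << MO_ASHIFT,
        MO_ALIGN_2 = 1 << MO_ASHIFT,
    };

    typedef uint32_t TCGMemOpIdx;

    /* Simplified: only the "alignment to a specified size" case. */
    static unsigned get_alignment_bits(unsigned memop)
    {
        return (memop & MO_AMASK) >> MO_ASHIFT;
    }

    static TCGMemOpIdx make_memop_idx(unsigned op, unsigned idx)
    {
        assert(idx <= 15);
        return (op << 4) | idx;
    }

    static unsigned get_memop(TCGMemOpIdx oi)
    {
        return oi >> 4;
    }

    int main(void)
    {
        /* A 32-bit access with a 2-byte alignment requirement, mmu_idx 1. */
        TCGMemOpIdx oi = make_memop_idx(MO_32 | MO_ALIGN_2, 1);

        printf("memop=%#x mmu_idx=%u align=%u byte(s)\n",
               get_memop(oi), oi & 15,
               1u << get_alignment_bits(get_memop(oi)));
        return 0;
    }

Round-tripping through make_memop_idx()/get_memop() is exactly what the atomic_mmu_lookup() hunk below relies on when it recovers the MemOp from the combined operand-and-index value.
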
446 | diff --git a/trace/mem-internal.h b/trace/mem-internal.h | ||
447 | index XXXXXXX..XXXXXXX 100644 | ||
448 | --- a/trace/mem-internal.h | ||
449 | +++ b/trace/mem-internal.h | ||
450 | @@ -XXX,XX +XXX,XX @@ | ||
451 | #define TRACE_MEM_ST (1ULL << 5) /* store (y/n) */ | ||
452 | |||
453 | static inline uint8_t trace_mem_build_info( | ||
454 | - int size_shift, bool sign_extend, TCGMemOp endianness, bool store) | ||
455 | + int size_shift, bool sign_extend, MemOp endianness, bool store) | ||
456 | { | ||
457 | uint8_t res; | ||
458 | |||
459 | @@ -XXX,XX +XXX,XX @@ static inline uint8_t trace_mem_build_info( | ||
460 | return res; | ||
461 | } | ||
462 | |||
463 | -static inline uint8_t trace_mem_get_info(TCGMemOp op, bool store) | ||
464 | +static inline uint8_t trace_mem_get_info(MemOp op, bool store) | ||
465 | { | ||
466 | return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN), | ||
467 | op & MO_BSWAP, store); | ||
468 | diff --git a/trace/mem.h b/trace/mem.h | ||
469 | index XXXXXXX..XXXXXXX 100644 | ||
470 | --- a/trace/mem.h | ||
471 | +++ b/trace/mem.h | ||
472 | @@ -XXX,XX +XXX,XX @@ | ||
473 | * | ||
474 | * Return a value for the 'info' argument in guest memory access traces. | ||
475 | */ | ||
476 | -static uint8_t trace_mem_get_info(TCGMemOp op, bool store); | ||
477 | +static uint8_t trace_mem_get_info(MemOp op, bool store); | ||
478 | |||
479 | /** | ||
480 | * trace_mem_build_info: | ||
481 | @@ -XXX,XX +XXX,XX @@ static uint8_t trace_mem_get_info(TCGMemOp op, bool store); | ||
482 | * Return a value for the 'info' argument in guest memory access traces. | ||
483 | */ | ||
484 | static uint8_t trace_mem_build_info(int size_shift, bool sign_extend, | ||
485 | - TCGMemOp endianness, bool store); | ||
486 | + MemOp endianness, bool store); | ||
487 | |||
488 | |||
489 | #include "trace/mem-internal.h" | ||
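
The trace/mem changes above only retype the 'endianness' parameter; for orientation, here is a sketch of the kind of info-byte packing trace_mem_build_info() performs. Only TRACE_MEM_ST (1 << 5) is visible in the hunk; the size/sign/endianness field positions below are illustrative assumptions, not the actual trace/mem-internal.h layout, and the MemOp endianness argument is simplified to a bool.

    #include <stdbool.h>
    #include <stdint.h>

    #define TRACE_MEM_SZ_MASK 0x7      /* assumed: size shift in bits 0-2 */
    #define TRACE_MEM_SE      (1 << 3) /* assumed: sign-extended (y/n) */
    #define TRACE_MEM_BE      (1 << 4) /* assumed: big endian (y/n) */
    #define TRACE_MEM_ST      (1 << 5) /* store (y/n), as in the hunk */

    static uint8_t build_info_sketch(int size_shift, bool sign_extend,
                                     bool big_endian, bool store)
    {
        uint8_t res = size_shift & TRACE_MEM_SZ_MASK;

        if (sign_extend) {
            res |= TRACE_MEM_SE;
        }
        if (big_endian) {
            res |= TRACE_MEM_BE;
        }
        if (store) {
            res |= TRACE_MEM_ST;
        }
        return res;
    }

    int main(void)
    {
        /* 32-bit (shift 2) unsigned store -> 0x22 under these assumptions. */
        return build_info_sketch(2, false, false, true) == 0x22 ? 0 : 1;
    }

The real caller shown above derives these arguments from a MemOp: the size shift from op & MO_SIZE, sign extension from op & MO_SIGN, and the endianness from op & MO_BSWAP.
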
490 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
491 | index XXXXXXX..XXXXXXX 100644 | ||
492 | --- a/accel/tcg/cputlb.c | ||
493 | +++ b/accel/tcg/cputlb.c | ||
494 | @@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, | ||
495 | uintptr_t index = tlb_index(env, mmu_idx, addr); | ||
496 | CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); | ||
497 | target_ulong tlb_addr = tlb_addr_write(tlbe); | ||
498 | - TCGMemOp mop = get_memop(oi); | ||
499 | + MemOp mop = get_memop(oi); | ||
500 | int a_bits = get_alignment_bits(mop); | ||
501 | int s_bits = mop & MO_SIZE; | ||
502 | void *hostaddr; | ||
503 | diff --git a/target/alpha/translate.c b/target/alpha/translate.c | ||
504 | index XXXXXXX..XXXXXXX 100644 | ||
505 | --- a/target/alpha/translate.c | ||
506 | +++ b/target/alpha/translate.c | ||
507 | @@ -XXX,XX +XXX,XX @@ static inline void gen_store_mem(DisasContext *ctx, | ||
508 | |||
509 | static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, | ||
510 | int32_t disp16, int mem_idx, | ||
511 | - TCGMemOp op) | ||
512 | + MemOp op) | ||
513 | { | ||
514 | TCGLabel *lab_fail, *lab_done; | ||
515 | TCGv addr, val; | ||
516 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | ||
517 | index XXXXXXX..XXXXXXX 100644 | ||
518 | --- a/target/arm/translate-a64.c | ||
519 | +++ b/target/arm/translate-a64.c | ||
520 | @@ -XXX,XX +XXX,XX @@ typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64); | ||
521 | typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr); | ||
522 | typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32); | ||
523 | typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr); | ||
524 | -typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp); | ||
525 | +typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp); | ||
526 | |||
527 | /* initialize TCG globals. */ | ||
528 | void a64_translate_init(void) | ||
529 | @@ -XXX,XX +XXX,XX @@ TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) | ||
530 | * Dn, Sn, Hn or Bn). | ||
531 | * (Note that this is not the same mapping as for A32; see cpu.h) | ||
532 | */ | ||
533 | -static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size) | ||
534 | +static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size) | ||
535 | { | ||
536 | return vec_reg_offset(s, regno, 0, size); | ||
537 | } | ||
538 | @@ -XXX,XX +XXX,XX @@ static void do_gpr_ld_memidx(DisasContext *s, | ||
539 | bool iss_valid, unsigned int iss_srt, | ||
540 | bool iss_sf, bool iss_ar) | ||
541 | { | ||
542 | - TCGMemOp memop = s->be_data + size; | ||
543 | + MemOp memop = s->be_data + size; | ||
544 | |||
545 | g_assert(size <= 3); | ||
546 | |||
547 | @@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) | ||
548 | TCGv_i64 tmphi; | ||
549 | |||
550 | if (size < 4) { | ||
551 | - TCGMemOp memop = s->be_data + size; | ||
552 | + MemOp memop = s->be_data + size; | ||
553 | tmphi = tcg_const_i64(0); | ||
554 | tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop); | ||
555 | } else { | 45 | } else { |
556 | @@ -XXX,XX +XXX,XX @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) | 46 | - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); |
557 | 47 | - tcg_out_insn(s, 3207, BR, TCG_REG_TMP); | |
558 | /* Get value of an element within a vector register */ | 48 | + /* Choose X9 as a call-clobbered non-LR temporary. */ |
559 | static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, | 49 | + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X9, (intptr_t)target); |
560 | - int element, TCGMemOp memop) | 50 | + tcg_out_insn(s, 3207, BR, TCG_REG_X9); |
561 | + int element, MemOp memop) | ||
562 | { | ||
563 | int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); | ||
564 | switch (memop) { | ||
565 | @@ -XXX,XX +XXX,XX @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, | ||
566 | } | ||
567 | |||
568 | static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, | ||
569 | - int element, TCGMemOp memop) | ||
570 | + int element, MemOp memop) | ||
571 | { | ||
572 | int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); | ||
573 | switch (memop) { | ||
574 | @@ -XXX,XX +XXX,XX @@ static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, | ||
575 | |||
576 | /* Set value of an element within a vector register */ | ||
577 | static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, | ||
578 | - int element, TCGMemOp memop) | ||
579 | + int element, MemOp memop) | ||
580 | { | ||
581 | int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); | ||
582 | switch (memop) { | ||
583 | @@ -XXX,XX +XXX,XX @@ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, | ||
584 | } | ||
585 | |||
586 | static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, | ||
587 | - int destidx, int element, TCGMemOp memop) | ||
588 | + int destidx, int element, MemOp memop) | ||
589 | { | ||
590 | int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); | ||
591 | switch (memop) { | ||
592 | @@ -XXX,XX +XXX,XX @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, | ||
593 | |||
594 | /* Store from vector register to memory */ | ||
595 | static void do_vec_st(DisasContext *s, int srcidx, int element, | ||
596 | - TCGv_i64 tcg_addr, int size, TCGMemOp endian) | ||
597 | + TCGv_i64 tcg_addr, int size, MemOp endian) | ||
598 | { | ||
599 | TCGv_i64 tcg_tmp = tcg_temp_new_i64(); | ||
600 | |||
601 | @@ -XXX,XX +XXX,XX @@ static void do_vec_st(DisasContext *s, int srcidx, int element, | ||
602 | |||
603 | /* Load from memory to vector register */ | ||
604 | static void do_vec_ld(DisasContext *s, int destidx, int element, | ||
605 | - TCGv_i64 tcg_addr, int size, TCGMemOp endian) | ||
606 | + TCGv_i64 tcg_addr, int size, MemOp endian) | ||
607 | { | ||
608 | TCGv_i64 tcg_tmp = tcg_temp_new_i64(); | ||
609 | |||
610 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, | ||
611 | TCGv_i64 addr, int size, bool is_pair) | ||
612 | { | ||
613 | int idx = get_mem_index(s); | ||
614 | - TCGMemOp memop = s->be_data; | ||
615 | + MemOp memop = s->be_data; | ||
616 | |||
617 | g_assert(size <= 3); | ||
618 | if (is_pair) { | ||
619 | @@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) | ||
620 | bool is_postidx = extract32(insn, 23, 1); | ||
621 | bool is_q = extract32(insn, 30, 1); | ||
622 | TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; | ||
623 | - TCGMemOp endian = s->be_data; | ||
624 | + MemOp endian = s->be_data; | ||
625 | |||
626 | int ebytes; /* bytes per element */ | ||
627 | int elements; /* elements per vector */ | ||
628 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_csel(DisasContext *s, uint32_t insn) | ||
629 | unsigned int mos, type, rm, cond, rn, rd; | ||
630 | TCGv_i64 t_true, t_false, t_zero; | ||
631 | DisasCompare64 c; | ||
632 | - TCGMemOp sz; | ||
633 | + MemOp sz; | ||
634 | |||
635 | mos = extract32(insn, 29, 3); | ||
636 | type = extract32(insn, 22, 2); | ||
637 | @@ -XXX,XX +XXX,XX @@ static void disas_fp_imm(DisasContext *s, uint32_t insn) | ||
638 | int mos = extract32(insn, 29, 3); | ||
639 | uint64_t imm; | ||
640 | TCGv_i64 tcg_res; | ||
641 | - TCGMemOp sz; | ||
642 | + MemOp sz; | ||
643 | |||
644 | if (mos || imm5) { | ||
645 | unallocated_encoding(s); | ||
646 | @@ -XXX,XX +XXX,XX @@ static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn, | ||
647 | { | ||
648 | if (esize == size) { | ||
649 | int element; | ||
650 | - TCGMemOp msize = esize == 16 ? MO_16 : MO_32; | ||
651 | + MemOp msize = esize == 16 ? MO_16 : MO_32; | ||
652 | TCGv_i32 tcg_elem; | ||
653 | |||
654 | /* We should have one register left here */ | ||
655 | @@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, | ||
656 | int shift = (2 * esize) - immhb; | ||
657 | int elements = is_scalar ? 1 : (64 / esize); | ||
658 | bool round = extract32(opcode, 0, 1); | ||
659 | - TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); | ||
660 | + MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); | ||
661 | TCGv_i64 tcg_rn, tcg_rd, tcg_round; | ||
662 | TCGv_i32 tcg_rd_narrowed; | ||
663 | TCGv_i64 tcg_final; | ||
664 | @@ -XXX,XX +XXX,XX @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, | ||
665 | } | ||
666 | }; | ||
667 | NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size]; | ||
668 | - TCGMemOp memop = scalar ? size : MO_32; | ||
669 | + MemOp memop = scalar ? size : MO_32; | ||
670 | int maxpass = scalar ? 1 : is_q ? 4 : 2; | ||
671 | |||
672 | for (pass = 0; pass < maxpass; pass++) { | ||
673 | @@ -XXX,XX +XXX,XX @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, | ||
674 | TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16); | ||
675 | TCGv_i32 tcg_shift = NULL; | ||
676 | |||
677 | - TCGMemOp mop = size | (is_signed ? MO_SIGN : 0); | ||
678 | + MemOp mop = size | (is_signed ? MO_SIGN : 0); | ||
679 | int pass; | ||
680 | |||
681 | if (fracbits || size == MO_64) { | ||
682 | @@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, | ||
683 | int dsize = is_q ? 128 : 64; | ||
684 | int esize = 8 << size; | ||
685 | int elements = dsize/esize; | ||
686 | - TCGMemOp memop = size | (is_u ? 0 : MO_SIGN); | ||
687 | + MemOp memop = size | (is_u ? 0 : MO_SIGN); | ||
688 | TCGv_i64 tcg_rn = new_tmp_a64(s); | ||
689 | TCGv_i64 tcg_rd = new_tmp_a64(s); | ||
690 | TCGv_i64 tcg_round; | ||
691 | @@ -XXX,XX +XXX,XX @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, | ||
692 | TCGv_i64 tcg_op1 = tcg_temp_new_i64(); | ||
693 | TCGv_i64 tcg_op2 = tcg_temp_new_i64(); | ||
694 | TCGv_i64 tcg_passres; | ||
695 | - TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); | ||
696 | + MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); | ||
697 | |||
698 | int elt = pass + is_q * 2; | ||
699 | |||
700 | @@ -XXX,XX +XXX,XX @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, | ||
701 | |||
702 | if (size == 2) { | ||
703 | /* 32 + 32 -> 64 op */ | ||
704 | - TCGMemOp memop = size + (u ? 0 : MO_SIGN); | ||
705 | + MemOp memop = size + (u ? 0 : MO_SIGN); | ||
706 | |||
707 | for (pass = 0; pass < maxpass; pass++) { | ||
708 | TCGv_i64 tcg_op1 = tcg_temp_new_i64(); | ||
709 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) | ||
710 | |||
711 | switch (is_fp) { | ||
712 | case 1: /* normal fp */ | ||
713 | - /* convert insn encoded size to TCGMemOp size */ | ||
714 | + /* convert insn encoded size to MemOp size */ | ||
715 | switch (size) { | ||
716 | case 0: /* half-precision */ | ||
717 | size = MO_16; | ||
718 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) | ||
719 | return; | ||
720 | } | ||
721 | |||
722 | - /* Given TCGMemOp size, adjust register and indexing. */ | ||
723 | + /* Given MemOp size, adjust register and indexing. */ | ||
724 | switch (size) { | ||
725 | case MO_16: | ||
726 | index = h << 2 | l << 1 | m; | ||
727 | @@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) | ||
728 | TCGv_i64 tcg_res[2]; | ||
729 | int pass; | ||
730 | bool satop = extract32(opcode, 0, 1); | ||
731 | - TCGMemOp memop = MO_32; | ||
732 | + MemOp memop = MO_32; | ||
733 | |||
734 | if (satop || !u) { | ||
735 | memop |= MO_SIGN; | ||
736 | diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c | ||
737 | index XXXXXXX..XXXXXXX 100644 | ||
738 | --- a/target/arm/translate-sve.c | ||
739 | +++ b/target/arm/translate-sve.c | ||
740 | @@ -XXX,XX +XXX,XX @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a) | ||
741 | */ | ||
742 | |||
743 | /* The memory mode of the dtype. */ | ||
744 | -static const TCGMemOp dtype_mop[16] = { | ||
745 | +static const MemOp dtype_mop[16] = { | ||
746 | MO_UB, MO_UB, MO_UB, MO_UB, | ||
747 | MO_SL, MO_UW, MO_UW, MO_UW, | ||
748 | MO_SW, MO_SW, MO_UL, MO_UL, | ||
749 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
750 | index XXXXXXX..XXXXXXX 100644 | ||
751 | --- a/target/arm/translate.c | ||
752 | +++ b/target/arm/translate.c | ||
753 | @@ -XXX,XX +XXX,XX @@ typedef enum ISSInfo { | ||
754 | } ISSInfo; | ||
755 | |||
756 | /* Save the syndrome information for a Data Abort */ | ||
757 | -static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo) | ||
758 | +static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo) | ||
759 | { | ||
760 | uint32_t syn; | ||
761 | int sas = memop & MO_SIZE; | ||
762 | @@ -XXX,XX +XXX,XX @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) | ||
763 | * that the address argument is TCGv_i32 rather than TCGv. | ||
764 | */ | ||
765 | |||
766 | -static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op) | ||
767 | +static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op) | ||
768 | { | ||
769 | TCGv addr = tcg_temp_new(); | ||
770 | tcg_gen_extu_i32_tl(addr, a32); | ||
771 | @@ -XXX,XX +XXX,XX @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op) | ||
772 | } | ||
773 | |||
774 | static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | ||
775 | - int index, TCGMemOp opc) | ||
776 | + int index, MemOp opc) | ||
777 | { | ||
778 | TCGv addr; | ||
779 | |||
780 | @@ -XXX,XX +XXX,XX @@ static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | ||
781 | } | ||
782 | |||
783 | static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, | ||
784 | - int index, TCGMemOp opc) | ||
785 | + int index, MemOp opc) | ||
786 | { | ||
787 | TCGv addr; | ||
788 | |||
789 | @@ -XXX,XX +XXX,XX @@ static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val) | ||
790 | } | ||
791 | |||
792 | static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, | ||
793 | - int index, TCGMemOp opc) | ||
794 | + int index, MemOp opc) | ||
795 | { | ||
796 | TCGv addr = gen_aa32_addr(s, a32, opc); | ||
797 | tcg_gen_qemu_ld_i64(val, addr, index, opc); | ||
798 | @@ -XXX,XX +XXX,XX @@ static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, | ||
799 | } | ||
800 | |||
801 | static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, | ||
802 | - int index, TCGMemOp opc) | ||
803 | + int index, MemOp opc) | ||
804 | { | ||
805 | TCGv addr = gen_aa32_addr(s, a32, opc); | ||
806 | |||
807 | @@ -XXX,XX +XXX,XX @@ neon_reg_offset (int reg, int n) | ||
808 | * where 0 is the least significant end of the register. | ||
809 | */ | ||
810 | static inline long | ||
811 | -neon_element_offset(int reg, int element, TCGMemOp size) | ||
812 | +neon_element_offset(int reg, int element, MemOp size) | ||
813 | { | ||
814 | int element_size = 1 << size; | ||
815 | int ofs = element * element_size; | ||
816 | @@ -XXX,XX +XXX,XX @@ static TCGv_i32 neon_load_reg(int reg, int pass) | ||
817 | return tmp; | ||
818 | } | ||
819 | |||
820 | -static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop) | ||
821 | +static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop) | ||
822 | { | ||
823 | long offset = neon_element_offset(reg, ele, mop & MO_SIZE); | ||
824 | |||
825 | @@ -XXX,XX +XXX,XX @@ static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop) | ||
826 | } | 51 | } |
827 | } | 52 | } |
828 | 53 | ||
829 | -static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop) | 54 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d, |
830 | +static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop) | ||
831 | { | ||
832 | long offset = neon_element_offset(reg, ele, mop & MO_SIZE); | ||
833 | |||
834 | @@ -XXX,XX +XXX,XX @@ static void neon_store_reg(int reg, int pass, TCGv_i32 var) | ||
835 | tcg_temp_free_i32(var); | ||
836 | } | ||
837 | |||
838 | -static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var) | ||
839 | +static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var) | ||
840 | { | ||
841 | long offset = neon_element_offset(reg, ele, size); | ||
842 | |||
843 | @@ -XXX,XX +XXX,XX @@ static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var) | ||
844 | } | 55 | } |
845 | } | 56 | } |
846 | 57 | ||
847 | -static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var) | 58 | -#ifdef CONFIG_SOFTMMU |
848 | +static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var) | 59 | -#include "../tcg-ldst.c.inc" |
849 | { | 60 | +static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target) |
850 | long offset = neon_element_offset(reg, ele, size); | 61 | +{ |
851 | 62 | + ptrdiff_t offset = tcg_pcrel_diff(s, target); | |
852 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) | 63 | + tcg_debug_assert(offset == sextract64(offset, 0, 21)); |
853 | int n; | 64 | + tcg_out_insn(s, 3406, ADR, rd, offset); |
854 | int vec_size; | 65 | +} |
855 | int mmu_idx; | 66 | |
856 | - TCGMemOp endian; | 67 | +#ifdef CONFIG_SOFTMMU |
857 | + MemOp endian; | 68 | /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, |
858 | TCGv_i32 addr; | 69 | * MemOpIdx oi, uintptr_t ra) |
859 | TCGv_i32 tmp; | ||
860 | TCGv_i32 tmp2; | ||
861 | @@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ||
862 | } else if ((insn & 0x380) == 0) { | ||
863 | /* VDUP */ | ||
864 | int element; | ||
865 | - TCGMemOp size; | ||
866 | + MemOp size; | ||
867 | |||
868 | if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) { | ||
869 | return 1; | ||
870 | @@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, | ||
871 | TCGv_i32 addr, int size) | ||
872 | { | ||
873 | TCGv_i32 tmp = tcg_temp_new_i32(); | ||
874 | - TCGMemOp opc = size | MO_ALIGN | s->be_data; | ||
875 | + MemOp opc = size | MO_ALIGN | s->be_data; | ||
876 | |||
877 | s->is_ldex = true; | ||
878 | |||
879 | @@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, | ||
880 | TCGv taddr; | ||
881 | TCGLabel *done_label; | ||
882 | TCGLabel *fail_label; | ||
883 | - TCGMemOp opc = size | MO_ALIGN | s->be_data; | ||
884 | + MemOp opc = size | MO_ALIGN | s->be_data; | ||
885 | |||
886 | /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { | ||
887 | [addr] = {Rt}; | ||
888 | @@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) | ||
889 | */ | ||
890 | |||
891 | TCGv taddr; | ||
892 | - TCGMemOp opc = s->be_data; | ||
893 | + MemOp opc = s->be_data; | ||
894 | |||
895 | rm = (insn) & 0xf; | ||
896 | |||
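One detail worth keeping in mind while reading the Neon hunks above: neon_element_offset() turns an element index plus a MemOp size field into a byte offset from the least significant end of the register. A sketch on plain integers (the real function also adjusts the offset on big-endian hosts; that part is elided in the hunk):

    #include <stdio.h>

    static long neon_element_offset_sketch(int element, int size)
    {
        int element_size = 1 << size;   /* MemOp size field: log2 bytes */
        return element * element_size;
    }

    int main(void)
    {
        /* Element 3 of a vector of 16-bit elements (size MO_16 == 1): 6. */
        printf("%ld\n", neon_element_offset_sketch(3, 1));
        return 0;
    }
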
897 | diff --git a/target/hppa/translate.c b/target/hppa/translate.c | ||
898 | index XXXXXXX..XXXXXXX 100644 | ||
899 | --- a/target/hppa/translate.c | ||
900 | +++ b/target/hppa/translate.c | ||
901 | @@ -XXX,XX +XXX,XX @@ static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs, | ||
902 | */ | 70 | */ |
903 | static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb, | 71 | @@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[MO_SIZE + 1] = { |
904 | unsigned rx, int scale, target_sreg disp, | 72 | #endif |
905 | - unsigned sp, int modify, TCGMemOp mop) | 73 | }; |
906 | + unsigned sp, int modify, MemOp mop) | 74 | |
907 | { | 75 | -static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target) |
908 | TCGv_reg ofs; | 76 | -{ |
909 | TCGv_tl addr; | 77 | - ptrdiff_t offset = tcg_pcrel_diff(s, target); |
910 | @@ -XXX,XX +XXX,XX @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb, | 78 | - tcg_debug_assert(offset == sextract64(offset, 0, 21)); |
911 | 79 | - tcg_out_insn(s, 3406, ADR, rd, offset); | |
912 | static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, | 80 | -} |
913 | unsigned rx, int scale, target_sreg disp, | 81 | - |
914 | - unsigned sp, int modify, TCGMemOp mop) | 82 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) |
915 | + unsigned sp, int modify, MemOp mop) | 83 | { |
916 | { | 84 | MemOpIdx oi = lb->oi; |
917 | TCGv_reg ofs; | 85 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, |
918 | TCGv_tl addr; | 86 | tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); |
919 | @@ -XXX,XX +XXX,XX @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, | ||
920 | |||
921 | static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, | ||
922 | unsigned rx, int scale, target_sreg disp, | ||
923 | - unsigned sp, int modify, TCGMemOp mop) | ||
924 | + unsigned sp, int modify, MemOp mop) | ||
925 | { | ||
926 | TCGv_reg ofs; | ||
927 | TCGv_tl addr; | ||
928 | @@ -XXX,XX +XXX,XX @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, | ||
929 | |||
930 | static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, | ||
931 | unsigned rx, int scale, target_sreg disp, | ||
932 | - unsigned sp, int modify, TCGMemOp mop) | ||
933 | + unsigned sp, int modify, MemOp mop) | ||
934 | { | ||
935 | TCGv_reg ofs; | ||
936 | TCGv_tl addr; | ||
937 | @@ -XXX,XX +XXX,XX @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, | ||
938 | |||
939 | static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb, | ||
940 | unsigned rx, int scale, target_sreg disp, | ||
941 | - unsigned sp, int modify, TCGMemOp mop) | ||
942 | + unsigned sp, int modify, MemOp mop) | ||
943 | { | ||
944 | TCGv_reg dest; | ||
945 | |||
946 | @@ -XXX,XX +XXX,XX @@ static bool trans_fldd(DisasContext *ctx, arg_ldst *a) | ||
947 | |||
948 | static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb, | ||
949 | target_sreg disp, unsigned sp, | ||
950 | - int modify, TCGMemOp mop) | ||
951 | + int modify, MemOp mop) | ||
952 | { | ||
953 | nullify_over(ctx); | ||
954 | do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop); | ||
955 | @@ -XXX,XX +XXX,XX @@ static bool trans_st(DisasContext *ctx, arg_ldst *a) | ||
956 | |||
957 | static bool trans_ldc(DisasContext *ctx, arg_ldst *a) | ||
958 | { | ||
959 | - TCGMemOp mop = MO_TEUL | MO_ALIGN_16 | a->size; | ||
960 | + MemOp mop = MO_TEUL | MO_ALIGN_16 | a->size; | ||
961 | TCGv_reg zero, dest, ofs; | ||
962 | TCGv_tl addr; | ||
963 | |||
964 | diff --git a/target/i386/translate.c b/target/i386/translate.c | ||
965 | index XXXXXXX..XXXXXXX 100644 | ||
966 | --- a/target/i386/translate.c | ||
967 | +++ b/target/i386/translate.c | ||
968 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { | ||
969 | /* current insn context */ | ||
970 | int override; /* -1 if no override */ | ||
971 | int prefix; | ||
972 | - TCGMemOp aflag; | ||
973 | - TCGMemOp dflag; | ||
974 | + MemOp aflag; | ||
975 | + MemOp dflag; | ||
976 | target_ulong pc_start; | ||
977 | target_ulong pc; /* pc = eip + cs_base */ | ||
978 | /* current block context */ | ||
979 | @@ -XXX,XX +XXX,XX @@ static void gen_eob(DisasContext *s); | ||
980 | static void gen_jr(DisasContext *s, TCGv dest); | ||
981 | static void gen_jmp(DisasContext *s, target_ulong eip); | ||
982 | static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); | ||
983 | -static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d); | ||
984 | +static void gen_op(DisasContext *s1, int op, MemOp ot, int d); | ||
985 | |||
986 | /* i386 arith/logic operations */ | ||
987 | enum { | ||
988 | @@ -XXX,XX +XXX,XX @@ static inline bool byte_reg_is_xH(DisasContext *s, int reg) | ||
989 | } | 87 | } |
990 | 88 | ||
991 | /* Select the size of a push/pop operation. */ | 89 | +#else |
992 | -static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot) | 90 | +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, |
993 | +static inline MemOp mo_pushpop(DisasContext *s, MemOp ot) | 91 | + unsigned a_bits) |
994 | { | 92 | +{ |
995 | if (CODE64(s)) { | 93 | + unsigned a_mask = (1 << a_bits) - 1; |
996 | return ot == MO_16 ? MO_16 : MO_64; | 94 | + TCGLabelQemuLdst *label = new_ldst_label(s); |
997 | @@ -XXX,XX +XXX,XX @@ static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot) | 95 | + |
998 | } | 96 | + label->is_ld = is_ld; |
999 | 97 | + label->addrlo_reg = addr_reg; | |
1000 | /* Select the size of the stack pointer. */ | 98 | + |
1001 | -static inline TCGMemOp mo_stacksize(DisasContext *s) | 99 | + /* tst addr, #mask */ |
1002 | +static inline MemOp mo_stacksize(DisasContext *s) | 100 | + tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask); |
1003 | { | 101 | + |
1004 | return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; | 102 | + label->label_ptr[0] = s->code_ptr; |
1005 | } | 103 | + |
1006 | 104 | + /* b.ne slow_path */ | |
1007 | /* Select only size 64 else 32. Used for SSE operand sizes. */ | 105 | + tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); |
1008 | -static inline TCGMemOp mo_64_32(TCGMemOp ot) | 106 | + |
1009 | +static inline MemOp mo_64_32(MemOp ot) | 107 | + label->raddr = tcg_splitwx_to_rx(s->code_ptr); |
1010 | { | 108 | +} |
1011 | #ifdef TARGET_X86_64 | 109 | + |
1012 | return ot == MO_64 ? MO_64 : MO_32; | 110 | +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) |
1013 | @@ -XXX,XX +XXX,XX @@ static inline TCGMemOp mo_64_32(TCGMemOp ot) | 111 | +{ |
1014 | 112 | + if (!reloc_pc19(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { | |
1015 | /* Select size 8 if lsb of B is clear, else OT. Used for decoding | 113 | + return false; |
1016 | byte vs word opcodes. */ | 114 | + } |
1017 | -static inline TCGMemOp mo_b_d(int b, TCGMemOp ot) | 115 | + |
1018 | +static inline MemOp mo_b_d(int b, MemOp ot) | 116 | + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_X1, l->addrlo_reg); |
1019 | { | 117 | + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0); |
1020 | return b & 1 ? ot : MO_8; | 118 | + |
1021 | } | 119 | + /* "Tail call" to the helper, with the return address back inline. */ |
1022 | 120 | + tcg_out_adr(s, TCG_REG_LR, l->raddr); | |
1023 | /* Select size 8 if lsb of B is clear, else OT capped at 32. | 121 | + tcg_out_goto_long(s, (const void *)(l->is_ld ? helper_unaligned_ld |
1024 | Used for decoding operand size of port opcodes. */ | 122 | + : helper_unaligned_st)); |
1025 | -static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot) | 123 | + return true; |
1026 | +static inline MemOp mo_b_d32(int b, MemOp ot) | 124 | +} |
1027 | { | 125 | + |
1028 | return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; | 126 | +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) |
1029 | } | 127 | +{ |
1030 | 128 | + return tcg_out_fail_alignment(s, l); | |
1031 | -static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0) | 129 | +} |
1032 | +static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) | 130 | + |
1033 | { | 131 | +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) |
1034 | switch(ot) { | 132 | +{ |
1035 | case MO_8: | 133 | + return tcg_out_fail_alignment(s, l); |
1036 | @@ -XXX,XX +XXX,XX @@ static void gen_op_mov_reg_v(DisasContext *s, TCGMemOp ot, int reg, TCGv t0) | 134 | +} |
1037 | } | ||
1038 | |||
1039 | static inline | ||
1040 | -void gen_op_mov_v_reg(DisasContext *s, TCGMemOp ot, TCGv t0, int reg) | ||
1041 | +void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg) | ||
1042 | { | ||
1043 | if (ot == MO_8 && byte_reg_is_xH(s, reg)) { | ||
1044 | tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8); | ||
1045 | @@ -XXX,XX +XXX,XX @@ static inline void gen_op_jmp_v(TCGv dest) | ||
1046 | } | ||
1047 | |||
1048 | static inline | ||
1049 | -void gen_op_add_reg_im(DisasContext *s, TCGMemOp size, int reg, int32_t val) | ||
1050 | +void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) | ||
1051 | { | ||
1052 | tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val); | ||
1053 | gen_op_mov_reg_v(s, size, reg, s->tmp0); | ||
1054 | } | ||
1055 | |||
1056 | -static inline void gen_op_add_reg_T0(DisasContext *s, TCGMemOp size, int reg) | ||
1057 | +static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg) | ||
1058 | { | ||
1059 | tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0); | ||
1060 | gen_op_mov_reg_v(s, size, reg, s->tmp0); | ||
1061 | @@ -XXX,XX +XXX,XX @@ static inline void gen_jmp_im(DisasContext *s, target_ulong pc) | ||
1062 | /* Compute SEG:REG into A0. SEG is selected from the override segment | ||
1063 | (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to | ||
1064 | indicate no override. */ | ||
1065 | -static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0, | ||
1066 | +static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, | ||
1067 | int def_seg, int ovr_seg) | ||
1068 | { | ||
1069 | switch (aflag) { | ||
1070 | @@ -XXX,XX +XXX,XX @@ static inline void gen_string_movl_A0_EDI(DisasContext *s) | ||
1071 | gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1); | ||
1072 | } | ||
1073 | |||
1074 | -static inline void gen_op_movl_T0_Dshift(DisasContext *s, TCGMemOp ot) | ||
1075 | +static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot) | ||
1076 | { | ||
1077 | tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df)); | ||
1078 | tcg_gen_shli_tl(s->T0, s->T0, ot); | ||
1079 | }; | ||
1080 | |||
1081 | -static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign) | ||
1082 | +static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) | ||
1083 | { | ||
1084 | switch (size) { | ||
1085 | case MO_8: | ||
1086 | @@ -XXX,XX +XXX,XX @@ static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign) | ||
1087 | } | ||
1088 | } | ||
1089 | |||
1090 | -static void gen_extu(TCGMemOp ot, TCGv reg) | ||
1091 | +static void gen_extu(MemOp ot, TCGv reg) | ||
1092 | { | ||
1093 | gen_ext_tl(reg, reg, ot, false); | ||
1094 | } | ||
1095 | |||
1096 | -static void gen_exts(TCGMemOp ot, TCGv reg) | ||
1097 | +static void gen_exts(MemOp ot, TCGv reg) | ||
1098 | { | ||
1099 | gen_ext_tl(reg, reg, ot, true); | ||
1100 | } | ||
1101 | |||
1102 | static inline | ||
1103 | -void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1) | ||
1104 | +void gen_op_jnz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) | ||
1105 | { | ||
1106 | tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); | ||
1107 | gen_extu(size, s->tmp0); | ||
1108 | @@ -XXX,XX +XXX,XX @@ void gen_op_jnz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1) | ||
1109 | } | ||
1110 | |||
1111 | static inline | ||
1112 | -void gen_op_jz_ecx(DisasContext *s, TCGMemOp size, TCGLabel *label1) | ||
1113 | +void gen_op_jz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) | ||
1114 | { | ||
1115 | tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); | ||
1116 | gen_extu(size, s->tmp0); | ||
1117 | tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1); | ||
1118 | } | ||
1119 | |||
1120 | -static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n) | ||
1121 | +static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n) | ||
1122 | { | ||
1123 | switch (ot) { | ||
1124 | case MO_8: | ||
1125 | @@ -XXX,XX +XXX,XX @@ static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n) | ||
1126 | } | ||
1127 | } | ||
1128 | |||
1129 | -static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n) | ||
1130 | +static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n) | ||
1131 | { | ||
1132 | switch (ot) { | ||
1133 | case MO_8: | ||
1134 | @@ -XXX,XX +XXX,XX @@ static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n) | ||
1135 | } | ||
1136 | } | ||
1137 | |||
1138 | -static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, | ||
1139 | +static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip, | ||
1140 | uint32_t svm_flags) | ||
1141 | { | ||
1142 | target_ulong next_eip; | ||
1143 | @@ -XXX,XX +XXX,XX @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, | ||
1144 | } | ||
1145 | } | ||
1146 | |||
1147 | -static inline void gen_movs(DisasContext *s, TCGMemOp ot) | ||
1148 | +static inline void gen_movs(DisasContext *s, MemOp ot) | ||
1149 | { | ||
1150 | gen_string_movl_A0_ESI(s); | ||
1151 | gen_op_ld_v(s, ot, s->T0, s->A0); | ||
1152 | @@ -XXX,XX +XXX,XX @@ static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) | ||
1153 | return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; | ||
1154 | default: | ||
1155 | { | ||
1156 | - TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; | ||
1157 | + MemOp size = (s->cc_op - CC_OP_ADDB) & 3; | ||
1158 | TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true); | ||
1159 | return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 }; | ||
1160 | } | ||
1161 | @@ -XXX,XX +XXX,XX @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) | ||
1162 | .mask = -1 }; | ||
1163 | default: | ||
1164 | { | ||
1165 | - TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; | ||
1166 | + MemOp size = (s->cc_op - CC_OP_ADDB) & 3; | ||
1167 | TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false); | ||
1168 | return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; | ||
1169 | } | ||
1170 | @@ -XXX,XX +XXX,XX @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) | ||
1171 | static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) | ||
1172 | { | ||
1173 | int inv, jcc_op, cond; | ||
1174 | - TCGMemOp size; | ||
1175 | + MemOp size; | ||
1176 | CCPrepare cc; | ||
1177 | TCGv t0; | ||
1178 | |||
1179 | @@ -XXX,XX +XXX,XX @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) | ||
1180 | return l2; | ||
1181 | } | ||
1182 | |||
1183 | -static inline void gen_stos(DisasContext *s, TCGMemOp ot) | ||
1184 | +static inline void gen_stos(DisasContext *s, MemOp ot) | ||
1185 | { | ||
1186 | gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); | ||
1187 | gen_string_movl_A0_EDI(s); | ||
1188 | @@ -XXX,XX +XXX,XX @@ static inline void gen_stos(DisasContext *s, TCGMemOp ot) | ||
1189 | gen_op_add_reg_T0(s, s->aflag, R_EDI); | ||
1190 | } | ||
1191 | |||
1192 | -static inline void gen_lods(DisasContext *s, TCGMemOp ot) | ||
1193 | +static inline void gen_lods(DisasContext *s, MemOp ot) | ||
1194 | { | ||
1195 | gen_string_movl_A0_ESI(s); | ||
1196 | gen_op_ld_v(s, ot, s->T0, s->A0); | ||
1197 | @@ -XXX,XX +XXX,XX @@ static inline void gen_lods(DisasContext *s, TCGMemOp ot) | ||
1198 | gen_op_add_reg_T0(s, s->aflag, R_ESI); | ||
1199 | } | ||
1200 | |||
1201 | -static inline void gen_scas(DisasContext *s, TCGMemOp ot) | ||
1202 | +static inline void gen_scas(DisasContext *s, MemOp ot) | ||
1203 | { | ||
1204 | gen_string_movl_A0_EDI(s); | ||
1205 | gen_op_ld_v(s, ot, s->T1, s->A0); | ||
1206 | @@ -XXX,XX +XXX,XX @@ static inline void gen_scas(DisasContext *s, TCGMemOp ot) | ||
1207 | gen_op_add_reg_T0(s, s->aflag, R_EDI); | ||
1208 | } | ||
1209 | |||
1210 | -static inline void gen_cmps(DisasContext *s, TCGMemOp ot) | ||
1211 | +static inline void gen_cmps(DisasContext *s, MemOp ot) | ||
1212 | { | ||
1213 | gen_string_movl_A0_EDI(s); | ||
1214 | gen_op_ld_v(s, ot, s->T1, s->A0); | ||
1215 | @@ -XXX,XX +XXX,XX @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) | ||
1216 | } | ||
1217 | |||
1218 | |||
1219 | -static inline void gen_ins(DisasContext *s, TCGMemOp ot) | ||
1220 | +static inline void gen_ins(DisasContext *s, MemOp ot) | ||
1221 | { | ||
1222 | if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { | ||
1223 | gen_io_start(); | ||
1224 | @@ -XXX,XX +XXX,XX @@ static inline void gen_ins(DisasContext *s, TCGMemOp ot) | ||
1225 | } | ||
1226 | } | ||
1227 | |||
1228 | -static inline void gen_outs(DisasContext *s, TCGMemOp ot) | ||
1229 | +static inline void gen_outs(DisasContext *s, MemOp ot) | ||
1230 | { | ||
1231 | if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { | ||
1232 | gen_io_start(); | ||
1233 | @@ -XXX,XX +XXX,XX @@ static inline void gen_outs(DisasContext *s, TCGMemOp ot) | ||
1234 | /* same method as Valgrind : we generate jumps to current or next | ||
1235 | instruction */ | ||
1236 | #define GEN_REPZ(op) \ | ||
1237 | -static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ | ||
1238 | +static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ | ||
1239 | target_ulong cur_eip, target_ulong next_eip) \ | ||
1240 | { \ | ||
1241 | TCGLabel *l2; \ | ||
1242 | @@ -XXX,XX +XXX,XX @@ static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ | ||
1243 | } | ||
1244 | |||
1245 | #define GEN_REPZ2(op) \ | ||
1246 | -static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ | ||
1247 | +static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ | ||
1248 | target_ulong cur_eip, \ | ||
1249 | target_ulong next_eip, \ | ||
1250 | int nz) \ | ||
1251 | @@ -XXX,XX +XXX,XX @@ static void gen_illegal_opcode(DisasContext *s) | ||
1252 | } | ||
1253 | |||
1254 | /* if d == OR_TMP0, it means memory operand (address in A0) */ | ||
1255 | -static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) | ||
1256 | +static void gen_op(DisasContext *s1, int op, MemOp ot, int d) | ||
1257 | { | ||
1258 | if (d != OR_TMP0) { | ||
1259 | if (s1->prefix & PREFIX_LOCK) { | ||
1260 | @@ -XXX,XX +XXX,XX @@ static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d) | ||
1261 | } | ||
1262 | |||
1263 | /* if d == OR_TMP0, it means memory operand (address in A0) */ | ||
1264 | -static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) | ||
1265 | +static void gen_inc(DisasContext *s1, MemOp ot, int d, int c) | ||
1266 | { | ||
1267 | if (s1->prefix & PREFIX_LOCK) { | ||
1268 | if (d != OR_TMP0) { | ||
1269 | @@ -XXX,XX +XXX,XX @@ static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c) | ||
1270 | set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot); | ||
1271 | } | ||
1272 | |||
1273 | -static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, | ||
1274 | +static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, | ||
1275 | TCGv shm1, TCGv count, bool is_right) | ||
1276 | { | ||
1277 | TCGv_i32 z32, s32, oldop; | ||
1278 | @@ -XXX,XX +XXX,XX @@ static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, | ||
1279 | set_cc_op(s, CC_OP_DYNAMIC); | ||
1280 | } | ||
1281 | |||
1282 | -static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1283 | +static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1, | ||
1284 | int is_right, int is_arith) | ||
1285 | { | ||
1286 | target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); | ||
1287 | @@ -XXX,XX +XXX,XX @@ static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1288 | gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right); | ||
1289 | } | ||
1290 | |||
1291 | -static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, | ||
1292 | +static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2, | ||
1293 | int is_right, int is_arith) | ||
1294 | { | ||
1295 | int mask = (ot == MO_64 ? 0x3f : 0x1f); | ||
1296 | @@ -XXX,XX +XXX,XX @@ static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, | ||
1297 | } | ||
1298 | } | ||
1299 | |||
1300 | -static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) | ||
1301 | +static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) | ||
1302 | { | ||
1303 | target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); | ||
1304 | TCGv_i32 t0, t1; | ||
1305 | @@ -XXX,XX +XXX,XX @@ static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) | ||
1306 | set_cc_op(s, CC_OP_DYNAMIC); | ||
1307 | } | ||
1308 | |||
1309 | -static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, | ||
1310 | +static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2, | ||
1311 | int is_right) | ||
1312 | { | ||
1313 | int mask = (ot == MO_64 ? 0x3f : 0x1f); | ||
1314 | @@ -XXX,XX +XXX,XX @@ static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, | ||
1315 | } | ||
1316 | |||
1317 | /* XXX: add faster immediate = 1 case */ | ||
1318 | -static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1319 | +static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, | ||
1320 | int is_right) | ||
1321 | { | ||
1322 | gen_compute_eflags(s); | ||
1323 | @@ -XXX,XX +XXX,XX @@ static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1324 | } | ||
1325 | |||
1326 | /* XXX: add faster immediate case */ | ||
1327 | -static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1328 | +static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1, | ||
1329 | bool is_right, TCGv count_in) | ||
1330 | { | ||
1331 | target_ulong mask = (ot == MO_64 ? 63 : 31); | ||
1332 | @@ -XXX,XX +XXX,XX @@ static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, | ||
1333 | tcg_temp_free(count); | ||
1334 | } | ||
1335 | |||
1336 | -static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) | ||
1337 | +static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s) | ||
1338 | { | ||
1339 | if (s != OR_TMP1) | ||
1340 | gen_op_mov_v_reg(s1, ot, s1->T1, s); | ||
1341 | @@ -XXX,XX +XXX,XX @@ static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) | ||
1342 | } | ||
1343 | } | ||
1344 | |||
1345 | -static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c) | ||
1346 | +static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c) | ||
1347 | { | ||
1348 | switch(op) { | ||
1349 | case OP_ROL: | ||
1350 | @@ -XXX,XX +XXX,XX @@ static void gen_add_A0_ds_seg(DisasContext *s) | ||
1351 | /* generate modrm memory load or store of 'reg'. TMP0 is used if reg == | ||
1352 | OR_TMP0 */ | ||
1353 | static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, | ||
1354 | - TCGMemOp ot, int reg, int is_store) | ||
1355 | + MemOp ot, int reg, int is_store) | ||
1356 | { | ||
1357 | int mod, rm; | ||
1358 | |||
1359 | @@ -XXX,XX +XXX,XX @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, | ||
1360 | } | ||
1361 | } | ||
1362 | |||
1363 | -static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) | ||
1364 | +static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) | ||
1365 | { | ||
1366 | uint32_t ret; | ||
1367 | |||
1368 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) | ||
1369 | return ret; | ||
1370 | } | ||
1371 | |||
1372 | -static inline int insn_const_size(TCGMemOp ot) | ||
1373 | +static inline int insn_const_size(MemOp ot) | ||
1374 | { | ||
1375 | if (ot <= MO_32) { | ||
1376 | return 1 << ot; | ||
1377 | @@ -XXX,XX +XXX,XX @@ static inline void gen_jcc(DisasContext *s, int b, | ||
1378 | } | ||
1379 | } | ||
1380 | |||
1381 | -static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b, | ||
1382 | +static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, | ||
1383 | int modrm, int reg) | ||
1384 | { | ||
1385 | CCPrepare cc; | ||
1386 | @@ -XXX,XX +XXX,XX @@ static inline void gen_stack_update(DisasContext *s, int addend) | ||
1387 | /* Generate a push. It depends on ss32, addseg and dflag. */ | ||
1388 | static void gen_push_v(DisasContext *s, TCGv val) | ||
1389 | { | ||
1390 | - TCGMemOp d_ot = mo_pushpop(s, s->dflag); | ||
1391 | - TCGMemOp a_ot = mo_stacksize(s); | ||
1392 | + MemOp d_ot = mo_pushpop(s, s->dflag); | ||
1393 | + MemOp a_ot = mo_stacksize(s); | ||
1394 | int size = 1 << d_ot; | ||
1395 | TCGv new_esp = s->A0; | ||
1396 | |||
1397 | @@ -XXX,XX +XXX,XX @@ static void gen_push_v(DisasContext *s, TCGv val) | ||
1398 | } | ||
1399 | |||
1400 | /* two step pop is necessary for precise exceptions */ | ||
1401 | -static TCGMemOp gen_pop_T0(DisasContext *s) | ||
1402 | +static MemOp gen_pop_T0(DisasContext *s) | ||
1403 | { | ||
1404 | - TCGMemOp d_ot = mo_pushpop(s, s->dflag); | ||
1405 | + MemOp d_ot = mo_pushpop(s, s->dflag); | ||
1406 | |||
1407 | gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1); | ||
1408 | gen_op_ld_v(s, d_ot, s->T0, s->A0); | ||
1409 | @@ -XXX,XX +XXX,XX @@ static TCGMemOp gen_pop_T0(DisasContext *s) | ||
1410 | return d_ot; | ||
1411 | } | ||
1412 | |||
1413 | -static inline void gen_pop_update(DisasContext *s, TCGMemOp ot) | ||
1414 | +static inline void gen_pop_update(DisasContext *s, MemOp ot) | ||
1415 | { | ||
1416 | gen_stack_update(s, 1 << ot); | ||
1417 | } | ||
1418 | @@ -XXX,XX +XXX,XX @@ static inline void gen_stack_A0(DisasContext *s) | ||
1419 | |||
1420 | static void gen_pusha(DisasContext *s) | ||
1421 | { | ||
1422 | - TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16; | ||
1423 | - TCGMemOp d_ot = s->dflag; | ||
1424 | + MemOp s_ot = s->ss32 ? MO_32 : MO_16; | ||
1425 | + MemOp d_ot = s->dflag; | ||
1426 | int size = 1 << d_ot; | ||
1427 | int i; | ||
1428 | |||
1429 | @@ -XXX,XX +XXX,XX @@ static void gen_pusha(DisasContext *s) | ||
1430 | |||
1431 | static void gen_popa(DisasContext *s) | ||
1432 | { | ||
1433 | - TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16; | ||
1434 | - TCGMemOp d_ot = s->dflag; | ||
1435 | + MemOp s_ot = s->ss32 ? MO_32 : MO_16; | ||
1436 | + MemOp d_ot = s->dflag; | ||
1437 | int size = 1 << d_ot; | ||
1438 | int i; | ||
1439 | |||
1440 | @@ -XXX,XX +XXX,XX @@ static void gen_popa(DisasContext *s) | ||
1441 | |||
1442 | static void gen_enter(DisasContext *s, int esp_addend, int level) | ||
1443 | { | ||
1444 | - TCGMemOp d_ot = mo_pushpop(s, s->dflag); | ||
1445 | - TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; | ||
1446 | + MemOp d_ot = mo_pushpop(s, s->dflag); | ||
1447 | + MemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; | ||
1448 | int size = 1 << d_ot; | ||
1449 | |||
1450 | /* Push BP; compute FrameTemp into T1. */ | ||
1451 | @@ -XXX,XX +XXX,XX @@ static void gen_enter(DisasContext *s, int esp_addend, int level) | ||
1452 | |||
1453 | static void gen_leave(DisasContext *s) | ||
1454 | { | ||
1455 | - TCGMemOp d_ot = mo_pushpop(s, s->dflag); | ||
1456 | - TCGMemOp a_ot = mo_stacksize(s); | ||
1457 | + MemOp d_ot = mo_pushpop(s, s->dflag); | ||
1458 | + MemOp a_ot = mo_stacksize(s); | ||
1459 | |||
1460 | gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1); | ||
1461 | gen_op_ld_v(s, d_ot, s->T0, s->A0); | ||
1462 | @@ -XXX,XX +XXX,XX @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, | ||
1463 | SSEFunc_0_eppi sse_fn_eppi; | ||
1464 | SSEFunc_0_ppi sse_fn_ppi; | ||
1465 | SSEFunc_0_eppt sse_fn_eppt; | ||
1466 | - TCGMemOp ot; | ||
1467 | + MemOp ot; | ||
1468 | |||
1469 | b &= 0xff; | ||
1470 | if (s->prefix & PREFIX_DATA) | ||
1471 | @@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) | ||
1472 | CPUX86State *env = cpu->env_ptr; | ||
1473 | int b, prefixes; | ||
1474 | int shift; | ||
1475 | - TCGMemOp ot, aflag, dflag; | ||
1476 | + MemOp ot, aflag, dflag; | ||
1477 | int modrm, reg, rm, mod, op, opreg, val; | ||
1478 | target_ulong next_eip, tval; | ||
1479 | int rex_w, rex_r; | ||
1480 | @@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) | ||
1481 | case 0x1be: /* movsbS Gv, Eb */ | ||
1482 | case 0x1bf: /* movswS Gv, Eb */ | ||
1483 | { | ||
1484 | - TCGMemOp d_ot; | ||
1485 | - TCGMemOp s_ot; | ||
1486 | + MemOp d_ot; | ||
1487 | + MemOp s_ot; | ||
1488 | |||
1489 | /* d_ot is the size of destination */ | ||
1490 | d_ot = dflag; | ||
1491 | diff --git a/target/m68k/translate.c b/target/m68k/translate.c | ||
1492 | index XXXXXXX..XXXXXXX 100644 | ||
1493 | --- a/target/m68k/translate.c | ||
1494 | +++ b/target/m68k/translate.c | ||
1495 | @@ -XXX,XX +XXX,XX @@ DISAS_INSN(cas) | ||
1496 | uint16_t ext; | ||
1497 | TCGv load; | ||
1498 | TCGv cmp; | ||
1499 | - TCGMemOp opc; | ||
1500 | + MemOp opc; | ||
1501 | |||
1502 | switch ((insn >> 9) & 3) { | ||
1503 | case 1: | ||
1504 | diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c | ||
1505 | index XXXXXXX..XXXXXXX 100644 | ||
1506 | --- a/target/microblaze/translate.c | ||
1507 | +++ b/target/microblaze/translate.c | ||
1508 | @@ -XXX,XX +XXX,XX @@ static void dec_load(DisasContext *dc) | ||
1509 | unsigned int size; | ||
1510 | bool rev = false, ex = false, ea = false; | ||
1511 | int mem_index = cpu_mmu_index(&dc->cpu->env, false); | ||
1512 | - TCGMemOp mop; | ||
1513 | + MemOp mop; | ||
1514 | |||
1515 | mop = dc->opcode & 3; | ||
1516 | size = 1 << mop; | ||
1517 | @@ -XXX,XX +XXX,XX @@ static void dec_store(DisasContext *dc) | ||
1518 | unsigned int size; | ||
1519 | bool rev = false, ex = false, ea = false; | ||
1520 | int mem_index = cpu_mmu_index(&dc->cpu->env, false); | ||
1521 | - TCGMemOp mop; | ||
1522 | + MemOp mop; | ||
1523 | |||
1524 | mop = dc->opcode & 3; | ||
1525 | size = 1 << mop; | ||
1526 | diff --git a/target/mips/translate.c b/target/mips/translate.c | ||
1527 | index XXXXXXX..XXXXXXX 100644 | ||
1528 | --- a/target/mips/translate.c | ||
1529 | +++ b/target/mips/translate.c | ||
1530 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { | ||
1531 | int32_t CP0_Config5; | ||
1532 | /* Routine used to access memory */ | ||
1533 | int mem_idx; | ||
1534 | - TCGMemOp default_tcg_memop_mask; | ||
1535 | + MemOp default_tcg_memop_mask; | ||
1536 | uint32_t hflags, saved_hflags; | ||
1537 | target_ulong btarget; | ||
1538 | bool ulri; | ||
1539 | @@ -XXX,XX +XXX,XX @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, | ||
1540 | |||
1541 | /* Store conditional */ | ||
1542 | static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, | ||
1543 | - TCGMemOp tcg_mo, bool eva) | ||
1544 | + MemOp tcg_mo, bool eva) | ||
1545 | { | ||
1546 | TCGv addr, t0, val; | ||
1547 | TCGLabel *l1 = gen_new_label(); | ||
1548 | @@ -XXX,XX +XXX,XX @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg) | ||
1549 | } | ||
1550 | |||
1551 | static inline void gen_r6_ld(target_long addr, int reg, int memidx, | ||
1552 | - TCGMemOp memop) | ||
1553 | + MemOp memop) | ||
1554 | { | ||
1555 | TCGv t0 = tcg_const_tl(addr); | ||
1556 | tcg_gen_qemu_ld_tl(t0, t0, memidx, memop); | ||
1557 | @@ -XXX,XX +XXX,XX @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) | ||
1558 | extract32(ctx->opcode, 0, 8); | ||
1559 | TCGv va = tcg_temp_new(); | ||
1560 | TCGv t1 = tcg_temp_new(); | ||
1561 | - TCGMemOp memop = (extract32(ctx->opcode, 8, 3)) == | ||
1562 | + MemOp memop = (extract32(ctx->opcode, 8, 3)) == | ||
1563 | NM_P_LS_UAWM ? MO_UNALN : 0; | ||
1564 | |||
1565 | count = (count == 0) ? 8 : count; | ||
1566 | diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c | ||
1567 | index XXXXXXX..XXXXXXX 100644 | ||
1568 | --- a/target/openrisc/translate.c | ||
1569 | +++ b/target/openrisc/translate.c | ||
1570 | @@ -XXX,XX +XXX,XX @@ static bool trans_l_lwa(DisasContext *dc, arg_load *a) | ||
1571 | return true; | ||
1572 | } | ||
1573 | |||
1574 | -static void do_load(DisasContext *dc, arg_load *a, TCGMemOp mop) | ||
1575 | +static void do_load(DisasContext *dc, arg_load *a, MemOp mop) | ||
1576 | { | ||
1577 | TCGv ea; | ||
1578 | |||
1579 | @@ -XXX,XX +XXX,XX @@ static bool trans_l_swa(DisasContext *dc, arg_store *a) | ||
1580 | return true; | ||
1581 | } | ||
1582 | |||
1583 | -static void do_store(DisasContext *dc, arg_store *a, TCGMemOp mop) | ||
1584 | +static void do_store(DisasContext *dc, arg_store *a, MemOp mop) | ||
1585 | { | ||
1586 | TCGv t0 = tcg_temp_new(); | ||
1587 | tcg_gen_addi_tl(t0, cpu_R[a->a], a->i); | ||
1588 | diff --git a/target/ppc/translate.c b/target/ppc/translate.c | ||
1589 | index XXXXXXX..XXXXXXX 100644 | ||
1590 | --- a/target/ppc/translate.c | ||
1591 | +++ b/target/ppc/translate.c | ||
1592 | @@ -XXX,XX +XXX,XX @@ struct DisasContext { | ||
1593 | int mem_idx; | ||
1594 | int access_type; | ||
1595 | /* Translation flags */ | ||
1596 | - TCGMemOp default_tcg_memop_mask; | ||
1597 | + MemOp default_tcg_memop_mask; | ||
1598 | #if defined(TARGET_PPC64) | ||
1599 | bool sf_mode; | ||
1600 | bool has_cfar; | ||
1601 | @@ -XXX,XX +XXX,XX @@ static void gen_isync(DisasContext *ctx) | ||
1602 | |||
1603 | #define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE)) | ||
1604 | |||
1605 | -static void gen_load_locked(DisasContext *ctx, TCGMemOp memop) | ||
1606 | +static void gen_load_locked(DisasContext *ctx, MemOp memop) | ||
1607 | { | ||
1608 | TCGv gpr = cpu_gpr[rD(ctx->opcode)]; | ||
1609 | TCGv t0 = tcg_temp_new(); | ||
1610 | @@ -XXX,XX +XXX,XX @@ LARX(lbarx, DEF_MEMOP(MO_UB)) | ||
1611 | LARX(lharx, DEF_MEMOP(MO_UW)) | ||
1612 | LARX(lwarx, DEF_MEMOP(MO_UL)) | ||
1613 | |||
1614 | -static void gen_fetch_inc_conditional(DisasContext *ctx, TCGMemOp memop, | ||
1615 | +static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, | ||
1616 | TCGv EA, TCGCond cond, int addend) | ||
1617 | { | ||
1618 | TCGv t = tcg_temp_new(); | ||
1619 | @@ -XXX,XX +XXX,XX @@ static void gen_fetch_inc_conditional(DisasContext *ctx, TCGMemOp memop, | ||
1620 | tcg_temp_free(u); | ||
1621 | } | ||
1622 | |||
1623 | -static void gen_ld_atomic(DisasContext *ctx, TCGMemOp memop) | ||
1624 | +static void gen_ld_atomic(DisasContext *ctx, MemOp memop) | ||
1625 | { | ||
1626 | uint32_t gpr_FC = FC(ctx->opcode); | ||
1627 | TCGv EA = tcg_temp_new(); | ||
1628 | @@ -XXX,XX +XXX,XX @@ static void gen_ldat(DisasContext *ctx) | ||
1629 | } | ||
1630 | #endif | ||
1631 | |||
1632 | -static void gen_st_atomic(DisasContext *ctx, TCGMemOp memop) | ||
1633 | +static void gen_st_atomic(DisasContext *ctx, MemOp memop) | ||
1634 | { | ||
1635 | uint32_t gpr_FC = FC(ctx->opcode); | ||
1636 | TCGv EA = tcg_temp_new(); | ||
1637 | @@ -XXX,XX +XXX,XX @@ static void gen_stdat(DisasContext *ctx) | ||
1638 | } | ||
1639 | #endif | ||
1640 | |||
1641 | -static void gen_conditional_store(DisasContext *ctx, TCGMemOp memop) | ||
1642 | +static void gen_conditional_store(DisasContext *ctx, MemOp memop) | ||
1643 | { | ||
1644 | TCGLabel *l1 = gen_new_label(); | ||
1645 | TCGLabel *l2 = gen_new_label(); | ||
1646 | diff --git a/target/riscv/insn_trans/trans_rva.inc.c b/target/riscv/insn_trans/trans_rva.inc.c | ||
1647 | index XXXXXXX..XXXXXXX 100644 | ||
1648 | --- a/target/riscv/insn_trans/trans_rva.inc.c | ||
1649 | +++ b/target/riscv/insn_trans/trans_rva.inc.c | ||
1650 | @@ -XXX,XX +XXX,XX @@ | ||
1651 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
1652 | */ | ||
1653 | |||
1654 | -static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) | ||
1655 | +static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) | ||
1656 | { | ||
1657 | TCGv src1 = tcg_temp_new(); | ||
1658 | /* Put addr in load_res, data in load_val. */ | ||
1659 | @@ -XXX,XX +XXX,XX @@ static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) | ||
1660 | return true; | ||
1661 | } | ||
1662 | |||
1663 | -static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) | ||
1664 | +static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) | ||
1665 | { | ||
1666 | TCGv src1 = tcg_temp_new(); | ||
1667 | TCGv src2 = tcg_temp_new(); | ||
1668 | @@ -XXX,XX +XXX,XX @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) | ||
1669 | } | ||
1670 | |||
1671 | static bool gen_amo(DisasContext *ctx, arg_atomic *a, | ||
1672 | - void(*func)(TCGv, TCGv, TCGv, TCGArg, TCGMemOp), | ||
1673 | - TCGMemOp mop) | ||
1674 | + void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp), | ||
1675 | + MemOp mop) | ||
1676 | { | ||
1677 | TCGv src1 = tcg_temp_new(); | ||
1678 | TCGv src2 = tcg_temp_new(); | ||
1679 | diff --git a/target/riscv/insn_trans/trans_rvi.inc.c b/target/riscv/insn_trans/trans_rvi.inc.c | ||
1680 | index XXXXXXX..XXXXXXX 100644 | ||
1681 | --- a/target/riscv/insn_trans/trans_rvi.inc.c | ||
1682 | +++ b/target/riscv/insn_trans/trans_rvi.inc.c | ||
1683 | @@ -XXX,XX +XXX,XX @@ static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a) | ||
1684 | return gen_branch(ctx, a, TCG_COND_GEU); | ||
1685 | } | ||
1686 | |||
1687 | -static bool gen_load(DisasContext *ctx, arg_lb *a, TCGMemOp memop) | ||
1688 | +static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) | ||
1689 | { | ||
1690 | TCGv t0 = tcg_temp_new(); | ||
1691 | TCGv t1 = tcg_temp_new(); | ||
1692 | @@ -XXX,XX +XXX,XX @@ static bool trans_lhu(DisasContext *ctx, arg_lhu *a) | ||
1693 | return gen_load(ctx, a, MO_TEUW); | ||
1694 | } | ||
1695 | |||
1696 | -static bool gen_store(DisasContext *ctx, arg_sb *a, TCGMemOp memop) | ||
1697 | +static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) | ||
1698 | { | ||
1699 | TCGv t0 = tcg_temp_new(); | ||
1700 | TCGv dat = tcg_temp_new(); | ||
1701 | diff --git a/target/s390x/translate.c b/target/s390x/translate.c | ||
1702 | index XXXXXXX..XXXXXXX 100644 | ||
1703 | --- a/target/s390x/translate.c | ||
1704 | +++ b/target/s390x/translate.c | ||
1705 | @@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_offset(uint8_t reg) | ||
1706 | return offsetof(CPUS390XState, vregs[reg][0]); | ||
1707 | } | ||
1708 | |||
1709 | -static inline int vec_reg_offset(uint8_t reg, uint8_t enr, TCGMemOp es) | ||
1710 | +static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es) | ||
1711 | { | ||
1712 | /* Convert element size (es) - e.g. MO_8 - to bytes */ | ||
1713 | const uint8_t bytes = 1 << es; | ||
1714 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_csst(DisasContext *s, DisasOps *o) | ||
1715 | #ifndef CONFIG_USER_ONLY | ||
1716 | static DisasJumpType op_csp(DisasContext *s, DisasOps *o) | ||
1717 | { | ||
1718 | - TCGMemOp mop = s->insn->data; | ||
1719 | + MemOp mop = s->insn->data; | ||
1720 | TCGv_i64 addr, old, cc; | ||
1721 | TCGLabel *lab = gen_new_label(); | ||
1722 | |||
1723 | @@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) | ||
1724 | static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) | ||
1725 | { | ||
1726 | TCGv_i64 a1, a2; | ||
1727 | - TCGMemOp mop = s->insn->data; | ||
1728 | + MemOp mop = s->insn->data; | ||
1729 | |||
1730 | /* In a parallel context, stop the world and single step. */ | ||
1731 | if (tb_cflags(s->base.tb) & CF_PARALLEL) { | ||
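
"Stop the world and single step" refers to QEMU's exclusive-execution fallback: when a translation block running under CF_PARALLEL hits an operation it cannot perform atomically (here, LPD's 128-bit load), it exits with a special exception and the instruction is re-executed while all other vCPUs are paused. A toy model of the control flow only; EXCP_ATOMIC and the exclusive-section comments stand in for QEMU's cpu_exec machinery:

    #include <stdio.h>

    enum { EXCP_NONE, EXCP_ATOMIC };

    static int run_tb(int parallel)
    {
        if (parallel) {
            return EXCP_ATOMIC;   /* bail out: can't be done atomically */
        }
        /* ... single-threaded path performs the load directly ... */
        return EXCP_NONE;
    }

    int main(void)
    {
        if (run_tb(1) == EXCP_ATOMIC) {
            /* start_exclusive(): pause the other vcpus */
            printf("re-run one instruction exclusively\n");
            /* end_exclusive(): resume them */
        }
        return 0;
    }
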
1732 | diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c | ||
1733 | index XXXXXXX..XXXXXXX 100644 | ||
1734 | --- a/target/s390x/translate_vx.inc.c | ||
1735 | +++ b/target/s390x/translate_vx.inc.c | ||
1736 | @@ -XXX,XX +XXX,XX @@ | ||
1737 | #define FPF_LONG 3 | ||
1738 | #define FPF_EXT 4 | ||
1739 | |||
1740 | -static inline bool valid_vec_element(uint8_t enr, TCGMemOp es) | ||
1741 | +static inline bool valid_vec_element(uint8_t enr, MemOp es) | ||
1742 | { | ||
1743 | return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1)); | ||
1744 | } | ||
1745 | |||
1746 | static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr, | ||
1747 | - TCGMemOp memop) | ||
1748 | + MemOp memop) | ||
1749 | { | ||
1750 | const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); | ||
1751 | |||
1752 | @@ -XXX,XX +XXX,XX @@ static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr, | ||
1753 | } | ||
1754 | |||
1755 | static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr, | ||
1756 | - TCGMemOp memop) | ||
1757 | + MemOp memop) | ||
1758 | { | ||
1759 | const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); | ||
1760 | |||
1761 | @@ -XXX,XX +XXX,XX @@ static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr, | ||
1762 | } | ||
1763 | |||
1764 | static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr, | ||
1765 | - TCGMemOp memop) | ||
1766 | + MemOp memop) | ||
1767 | { | ||
1768 | const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); | ||
1769 | |||
1770 | @@ -XXX,XX +XXX,XX @@ static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr, | ||
1771 | } | ||
1772 | |||
1773 | static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr, | ||
1774 | - TCGMemOp memop) | ||
1775 | + MemOp memop) | ||
1776 | { | ||
1777 | const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); | ||
1778 | |||
1779 | diff --git a/target/sparc/translate.c b/target/sparc/translate.c | ||
1780 | index XXXXXXX..XXXXXXX 100644 | ||
1781 | --- a/target/sparc/translate.c | ||
1782 | +++ b/target/sparc/translate.c | ||
1783 | @@ -XXX,XX +XXX,XX @@ static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs, | ||
1784 | } | ||
1785 | |||
1786 | static void gen_swap(DisasContext *dc, TCGv dst, TCGv src, | ||
1787 | - TCGv addr, int mmu_idx, TCGMemOp memop) | ||
1788 | + TCGv addr, int mmu_idx, MemOp memop) | ||
1789 | { | ||
1790 | gen_address_mask(dc, addr); | ||
1791 | tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop); | ||
1792 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
1793 | ASIType type; | ||
1794 | int asi; | ||
1795 | int mem_idx; | ||
1796 | - TCGMemOp memop; | ||
1797 | + MemOp memop; | ||
1798 | } DisasASI; | ||
1799 | |||
1800 | -static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop) | ||
1801 | +static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) | ||
1802 | { | ||
1803 | int asi = GET_FIELD(insn, 19, 26); | ||
1804 | ASIType type = GET_ASI_HELPER; | ||
1805 | @@ -XXX,XX +XXX,XX @@ static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop) | ||
1806 | } | ||
1807 | |||
1808 | static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, | ||
1809 | - int insn, TCGMemOp memop) | ||
1810 | + int insn, MemOp memop) | ||
1811 | { | ||
1812 | DisasASI da = get_asi(dc, insn, memop); | ||
1813 | |||
1814 | @@ -XXX,XX +XXX,XX @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, | ||
1815 | } | ||
1816 | |||
1817 | static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, | ||
1818 | - int insn, TCGMemOp memop) | ||
1819 | + int insn, MemOp memop) | ||
1820 | { | ||
1821 | DisasASI da = get_asi(dc, insn, memop); | ||
1822 | |||
1823 | @@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, | ||
1824 | case GET_ASI_BLOCK: | ||
1825 | /* Valid for lddfa on aligned registers only. */ | ||
1826 | if (size == 8 && (rd & 7) == 0) { | ||
1827 | - TCGMemOp memop; | ||
1828 | + MemOp memop; | ||
1829 | TCGv eight; | ||
1830 | int i; | ||
1831 | |||
1832 | @@ -XXX,XX +XXX,XX @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, | ||
1833 | case GET_ASI_BLOCK: | ||
1834 | /* Valid for stdfa on aligned registers only. */ | ||
1835 | if (size == 8 && (rd & 7) == 0) { | ||
1836 | - TCGMemOp memop; | ||
1837 | + MemOp memop; | ||
1838 | TCGv eight; | ||
1839 | int i; | ||
1840 | |||
1841 | diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c | ||
1842 | index XXXXXXX..XXXXXXX 100644 | ||
1843 | --- a/target/tilegx/translate.c | ||
1844 | +++ b/target/tilegx/translate.c | ||
1845 | @@ -XXX,XX +XXX,XX @@ static void gen_cmul2(TCGv tdest, TCGv tsrca, TCGv tsrcb, int sh, int rd) | ||
1846 | } | ||
1847 | |||
1848 | static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca, | ||
1849 | - unsigned srcb, TCGMemOp memop, const char *name) | ||
1850 | + unsigned srcb, MemOp memop, const char *name) | ||
1851 | { | ||
1852 | if (dest) { | ||
1853 | return TILEGX_EXCP_OPCODE_UNKNOWN; | ||
1854 | @@ -XXX,XX +XXX,XX @@ static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca, | ||
1855 | } | ||
1856 | |||
1857 | static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb, | ||
1858 | - int imm, TCGMemOp memop, const char *name) | ||
1859 | + int imm, MemOp memop, const char *name) | ||
1860 | { | ||
1861 | TCGv tsrca = load_gr(dc, srca); | ||
1862 | TCGv tsrcb = load_gr(dc, srcb); | ||
1863 | @@ -XXX,XX +XXX,XX @@ static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext, | ||
1864 | { | ||
1865 | TCGv tdest, tsrca; | ||
1866 | const char *mnemonic; | ||
1867 | - TCGMemOp memop; | ||
1868 | + MemOp memop; | ||
1869 | TileExcp ret = TILEGX_EXCP_NONE; | ||
1870 | bool prefetch_nofault = false; | ||
1871 | |||
1872 | @@ -XXX,XX +XXX,XX @@ static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext, | ||
1873 | TCGv tsrca = load_gr(dc, srca); | ||
1874 | bool prefetch_nofault = false; | ||
1875 | const char *mnemonic; | ||
1876 | - TCGMemOp memop; | ||
1877 | + MemOp memop; | ||
1878 | int i2, i3; | ||
1879 | TCGv t0; | ||
1880 | |||
1881 | @@ -XXX,XX +XXX,XX @@ static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle) | ||
1882 | unsigned srca = get_SrcA_Y2(bundle); | ||
1883 | unsigned srcbdest = get_SrcBDest_Y2(bundle); | ||
1884 | const char *mnemonic; | ||
1885 | - TCGMemOp memop; | ||
1886 | + MemOp memop; | ||
1887 | bool prefetch_nofault = false; | ||
1888 | |||
1889 | switch (OEY2(opc, mode)) { | ||
1890 | diff --git a/target/tricore/translate.c b/target/tricore/translate.c | ||
1891 | index XXXXXXX..XXXXXXX 100644 | ||
1892 | --- a/target/tricore/translate.c | ||
1893 | +++ b/target/tricore/translate.c | ||
1894 | @@ -XXX,XX +XXX,XX @@ static inline void generate_trap(DisasContext *ctx, int class, int tin); | ||
1895 | /* Functions for load/save to/from memory */ | ||
1896 | |||
1897 | static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, | ||
1898 | - int16_t con, TCGMemOp mop) | ||
1899 | + int16_t con, MemOp mop) | ||
1900 | { | ||
1901 | TCGv temp = tcg_temp_new(); | ||
1902 | tcg_gen_addi_tl(temp, r2, con); | ||
1903 | @@ -XXX,XX +XXX,XX @@ static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, | ||
1904 | } | ||
1905 | |||
1906 | static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2, | ||
1907 | - int16_t con, TCGMemOp mop) | ||
1908 | + int16_t con, MemOp mop) | ||
1909 | { | ||
1910 | TCGv temp = tcg_temp_new(); | ||
1911 | tcg_gen_addi_tl(temp, r2, con); | ||
1912 | @@ -XXX,XX +XXX,XX @@ static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, | ||
1913 | } | ||
1914 | |||
1915 | static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, | ||
1916 | - TCGMemOp mop) | ||
1917 | + MemOp mop) | ||
1918 | { | ||
1919 | TCGv temp = tcg_temp_new(); | ||
1920 | tcg_gen_addi_tl(temp, r2, off); | ||
1921 | @@ -XXX,XX +XXX,XX @@ static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, | ||
1922 | } | ||
1923 | |||
1924 | static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, | ||
1925 | - TCGMemOp mop) | ||
1926 | + MemOp mop) | ||
1927 | { | ||
1928 | TCGv temp = tcg_temp_new(); | ||
1929 | tcg_gen_addi_tl(temp, r2, off); | ||
1930 | diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c | ||
1931 | index XXXXXXX..XXXXXXX 100644 | ||
1932 | --- a/tcg/aarch64/tcg-target.inc.c | ||
1933 | +++ b/tcg/aarch64/tcg-target.inc.c | ||
1934 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) | ||
1935 | tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); | ||
1936 | } | ||
1937 | |||
1938 | -static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, | ||
1939 | +static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits, | ||
1940 | TCGReg rd, TCGReg rn) | ||
1941 | { | ||
1942 | /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ | ||
1943 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, | ||
1944 | tcg_out_sbfm(s, ext, rd, rn, 0, bits); | ||
1945 | } | ||
1946 | |||
1947 | -static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits, | ||
1948 | +static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits, | ||
1949 | TCGReg rd, TCGReg rn) | ||
1950 | { | ||
1951 | /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ | ||
1952 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target) | ||
1953 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
1954 | { | ||
1955 | TCGMemOpIdx oi = lb->oi; | ||
1956 | - TCGMemOp opc = get_memop(oi); | ||
1957 | - TCGMemOp size = opc & MO_SIZE; | ||
1958 | + MemOp opc = get_memop(oi); | ||
1959 | + MemOp size = opc & MO_SIZE; | ||
1960 | |||
1961 | if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { | ||
1962 | return false; | ||
1963 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
1964 | static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
1965 | { | ||
1966 | TCGMemOpIdx oi = lb->oi; | ||
1967 | - TCGMemOp opc = get_memop(oi); | ||
1968 | - TCGMemOp size = opc & MO_SIZE; | ||
1969 | + MemOp opc = get_memop(oi); | ||
1970 | + MemOp size = opc & MO_SIZE; | ||
1971 | |||
1972 | if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { | ||
1973 | return false; | ||
1974 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); | ||
1975 | slow path for the failure case, which will be patched later when finalizing | ||
1976 | the slow path. Generated code returns the host addend in X1, | ||
1977 | clobbers X0,X2,X3,TMP. */ | ||
1978 | -static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, | ||
1979 | +static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, | ||
1980 | tcg_insn_unit **label_ptr, int mem_index, | ||
1981 | bool is_read) | ||
1982 | { | ||
1983 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, | ||
1984 | |||
1985 | #endif /* CONFIG_SOFTMMU */ | 135 | #endif /* CONFIG_SOFTMMU */ |
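
The tcg_out_tlb_read comment above summarizes the softmmu protocol every backend in this series follows: compare the guest address against the TLB entry inline; on a hit, add the entry's cached "addend" to form the host address; on a miss, branch to a slow path that is patched in when the TB is finalized and which calls the fill helper. A minimal C model of the hit/miss decision (types and names invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))

    typedef struct {
        uint64_t addr_read;   /* guest page tag for reads */
        intptr_t addend;      /* host_base - guest_base for this page */
    } TLBEntry;

    /* Fast path: on a hit, guest address + addend is the host address.
     * A real backend branches to a patched slow path on a miss. */
    static bool tlb_lookup(const TLBEntry *e, uint64_t guest, uintptr_t *host)
    {
        if ((guest & TARGET_PAGE_MASK) != e->addr_read) {
            return false;                        /* miss: slow path */
        }
        *host = (uintptr_t)(guest + e->addend);  /* hit */
        return true;
    }

    int main(void)
    {
        TLBEntry e = { .addr_read = 0x4000, .addend = 0x10000 };
        uintptr_t host;
        if (tlb_lookup(&e, 0x4018, &host)) {
            printf("host addr = %#lx\n", (unsigned long)host);
        }
        return 0;
    }
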
1986 | 136 | ||
1987 | -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, | 137 | static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, |
1988 | +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, | ||
1989 | TCGReg data_r, TCGReg addr_r, | 138 | TCGReg data_r, TCGReg addr_r, |
1990 | TCGType otype, TCGReg off_r) | 139 | TCGType otype, TCGReg off_r) |
1991 | { | 140 | { |
1992 | - const TCGMemOp bswap = memop & MO_BSWAP; | 141 | - /* Byte swapping is left to middle-end expansion. */ |
1993 | + const MemOp bswap = memop & MO_BSWAP; | 142 | - tcg_debug_assert((memop & MO_BSWAP) == 0); |
1994 | 143 | - | |
1995 | switch (memop & MO_SSIZE) { | 144 | switch (memop & MO_SSIZE) { |
1996 | case MO_UB: | 145 | case MO_UB: |
1997 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, | 146 | tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r); |
1998 | } | 147 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, |
1999 | } | ||
2000 | |||
2001 | -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, | ||
2002 | +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, | ||
2003 | TCGReg data_r, TCGReg addr_r, | 148 | TCGReg data_r, TCGReg addr_r, |
2004 | TCGType otype, TCGReg off_r) | 149 | TCGType otype, TCGReg off_r) |
2005 | { | 150 | { |
2006 | - const TCGMemOp bswap = memop & MO_BSWAP; | 151 | - /* Byte swapping is left to middle-end expansion. */ |
2007 | + const MemOp bswap = memop & MO_BSWAP; | 152 | - tcg_debug_assert((memop & MO_BSWAP) == 0); |
2008 | 153 | - | |
2009 | switch (memop & MO_SIZE) { | 154 | switch (memop & MO_SIZE) { |
2010 | case MO_8: | 155 | case MO_8: |
2011 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, | 156 | tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r); |
2012 | static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | 157 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
2013 | TCGMemOpIdx oi, TCGType ext) | 158 | { |
2014 | { | 159 | MemOp memop = get_memop(oi); |
2015 | - TCGMemOp memop = get_memop(oi); | ||
2016 | + MemOp memop = get_memop(oi); | ||
2017 | const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; | 160 | const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; |
2018 | #ifdef CONFIG_SOFTMMU | 161 | + |
2019 | unsigned mem_index = get_mmuidx(oi); | 162 | + /* Byte swapping is left to middle-end expansion. */ |
2020 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | 163 | + tcg_debug_assert((memop & MO_BSWAP) == 0); |
2021 | static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | 164 | + |
2022 | TCGMemOpIdx oi) | ||
2023 | { | ||
2024 | - TCGMemOp memop = get_memop(oi); | ||
2025 | + MemOp memop = get_memop(oi); | ||
2026 | const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; | ||
2027 | #ifdef CONFIG_SOFTMMU | ||
2028 | unsigned mem_index = get_mmuidx(oi); | ||
2029 | diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c | ||
2030 | index XXXXXXX..XXXXXXX 100644 | ||
2031 | --- a/tcg/arm/tcg-target.inc.c | ||
2032 | +++ b/tcg/arm/tcg-target.inc.c | ||
2033 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); | ||
2034 | containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */ | ||
2035 | |||
2036 | static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
2037 | - TCGMemOp opc, int mem_index, bool is_load) | ||
2038 | + MemOp opc, int mem_index, bool is_load) | ||
2039 | { | ||
2040 | int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) | ||
2041 | : offsetof(CPUTLBEntry, addr_write)); | ||
2042 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2043 | { | ||
2044 | TCGReg argreg, datalo, datahi; | ||
2045 | TCGMemOpIdx oi = lb->oi; | ||
2046 | - TCGMemOp opc = get_memop(oi); | ||
2047 | + MemOp opc = get_memop(oi); | ||
2048 | void *func; | ||
2049 | |||
2050 | if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { | ||
2051 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2052 | { | ||
2053 | TCGReg argreg, datalo, datahi; | ||
2054 | TCGMemOpIdx oi = lb->oi; | ||
2055 | - TCGMemOp opc = get_memop(oi); | ||
2056 | + MemOp opc = get_memop(oi); | ||
2057 | |||
2058 | if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { | ||
2059 | return false; | ||
2060 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2061 | } | ||
2062 | #endif /* SOFTMMU */ | ||
2063 | |||
2064 | -static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, | ||
2065 | +static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, | ||
2066 | TCGReg datalo, TCGReg datahi, | ||
2067 | TCGReg addrlo, TCGReg addend) | ||
2068 | { | ||
2069 | - TCGMemOp bswap = opc & MO_BSWAP; | ||
2070 | + MemOp bswap = opc & MO_BSWAP; | ||
2071 | |||
2072 | switch (opc & MO_SSIZE) { | ||
2073 | case MO_UB: | ||
2074 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, | ||
2075 | } | ||
2076 | } | ||
2077 | |||
2078 | -static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, | ||
2079 | +static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, | ||
2080 | TCGReg datalo, TCGReg datahi, | ||
2081 | TCGReg addrlo) | ||
2082 | { | ||
2083 | - TCGMemOp bswap = opc & MO_BSWAP; | ||
2084 | + MemOp bswap = opc & MO_BSWAP; | ||
2085 | |||
2086 | switch (opc & MO_SSIZE) { | ||
2087 | case MO_UB: | ||
2088 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
2089 | { | ||
2090 | TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); | ||
2091 | TCGMemOpIdx oi; | ||
2092 | - TCGMemOp opc; | ||
2093 | + MemOp opc; | ||
2094 | #ifdef CONFIG_SOFTMMU | ||
2095 | int mem_index; | ||
2096 | TCGReg addend; | ||
2097 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
2098 | #endif | ||
2099 | } | ||
2100 | |||
2101 | -static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, | ||
2102 | +static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc, | ||
2103 | TCGReg datalo, TCGReg datahi, | ||
2104 | TCGReg addrlo, TCGReg addend) | ||
2105 | { | ||
2106 | - TCGMemOp bswap = opc & MO_BSWAP; | ||
2107 | + MemOp bswap = opc & MO_BSWAP; | ||
2108 | |||
2109 | switch (opc & MO_SIZE) { | ||
2110 | case MO_8: | ||
2111 | @@ -XXX,XX +XXX,XX @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, | ||
2112 | } | ||
2113 | } | ||
2114 | |||
2115 | -static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, | ||
2116 | +static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, | ||
2117 | TCGReg datalo, TCGReg datahi, | ||
2118 | TCGReg addrlo) | ||
2119 | { | ||
2120 | - TCGMemOp bswap = opc & MO_BSWAP; | ||
2121 | + MemOp bswap = opc & MO_BSWAP; | ||
2122 | |||
2123 | switch (opc & MO_SIZE) { | ||
2124 | case MO_8: | ||
2125 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
2126 | { | ||
2127 | TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); | ||
2128 | TCGMemOpIdx oi; | ||
2129 | - TCGMemOp opc; | ||
2130 | + MemOp opc; | ||
2131 | #ifdef CONFIG_SOFTMMU | ||
2132 | int mem_index; | ||
2133 | TCGReg addend; | ||
2134 | diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c | ||
2135 | index XXXXXXX..XXXXXXX 100644 | ||
2136 | --- a/tcg/i386/tcg-target.inc.c | ||
2137 | +++ b/tcg/i386/tcg-target.inc.c | ||
2138 | @@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[16] = { | ||
2139 | First argument register is clobbered. */ | ||
2140 | |||
2141 | static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
2142 | - int mem_index, TCGMemOp opc, | ||
2143 | + int mem_index, MemOp opc, | ||
2144 | tcg_insn_unit **label_ptr, int which) | ||
2145 | { | ||
2146 | const TCGReg r0 = TCG_REG_L0; | ||
2147 | @@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, | ||
2148 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2149 | { | ||
2150 | TCGMemOpIdx oi = l->oi; | ||
2151 | - TCGMemOp opc = get_memop(oi); | ||
2152 | + MemOp opc = get_memop(oi); | ||
2153 | TCGReg data_reg; | ||
2154 | tcg_insn_unit **label_ptr = &l->label_ptr[0]; | ||
2155 | int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0); | ||
2156 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2157 | static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2158 | { | ||
2159 | TCGMemOpIdx oi = l->oi; | ||
2160 | - TCGMemOp opc = get_memop(oi); | ||
2161 | - TCGMemOp s_bits = opc & MO_SIZE; | ||
2162 | + MemOp opc = get_memop(oi); | ||
2163 | + MemOp s_bits = opc & MO_SIZE; | ||
2164 | tcg_insn_unit **label_ptr = &l->label_ptr[0]; | ||
2165 | TCGReg retaddr; | ||
2166 | |||
2167 | @@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void) | ||
2168 | |||
2169 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
2170 | TCGReg base, int index, intptr_t ofs, | ||
2171 | - int seg, bool is64, TCGMemOp memop) | ||
2172 | + int seg, bool is64, MemOp memop) | ||
2173 | { | ||
2174 | - const TCGMemOp real_bswap = memop & MO_BSWAP; | ||
2175 | - TCGMemOp bswap = real_bswap; | ||
2176 | + const MemOp real_bswap = memop & MO_BSWAP; | ||
2177 | + MemOp bswap = real_bswap; | ||
2178 | int rexw = is64 * P_REXW; | ||
2179 | int movop = OPC_MOVL_GvEv; | ||
2180 | |||
2181 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
2182 | TCGReg datalo, datahi, addrlo; | ||
2183 | TCGReg addrhi __attribute__((unused)); | ||
2184 | TCGMemOpIdx oi; | ||
2185 | - TCGMemOp opc; | ||
2186 | + MemOp opc; | ||
2187 | #if defined(CONFIG_SOFTMMU) | ||
2188 | int mem_index; | ||
2189 | tcg_insn_unit *label_ptr[2]; | ||
2190 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
2191 | |||
2192 | static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, | ||
2193 | TCGReg base, int index, intptr_t ofs, | ||
2194 | - int seg, TCGMemOp memop) | ||
2195 | + int seg, MemOp memop) | ||
2196 | { | ||
2197 | /* ??? Ideally we wouldn't need a scratch register. For user-only, | ||
2198 | we could perform the bswap twice to restore the original value | ||
2199 | instead of moving to the scratch. But as it is, the L constraint | ||
2200 | means that TCG_REG_L0 is definitely free here. */ | ||
2201 | const TCGReg scratch = TCG_REG_L0; | ||
2202 | - const TCGMemOp real_bswap = memop & MO_BSWAP; | ||
2203 | - TCGMemOp bswap = real_bswap; | ||
2204 | + const MemOp real_bswap = memop & MO_BSWAP; | ||
2205 | + MemOp bswap = real_bswap; | ||
2206 | int movop = OPC_MOVL_EvGv; | ||
2207 | |||
2208 | if (have_movbe && real_bswap) { | ||
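
The "??? Ideally we wouldn't need a scratch register" comment weighs two ways to emit a byte-swapped store: swap into a scratch register (the value register is preserved, but a register is burned), or swap the value in place, store it, and swap it back (no scratch, two extra bswaps). Both strategies, modeled in plain C using the GCC/clang __builtin_bswap32 intrinsic (illustrative only; the backend works on registers, not pointers):

    #include <stdint.h>
    #include <stdio.h>

    static void store_via_scratch(uint32_t *mem, uint32_t val)
    {
        uint32_t scratch = __builtin_bswap32(val);  /* val stays intact */
        *mem = scratch;
    }

    static void store_swap_twice(uint32_t *mem, uint32_t *val)
    {
        *val = __builtin_bswap32(*val);  /* clobber the value... */
        *mem = *val;
        *val = __builtin_bswap32(*val);  /* ...then restore it */
    }

    int main(void)
    {
        uint32_t mem, v = 0x11223344;
        store_via_scratch(&mem, v);
        printf("%#x %#x\n", mem, v);     /* 0x44332211 0x11223344 */
        store_swap_twice(&mem, &v);
        printf("%#x %#x\n", mem, v);     /* same result either way */
        return 0;
    }
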
2209 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
2210 | TCGReg datalo, datahi, addrlo; | ||
2211 | TCGReg addrhi __attribute__((unused)); | ||
2212 | TCGMemOpIdx oi; | ||
2213 | - TCGMemOp opc; | ||
2214 | + MemOp opc; | ||
2215 | #if defined(CONFIG_SOFTMMU) | ||
2216 | int mem_index; | ||
2217 | tcg_insn_unit *label_ptr[2]; | ||
2218 | diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c | ||
2219 | index XXXXXXX..XXXXXXX 100644 | ||
2220 | --- a/tcg/mips/tcg-target.inc.c | ||
2221 | +++ b/tcg/mips/tcg-target.inc.c | ||
2222 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, | ||
2223 | TCGReg addrh, TCGMemOpIdx oi, | ||
2224 | tcg_insn_unit *label_ptr[2], bool is_load) | ||
2225 | { | ||
2226 | - TCGMemOp opc = get_memop(oi); | ||
2227 | + MemOp opc = get_memop(oi); | ||
2228 | unsigned s_bits = opc & MO_SIZE; | ||
2229 | unsigned a_bits = get_alignment_bits(opc); | ||
2230 | int mem_index = get_mmuidx(oi); | ||
2231 | @@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, | ||
2232 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2233 | { | ||
2234 | TCGMemOpIdx oi = l->oi; | ||
2235 | - TCGMemOp opc = get_memop(oi); | ||
2236 | + MemOp opc = get_memop(oi); | ||
2237 | TCGReg v0; | ||
2238 | int i; | ||
2239 | |||
2240 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2241 | static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2242 | { | ||
2243 | TCGMemOpIdx oi = l->oi; | ||
2244 | - TCGMemOp opc = get_memop(oi); | ||
2245 | - TCGMemOp s_bits = opc & MO_SIZE; | ||
2246 | + MemOp opc = get_memop(oi); | ||
2247 | + MemOp s_bits = opc & MO_SIZE; | ||
2248 | int i; | ||
2249 | |||
2250 | /* resolve label address */ | ||
2251 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2252 | #endif | ||
2253 | |||
2254 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
2255 | - TCGReg base, TCGMemOp opc, bool is_64) | ||
2256 | + TCGReg base, MemOp opc, bool is_64) | ||
2257 | { | ||
2258 | switch (opc & (MO_SSIZE | MO_BSWAP)) { | ||
2259 | case MO_UB: | ||
2260 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
2261 | TCGReg addr_regl, addr_regh __attribute__((unused)); | ||
2262 | TCGReg data_regl, data_regh; | ||
2263 | TCGMemOpIdx oi; | ||
2264 | - TCGMemOp opc; | ||
2265 | + MemOp opc; | ||
2266 | #if defined(CONFIG_SOFTMMU) | ||
2267 | tcg_insn_unit *label_ptr[2]; | ||
2268 | #endif | ||
2269 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
2270 | } | ||
2271 | |||
2272 | static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
2273 | - TCGReg base, TCGMemOp opc) | ||
2274 | + TCGReg base, MemOp opc) | ||
2275 | { | ||
2276 | /* Don't clutter the code below with checks to avoid bswapping ZERO. */ | ||
2277 | if ((lo | hi) == 0) { | ||
2278 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
2279 | TCGReg addr_regl, addr_regh __attribute__((unused)); | ||
2280 | TCGReg data_regl, data_regh; | ||
2281 | TCGMemOpIdx oi; | ||
2282 | - TCGMemOp opc; | ||
2283 | + MemOp opc; | ||
2284 | #if defined(CONFIG_SOFTMMU) | ||
2285 | tcg_insn_unit *label_ptr[2]; | ||
2286 | #endif | ||
2287 | diff --git a/tcg/optimize.c b/tcg/optimize.c | ||
2288 | index XXXXXXX..XXXXXXX 100644 | ||
2289 | --- a/tcg/optimize.c | ||
2290 | +++ b/tcg/optimize.c | ||
2291 | @@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s) | ||
2292 | CASE_OP_32_64(qemu_ld): | ||
2293 | { | ||
2294 | TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs]; | ||
2295 | - TCGMemOp mop = get_memop(oi); | ||
2296 | + MemOp mop = get_memop(oi); | ||
2297 | if (!(mop & MO_SIGN)) { | ||
2298 | mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; | ||
2299 | } | ||
2300 | diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c | ||
2301 | index XXXXXXX..XXXXXXX 100644 | ||
2302 | --- a/tcg/ppc/tcg-target.inc.c | ||
2303 | +++ b/tcg/ppc/tcg-target.inc.c | ||
2304 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); | ||
2305 | in CR7, loads the addend of the TLB into R3, and returns the register | ||
2306 | containing the guest address (zero-extended into R4). Clobbers R0 and R2. */ | ||
2307 | |||
2308 | -static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc, | ||
2309 | +static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc, | ||
2310 | TCGReg addrlo, TCGReg addrhi, | ||
2311 | int mem_index, bool is_read) | ||
2312 | { | ||
2313 | @@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, | ||
2314 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2315 | { | ||
2316 | TCGMemOpIdx oi = lb->oi; | ||
2317 | - TCGMemOp opc = get_memop(oi); | ||
2318 | + MemOp opc = get_memop(oi); | ||
2319 | TCGReg hi, lo, arg = TCG_REG_R3; | ||
2320 | |||
2321 | if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { | ||
2322 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2323 | static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2324 | { | ||
2325 | TCGMemOpIdx oi = lb->oi; | ||
2326 | - TCGMemOp opc = get_memop(oi); | ||
2327 | - TCGMemOp s_bits = opc & MO_SIZE; | ||
2328 | + MemOp opc = get_memop(oi); | ||
2329 | + MemOp s_bits = opc & MO_SIZE; | ||
2330 | TCGReg hi, lo, arg = TCG_REG_R3; | ||
2331 | |||
2332 | if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { | ||
2333 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
2334 | TCGReg datalo, datahi, addrlo, rbase; | ||
2335 | TCGReg addrhi __attribute__((unused)); | ||
2336 | TCGMemOpIdx oi; | ||
2337 | - TCGMemOp opc, s_bits; | ||
2338 | + MemOp opc, s_bits; | ||
2339 | #ifdef CONFIG_SOFTMMU | ||
2340 | int mem_index; | ||
2341 | tcg_insn_unit *label_ptr; | ||
2342 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
2343 | TCGReg datalo, datahi, addrlo, rbase; | ||
2344 | TCGReg addrhi __attribute__((unused)); | ||
2345 | TCGMemOpIdx oi; | ||
2346 | - TCGMemOp opc, s_bits; | ||
2347 | + MemOp opc, s_bits; | ||
2348 | #ifdef CONFIG_SOFTMMU | ||
2349 | int mem_index; | ||
2350 | tcg_insn_unit *label_ptr; | ||
2351 | diff --git a/tcg/riscv/tcg-target.inc.c b/tcg/riscv/tcg-target.inc.c | ||
2352 | index XXXXXXX..XXXXXXX 100644 | ||
2353 | --- a/tcg/riscv/tcg-target.inc.c | ||
2354 | +++ b/tcg/riscv/tcg-target.inc.c | ||
2355 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, | ||
2356 | TCGReg addrh, TCGMemOpIdx oi, | ||
2357 | tcg_insn_unit **label_ptr, bool is_load) | ||
2358 | { | ||
2359 | - TCGMemOp opc = get_memop(oi); | ||
2360 | + MemOp opc = get_memop(oi); | ||
2361 | unsigned s_bits = opc & MO_SIZE; | ||
2362 | unsigned a_bits = get_alignment_bits(opc); | ||
2363 | tcg_target_long compare_mask; | ||
2364 | @@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, | ||
2365 | static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2366 | { | ||
2367 | TCGMemOpIdx oi = l->oi; | ||
2368 | - TCGMemOp opc = get_memop(oi); | ||
2369 | + MemOp opc = get_memop(oi); | ||
2370 | TCGReg a0 = tcg_target_call_iarg_regs[0]; | ||
2371 | TCGReg a1 = tcg_target_call_iarg_regs[1]; | ||
2372 | TCGReg a2 = tcg_target_call_iarg_regs[2]; | ||
2373 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2374 | static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2375 | { | ||
2376 | TCGMemOpIdx oi = l->oi; | ||
2377 | - TCGMemOp opc = get_memop(oi); | ||
2378 | - TCGMemOp s_bits = opc & MO_SIZE; | ||
2379 | + MemOp opc = get_memop(oi); | ||
2380 | + MemOp s_bits = opc & MO_SIZE; | ||
2381 | TCGReg a0 = tcg_target_call_iarg_regs[0]; | ||
2382 | TCGReg a1 = tcg_target_call_iarg_regs[1]; | ||
2383 | TCGReg a2 = tcg_target_call_iarg_regs[2]; | ||
2384 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
2385 | #endif /* CONFIG_SOFTMMU */ | ||
2386 | |||
2387 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
2388 | - TCGReg base, TCGMemOp opc, bool is_64) | ||
2389 | + TCGReg base, MemOp opc, bool is_64) | ||
2390 | { | ||
2391 | - const TCGMemOp bswap = opc & MO_BSWAP; | ||
2392 | + const MemOp bswap = opc & MO_BSWAP; | ||
2393 | |||
2394 | /* We don't yet handle byteswapping, assert */ | ||
2395 | g_assert(!bswap); | ||
2396 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
2397 | TCGReg addr_regl, addr_regh __attribute__((unused)); | ||
2398 | TCGReg data_regl, data_regh; | ||
2399 | TCGMemOpIdx oi; | ||
2400 | - TCGMemOp opc; | ||
2401 | + MemOp opc; | ||
2402 | #if defined(CONFIG_SOFTMMU) | ||
2403 | tcg_insn_unit *label_ptr[1]; | ||
2404 | #endif | ||
2405 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
2406 | } | ||
2407 | |||
2408 | static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
2409 | - TCGReg base, TCGMemOp opc) | ||
2410 | + TCGReg base, MemOp opc) | ||
2411 | { | ||
2412 | - const TCGMemOp bswap = opc & MO_BSWAP; | ||
2413 | + const MemOp bswap = opc & MO_BSWAP; | ||
2414 | |||
2415 | /* We don't yet handle byteswapping, assert */ | ||
2416 | g_assert(!bswap); | ||
2417 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
2418 | TCGReg addr_regl, addr_regh __attribute__((unused)); | ||
2419 | TCGReg data_regl, data_regh; | ||
2420 | TCGMemOpIdx oi; | ||
2421 | - TCGMemOp opc; | ||
2422 | + MemOp opc; | ||
2423 | #if defined(CONFIG_SOFTMMU) | ||
2424 | tcg_insn_unit *label_ptr[1]; | ||
2425 | #endif | ||
2426 | diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c | ||
2427 | index XXXXXXX..XXXXXXX 100644 | ||
2428 | --- a/tcg/s390/tcg-target.inc.c | ||
2429 | +++ b/tcg/s390/tcg-target.inc.c | ||
2430 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) | ||
2431 | } | ||
2432 | } | ||
2433 | |||
2434 | -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, | ||
2435 | +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, | ||
2436 | TCGReg base, TCGReg index, int disp) | ||
2437 | { | ||
2438 | switch (opc & (MO_SSIZE | MO_BSWAP)) { | ||
2439 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, | ||
2440 | } | ||
2441 | } | ||
2442 | |||
2443 | -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data, | ||
2444 | +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, | ||
2445 | TCGReg base, TCGReg index, int disp) | ||
2446 | { | ||
2447 | switch (opc & (MO_SIZE | MO_BSWAP)) { | ||
2448 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); | ||
2449 | |||
2450 | /* Load and compare a TLB entry, leaving the flags set. Loads the TLB | ||
2451 | addend into R2. Returns a register with the sanitized guest address. */ | ||
2452 | -static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc, | ||
2453 | +static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, | ||
2454 | int mem_index, bool is_ld) | ||
2455 | { | ||
2456 | unsigned s_bits = opc & MO_SIZE; | ||
2457 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2458 | TCGReg addr_reg = lb->addrlo_reg; | ||
2459 | TCGReg data_reg = lb->datalo_reg; | ||
2460 | TCGMemOpIdx oi = lb->oi; | ||
2461 | - TCGMemOp opc = get_memop(oi); | ||
2462 | + MemOp opc = get_memop(oi); | ||
2463 | |||
2464 | if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, | ||
2465 | (intptr_t)s->code_ptr, 2)) { | ||
2466 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
2467 | TCGReg addr_reg = lb->addrlo_reg; | ||
2468 | TCGReg data_reg = lb->datalo_reg; | ||
2469 | TCGMemOpIdx oi = lb->oi; | ||
2470 | - TCGMemOp opc = get_memop(oi); | ||
2471 | + MemOp opc = get_memop(oi); | ||
2472 | |||
2473 | if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, | ||
2474 | (intptr_t)s->code_ptr, 2)) { | ||
2475 | @@ -XXX,XX +XXX,XX @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, | ||
2476 | static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
2477 | TCGMemOpIdx oi) | ||
2478 | { | ||
2479 | - TCGMemOp opc = get_memop(oi); | ||
2480 | + MemOp opc = get_memop(oi); | ||
2481 | #ifdef CONFIG_SOFTMMU | 165 | #ifdef CONFIG_SOFTMMU |
2482 | unsigned mem_index = get_mmuidx(oi); | 166 | unsigned mem_index = get_mmuidx(oi); |
2483 | tcg_insn_unit *label_ptr; | 167 | tcg_insn_unit *label_ptr; |
2484 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | 168 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
2485 | static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | 169 | add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg, |
2486 | TCGMemOpIdx oi) | 170 | s->code_ptr, label_ptr); |
2487 | { | 171 | #else /* !CONFIG_SOFTMMU */ |
2488 | - TCGMemOp opc = get_memop(oi); | 172 | + unsigned a_bits = get_alignment_bits(memop); |
2489 | + MemOp opc = get_memop(oi); | 173 | + if (a_bits) { |
174 | + tcg_out_test_alignment(s, true, addr_reg, a_bits); | ||
175 | + } | ||
176 | if (USE_GUEST_BASE) { | ||
177 | tcg_out_qemu_ld_direct(s, memop, ext, data_reg, | ||
178 | TCG_REG_GUEST_BASE, otype, addr_reg); | ||
179 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
180 | { | ||
181 | MemOp memop = get_memop(oi); | ||
182 | const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; | ||
183 | + | ||
184 | + /* Byte swapping is left to middle-end expansion. */ | ||
185 | + tcg_debug_assert((memop & MO_BSWAP) == 0); | ||
186 | + | ||
2490 | #ifdef CONFIG_SOFTMMU | 187 | #ifdef CONFIG_SOFTMMU |
2491 | unsigned mem_index = get_mmuidx(oi); | 188 | unsigned mem_index = get_mmuidx(oi); |
2492 | tcg_insn_unit *label_ptr; | 189 | tcg_insn_unit *label_ptr; |
2493 | diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c | 190 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
2494 | index XXXXXXX..XXXXXXX 100644 | 191 | add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64, |
2495 | --- a/tcg/sparc/tcg-target.inc.c | 192 | data_reg, addr_reg, s->code_ptr, label_ptr); |
2496 | +++ b/tcg/sparc/tcg-target.inc.c | 193 | #else /* !CONFIG_SOFTMMU */ |
2497 | @@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); | 194 | + unsigned a_bits = get_alignment_bits(memop); |
2498 | is in the returned register, maybe %o0. The TLB addend is in %o1. */ | 195 | + if (a_bits) { |
2499 | 196 | + tcg_out_test_alignment(s, false, addr_reg, a_bits); | |
2500 | static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, | 197 | + } |
2501 | - TCGMemOp opc, int which) | 198 | if (USE_GUEST_BASE) { |
2502 | + MemOp opc, int which) | 199 | tcg_out_qemu_st_direct(s, memop, data_reg, |
2503 | { | 200 | TCG_REG_GUEST_BASE, otype, addr_reg); |
2504 | int fast_off = TLB_MASK_TABLE_OFS(mem_index); | ||
2505 | int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); | ||
2506 | @@ -XXX,XX +XXX,XX @@ static const int qemu_st_opc[16] = { | ||
2507 | static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, | ||
2508 | TCGMemOpIdx oi, bool is_64) | ||
2509 | { | ||
2510 | - TCGMemOp memop = get_memop(oi); | ||
2511 | + MemOp memop = get_memop(oi); | ||
2512 | #ifdef CONFIG_SOFTMMU | ||
2513 | unsigned memi = get_mmuidx(oi); | ||
2514 | TCGReg addrz, param; | ||
2515 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, | ||
2516 | static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, | ||
2517 | TCGMemOpIdx oi) | ||
2518 | { | ||
2519 | - TCGMemOp memop = get_memop(oi); | ||
2520 | + MemOp memop = get_memop(oi); | ||
2521 | #ifdef CONFIG_SOFTMMU | ||
2522 | unsigned memi = get_mmuidx(oi); | ||
2523 | TCGReg addrz, param; | ||
2524 | diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c | ||
2525 | index XXXXXXX..XXXXXXX 100644 | ||
2526 | --- a/tcg/tcg-op.c | ||
2527 | +++ b/tcg/tcg-op.c | ||
2528 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void) | ||
2529 | } | ||
2530 | } | ||
2531 | |||
2532 | -static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) | ||
2533 | +static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) | ||
2534 | { | ||
2535 | /* Trigger the asserts within as early as possible. */ | ||
2536 | (void)get_alignment_bits(op); | ||
2537 | @@ -XXX,XX +XXX,XX @@ static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) | ||
2538 | } | ||
2539 | |||
2540 | static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, | ||
2541 | - TCGMemOp memop, TCGArg idx) | ||
2542 | + MemOp memop, TCGArg idx) | ||
2543 | { | ||
2544 | TCGMemOpIdx oi = make_memop_idx(memop, idx); | ||
2545 | #if TARGET_LONG_BITS == 32 | ||
2546 | @@ -XXX,XX +XXX,XX @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, | ||
2547 | } | ||
2548 | |||
2549 | static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, | ||
2550 | - TCGMemOp memop, TCGArg idx) | ||
2551 | + MemOp memop, TCGArg idx) | ||
2552 | { | ||
2553 | TCGMemOpIdx oi = make_memop_idx(memop, idx); | ||
2554 | #if TARGET_LONG_BITS == 32 | ||
2555 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_req_mo(TCGBar type) | ||
2556 | } | ||
2557 | } | ||
2558 | |||
2559 | -void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2560 | +void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) | ||
2561 | { | ||
2562 | - TCGMemOp orig_memop; | ||
2563 | + MemOp orig_memop; | ||
2564 | |||
2565 | tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); | ||
2566 | memop = tcg_canonicalize_memop(memop, 0, 0); | ||
2567 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2568 | } | ||
2569 | } | ||
2570 | |||
2571 | -void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2572 | +void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) | ||
2573 | { | ||
2574 | TCGv_i32 swap = NULL; | ||
2575 | |||
2576 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2577 | } | ||
2578 | } | ||
2579 | |||
2580 | -void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2581 | +void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) | ||
2582 | { | ||
2583 | - TCGMemOp orig_memop; | ||
2584 | + MemOp orig_memop; | ||
2585 | |||
2586 | if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { | ||
2587 | tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); | ||
2588 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2589 | } | ||
2590 | } | ||
2591 | |||
2592 | -void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2593 | +void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) | ||
2594 | { | ||
2595 | TCGv_i64 swap = NULL; | ||
2596 | |||
2597 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) | ||
2598 | } | ||
2599 | } | ||
2600 | |||
2601 | -static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc) | ||
2602 | +static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) | ||
2603 | { | ||
2604 | switch (opc & MO_SSIZE) { | ||
2605 | case MO_SB: | ||
2606 | @@ -XXX,XX +XXX,XX @@ static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc) | ||
2607 | } | ||
2608 | } | ||
2609 | |||
2610 | -static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc) | ||
2611 | +static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) | ||
2612 | { | ||
2613 | switch (opc & MO_SSIZE) { | ||
2614 | case MO_SB: | ||
2615 | @@ -XXX,XX +XXX,XX @@ static void * const table_cmpxchg[16] = { | ||
2616 | }; | ||
2617 | |||
2618 | void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, | ||
2619 | - TCGv_i32 newv, TCGArg idx, TCGMemOp memop) | ||
2620 | + TCGv_i32 newv, TCGArg idx, MemOp memop) | ||
2621 | { | ||
2622 | memop = tcg_canonicalize_memop(memop, 0, 0); | ||
2623 | |||
2624 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, | ||
2625 | } | ||
2626 | |||
2627 | void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, | ||
2628 | - TCGv_i64 newv, TCGArg idx, TCGMemOp memop) | ||
2629 | + TCGv_i64 newv, TCGArg idx, MemOp memop) | ||
2630 | { | ||
2631 | memop = tcg_canonicalize_memop(memop, 1, 0); | ||
2632 | |||
2633 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, | ||
2634 | } | ||
2635 | |||
2636 | static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, | ||
2637 | - TCGArg idx, TCGMemOp memop, bool new_val, | ||
2638 | + TCGArg idx, MemOp memop, bool new_val, | ||
2639 | void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) | ||
2640 | { | ||
2641 | TCGv_i32 t1 = tcg_temp_new_i32(); | ||
2642 | @@ -XXX,XX +XXX,XX @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, | ||
2643 | } | ||
2644 | |||
2645 | static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, | ||
2646 | - TCGArg idx, TCGMemOp memop, void * const table[]) | ||
2647 | + TCGArg idx, MemOp memop, void * const table[]) | ||
2648 | { | ||
2649 | gen_atomic_op_i32 gen; | ||
2650 | |||
2651 | @@ -XXX,XX +XXX,XX @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, | ||
2652 | } | ||
2653 | |||
2654 | static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, | ||
2655 | - TCGArg idx, TCGMemOp memop, bool new_val, | ||
2656 | + TCGArg idx, MemOp memop, bool new_val, | ||
2657 | void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) | ||
2658 | { | ||
2659 | TCGv_i64 t1 = tcg_temp_new_i64(); | ||
2660 | @@ -XXX,XX +XXX,XX @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, | ||
2661 | } | ||
2662 | |||
2663 | static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, | ||
2664 | - TCGArg idx, TCGMemOp memop, void * const table[]) | ||
2665 | + TCGArg idx, MemOp memop, void * const table[]) | ||
2666 | { | ||
2667 | memop = tcg_canonicalize_memop(memop, 1, 0); | ||
2668 | |||
2669 | @@ -XXX,XX +XXX,XX @@ static void * const table_##NAME[16] = { \ | ||
2670 | WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ | ||
2671 | }; \ | ||
2672 | void tcg_gen_atomic_##NAME##_i32 \ | ||
2673 | - (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \ | ||
2674 | + (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ | ||
2675 | { \ | ||
2676 | if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ | ||
2677 | do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ | ||
2678 | @@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_##NAME##_i32 \ | ||
2679 | } \ | ||
2680 | } \ | ||
2681 | void tcg_gen_atomic_##NAME##_i64 \ | ||
2682 | - (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \ | ||
2683 | + (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ | ||
2684 | { \ | ||
2685 | if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ | ||
2686 | do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ | ||
2687 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
2688 | index XXXXXXX..XXXXXXX 100644 | ||
2689 | --- a/tcg/tcg.c | ||
2690 | +++ b/tcg/tcg.c | ||
2691 | @@ -XXX,XX +XXX,XX @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) | ||
2692 | case INDEX_op_qemu_st_i64: | ||
2693 | { | ||
2694 | TCGMemOpIdx oi = op->args[k++]; | ||
2695 | - TCGMemOp op = get_memop(oi); | ||
2696 | + MemOp op = get_memop(oi); | ||
2697 | unsigned ix = get_mmuidx(oi); | ||
2698 | |||
2699 | if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { | ||
2700 | diff --git a/MAINTAINERS b/MAINTAINERS | ||
2701 | index XXXXXXX..XXXXXXX 100644 | ||
2702 | --- a/MAINTAINERS | ||
2703 | +++ b/MAINTAINERS | ||
2704 | @@ -XXX,XX +XXX,XX @@ M: Paolo Bonzini <pbonzini@redhat.com> | ||
2705 | S: Supported | ||
2706 | F: include/exec/ioport.h | ||
2707 | F: ioport.c | ||
2708 | +F: include/exec/memop.h | ||
2709 | F: include/exec/memory.h | ||
2710 | F: include/exec/ram_addr.h | ||
2711 | F: memory.c | ||
2712 | diff --git a/tcg/README b/tcg/README | ||
2713 | index XXXXXXX..XXXXXXX 100644 | ||
2714 | --- a/tcg/README | ||
2715 | +++ b/tcg/README | ||
2716 | @@ -XXX,XX +XXX,XX @@ Both t0 and t1 may be split into little-endian ordered pairs of registers | ||
2717 | if dealing with 64-bit quantities on a 32-bit host. | ||
2718 | |||
2719 | The memidx selects the qemu tlb index to use (e.g. user or kernel access). | ||
2720 | -The flags are the TCGMemOp bits, selecting the sign, width, and endianness | ||
2721 | +The flags are the MemOp bits, selecting the sign, width, and endianness | ||
2722 | of the memory access. | ||
2723 | |||
2724 | For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a | ||
2725 | -- | 201 | -- |
2726 | 2.17.1 | 202 | 2.25.1 |
2727 | 203 | ||
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
---|---|---|---|
2 | |||
3 | Append MemTxAttrs to interfaces so we can pass along the upcoming | ||
4 | Invert Endian TTE bit on SPARC64. | ||
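
For illustration, the attribute that will eventually ride along is a
single bit in MemTxAttrs; a sketch only, since the field itself is
introduced later in this series and the real struct carries more fields:

    typedef struct MemTxAttrs {
        /* Invert endianness for this page (to be driven by the SPARC64
         * Invert Endian TTE bit). Illustrative sketch, not this patch. */
        unsigned int byte_swap : 1;
    } MemTxAttrs;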
5 | |||
6 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <f8fcc3138570c460ef289a6b34ba7715ba36f99e.1566466906.git.tony.nguyen@bt.com> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 3 | --- |
11 | target/sparc/mmu_helper.c | 32 ++++++++++++++++++-------------- | 4 | tcg/ppc/tcg-target.h | 2 - |
12 | 1 file changed, 18 insertions(+), 14 deletions(-) | 5 | tcg/ppc/tcg-target.c.inc | 98 ++++++++++++++++++++++++++++++++++++---- |
6 | 2 files changed, 90 insertions(+), 10 deletions(-) | ||
13 | 7 | ||
14 | diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c | 8 | diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h |
15 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/target/sparc/mmu_helper.c | 10 | --- a/tcg/ppc/tcg-target.h |
17 | +++ b/target/sparc/mmu_helper.c | 11 | +++ b/tcg/ppc/tcg-target.h |
18 | @@ -XXX,XX +XXX,XX @@ static const int perm_table[2][8] = { | 12 | @@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); |
19 | }; | 13 | #define TCG_TARGET_DEFAULT_MO (0) |
20 | 14 | #define TCG_TARGET_HAS_MEMORY_BSWAP 1 | |
21 | static int get_physical_address(CPUSPARCState *env, hwaddr *physical, | 15 | |
22 | - int *prot, int *access_index, | 16 | -#ifdef CONFIG_SOFTMMU |
23 | + int *prot, int *access_index, MemTxAttrs *attrs, | 17 | #define TCG_TARGET_NEED_LDST_LABELS |
24 | target_ulong address, int rw, int mmu_idx, | 18 | -#endif |
25 | target_ulong *page_size) | 19 | #define TCG_TARGET_NEED_POOL_LABELS |
26 | { | 20 | |
27 | @@ -XXX,XX +XXX,XX @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, | 21 | #endif |
28 | target_ulong vaddr; | 22 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc |
29 | target_ulong page_size; | 23 | index XXXXXXX..XXXXXXX 100644 |
30 | int error_code = 0, prot, access_index; | 24 | --- a/tcg/ppc/tcg-target.c.inc |
31 | + MemTxAttrs attrs = {}; | 25 | +++ b/tcg/ppc/tcg-target.c.inc |
32 | 26 | @@ -XXX,XX +XXX,XX @@ | |
33 | /* | 27 | |
34 | * TODO: If we ever need tlb_vaddr_to_host for this target, | 28 | #include "elf.h" |
35 | @@ -XXX,XX +XXX,XX @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, | 29 | #include "../tcg-pool.c.inc" |
36 | assert(!probe); | 30 | +#include "../tcg-ldst.c.inc" |
37 | 31 | ||
38 | address &= TARGET_PAGE_MASK; | 32 | /* |
39 | - error_code = get_physical_address(env, &paddr, &prot, &access_index, | 33 | * Standardize on the _CALL_FOO symbols used by GCC: |
40 | + error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, | 34 | @@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, |
41 | address, access_type, | ||
42 | mmu_idx, &page_size); | ||
43 | vaddr = address; | ||
44 | @@ -XXX,XX +XXX,XX @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | -static int get_physical_address_data(CPUSPARCState *env, | ||
49 | - hwaddr *physical, int *prot, | ||
50 | +static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, | ||
51 | + int *prot, MemTxAttrs *attrs, | ||
52 | target_ulong address, int rw, int mmu_idx) | ||
53 | { | ||
54 | CPUState *cs = env_cpu(env); | ||
55 | @@ -XXX,XX +XXX,XX @@ static int get_physical_address_data(CPUSPARCState *env, | ||
56 | return 1; | ||
57 | } | ||
58 | |||
59 | -static int get_physical_address_code(CPUSPARCState *env, | ||
60 | - hwaddr *physical, int *prot, | ||
61 | +static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, | ||
62 | + int *prot, MemTxAttrs *attrs, | ||
63 | target_ulong address, int mmu_idx) | ||
64 | { | ||
65 | CPUState *cs = env_cpu(env); | ||
66 | @@ -XXX,XX +XXX,XX @@ static int get_physical_address_code(CPUSPARCState *env, | ||
67 | } | ||
68 | |||
69 | static int get_physical_address(CPUSPARCState *env, hwaddr *physical, | ||
70 | - int *prot, int *access_index, | ||
71 | + int *prot, int *access_index, MemTxAttrs *attrs, | ||
72 | target_ulong address, int rw, int mmu_idx, | ||
73 | target_ulong *page_size) | ||
74 | { | ||
75 | @@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, | ||
76 | } | ||
77 | |||
78 | if (rw == 2) { | ||
79 | - return get_physical_address_code(env, physical, prot, address, | ||
80 | + return get_physical_address_code(env, physical, prot, attrs, address, | ||
81 | mmu_idx); | ||
82 | } else { | ||
83 | - return get_physical_address_data(env, physical, prot, address, rw, | ||
84 | - mmu_idx); | ||
85 | + return get_physical_address_data(env, physical, prot, attrs, address, | ||
86 | + rw, mmu_idx); | ||
87 | } | 35 | } |
88 | } | 36 | } |
89 | 37 | ||
90 | @@ -XXX,XX +XXX,XX @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, | 38 | -static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) |
91 | target_ulong vaddr; | 39 | +static void tcg_out_call_int(TCGContext *s, int lk, |
92 | hwaddr paddr; | 40 | + const tcg_insn_unit *target) |
93 | target_ulong page_size; | 41 | { |
94 | + MemTxAttrs attrs = {}; | 42 | #ifdef _CALL_AIX |
95 | int error_code = 0, prot, access_index; | 43 | /* Look through the descriptor. If the branch is in range, and we |
96 | 44 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) | |
97 | address &= TARGET_PAGE_MASK; | 45 | |
98 | - error_code = get_physical_address(env, &paddr, &prot, &access_index, | 46 | if (in_range_b(diff) && toc == (uint32_t)toc) { |
99 | + error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, | 47 | tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc); |
100 | address, access_type, | 48 | - tcg_out_b(s, LK, tgt); |
101 | mmu_idx, &page_size); | 49 | + tcg_out_b(s, lk, tgt); |
102 | if (likely(error_code == 0)) { | 50 | } else { |
103 | @@ -XXX,XX +XXX,XX @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, | 51 | /* Fold the low bits of the constant into the addresses below. */ |
104 | env->dmmu.mmu_primary_context, | 52 | intptr_t arg = (intptr_t)target; |
105 | env->dmmu.mmu_secondary_context); | 53 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) |
106 | 54 | tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs); | |
107 | - tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); | 55 | tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR); |
108 | + tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx, | 56 | tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP); |
109 | + page_size); | 57 | - tcg_out32(s, BCCTR | BO_ALWAYS | LK); |
110 | return true; | 58 | + tcg_out32(s, BCCTR | BO_ALWAYS | lk); |
111 | } | 59 | } |
112 | if (probe) { | 60 | #elif defined(_CALL_ELF) && _CALL_ELF == 2 |
113 | @@ -XXX,XX +XXX,XX @@ static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, | 61 | intptr_t diff; |
114 | { | 62 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) |
115 | target_ulong page_size; | 63 | |
116 | int prot, access_index; | 64 | diff = tcg_pcrel_diff(s, target); |
117 | + MemTxAttrs attrs = {}; | 65 | if (in_range_b(diff)) { |
118 | 66 | - tcg_out_b(s, LK, target); | |
119 | - return get_physical_address(env, phys, &prot, &access_index, addr, rw, | 67 | + tcg_out_b(s, lk, target); |
120 | - mmu_idx, &page_size); | 68 | } else { |
121 | + return get_physical_address(env, phys, &prot, &access_index, &attrs, addr, | 69 | tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR); |
122 | + rw, mmu_idx, &page_size); | 70 | - tcg_out32(s, BCCTR | BO_ALWAYS | LK); |
71 | + tcg_out32(s, BCCTR | BO_ALWAYS | lk); | ||
72 | } | ||
73 | #else | ||
74 | - tcg_out_b(s, LK, target); | ||
75 | + tcg_out_b(s, lk, target); | ||
76 | #endif | ||
123 | } | 77 | } |
124 | 78 | ||
125 | #if defined(TARGET_SPARC64) | 79 | +static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) |
80 | +{ | ||
81 | + tcg_out_call_int(s, LK, target); | ||
82 | +} | ||
83 | + | ||
84 | static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = { | ||
85 | [MO_UB] = LBZX, | ||
86 | [MO_UW] = LHZX, | ||
87 | @@ -XXX,XX +XXX,XX @@ static const uint32_t qemu_exts_opc[4] = { | ||
88 | }; | ||
89 | |||
90 | #if defined (CONFIG_SOFTMMU) | ||
91 | -#include "../tcg-ldst.c.inc" | ||
92 | - | ||
93 | /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr, | ||
94 | * int mmu_idx, uintptr_t ra) | ||
95 | */ | ||
96 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
97 | tcg_out_b(s, 0, lb->raddr); | ||
98 | return true; | ||
99 | } | ||
100 | +#else | ||
101 | + | ||
102 | +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, | ||
103 | + TCGReg addrhi, unsigned a_bits) | ||
104 | +{ | ||
105 | + unsigned a_mask = (1 << a_bits) - 1; | ||
106 | + TCGLabelQemuLdst *label = new_ldst_label(s); | ||
107 | + | ||
108 | + label->is_ld = is_ld; | ||
109 | + label->addrlo_reg = addrlo; | ||
110 | + label->addrhi_reg = addrhi; | ||
111 | + | ||
112 | + /* We are expecting a_bits to max out at 7, much lower than ANDI. */ | ||
113 | + tcg_debug_assert(a_bits < 16); | ||
114 | + tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, a_mask)); | ||
115 | + | ||
116 | + label->label_ptr[0] = s->code_ptr; | ||
117 | + tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK); | ||
118 | + | ||
119 | + label->raddr = tcg_splitwx_to_rx(s->code_ptr); | ||
120 | +} | ||
121 | + | ||
122 | +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) | ||
123 | +{ | ||
124 | + if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { | ||
125 | + return false; | ||
126 | + } | ||
127 | + | ||
128 | + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { | ||
129 | + TCGReg arg = TCG_REG_R4; | ||
130 | +#ifdef TCG_TARGET_CALL_ALIGN_ARGS | ||
131 | + arg |= 1; | ||
132 | +#endif | ||
133 | + if (l->addrlo_reg != arg) { | ||
134 | + tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg); | ||
135 | + tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg); | ||
136 | + } else if (l->addrhi_reg != arg + 1) { | ||
137 | + tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg); | ||
138 | + tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg); | ||
139 | + } else { | ||
140 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg); | ||
141 | + tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1); | ||
142 | + tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0); | ||
143 | + } | ||
144 | + } else { | ||
145 | + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg); | ||
146 | + } | ||
147 | + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0); | ||
148 | + | ||
149 | + /* "Tail call" to the helper, with the return address back inline. */ | ||
150 | + tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld | ||
151 | + : helper_unaligned_st)); | ||
152 | + return true; | ||
153 | +} | ||
154 | + | ||
155 | +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
156 | +{ | ||
157 | + return tcg_out_fail_alignment(s, l); | ||
158 | +} | ||
159 | + | ||
160 | +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
161 | +{ | ||
162 | + return tcg_out_fail_alignment(s, l); | ||
163 | +} | ||
164 | + | ||
165 | #endif /* SOFTMMU */ | ||
166 | |||
167 | static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
168 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
169 | #ifdef CONFIG_SOFTMMU | ||
170 | int mem_index; | ||
171 | tcg_insn_unit *label_ptr; | ||
172 | +#else | ||
173 | + unsigned a_bits; | ||
174 | #endif | ||
175 | |||
176 | datalo = *args++; | ||
177 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
178 | |||
179 | rbase = TCG_REG_R3; | ||
180 | #else /* !CONFIG_SOFTMMU */ | ||
181 | + a_bits = get_alignment_bits(opc); | ||
182 | + if (a_bits) { | ||
183 | + tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); | ||
184 | + } | ||
185 | rbase = guest_base ? TCG_GUEST_BASE_REG : 0; | ||
186 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { | ||
187 | tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); | ||
188 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
189 | #ifdef CONFIG_SOFTMMU | ||
190 | int mem_index; | ||
191 | tcg_insn_unit *label_ptr; | ||
192 | +#else | ||
193 | + unsigned a_bits; | ||
194 | #endif | ||
195 | |||
196 | datalo = *args++; | ||
197 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
198 | |||
199 | rbase = TCG_REG_R3; | ||
200 | #else /* !CONFIG_SOFTMMU */ | ||
201 | + a_bits = get_alignment_bits(opc); | ||
202 | + if (a_bits) { | ||
203 | + tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); | ||
204 | + } | ||
205 | rbase = guest_base ? TCG_GUEST_BASE_REG : 0; | ||
206 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { | ||
207 | tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); | ||
126 | -- | 208 | -- |
127 | 2.17.1 | 209 | 2.25.1 |
128 | 210 | ||
1 | From: David Hildenbrand <david@redhat.com> | ||
---|---|---|---|
2 | |||
3 | ... similar to tlb_vaddr_to_host(); however, allow access to the host | ||
4 | page except when TLB_NOTDIRTY or TLB_MMIO is set. | ||
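
A sketch of the intended use, with store_byte_slow() as a hypothetical
stand-in for a target's existing per-byte fallback (illustration only,
not part of this patch):

    void *haddr = probe_write(env, addr, len, mmu_idx, ra);

    if (haddr) {
        /* Non-NULL: plain RAM with neither TLB_NOTDIRTY nor TLB_MMIO
         * set, so the host pointer may be written to directly. */
        memset(haddr, 0, len);
    } else {
        /* NULL: len == 0, a clean (not-dirty) page, or an I/O page;
         * fall back to the slow per-byte path. */
        for (int i = 0; i < len; i++) {
            store_byte_slow(env, addr + i, 0, mmu_idx, ra);
        }
    }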
5 | |||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
8 | Message-Id: <20190830100959.26615-2-david@redhat.com> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 1 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 2 | --- |
11 | include/exec/exec-all.h | 4 ++-- | 3 | tcg/riscv/tcg-target.h | 2 -- |
12 | accel/tcg/cputlb.c | 21 ++++++++++++++++----- | 4 | tcg/riscv/tcg-target.c.inc | 63 ++++++++++++++++++++++++++++++++++++-- |
13 | accel/tcg/user-exec.c | 6 ++++-- | 5 | 2 files changed, 61 insertions(+), 4 deletions(-) |
14 | 3 files changed, 22 insertions(+), 9 deletions(-) | ||
15 | 6 | ||
16 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | 7 | diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h |
17 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/include/exec/exec-all.h | 9 | --- a/tcg/riscv/tcg-target.h |
19 | +++ b/include/exec/exec-all.h | 10 | +++ b/tcg/riscv/tcg-target.h |
20 | @@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, | 11 | @@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); |
21 | { | 12 | |
13 | #define TCG_TARGET_DEFAULT_MO (0) | ||
14 | |||
15 | -#ifdef CONFIG_SOFTMMU | ||
16 | #define TCG_TARGET_NEED_LDST_LABELS | ||
17 | -#endif | ||
18 | #define TCG_TARGET_NEED_POOL_LABELS | ||
19 | |||
20 | #define TCG_TARGET_HAS_MEMORY_BSWAP 0 | ||
21 | diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc | ||
22 | index XXXXXXX..XXXXXXX 100644 | ||
23 | --- a/tcg/riscv/tcg-target.c.inc | ||
24 | +++ b/tcg/riscv/tcg-target.c.inc | ||
25 | @@ -XXX,XX +XXX,XX @@ | ||
26 | * THE SOFTWARE. | ||
27 | */ | ||
28 | |||
29 | +#include "../tcg-ldst.c.inc" | ||
30 | #include "../tcg-pool.c.inc" | ||
31 | |||
32 | #ifdef CONFIG_DEBUG_TCG | ||
33 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) | ||
34 | */ | ||
35 | |||
36 | #if defined(CONFIG_SOFTMMU) | ||
37 | -#include "../tcg-ldst.c.inc" | ||
38 | - | ||
39 | /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, | ||
40 | * MemOpIdx oi, uintptr_t ra) | ||
41 | */ | ||
42 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
43 | tcg_out_goto(s, l->raddr); | ||
44 | return true; | ||
22 | } | 45 | } |
23 | #endif | 46 | +#else |
24 | -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 47 | + |
25 | - uintptr_t retaddr); | 48 | +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, |
26 | +void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 49 | + unsigned a_bits) |
27 | + uintptr_t retaddr); | 50 | +{ |
28 | 51 | + unsigned a_mask = (1 << a_bits) - 1; | |
29 | #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ | 52 | + TCGLabelQemuLdst *l = new_ldst_label(s); |
30 | 53 | + | |
31 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 54 | + l->is_ld = is_ld; |
32 | index XXXXXXX..XXXXXXX 100644 | 55 | + l->addrlo_reg = addr_reg; |
33 | --- a/accel/tcg/cputlb.c | 56 | + |
34 | +++ b/accel/tcg/cputlb.c | 57 | + /* We are expecting a_bits to max out at 7, so we can always use andi. */ |
35 | @@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) | 58 | + tcg_debug_assert(a_bits < 12); |
36 | /* Probe for whether the specified guest write access is permitted. | 59 | + tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask); |
37 | * If it is not permitted then an exception will be taken in the same | 60 | + |
38 | * way as if this were a real write access (and we will not return). | 61 | + l->label_ptr[0] = s->code_ptr; |
39 | - * Otherwise the function will return, and there will be a valid | 62 | + tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0); |
40 | - * entry in the TLB for this access. | 63 | + |
41 | + * If the size is 0 or the page requires I/O access, returns NULL; otherwise, | 64 | + l->raddr = tcg_splitwx_to_rx(s->code_ptr); |
42 | + * returns the address of the host page similar to tlb_vaddr_to_host(). | 65 | +} |
43 | */ | 66 | + |
44 | -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 67 | +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) |
45 | - uintptr_t retaddr) | 68 | +{ |
46 | +void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 69 | + /* resolve label address */ |
47 | + uintptr_t retaddr) | 70 | + if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { |
48 | { | 71 | + return false; |
49 | uintptr_t index = tlb_index(env, mmu_idx, addr); | ||
50 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | ||
51 | @@ -XXX,XX +XXX,XX @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
52 | tlb_addr = tlb_addr_write(entry); | ||
53 | } | ||
54 | |||
55 | + if (!size) { | ||
56 | + return NULL; | ||
57 | + } | 72 | + } |
58 | + | 73 | + |
59 | /* Handle watchpoints. */ | 74 | + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); |
60 | - if ((tlb_addr & TLB_WATCHPOINT) && size > 0) { | 75 | + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); |
61 | + if (tlb_addr & TLB_WATCHPOINT) { | 76 | + |
62 | cpu_check_watchpoint(env_cpu(env), addr, size, | 77 | + /* tail call, with the return address back inline. */ |
63 | env_tlb(env)->d[mmu_idx].iotlb[index].attrs, | 78 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); |
64 | BP_MEM_WRITE, retaddr); | 79 | + tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld |
80 | + : helper_unaligned_st), true); | ||
81 | + return true; | ||
82 | +} | ||
83 | + | ||
84 | +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
85 | +{ | ||
86 | + return tcg_out_fail_alignment(s, l); | ||
87 | +} | ||
88 | + | ||
89 | +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
90 | +{ | ||
91 | + return tcg_out_fail_alignment(s, l); | ||
92 | +} | ||
93 | + | ||
94 | #endif /* CONFIG_SOFTMMU */ | ||
95 | |||
96 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
97 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
98 | MemOp opc; | ||
99 | #if defined(CONFIG_SOFTMMU) | ||
100 | tcg_insn_unit *label_ptr[1]; | ||
101 | +#else | ||
102 | + unsigned a_bits; | ||
103 | #endif | ||
104 | TCGReg base = TCG_REG_TMP0; | ||
105 | |||
106 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
107 | tcg_out_ext32u(s, base, addr_regl); | ||
108 | addr_regl = base; | ||
65 | } | 109 | } |
66 | + | 110 | + a_bits = get_alignment_bits(opc); |
67 | + if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) { | 111 | + if (a_bits) { |
68 | + /* I/O access */ | 112 | + tcg_out_test_alignment(s, true, addr_regl, a_bits); |
69 | + return NULL; | ||
70 | + } | 113 | + } |
71 | + | 114 | if (guest_base != 0) { |
72 | + return (void *)((uintptr_t)addr + entry->addend); | 115 | tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl); |
73 | } | ||
74 | |||
75 | void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, | ||
76 | diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c | ||
77 | index XXXXXXX..XXXXXXX 100644 | ||
78 | --- a/accel/tcg/user-exec.c | ||
79 | +++ b/accel/tcg/user-exec.c | ||
80 | @@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, | ||
81 | g_assert_not_reached(); | ||
82 | } | ||
83 | |||
84 | -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
85 | - uintptr_t retaddr) | ||
86 | +void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
87 | + uintptr_t retaddr) | ||
88 | { | ||
89 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); | ||
90 | |||
91 | @@ -XXX,XX +XXX,XX @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
92 | retaddr); | ||
93 | g_assert_not_reached(); | ||
94 | } | 116 | } |
95 | + | 117 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) |
96 | + return size ? g2h(addr) : NULL; | 118 | MemOp opc; |
97 | } | 119 | #if defined(CONFIG_SOFTMMU) |
98 | 120 | tcg_insn_unit *label_ptr[1]; | |
99 | #if defined(__i386__) | 121 | +#else |
122 | + unsigned a_bits; | ||
123 | #endif | ||
124 | TCGReg base = TCG_REG_TMP0; | ||
125 | |||
126 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
127 | tcg_out_ext32u(s, base, addr_regl); | ||
128 | addr_regl = base; | ||
129 | } | ||
130 | + a_bits = get_alignment_bits(opc); | ||
131 | + if (a_bits) { | ||
132 | + tcg_out_test_alignment(s, false, addr_regl, a_bits); | ||
133 | + } | ||
134 | if (guest_base != 0) { | ||
135 | tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl); | ||
136 | } | ||
100 | -- | 137 | -- |
101 | 2.17.1 | 138 | 2.25.1 |
102 | 139 | ||
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
---|---|---|---|
2 | |||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Introduce no-op size_memop to aid preparatory conversion of | ||
7 | interfaces. | ||
8 | |||
9 | Once interfaces are converted, size_memop will be implemented to | ||
10 | return a MemOp from size in bytes. | ||
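
A sketch of that eventual implementation, assuming sizes remain powers
of two between 1 and 8 bytes (MemOp encodes log2 of the access size):

    static inline MemOp size_memop(unsigned size)
    {
        /* Assumption: power-of-2 sizes from 1 to 8 bytes only. */
        assert(size <= 8 && is_power_of_2(size));
        return ctz32(size);
    }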
11 | |||
12 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
13 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | Message-Id: <35b8ee74020f67cf40848fb7d5f127cf96c851d6.1566466906.git.tony.nguyen@bt.com> | ||
15 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
16 | --- | 3 | --- |
17 | include/exec/memop.h | 10 ++++++++++ | 4 | tcg/s390x/tcg-target.h | 2 -- |
18 | 1 file changed, 10 insertions(+) | 5 | tcg/s390x/tcg-target.c.inc | 59 ++++++++++++++++++++++++++++++++++++-- |
6 | 2 files changed, 57 insertions(+), 4 deletions(-) | ||
19 | 7 | ||
20 | diff --git a/include/exec/memop.h b/include/exec/memop.h | 8 | diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h |
21 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/include/exec/memop.h | 10 | --- a/tcg/s390x/tcg-target.h |
23 | +++ b/include/exec/memop.h | 11 | +++ b/tcg/s390x/tcg-target.h |
24 | @@ -XXX,XX +XXX,XX @@ typedef enum MemOp { | 12 | @@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, |
25 | MO_SSIZE = MO_SIZE | MO_SIGN, | 13 | /* no need to flush icache explicitly */ |
26 | } MemOp; | 14 | } |
27 | 15 | ||
28 | +/* Size in bytes to MemOp. */ | 16 | -#ifdef CONFIG_SOFTMMU |
29 | +static inline unsigned size_memop(unsigned size) | 17 | #define TCG_TARGET_NEED_LDST_LABELS |
18 | -#endif | ||
19 | #define TCG_TARGET_NEED_POOL_LABELS | ||
20 | |||
21 | #endif | ||
22 | diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/tcg/s390x/tcg-target.c.inc | ||
25 | +++ b/tcg/s390x/tcg-target.c.inc | ||
26 | @@ -XXX,XX +XXX,XX @@ | ||
27 | #error "unsupported code generation mode" | ||
28 | #endif | ||
29 | |||
30 | +#include "../tcg-ldst.c.inc" | ||
31 | #include "../tcg-pool.c.inc" | ||
32 | #include "elf.h" | ||
33 | |||
34 | @@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode { | ||
35 | RI_OIHL = 0xa509, | ||
36 | RI_OILH = 0xa50a, | ||
37 | RI_OILL = 0xa50b, | ||
38 | + RI_TMLL = 0xa701, | ||
39 | |||
40 | RIE_CGIJ = 0xec7c, | ||
41 | RIE_CGRJ = 0xec64, | ||
42 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, | ||
43 | } | ||
44 | |||
45 | #if defined(CONFIG_SOFTMMU) | ||
46 | -#include "../tcg-ldst.c.inc" | ||
47 | - | ||
48 | /* We're expecting to use a 20-bit negative offset on the tlb memory ops. */ | ||
49 | QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); | ||
50 | QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); | ||
51 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
52 | return true; | ||
53 | } | ||
54 | #else | ||
55 | +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, | ||
56 | + TCGReg addrlo, unsigned a_bits) | ||
30 | +{ | 57 | +{ |
31 | + /* | 58 | + unsigned a_mask = (1 << a_bits) - 1; |
32 | + * FIXME: No-op to aid conversion of memory_region_dispatch_{read|write} | 59 | + TCGLabelQemuLdst *l = new_ldst_label(s); |
33 | + * "unsigned size" operand into a "MemOp op". | 60 | + |
34 | + */ | 61 | + l->is_ld = is_ld; |
35 | + return size; | 62 | + l->addrlo_reg = addrlo; |
63 | + | ||
64 | + /* We are expecting a_bits to max out at 7, much lower than TMLL. */ | ||
65 | + tcg_debug_assert(a_bits < 16); | ||
66 | + tcg_out_insn(s, RI, TMLL, addrlo, a_mask); | ||
67 | + | ||
68 | + tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */ | ||
69 | + l->label_ptr[0] = s->code_ptr; | ||
70 | + s->code_ptr += 1; | ||
71 | + | ||
72 | + l->raddr = tcg_splitwx_to_rx(s->code_ptr); | ||
36 | +} | 73 | +} |
37 | + | 74 | + |
75 | +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) | ||
76 | +{ | ||
77 | + if (!patch_reloc(l->label_ptr[0], R_390_PC16DBL, | ||
78 | + (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) { | ||
79 | + return false; | ||
80 | + } | ||
81 | + | ||
82 | + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, l->addrlo_reg); | ||
83 | + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); | ||
84 | + | ||
85 | + /* "Tail call" to the helper, with the return address back inline. */ | ||
86 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R14, (uintptr_t)l->raddr); | ||
87 | + tgen_gotoi(s, S390_CC_ALWAYS, (const void *)(l->is_ld ? helper_unaligned_ld | ||
88 | + : helper_unaligned_st)); | ||
89 | + return true; | ||
90 | +} | ||
91 | + | ||
92 | +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
93 | +{ | ||
94 | + return tcg_out_fail_alignment(s, l); | ||
95 | +} | ||
96 | + | ||
97 | +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
98 | +{ | ||
99 | + return tcg_out_fail_alignment(s, l); | ||
100 | +} | ||
101 | + | ||
102 | static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, | ||
103 | TCGReg *index_reg, tcg_target_long *disp) | ||
104 | { | ||
105 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
106 | #else | ||
107 | TCGReg index_reg; | ||
108 | tcg_target_long disp; | ||
109 | + unsigned a_bits = get_alignment_bits(opc); | ||
110 | |||
111 | + if (a_bits) { | ||
112 | + tcg_out_test_alignment(s, true, addr_reg, a_bits); | ||
113 | + } | ||
114 | tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); | ||
115 | tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp); | ||
116 | #endif | ||
117 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, | ||
118 | #else | ||
119 | TCGReg index_reg; | ||
120 | tcg_target_long disp; | ||
121 | + unsigned a_bits = get_alignment_bits(opc); | ||
122 | |||
123 | + if (a_bits) { | ||
124 | + tcg_out_test_alignment(s, false, addr_reg, a_bits); | ||
125 | + } | ||
126 | tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); | ||
127 | tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp); | ||
38 | #endif | 128 | #endif |
39 | -- | 129 | -- |
40 | 2.17.1 | 130 | 2.25.1 |
41 | 131 | ||
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
---|---|---|---|
2 | |||
3 | This bit configures endianness of PCI MMIO devices. It is used by | ||
4 | Solaris and OpenBSD sunhme drivers. | ||
5 | |||
6 | Tested working on OpenBSD. | ||
7 | |||
8 | Unfortunately Solaris 10 had an unrelated keyboard issue blocking | ||
9 | testing... another inch towards Solaris 10 on SPARC64 =) | ||
10 | |||
11 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> | ||
14 | Message-Id: <3c8d5181a584f1b3712d3d8d66801b13cecb4b88.1566466906.git.tony.nguyen@bt.com> | ||
15 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
16 | --- | 3 | --- |
17 | target/sparc/cpu.h | 2 ++ | 4 | tcg/tci.c | 20 ++++++++++++++------ |
18 | target/sparc/mmu_helper.c | 8 +++++++- | 5 | 1 file changed, 14 insertions(+), 6 deletions(-) |
19 | 2 files changed, 9 insertions(+), 1 deletion(-) | ||
20 | 6 | ||
21 | diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h | 7 | diff --git a/tcg/tci.c b/tcg/tci.c |
22 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
23 | --- a/target/sparc/cpu.h | 9 | --- a/tcg/tci.c |
24 | +++ b/target/sparc/cpu.h | 10 | +++ b/tcg/tci.c |
25 | @@ -XXX,XX +XXX,XX @@ enum { | 11 | @@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) |
26 | 12 | static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, | |
27 | #define TTE_VALID_BIT (1ULL << 63) | 13 | MemOpIdx oi, const void *tb_ptr) |
28 | #define TTE_NFO_BIT (1ULL << 60) | 14 | { |
29 | +#define TTE_IE_BIT (1ULL << 59) | 15 | - MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE); |
30 | #define TTE_USED_BIT (1ULL << 41) | 16 | + MemOp mop = get_memop(oi); |
31 | #define TTE_LOCKED_BIT (1ULL << 6) | 17 | uintptr_t ra = (uintptr_t)tb_ptr; |
32 | #define TTE_SIDEEFFECT_BIT (1ULL << 3) | 18 | |
33 | @@ -XXX,XX +XXX,XX @@ enum { | 19 | #ifdef CONFIG_SOFTMMU |
34 | 20 | - switch (mop) { | |
35 | #define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT) | 21 | + switch (mop & (MO_BSWAP | MO_SSIZE)) { |
36 | #define TTE_IS_NFO(tte) ((tte) & TTE_NFO_BIT) | 22 | case MO_UB: |
37 | +#define TTE_IS_IE(tte) ((tte) & TTE_IE_BIT) | 23 | return helper_ret_ldub_mmu(env, taddr, oi, ra); |
38 | #define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT) | 24 | case MO_SB: |
39 | #define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT) | 25 | @@ -XXX,XX +XXX,XX @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, |
40 | #define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT) | 26 | } |
41 | diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c | 27 | #else |
42 | index XXXXXXX..XXXXXXX 100644 | 28 | void *haddr = g2h(env_cpu(env), taddr); |
43 | --- a/target/sparc/mmu_helper.c | 29 | + unsigned a_mask = (1u << get_alignment_bits(mop)) - 1; |
44 | +++ b/target/sparc/mmu_helper.c | 30 | uint64_t ret; |
45 | @@ -XXX,XX +XXX,XX @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, | 31 | |
46 | if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { | 32 | set_helper_retaddr(ra); |
47 | int do_fault = 0; | 33 | - switch (mop) { |
48 | 34 | + if (taddr & a_mask) { | |
49 | + if (TTE_IS_IE(env->dtlb[i].tte)) { | 35 | + helper_unaligned_ld(env, taddr); |
50 | + attrs->byte_swap = true; | 36 | + } |
51 | + } | 37 | + switch (mop & (MO_BSWAP | MO_SSIZE)) { |
52 | + | 38 | case MO_UB: |
53 | /* access ok? */ | 39 | ret = ldub_p(haddr); |
54 | /* multiple bits in SFSR.FT may be set on TT_DFAULT */ | 40 | break; |
55 | if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { | 41 | @@ -XXX,XX +XXX,XX @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, |
56 | @@ -XXX,XX +XXX,XX @@ void dump_mmu(CPUSPARCState *env) | 42 | static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, |
57 | } | 43 | MemOpIdx oi, const void *tb_ptr) |
58 | if (TTE_IS_VALID(env->dtlb[i].tte)) { | 44 | { |
59 | qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx" | 45 | - MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE); |
60 | - ", %s, %s, %s, %s, ctx %" PRId64 " %s\n", | 46 | + MemOp mop = get_memop(oi); |
61 | + ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n", | 47 | uintptr_t ra = (uintptr_t)tb_ptr; |
62 | i, | 48 | |
63 | env->dtlb[i].tag & (uint64_t)~0x1fffULL, | 49 | #ifdef CONFIG_SOFTMMU |
64 | TTE_PA(env->dtlb[i].tte), | 50 | - switch (mop) { |
65 | @@ -XXX,XX +XXX,XX @@ void dump_mmu(CPUSPARCState *env) | 51 | + switch (mop & (MO_BSWAP | MO_SIZE)) { |
66 | TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", | 52 | case MO_UB: |
67 | TTE_IS_LOCKED(env->dtlb[i].tte) ? | 53 | helper_ret_stb_mmu(env, taddr, val, oi, ra); |
68 | "locked" : "unlocked", | 54 | break; |
69 | + TTE_IS_IE(env->dtlb[i].tte) ? | 55 | @@ -XXX,XX +XXX,XX @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, |
70 | + "yes" : "no", | 56 | } |
71 | env->dtlb[i].tag & (uint64_t)0x1fffULL, | 57 | #else |
72 | TTE_IS_GLOBAL(env->dtlb[i].tte) ? | 58 | void *haddr = g2h(env_cpu(env), taddr); |
73 | "global" : "local"); | 59 | + unsigned a_mask = (1u << get_alignment_bits(mop)) - 1; |
60 | |||
61 | set_helper_retaddr(ra); | ||
62 | - switch (mop) { | ||
63 | + if (taddr & a_mask) { | ||
64 | + helper_unaligned_st(env, taddr); | ||
65 | + } | ||
66 | + switch (mop & (MO_BSWAP | MO_SIZE)) { | ||
67 | case MO_UB: | ||
68 | stb_p(haddr, val); | ||
69 | break; | ||
74 | -- | 70 | -- |
75 | 2.17.1 | 71 | 2.25.1 |
76 | 72 | ||
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | From: WANG Xuerui <git@xen0n.name> |
---|---|---|---|
2 | 2 | ||
3 | Notice the new byte_swap attribute and force the transaction through the | 3 | Signed-off-by: WANG Xuerui <git@xen0n.name> |
4 | memory slow path. | ||
5 | |||
6 | Required by architectures that can invert endianness of memory | ||
7 | transaction, e.g. SPARC64 has the Invert Endian TTE bit. | ||
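
The slow path then only needs to flip one bit on the access's MemOp, as
the io_readx/io_writex hunks below do; for illustration (sketch only):

    MemOp op = MO_BEUL;     /* big-endian 32-bit access */
    op ^= MO_BSWAP;         /* now MO_LEUL: MO_BE and MO_LE differ only
                             * in the MO_BSWAP bit, so the size and sign
                             * bits are untouched. */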
8 | |||
9 | Suggested-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
12 | Message-Id: <2a10a1f1c00a894af1212c8f68ef09c2966023c1.1566466906.git.tony.nguyen@bt.com> | 5 | Message-Id: <20220106134238.3936163-1-git@xen0n.name> |
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
14 | --- | 7 | --- |
15 | include/exec/memattrs.h | 2 ++ | 8 | tcg/loongarch64/tcg-target.h | 2 - |
16 | accel/tcg/cputlb.c | 12 ++++++++++++ | 9 | tcg/loongarch64/tcg-target.c.inc | 71 +++++++++++++++++++++++++++++++- |
17 | 2 files changed, 14 insertions(+) | 10 | 2 files changed, 69 insertions(+), 4 deletions(-) |
18 | 11 | ||
19 | diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h | 12 | diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h |
20 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/include/exec/memattrs.h | 14 | --- a/tcg/loongarch64/tcg-target.h |
22 | +++ b/include/exec/memattrs.h | 15 | +++ b/tcg/loongarch64/tcg-target.h |
23 | @@ -XXX,XX +XXX,XX @@ typedef struct MemTxAttrs { | 16 | @@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); |
24 | unsigned int user:1; | 17 | |
25 | /* Requester ID (for MSI for example) */ | 18 | #define TCG_TARGET_DEFAULT_MO (0) |
26 | unsigned int requester_id:16; | 19 | |
27 | + /* Invert endianness for this page */ | 20 | -#ifdef CONFIG_SOFTMMU |
28 | + unsigned int byte_swap:1; | 21 | #define TCG_TARGET_NEED_LDST_LABELS |
29 | /* | 22 | -#endif |
30 | * The following are target-specific page-table bits. These are not | 23 | |
31 | * related to actual memory transactions at all. However, this structure | 24 | #define TCG_TARGET_HAS_MEMORY_BSWAP 0 |
32 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 25 | |
26 | diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc | ||
33 | index XXXXXXX..XXXXXXX 100644 | 27 | index XXXXXXX..XXXXXXX 100644 |
34 | --- a/accel/tcg/cputlb.c | 28 | --- a/tcg/loongarch64/tcg-target.c.inc |
35 | +++ b/accel/tcg/cputlb.c | 29 | +++ b/tcg/loongarch64/tcg-target.c.inc |
36 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | 30 | @@ -XXX,XX +XXX,XX @@ |
37 | */ | 31 | * THE SOFTWARE. |
38 | address |= TLB_RECHECK; | 32 | */ |
39 | } | 33 | |
40 | + if (attrs.byte_swap) { | 34 | +#include "../tcg-ldst.c.inc" |
41 | + /* Force the access through the I/O slow path. */ | 35 | + |
42 | + address |= TLB_MMIO; | 36 | #ifdef CONFIG_DEBUG_TCG |
43 | + } | 37 | static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { |
44 | if (!memory_region_is_ram(section->mr) && | 38 | "zero", |
45 | !memory_region_is_romd(section->mr)) { | 39 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, |
46 | /* IO memory case */ | 40 | */ |
47 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | 41 | |
48 | bool locked = false; | 42 | #if defined(CONFIG_SOFTMMU) |
49 | MemTxResult r; | 43 | -#include "../tcg-ldst.c.inc" |
50 | 44 | - | |
51 | + if (iotlbentry->attrs.byte_swap) { | 45 | /* |
52 | + op ^= MO_BSWAP; | 46 | * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, |
47 | * MemOpIdx oi, uintptr_t ra) | ||
48 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
49 | |||
50 | return tcg_out_goto(s, l->raddr); | ||
51 | } | ||
52 | +#else | ||
53 | + | ||
54 | +/* | ||
55 | + * Alignment helpers for user-mode emulation | ||
56 | + */ | ||
57 | + | ||
58 | +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, | ||
59 | + unsigned a_bits) | ||
60 | +{ | ||
61 | + TCGLabelQemuLdst *l = new_ldst_label(s); | ||
62 | + | ||
63 | + l->is_ld = is_ld; | ||
64 | + l->addrlo_reg = addr_reg; | ||
65 | + | ||
66 | + /* | ||
67 | + * Without micro-architecture details, we don't know which of bstrpick or | ||
68 | + * andi is faster, so use bstrpick as it's not constrained by imm field | ||
69 | + * width. (Not to say alignments >= 2^12 are going to happen any time | ||
70 | + * soon, though) | ||
71 | + */ | ||
72 | + tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); | ||
73 | + | ||
74 | + l->label_ptr[0] = s->code_ptr; | ||
75 | + tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); | ||
76 | + | ||
77 | + l->raddr = tcg_splitwx_to_rx(s->code_ptr); | ||
78 | +} | ||
79 | + | ||
80 | +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) | ||
81 | +{ | ||
82 | + /* resolve label address */ | ||
83 | + if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { | ||
84 | + return false; | ||
53 | + } | 85 | + } |
54 | + | 86 | + |
55 | section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); | 87 | + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); |
56 | mr = section->mr; | 88 | + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); |
57 | mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; | 89 | + |
58 | @@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | 90 | + /* tail call, with the return address back inline. */ |
59 | bool locked = false; | 91 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); |
60 | MemTxResult r; | 92 | + tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld |
61 | 93 | + : helper_unaligned_st), true); | |
62 | + if (iotlbentry->attrs.byte_swap) { | 94 | + return true; |
63 | + op ^= MO_BSWAP; | 95 | +} |
96 | + | ||
97 | +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
98 | +{ | ||
99 | + return tcg_out_fail_alignment(s, l); | ||
100 | +} | ||
101 | + | ||
102 | +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
103 | +{ | ||
104 | + return tcg_out_fail_alignment(s, l); | ||
105 | +} | ||
106 | + | ||
107 | #endif /* CONFIG_SOFTMMU */ | ||
108 | |||
109 | /* | ||
110 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type) | ||
111 | MemOp opc; | ||
112 | #if defined(CONFIG_SOFTMMU) | ||
113 | tcg_insn_unit *label_ptr[1]; | ||
114 | +#else | ||
115 | + unsigned a_bits; | ||
116 | #endif | ||
117 | TCGReg base; | ||
118 | |||
119 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type) | ||
120 | data_regl, addr_regl, | ||
121 | s->code_ptr, label_ptr); | ||
122 | #else | ||
123 | + a_bits = get_alignment_bits(opc); | ||
124 | + if (a_bits) { | ||
125 | + tcg_out_test_alignment(s, true, addr_regl, a_bits); | ||
64 | + } | 126 | + } |
65 | + | 127 | base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); |
66 | section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); | 128 | TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; |
67 | mr = section->mr; | 129 | tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type); |
68 | mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; | 130 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) |
131 | MemOp opc; | ||
132 | #if defined(CONFIG_SOFTMMU) | ||
133 | tcg_insn_unit *label_ptr[1]; | ||
134 | +#else | ||
135 | + unsigned a_bits; | ||
136 | #endif | ||
137 | TCGReg base; | ||
138 | |||
139 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) | ||
140 | data_regl, addr_regl, | ||
141 | s->code_ptr, label_ptr); | ||
142 | #else | ||
143 | + a_bits = get_alignment_bits(opc); | ||
144 | + if (a_bits) { | ||
145 | + tcg_out_test_alignment(s, false, addr_regl, a_bits); | ||
146 | + } | ||
147 | base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); | ||
148 | TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; | ||
149 | tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc); | ||
69 | -- | 150 | -- |
70 | 2.17.1 | 151 | 2.25.1 |
71 | 152 | ||
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Support for unaligned accesses is difficult for pre-v6 hosts. |
---|---|---|---|
2 | While debian still builds for armv4, we cannot use a compile | ||
3 | time test, so test the architecture at runtime and error out. | ||
2 | 4 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
17 | Message-Id: <e70ff5814ac3656974180db6375397c43b0bc8b8.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
19 | --- | 7 | --- |
20 | hw/vfio/pci-quirks.c | 6 ++++-- | 8 | tcg/arm/tcg-target.c.inc | 5 +++++ |
21 | 1 file changed, 4 insertions(+), 2 deletions(-) | 9 | 1 file changed, 5 insertions(+) |
22 | 10 | ||
23 | diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c | 11 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
24 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/hw/vfio/pci-quirks.c | 13 | --- a/tcg/arm/tcg-target.c.inc |
26 | +++ b/hw/vfio/pci-quirks.c | 14 | +++ b/tcg/arm/tcg-target.c.inc |
27 | @@ -XXX,XX +XXX,XX @@ | 15 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s) |
28 | */ | 16 | if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') { |
29 | 17 | arm_arch = pl[1] - '0'; | |
30 | #include "qemu/osdep.h" | 18 | } |
31 | +#include "exec/memop.h" | 19 | + |
32 | #include "qemu/units.h" | 20 | + if (arm_arch < 6) { |
33 | #include "qemu/error-report.h" | 21 | + error_report("TCG: ARMv%d is unsupported; exiting", arm_arch); |
34 | #include "qemu/main-loop.h" | 22 | + exit(EXIT_FAILURE); |
35 | @@ -XXX,XX +XXX,XX @@ static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr, | 23 | + } |
36 | |||
37 | /* Write to the proper guest MSI-X table instead */ | ||
38 | memory_region_dispatch_write(&vdev->pdev.msix_table_mmio, | ||
39 | - offset, val, size, | ||
40 | + offset, val, size_memop(size), | ||
41 | MEMTXATTRS_UNSPECIFIED); | ||
42 | } | ||
43 | return; /* Do not write guest MSI-X data to hardware */ | ||
44 | @@ -XXX,XX +XXX,XX @@ static uint64_t vfio_rtl8168_quirk_data_read(void *opaque, | ||
45 | if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) { | ||
46 | hwaddr offset = rtl->addr & 0xfff; | ||
47 | memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset, | ||
48 | - &data, size, MEMTXATTRS_UNSPECIFIED); | ||
49 | + &data, size_memop(size), | ||
50 | + MEMTXATTRS_UNSPECIFIED); | ||
51 | trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data); | ||
52 | } | 24 | } |
53 | 25 | ||
26 | tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; | ||
54 | -- | 27 | -- |
55 | 2.17.1 | 28 | 2.25.1 |
56 | 29 | ||
1 | From: David Hildenbrand <david@redhat.com> | 1 | This is now always true, since we require armv6. |
---|---|---|---|
2 | 2 | ||
3 | Let's also allow probing other access types. | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
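
For example, probing a read before committing any guest-visible state
change (sketch only, using the new signature from this patch):

    /* Faults like a real load if the access is not permitted; otherwise
     * returns the host address, or NULL for size == 0 or I/O pages. */
    void *haddr = probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, ra);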
4 | |||
5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
7 | Message-Id: <20190830100959.26615-3-david@redhat.com> | ||
8 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
9 | --- | 5 | --- |
10 | include/exec/exec-all.h | 10 ++++++++-- | 6 | tcg/arm/tcg-target.h | 3 +-- |
11 | accel/tcg/cputlb.c | 43 ++++++++++++++++++++++++++++++----------- | 7 | tcg/arm/tcg-target.c.inc | 35 ++++++----------------------------- |
12 | accel/tcg/user-exec.c | 26 +++++++++++++++++++------ | 8 | 2 files changed, 7 insertions(+), 31 deletions(-) |
13 | 3 files changed, 60 insertions(+), 19 deletions(-) | ||
14 | 9 | ||
15 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | 10 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h |
16 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/include/exec/exec-all.h | 12 | --- a/tcg/arm/tcg-target.h |
18 | +++ b/include/exec/exec-all.h | 13 | +++ b/tcg/arm/tcg-target.h |
19 | @@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, | 14 | @@ -XXX,XX +XXX,XX @@ |
20 | { | 15 | |
16 | extern int arm_arch; | ||
17 | |||
18 | -#define use_armv5t_instructions (__ARM_ARCH >= 5 || arm_arch >= 5) | ||
19 | #define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6) | ||
20 | #define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7) | ||
21 | |||
22 | @@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions; | ||
23 | #define TCG_TARGET_HAS_eqv_i32 0 | ||
24 | #define TCG_TARGET_HAS_nand_i32 0 | ||
25 | #define TCG_TARGET_HAS_nor_i32 0 | ||
26 | -#define TCG_TARGET_HAS_clz_i32 use_armv5t_instructions | ||
27 | +#define TCG_TARGET_HAS_clz_i32 1 | ||
28 | #define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions | ||
29 | #define TCG_TARGET_HAS_ctpop_i32 0 | ||
30 | #define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions | ||
31 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/tcg/arm/tcg-target.c.inc | ||
34 | +++ b/tcg/arm/tcg-target.c.inc | ||
35 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn) | ||
36 | * Unless the C portion of QEMU is compiled as thumb, we don't need | ||
37 | * true BX semantics; merely a branch to an address held in a register. | ||
38 | */ | ||
39 | - if (use_armv5t_instructions) { | ||
40 | - tcg_out_bx_reg(s, cond, rn); | ||
41 | - } else { | ||
42 | - tcg_out_mov_reg(s, cond, TCG_REG_PC, rn); | ||
43 | - } | ||
44 | + tcg_out_bx_reg(s, cond, rn); | ||
21 | } | 45 | } |
22 | #endif | 46 | |
23 | -void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 47 | static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc, |
24 | - uintptr_t retaddr); | 48 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr) |
25 | +void *probe_access(CPUArchState *env, target_ulong addr, int size, | 49 | } |
26 | + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); | 50 | |
27 | + | 51 | /* LDR is interworking from v5t. */ |
28 | +static inline void *probe_write(CPUArchState *env, target_ulong addr, int size, | 52 | - if (arm_mode || use_armv5t_instructions) { |
29 | + int mmu_idx, uintptr_t retaddr) | 53 | - tcg_out_movi_pool(s, cond, TCG_REG_PC, addri); |
30 | +{ | 54 | - return; |
31 | + return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); | 55 | - } |
32 | +} | 56 | - |
33 | 57 | - /* else v4t */ | |
34 | #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ | 58 | - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); |
35 | 59 | - tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP); | |
36 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 60 | + tcg_out_movi_pool(s, cond, TCG_REG_PC, addri); |
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/accel/tcg/cputlb.c | ||
39 | +++ b/accel/tcg/cputlb.c | ||
40 | @@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) | ||
41 | return qemu_ram_addr_from_host_nofail(p); | ||
42 | } | 61 | } |
43 | 62 | ||
44 | -/* Probe for whether the specified guest write access is permitted. | 63 | /* |
45 | - * If it is not permitted then an exception will be taken in the same | 64 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr) |
46 | - * way as if this were a real write access (and we will not return). | 65 | if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) { |
47 | +/* | 66 | if (arm_mode) { |
48 | + * Probe for whether the specified guest access is permitted. If it is not | 67 | tcg_out_bl_imm(s, COND_AL, disp); |
49 | + * permitted then an exception will be taken in the same way as if this | 68 | - return; |
50 | + * were a real access (and we will not return). | 69 | - } |
51 | * If the size is 0 or the page requires I/O access, returns NULL; otherwise, | 70 | - if (use_armv5t_instructions) { |
52 | * returns the address of the host page similar to tlb_vaddr_to_host(). | 71 | + } else { |
53 | */ | 72 | tcg_out_blx_imm(s, disp); |
54 | -void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 73 | - return; |
55 | - uintptr_t retaddr) | ||
56 | +void *probe_access(CPUArchState *env, target_ulong addr, int size, | ||
57 | + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) | ||
58 | { | ||
59 | uintptr_t index = tlb_index(env, mmu_idx, addr); | ||
60 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); | ||
61 | - target_ulong tlb_addr = tlb_addr_write(entry); | ||
62 | + target_ulong tlb_addr; | ||
63 | + size_t elt_ofs; | ||
64 | + int wp_access; | ||
65 | |||
66 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); | ||
67 | |||
68 | + switch (access_type) { | ||
69 | + case MMU_DATA_LOAD: | ||
70 | + elt_ofs = offsetof(CPUTLBEntry, addr_read); | ||
71 | + wp_access = BP_MEM_READ; | ||
72 | + break; | ||
73 | + case MMU_DATA_STORE: | ||
74 | + elt_ofs = offsetof(CPUTLBEntry, addr_write); | ||
75 | + wp_access = BP_MEM_WRITE; | ||
76 | + break; | ||
77 | + case MMU_INST_FETCH: | ||
78 | + elt_ofs = offsetof(CPUTLBEntry, addr_code); | ||
79 | + wp_access = BP_MEM_READ; | ||
80 | + break; | ||
81 | + default: | ||
82 | + g_assert_not_reached(); | ||
83 | + } | ||
84 | + tlb_addr = tlb_read_ofs(entry, elt_ofs); | ||
85 | + | ||
86 | if (unlikely(!tlb_hit(tlb_addr, addr))) { | ||
87 | - if (!VICTIM_TLB_HIT(addr_write, addr)) { | ||
88 | - tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, | ||
89 | - mmu_idx, retaddr); | ||
90 | + if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, | ||
91 | + addr & TARGET_PAGE_MASK)) { | ||
92 | + tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr); | ||
93 | /* TLB resize via tlb_fill may have moved the entry. */ | ||
94 | index = tlb_index(env, mmu_idx, addr); | ||
95 | entry = tlb_entry(env, mmu_idx, addr); | ||
96 | } | 74 | } |
97 | - tlb_addr = tlb_addr_write(entry); | 75 | + return; |
98 | + tlb_addr = tlb_read_ofs(entry, elt_ofs); | ||
99 | } | 76 | } |
100 | 77 | ||
101 | if (!size) { | 78 | - if (use_armv5t_instructions) { |
102 | @@ -XXX,XX +XXX,XX @@ void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 79 | - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); |
103 | if (tlb_addr & TLB_WATCHPOINT) { | 80 | - tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP); |
104 | cpu_check_watchpoint(env_cpu(env), addr, size, | 81 | - } else if (arm_mode) { |
105 | env_tlb(env)->d[mmu_idx].iotlb[index].attrs, | 82 | - /* ??? Know that movi_pool emits exactly 1 insn. */ |
106 | - BP_MEM_WRITE, retaddr); | 83 | - tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC); |
107 | + wp_access, retaddr); | 84 | - tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri); |
108 | } | 85 | - } else { |
109 | 86 | - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); | |
110 | if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) { | 87 | - tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC); |
111 | diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c | 88 | - tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP); |
112 | index XXXXXXX..XXXXXXX 100644 | 89 | - } |
113 | --- a/accel/tcg/user-exec.c | 90 | + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); |
114 | +++ b/accel/tcg/user-exec.c | 91 | + tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP); |
115 | @@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, | ||
116 | g_assert_not_reached(); | ||
117 | } | 92 | } |
118 | 93 | ||
119 | -void *probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 94 | static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l) |
120 | - uintptr_t retaddr) | ||
121 | +void *probe_access(CPUArchState *env, target_ulong addr, int size, | ||
122 | + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) | ||
123 | { | ||
124 | + int flags; | ||
125 | + | ||
126 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); | ||
127 | |||
128 | - if (!guest_addr_valid(addr) || | ||
129 | - page_check_range(addr, size, PAGE_WRITE) < 0) { | ||
130 | + switch (access_type) { | ||
131 | + case MMU_DATA_STORE: | ||
132 | + flags = PAGE_WRITE; | ||
133 | + break; | ||
134 | + case MMU_DATA_LOAD: | ||
135 | + flags = PAGE_READ; | ||
136 | + break; | ||
137 | + case MMU_INST_FETCH: | ||
138 | + flags = PAGE_EXEC; | ||
139 | + break; | ||
140 | + default: | ||
141 | + g_assert_not_reached(); | ||
142 | + } | ||
143 | + | ||
144 | + if (!guest_addr_valid(addr) || page_check_range(addr, size, flags) < 0) { | ||
145 | CPUState *cpu = env_cpu(env); | ||
146 | CPUClass *cc = CPU_GET_CLASS(cpu); | ||
147 | - | ||
148 | - cc->tlb_fill(cpu, addr, size, MMU_DATA_STORE, MMU_USER_IDX, false, | ||
149 | + cc->tlb_fill(cpu, addr, size, access_type, MMU_USER_IDX, false, | ||
150 | retaddr); | ||
151 | g_assert_not_reached(); | ||
152 | } | ||
153 | -- | 95 | -- |
154 | 2.17.1 | 96 | 2.25.1 |
155 | 97 | ||
156 | 98 | diff view generated by jsdifflib |
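An illustrative caller for the probe_access() interface introduced in the left-hand series: validate the whole access once, then either store through the returned host pointer or fall back to an ordinary store helper. A minimal sketch; the fallback helper cpu_stq_mmuidx_ra is assumed here for illustration and is not part of the patch.

```c
/* Sketch: an 8-byte guest store that cannot fault halfway through.
 * probe_access() returns a host pointer, returns NULL (zero size or
 * an I/O / not-dirty page), or raises the guest exception and never
 * returns.  The access must not cross a page; probe_access() asserts
 * this via g_assert(-(addr | TARGET_PAGE_MASK) >= size). */
static void store_u64_checked(CPUArchState *env, target_ulong addr,
                              uint64_t val, int mmu_idx, uintptr_t ra)
{
    void *host = probe_access(env, addr, 8, MMU_DATA_STORE, mmu_idx, ra);

    if (host) {
        stq_p(host, val);    /* fast path: direct store into host RAM */
    } else {
        /* I/O or not-dirty page: take the normal slow path
         * (cpu_stq_mmuidx_ra is an assumed helper name). */
        cpu_stq_mmuidx_ra(env, addr, val, mmu_idx, ra);
    }
}
```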
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | This is now always true, since we require armv6. |
---|---|---|---|
2 | 2 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
17 | Message-Id: <ebf1f78029d5ac1de1739a11d679740a87a1f02f.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
19 | --- | 5 | --- |
20 | hw/virtio/virtio-pci.c | 7 +++++-- | 6 | tcg/arm/tcg-target.h | 1 - |
21 | 1 file changed, 5 insertions(+), 2 deletions(-) | 7 | tcg/arm/tcg-target.c.inc | 192 ++++++--------------------------------- |
8 | 2 files changed, 27 insertions(+), 166 deletions(-) | ||
22 | 9 | ||
23 | diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c | 10 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h |
24 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/hw/virtio/virtio-pci.c | 12 | --- a/tcg/arm/tcg-target.h |
26 | +++ b/hw/virtio/virtio-pci.c | 13 | +++ b/tcg/arm/tcg-target.h |
27 | @@ -XXX,XX +XXX,XX @@ | 14 | @@ -XXX,XX +XXX,XX @@ |
28 | 15 | ||
29 | #include "qemu/osdep.h" | 16 | extern int arm_arch; |
30 | 17 | ||
31 | +#include "exec/memop.h" | 18 | -#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6) |
32 | #include "standard-headers/linux/virtio_pci.h" | 19 | #define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7) |
33 | #include "hw/virtio/virtio.h" | 20 | |
34 | #include "migration/qemu-file-types.h" | 21 | #undef TCG_TARGET_STACK_GROWSUP |
35 | @@ -XXX,XX +XXX,XX @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, | 22 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
36 | /* As length is under guest control, handle illegal values. */ | 23 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/tcg/arm/tcg-target.c.inc | ||
25 | +++ b/tcg/arm/tcg-target.c.inc | ||
26 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc, | ||
27 | static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd, | ||
28 | TCGReg rn, TCGReg rm) | ||
29 | { | ||
30 | - /* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */ | ||
31 | - if (!use_armv6_instructions && rd == rn) { | ||
32 | - if (rd == rm) { | ||
33 | - /* rd == rn == rm; copy an input to tmp first. */ | ||
34 | - tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn); | ||
35 | - rm = rn = TCG_REG_TMP; | ||
36 | - } else { | ||
37 | - rn = rm; | ||
38 | - rm = rd; | ||
39 | - } | ||
40 | - } | ||
41 | /* mul */ | ||
42 | tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn); | ||
43 | } | ||
44 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd, | ||
45 | static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0, | ||
46 | TCGReg rd1, TCGReg rn, TCGReg rm) | ||
47 | { | ||
48 | - /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */ | ||
49 | - if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) { | ||
50 | - if (rd0 == rm || rd1 == rm) { | ||
51 | - tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn); | ||
52 | - rn = TCG_REG_TMP; | ||
53 | - } else { | ||
54 | - TCGReg t = rn; | ||
55 | - rn = rm; | ||
56 | - rm = t; | ||
57 | - } | ||
58 | - } | ||
59 | /* umull */ | ||
60 | tcg_out32(s, (cond << 28) | 0x00800090 | | ||
61 | (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); | ||
62 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0, | ||
63 | static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0, | ||
64 | TCGReg rd1, TCGReg rn, TCGReg rm) | ||
65 | { | ||
66 | - /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */ | ||
67 | - if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) { | ||
68 | - if (rd0 == rm || rd1 == rm) { | ||
69 | - tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn); | ||
70 | - rn = TCG_REG_TMP; | ||
71 | - } else { | ||
72 | - TCGReg t = rn; | ||
73 | - rn = rm; | ||
74 | - rm = t; | ||
75 | - } | ||
76 | - } | ||
77 | /* smull */ | ||
78 | tcg_out32(s, (cond << 28) | 0x00c00090 | | ||
79 | (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); | ||
80 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_udiv(TCGContext *s, ARMCond cond, | ||
81 | |||
82 | static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) | ||
83 | { | ||
84 | - if (use_armv6_instructions) { | ||
85 | - /* sxtb */ | ||
86 | - tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn); | ||
87 | - } else { | ||
88 | - tcg_out_dat_reg(s, cond, ARITH_MOV, | ||
89 | - rd, 0, rn, SHIFT_IMM_LSL(24)); | ||
90 | - tcg_out_dat_reg(s, cond, ARITH_MOV, | ||
91 | - rd, 0, rd, SHIFT_IMM_ASR(24)); | ||
92 | - } | ||
93 | + /* sxtb */ | ||
94 | + tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn); | ||
95 | } | ||
96 | |||
97 | static void __attribute__((unused)) | ||
98 | @@ -XXX,XX +XXX,XX @@ tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) | ||
99 | |||
100 | static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) | ||
101 | { | ||
102 | - if (use_armv6_instructions) { | ||
103 | - /* sxth */ | ||
104 | - tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn); | ||
105 | - } else { | ||
106 | - tcg_out_dat_reg(s, cond, ARITH_MOV, | ||
107 | - rd, 0, rn, SHIFT_IMM_LSL(16)); | ||
108 | - tcg_out_dat_reg(s, cond, ARITH_MOV, | ||
109 | - rd, 0, rd, SHIFT_IMM_ASR(16)); | ||
110 | - } | ||
111 | + /* sxth */ | ||
112 | + tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn); | ||
113 | } | ||
114 | |||
115 | static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) | ||
116 | { | ||
117 | - if (use_armv6_instructions) { | ||
118 | - /* uxth */ | ||
119 | - tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn); | ||
120 | - } else { | ||
121 | - tcg_out_dat_reg(s, cond, ARITH_MOV, | ||
122 | - rd, 0, rn, SHIFT_IMM_LSL(16)); | ||
123 | - tcg_out_dat_reg(s, cond, ARITH_MOV, | ||
124 | - rd, 0, rd, SHIFT_IMM_LSR(16)); | ||
125 | - } | ||
126 | + /* uxth */ | ||
127 | + tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn); | ||
128 | } | ||
129 | |||
130 | static void tcg_out_bswap16(TCGContext *s, ARMCond cond, | ||
131 | TCGReg rd, TCGReg rn, int flags) | ||
132 | { | ||
133 | - if (use_armv6_instructions) { | ||
134 | - if (flags & TCG_BSWAP_OS) { | ||
135 | - /* revsh */ | ||
136 | - tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn); | ||
137 | - return; | ||
138 | - } | ||
139 | - | ||
140 | - /* rev16 */ | ||
141 | - tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); | ||
142 | - if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { | ||
143 | - /* uxth */ | ||
144 | - tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd); | ||
145 | - } | ||
146 | + if (flags & TCG_BSWAP_OS) { | ||
147 | + /* revsh */ | ||
148 | + tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn); | ||
37 | return; | 149 | return; |
38 | } | 150 | } |
39 | - memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED); | 151 | |
40 | + memory_region_dispatch_write(mr, addr, val, size_memop(len), | 152 | - if (flags == 0) { |
41 | + MEMTXATTRS_UNSPECIFIED); | 153 | - /* |
42 | } | 154 | - * For stores, no input or output extension: |
43 | 155 | - * rn = xxAB | |
44 | static void | 156 | - * lsr tmp, rn, #8 tmp = 0xxA |
45 | @@ -XXX,XX +XXX,XX @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, | 157 | - * and tmp, tmp, #0xff tmp = 000A |
46 | /* Make sure caller aligned buf properly */ | 158 | - * orr rd, tmp, rn, lsl #8 rd = xABA |
47 | assert(!(((uintptr_t)buf) & (len - 1))); | 159 | - */ |
48 | 160 | - tcg_out_dat_reg(s, cond, ARITH_MOV, | |
49 | - memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED); | 161 | - TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8)); |
50 | + memory_region_dispatch_read(mr, addr, &val, size_memop(len), | 162 | - tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff); |
51 | + MEMTXATTRS_UNSPECIFIED); | 163 | - tcg_out_dat_reg(s, cond, ARITH_ORR, |
52 | switch (len) { | 164 | - rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8)); |
53 | case 1: | 165 | - return; |
54 | pci_set_byte(buf, val); | 166 | + /* rev16 */ |
167 | + tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); | ||
168 | + if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { | ||
169 | + /* uxth */ | ||
170 | + tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd); | ||
171 | } | ||
172 | - | ||
173 | - /* | ||
174 | - * Byte swap, leaving the result at the top of the register. | ||
175 | - * We will then shift down, zero or sign-extending. | ||
176 | - */ | ||
177 | - if (flags & TCG_BSWAP_IZ) { | ||
178 | - /* | ||
179 | - * rn = 00AB | ||
180 | - * ror tmp, rn, #8 tmp = B00A | ||
181 | - * orr tmp, tmp, tmp, lsl #16 tmp = BA00 | ||
182 | - */ | ||
183 | - tcg_out_dat_reg(s, cond, ARITH_MOV, | ||
184 | - TCG_REG_TMP, 0, rn, SHIFT_IMM_ROR(8)); | ||
185 | - tcg_out_dat_reg(s, cond, ARITH_ORR, | ||
186 | - TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP, | ||
187 | - SHIFT_IMM_LSL(16)); | ||
188 | - } else { | ||
189 | - /* | ||
190 | - * rn = xxAB | ||
191 | - * and tmp, rn, #0xff00 tmp = 00A0 | ||
192 | - * lsl tmp, tmp, #8 tmp = 0A00 | ||
193 | - * orr tmp, tmp, rn, lsl #24 tmp = BA00 | ||
194 | - */ | ||
195 | - tcg_out_dat_rI(s, cond, ARITH_AND, TCG_REG_TMP, rn, 0xff00, 1); | ||
196 | - tcg_out_dat_reg(s, cond, ARITH_MOV, | ||
197 | - TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSL(8)); | ||
198 | - tcg_out_dat_reg(s, cond, ARITH_ORR, | ||
199 | - TCG_REG_TMP, TCG_REG_TMP, rn, SHIFT_IMM_LSL(24)); | ||
200 | - } | ||
201 | - tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, TCG_REG_TMP, | ||
202 | - (flags & TCG_BSWAP_OS | ||
203 | - ? SHIFT_IMM_ASR(8) : SHIFT_IMM_LSR(8))); | ||
204 | } | ||
205 | |||
206 | static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) | ||
207 | { | ||
208 | - if (use_armv6_instructions) { | ||
209 | - /* rev */ | ||
210 | - tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn); | ||
211 | - } else { | ||
212 | - tcg_out_dat_reg(s, cond, ARITH_EOR, | ||
213 | - TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16)); | ||
214 | - tcg_out_dat_imm(s, cond, ARITH_BIC, | ||
215 | - TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800); | ||
216 | - tcg_out_dat_reg(s, cond, ARITH_MOV, | ||
217 | - rd, 0, rn, SHIFT_IMM_ROR(8)); | ||
218 | - tcg_out_dat_reg(s, cond, ARITH_EOR, | ||
219 | - rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8)); | ||
220 | - } | ||
221 | + /* rev */ | ||
222 | + tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn); | ||
223 | } | ||
224 | |||
225 | static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd, | ||
226 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) | ||
227 | { | ||
228 | if (use_armv7_instructions) { | ||
229 | tcg_out32(s, INSN_DMB_ISH); | ||
230 | - } else if (use_armv6_instructions) { | ||
231 | + } else { | ||
232 | tcg_out32(s, INSN_DMB_MCR); | ||
233 | } | ||
234 | } | ||
235 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg, | ||
236 | if (argreg & 1) { | ||
237 | argreg++; | ||
238 | } | ||
239 | - if (use_armv6_instructions && argreg >= 4 | ||
240 | - && (arglo & 1) == 0 && arghi == arglo + 1) { | ||
241 | + if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) { | ||
242 | tcg_out_strd_8(s, COND_AL, arglo, | ||
243 | TCG_REG_CALL_STACK, (argreg - 4) * 4); | ||
244 | return argreg + 2; | ||
245 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
246 | int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) | ||
247 | : offsetof(CPUTLBEntry, addr_write)); | ||
248 | int fast_off = TLB_MASK_TABLE_OFS(mem_index); | ||
249 | - int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); | ||
250 | - int table_off = fast_off + offsetof(CPUTLBDescFast, table); | ||
251 | unsigned s_bits = opc & MO_SIZE; | ||
252 | unsigned a_bits = get_alignment_bits(opc); | ||
253 | |||
254 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
255 | } | ||
256 | |||
257 | /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */ | ||
258 | - if (use_armv6_instructions) { | ||
259 | - tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); | ||
260 | - } else { | ||
261 | - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off); | ||
262 | - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off); | ||
263 | - } | ||
264 | + tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); | ||
265 | |||
266 | /* Extract the tlb index from the address into R0. */ | ||
267 | tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, | ||
268 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
269 | * Load the tlb comparator into R2/R3 and the fast path addend into R1. | ||
270 | */ | ||
271 | if (cmp_off == 0) { | ||
272 | - if (use_armv6_instructions && TARGET_LONG_BITS == 64) { | ||
273 | + if (TARGET_LONG_BITS == 64) { | ||
274 | tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); | ||
275 | } else { | ||
276 | tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); | ||
277 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
278 | } else { | ||
279 | tcg_out_dat_reg(s, COND_AL, ARITH_ADD, | ||
280 | TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); | ||
281 | - if (use_armv6_instructions && TARGET_LONG_BITS == 64) { | ||
282 | + if (TARGET_LONG_BITS == 64) { | ||
283 | tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); | ||
284 | } else { | ||
285 | tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); | ||
286 | } | ||
287 | } | ||
288 | - if (!use_armv6_instructions && TARGET_LONG_BITS == 64) { | ||
289 | - tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4); | ||
290 | - } | ||
291 | |||
292 | /* Load the tlb addend. */ | ||
293 | tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, | ||
294 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
295 | TCGReg argreg, datalo, datahi; | ||
296 | MemOpIdx oi = lb->oi; | ||
297 | MemOp opc = get_memop(oi); | ||
298 | - void *func; | ||
299 | |||
300 | if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { | ||
301 | return false; | ||
302 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
303 | argreg = tcg_out_arg_imm32(s, argreg, oi); | ||
304 | argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); | ||
305 | |||
306 | - /* For armv6 we can use the canonical unsigned helpers and minimize | ||
307 | - icache usage. For pre-armv6, use the signed helpers since we do | ||
308 | - not have a single insn sign-extend. */ | ||
309 | - if (use_armv6_instructions) { | ||
310 | - func = qemu_ld_helpers[opc & MO_SIZE]; | ||
311 | - } else { | ||
312 | - func = qemu_ld_helpers[opc & MO_SSIZE]; | ||
313 | - if (opc & MO_SIGN) { | ||
314 | - opc = MO_UL; | ||
315 | - } | ||
316 | - } | ||
317 | - tcg_out_call(s, func); | ||
318 | + /* Use the canonical unsigned helpers and minimize icache usage. */ | ||
319 | + tcg_out_call(s, qemu_ld_helpers[opc & MO_SIZE]); | ||
320 | |||
321 | datalo = lb->datalo_reg; | ||
322 | datahi = lb->datahi_reg; | ||
323 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, | ||
324 | break; | ||
325 | case MO_UQ: | ||
326 | /* Avoid ldrd for user-only emulation, to handle unaligned. */ | ||
327 | - if (USING_SOFTMMU && use_armv6_instructions | ||
328 | + if (USING_SOFTMMU | ||
329 | && (datalo & 1) == 0 && datahi == datalo + 1) { | ||
330 | tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend); | ||
331 | } else if (datalo != addend) { | ||
332 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, | ||
333 | break; | ||
334 | case MO_UQ: | ||
335 | /* Avoid ldrd for user-only emulation, to handle unaligned. */ | ||
336 | - if (USING_SOFTMMU && use_armv6_instructions | ||
337 | + if (USING_SOFTMMU | ||
338 | && (datalo & 1) == 0 && datahi == datalo + 1) { | ||
339 | tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0); | ||
340 | } else if (datalo == addrlo) { | ||
341 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc, | ||
342 | break; | ||
343 | case MO_64: | ||
344 | /* Avoid strd for user-only emulation, to handle unaligned. */ | ||
345 | - if (USING_SOFTMMU && use_armv6_instructions | ||
346 | + if (USING_SOFTMMU | ||
347 | && (datalo & 1) == 0 && datahi == datalo + 1) { | ||
348 | tcg_out_strd_r(s, cond, datalo, addrlo, addend); | ||
349 | } else { | ||
350 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, | ||
351 | break; | ||
352 | case MO_64: | ||
353 | /* Avoid strd for user-only emulation, to handle unaligned. */ | ||
354 | - if (USING_SOFTMMU && use_armv6_instructions | ||
355 | + if (USING_SOFTMMU | ||
356 | && (datalo & 1) == 0 && datahi == datalo + 1) { | ||
357 | tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0); | ||
358 | } else { | ||
55 | -- | 359 | -- |
56 | 2.17.1 | 360 | 2.25.1 |
57 | 361 | ||
58 | 362 | diff view generated by jsdifflib |
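A minimal sketch of the interim no-op size_memop() that the left-hand commit message describes, together with the shape it can plausibly take once every caller passes a MemOp. The log2 encoding in the second variant is an assumption about the final form, not something this patch introduces.

```c
/* Interim: "unsigned size" and "MemOp op" share a representation,
 * so the conversion is the identity and behaviour is unchanged. */
static inline MemOp size_memop(unsigned size)
{
    return size;
}

/* Assumed final shape: byte counts 1/2/4/8 map to MO_8..MO_64,
 * i.e. the log2 of the size (MO_8 == 0, MO_16 == 1, ...). */
static inline MemOp size_memop_final(unsigned size)
{
    return ctz32(size);
}
```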
1 | We want to move the check for watchpoints from | 1 | We will shortly allow the use of unaligned memory accesses, |
---|---|---|---|
2 | memory_region_section_get_iotlb to tlb_set_page_with_attrs. | 2 | and the LDRD/STRD instructions require proper alignment. Use
3 | Isolate the loop over watchpoints to an exported function. | 3 | get_alignment_bits to verify this and remove the USING_SOFTMMU guard.
4 | 4 | ||
5 | Rename the existing cpu_watchpoint_address_matches to | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
6 | watchpoint_address_matches, since it doesn't actually | ||
7 | have a cpu argument. | ||
8 | |||
9 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 7 | --- |
12 | include/hw/core/cpu.h | 7 +++++++ | 8 | tcg/arm/tcg-target.c.inc | 23 ++++++++--------------- |
13 | exec.c | 45 ++++++++++++++++++++++++++++--------------- | 9 | 1 file changed, 8 insertions(+), 15 deletions(-) |
14 | 2 files changed, 36 insertions(+), 16 deletions(-) | ||
15 | 10 | ||
16 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | 11 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
17 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/include/hw/core/cpu.h | 13 | --- a/tcg/arm/tcg-target.c.inc |
19 | +++ b/include/hw/core/cpu.h | 14 | +++ b/tcg/arm/tcg-target.c.inc |
20 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | 15 | @@ -XXX,XX +XXX,XX @@ bool use_idiv_instructions; |
21 | MemTxAttrs atr, int fl, uintptr_t ra) | 16 | bool use_neon_instructions; |
22 | { | ||
23 | } | ||
24 | + | ||
25 | +static inline int cpu_watchpoint_address_matches(CPUState *cpu, | ||
26 | + vaddr addr, vaddr len) | ||
27 | +{ | ||
28 | + return 0; | ||
29 | +} | ||
30 | #else | ||
31 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | ||
32 | int flags, CPUWatchpoint **watchpoint); | ||
33 | @@ -XXX,XX +XXX,XX @@ void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); | ||
34 | void cpu_watchpoint_remove_all(CPUState *cpu, int mask); | ||
35 | void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
36 | MemTxAttrs attrs, int flags, uintptr_t ra); | ||
37 | +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); | ||
38 | #endif | 17 | #endif |
39 | 18 | ||
40 | /** | 19 | -/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined. */ |
41 | diff --git a/exec.c b/exec.c | 20 | -#ifdef CONFIG_SOFTMMU |
42 | index XXXXXXX..XXXXXXX 100644 | 21 | -# define USING_SOFTMMU 1 |
43 | --- a/exec.c | 22 | -#else |
44 | +++ b/exec.c | 23 | -# define USING_SOFTMMU 0 |
45 | @@ -XXX,XX +XXX,XX @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask) | 24 | -#endif |
46 | * partially or completely with the address range covered by the | 25 | - |
47 | * access). | 26 | #ifdef CONFIG_DEBUG_TCG |
48 | */ | 27 | static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { |
49 | -static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, | 28 | "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", |
50 | - vaddr addr, | 29 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, |
51 | - vaddr len) | 30 | tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend); |
52 | +static inline bool watchpoint_address_matches(CPUWatchpoint *wp, | 31 | break; |
53 | + vaddr addr, vaddr len) | 32 | case MO_UQ: |
54 | { | 33 | - /* Avoid ldrd for user-only emulation, to handle unaligned. */ |
55 | /* We know the lengths are non-zero, but a little caution is | 34 | - if (USING_SOFTMMU |
56 | * required to avoid errors in the case where the range ends | 35 | + /* LDRD requires alignment; double-check that. */ |
57 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, | 36 | + if (get_alignment_bits(opc) >= MO_64 |
58 | 37 | && (datalo & 1) == 0 && datahi == datalo + 1) { | |
59 | return !(addr > wpend || wp->vaddr > addrend); | 38 | tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend); |
60 | } | 39 | } else if (datalo != addend) { |
61 | + | 40 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, |
62 | +/* Return flags for watchpoints that match addr + prot. */ | 41 | tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); |
63 | +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) | 42 | break; |
64 | +{ | 43 | case MO_UQ: |
65 | + CPUWatchpoint *wp; | 44 | - /* Avoid ldrd for user-only emulation, to handle unaligned. */ |
66 | + int ret = 0; | 45 | - if (USING_SOFTMMU |
67 | + | 46 | + /* LDRD requires alignment; double-check that. */ |
68 | + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { | 47 | + if (get_alignment_bits(opc) >= MO_64 |
69 | + if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) { | 48 | && (datalo & 1) == 0 && datahi == datalo + 1) { |
70 | + ret |= wp->flags; | 49 | tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0); |
71 | + } | 50 | } else if (datalo == addrlo) { |
72 | + } | 51 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc, |
73 | + return ret; | 52 | tcg_out_st32_r(s, cond, datalo, addrlo, addend); |
74 | +} | 53 | break; |
75 | #endif /* !CONFIG_USER_ONLY */ | 54 | case MO_64: |
76 | 55 | - /* Avoid strd for user-only emulation, to handle unaligned. */ | |
77 | /* Add a breakpoint. */ | 56 | - if (USING_SOFTMMU |
78 | @@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, | 57 | + /* STRD requires alignment; double-check that. */ |
79 | target_ulong *address) | 58 | + if (get_alignment_bits(opc) >= MO_64 |
80 | { | 59 | && (datalo & 1) == 0 && datahi == datalo + 1) { |
81 | hwaddr iotlb; | 60 | tcg_out_strd_r(s, cond, datalo, addrlo, addend); |
82 | - CPUWatchpoint *wp; | 61 | } else { |
83 | + int flags, match; | 62 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, |
84 | 63 | tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); | |
85 | if (memory_region_is_ram(section->mr)) { | 64 | break; |
86 | /* Normal RAM. */ | 65 | case MO_64: |
87 | @@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, | 66 | - /* Avoid strd for user-only emulation, to handle unaligned. */ |
88 | iotlb += xlat; | 67 | - if (USING_SOFTMMU |
89 | } | 68 | + /* STRD requires alignment; double-check that. */ |
90 | 69 | + if (get_alignment_bits(opc) >= MO_64 | |
91 | - /* Make accesses to pages with watchpoints go via the | 70 | && (datalo & 1) == 0 && datahi == datalo + 1) { |
92 | - watchpoint trap routines. */ | 71 | tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0); |
93 | - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { | 72 | } else { |
94 | - if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) { | ||
95 | - /* Avoid trapping reads of pages with a write breakpoint. */ | ||
96 | - if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { | ||
97 | - iotlb = PHYS_SECTION_WATCH + paddr; | ||
98 | - *address |= TLB_MMIO; | ||
99 | - break; | ||
100 | - } | ||
101 | - } | ||
102 | + /* Avoid trapping reads of pages with a write breakpoint. */ | ||
103 | + match = (prot & PAGE_READ ? BP_MEM_READ : 0) | ||
104 | + | (prot & PAGE_WRITE ? BP_MEM_WRITE : 0); | ||
105 | + flags = cpu_watchpoint_address_matches(cpu, vaddr, TARGET_PAGE_SIZE); | ||
106 | + if (flags & match) { | ||
107 | + /* | ||
108 | + * Make accesses to pages with watchpoints go via the | ||
109 | + * watchpoint trap routines. | ||
110 | + */ | ||
111 | + iotlb = PHYS_SECTION_WATCH + paddr; | ||
112 | + *address |= TLB_MMIO; | ||
113 | } | ||
114 | |||
115 | return iotlb; | ||
116 | @@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
117 | |||
118 | addr = cc->adjust_watchpoint_address(cpu, addr, len); | ||
119 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { | ||
120 | - if (cpu_watchpoint_address_matches(wp, addr, len) | ||
121 | + if (watchpoint_address_matches(wp, addr, len) | ||
122 | && (wp->flags & flags)) { | ||
123 | if (flags == BP_MEM_READ) { | ||
124 | wp->flags |= BP_WATCHPOINT_HIT_READ; | ||
125 | -- | 73 | -- |
126 | 2.17.1 | 74 | 2.25.1 |
127 | 75 | ||
128 | 76 | diff view generated by jsdifflib |
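A condensed view of how the newly exported cpu_watchpoint_address_matches() from the left-hand patch is consumed at TLB-fill time, mirroring the reworked logic in memory_region_section_get_iotlb():

```c
/* Fold the page protection into BP_MEM_* flags, then compare against
 * the union of the flags of all watchpoints overlapping the page. */
int match = (prot & PAGE_READ ? BP_MEM_READ : 0)
          | (prot & PAGE_WRITE ? BP_MEM_WRITE : 0);
int flags = cpu_watchpoint_address_matches(cpu, vaddr, TARGET_PAGE_SIZE);

if (flags & match) {
    *address |= TLB_MMIO;  /* route accesses via the watchpoint traps */
}
```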
1 | From: David Hildenbrand <david@redhat.com> | 1 | From armv6 onward, the architecture supports unaligned accesses.
---|---|---|---|
2 | All we need to do is perform the correct alignment check | ||
3 | in tcg_out_tlb_read. | ||
2 | 4 | ||
3 | If I'm not completely wrong, we are dealing with guest addresses here | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | and not with host addresses. Use the right check. | ||
5 | |||
6 | Fixes: c5a7392cfb96 ("s390x/tcg: Provide probe_write_access helper") | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
9 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
10 | Message-Id: <20190826075112.25637-2-david@redhat.com> | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | --- | 7 | --- |
13 | target/s390x/mem_helper.c | 2 +- | 8 | tcg/arm/tcg-target.c.inc | 41 ++++++++++++++++++++-------------------- |
14 | 1 file changed, 1 insertion(+), 1 deletion(-) | 9 | 1 file changed, 21 insertions(+), 20 deletions(-) |
15 | 10 | ||
16 | diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c | 11 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
17 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/s390x/mem_helper.c | 13 | --- a/tcg/arm/tcg-target.c.inc |
19 | +++ b/target/s390x/mem_helper.c | 14 | +++ b/tcg/arm/tcg-target.c.inc |
20 | @@ -XXX,XX +XXX,XX @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, | 15 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, |
21 | uintptr_t ra) | 16 | int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) |
22 | { | 17 | : offsetof(CPUTLBEntry, addr_write)); |
23 | #ifdef CONFIG_USER_ONLY | 18 | int fast_off = TLB_MASK_TABLE_OFS(mem_index); |
24 | - if (!h2g_valid(addr) || !h2g_valid(addr + len - 1) || | 19 | - unsigned s_bits = opc & MO_SIZE; |
25 | + if (!guest_addr_valid(addr) || !guest_addr_valid(addr + len - 1) || | 20 | - unsigned a_bits = get_alignment_bits(opc); |
26 | page_check_range(addr, len, PAGE_WRITE) < 0) { | 21 | - |
27 | s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra); | 22 | - /* |
23 | - * We don't support inline unaligned acceses, but we can easily | ||
24 | - * support overalignment checks. | ||
25 | - */ | ||
26 | - if (a_bits < s_bits) { | ||
27 | - a_bits = s_bits; | ||
28 | - } | ||
29 | + unsigned s_mask = (1 << (opc & MO_SIZE)) - 1; | ||
30 | + unsigned a_mask = (1 << get_alignment_bits(opc)) - 1; | ||
31 | + TCGReg t_addr; | ||
32 | |||
33 | /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */ | ||
34 | tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); | ||
35 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, | ||
36 | |||
37 | /* | ||
38 | * Check alignment, check comparators. | ||
39 | - * Do this in no more than 3 insns. Use MOVW for v7, if possible, | ||
40 | + * Do this in 2-4 insns. Use MOVW for v7, if possible, | ||
41 | * to reduce the number of sequential conditional instructions. | ||
42 | * Almost all guests have at least 4k pages, which means that we need | ||
43 | * to clear at least 9 bits even for an 8-byte memory, which means it | ||
44 | * isn't worth checking for an immediate operand for BIC. | ||
45 | + * | ||
46 | + * For unaligned accesses, test the page of the last unit of alignment. | ||
47 | + * This leaves the least significant alignment bits unchanged, and of | ||
48 | + * course must be zero. | ||
49 | */ | ||
50 | + t_addr = addrlo; | ||
51 | + if (a_mask < s_mask) { | ||
52 | + t_addr = TCG_REG_R0; | ||
53 | + tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, | ||
54 | + addrlo, s_mask - a_mask); | ||
55 | + } | ||
56 | if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) { | ||
57 | - tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1)); | ||
58 | - | ||
59 | - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask); | ||
60 | + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask)); | ||
61 | tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, | ||
62 | - addrlo, TCG_REG_TMP, 0); | ||
63 | + t_addr, TCG_REG_TMP, 0); | ||
64 | tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); | ||
65 | } else { | ||
66 | - if (a_bits) { | ||
67 | - tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, | ||
68 | - (1 << a_bits) - 1); | ||
69 | + if (a_mask) { | ||
70 | + tcg_debug_assert(a_mask <= 0xff); | ||
71 | + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); | ||
72 | } | ||
73 | - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo, | ||
74 | + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, | ||
75 | SHIFT_IMM_LSR(TARGET_PAGE_BITS)); | ||
76 | - tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP, | ||
77 | + tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, | ||
78 | 0, TCG_REG_R2, TCG_REG_TMP, | ||
79 | SHIFT_IMM_LSL(TARGET_PAGE_BITS)); | ||
28 | } | 80 | } |
29 | -- | 81 | -- |
30 | 2.17.1 | 82 | 2.25.1 |
31 | 83 | ||
32 | 84 | diff view generated by jsdifflib |
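The right-hand tcg_out_tlb_read() change rests on a trick worth spelling out: for an access wider than its guaranteed alignment, feed the TLB comparator the address of the last alignment unit. A worked instance, assuming 4 KiB pages:

```c
/* 8-byte access that is only guaranteed 4-byte aligned. */
unsigned s_mask = (1 << MO_64) - 1;              /* 7 */
unsigned a_mask = (1 << MO_32) - 1;              /* 3 */
target_ulong addr = 0x20ffc;                     /* crosses into 0x21000 */

target_ulong t_addr = addr + (s_mask - a_mask);  /* 0x21000 */
/* page(t_addr) != page(addr), so the TLB compare fails and the access
 * takes the slow path; the low a_mask bits of t_addr are unchanged, so
 * the alignment test folded into the same compare still sees them. */
```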
1 | From: David Hildenbrand <david@redhat.com> | 1 | Reserve a register for the guest_base using aarch64 for reference. |
---|---|---|---|
2 | By doing so, we do not have to recompute it for every memory load. | ||
2 | 3 | ||
3 | ... and also call it for CONFIG_USER_ONLY. This function probably will | 4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | also need some refactoring in regards to probing, however, we'll have to | ||
5 | come back to that later, once cleaning up the other mem helpers. | ||
6 | |||
7 | The alignment check always makes sure that the write access falls into a | ||
8 | single page. | ||
9 | |||
10 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
11 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
12 | Message-Id: <20190826075112.25637-8-david@redhat.com> | ||
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
14 | --- | 6 | --- |
15 | target/s390x/mem_helper.c | 4 +--- | 7 | tcg/arm/tcg-target.c.inc | 39 ++++++++++++++++++++++++++++----------- |
16 | 1 file changed, 1 insertion(+), 3 deletions(-) | 8 | 1 file changed, 28 insertions(+), 11 deletions(-) |
17 | 9 | ||
18 | diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c | 10 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc |
19 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/target/s390x/mem_helper.c | 12 | --- a/tcg/arm/tcg-target.c.inc |
21 | +++ b/target/s390x/mem_helper.c | 13 | +++ b/tcg/arm/tcg-target.c.inc |
22 | @@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, | 14 | @@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_oarg_regs[2] = { |
15 | |||
16 | #define TCG_REG_TMP TCG_REG_R12 | ||
17 | #define TCG_VEC_TMP TCG_REG_Q15 | ||
18 | +#ifndef CONFIG_SOFTMMU | ||
19 | +#define TCG_REG_GUEST_BASE TCG_REG_R11 | ||
20 | +#endif | ||
21 | |||
22 | typedef enum { | ||
23 | COND_EQ = 0x0, | ||
24 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
25 | |||
26 | static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, | ||
27 | TCGReg datalo, TCGReg datahi, | ||
28 | - TCGReg addrlo, TCGReg addend) | ||
29 | + TCGReg addrlo, TCGReg addend, | ||
30 | + bool scratch_addend) | ||
31 | { | ||
32 | /* Byte swapping is left to middle-end expansion. */ | ||
33 | tcg_debug_assert((opc & MO_BSWAP) == 0); | ||
34 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, | ||
35 | if (get_alignment_bits(opc) >= MO_64 | ||
36 | && (datalo & 1) == 0 && datahi == datalo + 1) { | ||
37 | tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend); | ||
38 | - } else if (datalo != addend) { | ||
39 | + } else if (scratch_addend) { | ||
40 | tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo); | ||
41 | tcg_out_ld32_12(s, COND_AL, datahi, addend, 4); | ||
42 | } else { | ||
43 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
44 | label_ptr = s->code_ptr; | ||
45 | tcg_out_bl_imm(s, COND_NE, 0); | ||
46 | |||
47 | - tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend); | ||
48 | + tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true); | ||
49 | |||
50 | add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, | ||
51 | s->code_ptr, label_ptr); | ||
52 | #else /* !CONFIG_SOFTMMU */ | ||
53 | if (guest_base) { | ||
54 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base); | ||
55 | - tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP); | ||
56 | + tcg_out_qemu_ld_index(s, opc, datalo, datahi, | ||
57 | + addrlo, TCG_REG_GUEST_BASE, false); | ||
58 | } else { | ||
59 | tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo); | ||
23 | } | 60 | } |
24 | 61 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | |
25 | /* Sanity check writability of the store address. */ | 62 | |
26 | -#ifndef CONFIG_USER_ONLY | 63 | static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc, |
27 | - probe_write(env, a2, 0, mem_idx, ra); | 64 | TCGReg datalo, TCGReg datahi, |
28 | -#endif | 65 | - TCGReg addrlo, TCGReg addend) |
29 | + probe_write(env, a2, 1 << sc, mem_idx, ra); | 66 | + TCGReg addrlo, TCGReg addend, |
67 | + bool scratch_addend) | ||
68 | { | ||
69 | /* Byte swapping is left to middle-end expansion. */ | ||
70 | tcg_debug_assert((opc & MO_BSWAP) == 0); | ||
71 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc, | ||
72 | if (get_alignment_bits(opc) >= MO_64 | ||
73 | && (datalo & 1) == 0 && datahi == datalo + 1) { | ||
74 | tcg_out_strd_r(s, cond, datalo, addrlo, addend); | ||
75 | - } else { | ||
76 | + } else if (scratch_addend) { | ||
77 | tcg_out_st32_rwb(s, cond, datalo, addend, addrlo); | ||
78 | tcg_out_st32_12(s, cond, datahi, addend, 4); | ||
79 | + } else { | ||
80 | + tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP, | ||
81 | + addend, addrlo, SHIFT_IMM_LSL(0)); | ||
82 | + tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0); | ||
83 | + tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4); | ||
84 | } | ||
85 | break; | ||
86 | default: | ||
87 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
88 | mem_index = get_mmuidx(oi); | ||
89 | addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0); | ||
90 | |||
91 | - tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend); | ||
92 | + tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, | ||
93 | + addrlo, addend, true); | ||
94 | |||
95 | /* The conditional call must come last, as we're going to return here. */ | ||
96 | label_ptr = s->code_ptr; | ||
97 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
98 | s->code_ptr, label_ptr); | ||
99 | #else /* !CONFIG_SOFTMMU */ | ||
100 | if (guest_base) { | ||
101 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base); | ||
102 | - tcg_out_qemu_st_index(s, COND_AL, opc, datalo, | ||
103 | - datahi, addrlo, TCG_REG_TMP); | ||
104 | + tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi, | ||
105 | + addrlo, TCG_REG_GUEST_BASE, false); | ||
106 | } else { | ||
107 | tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo); | ||
108 | } | ||
109 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
110 | |||
111 | tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); | ||
112 | |||
113 | +#ifndef CONFIG_SOFTMMU | ||
114 | + if (guest_base) { | ||
115 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); | ||
116 | + tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); | ||
117 | + } | ||
118 | +#endif | ||
119 | + | ||
120 | tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]); | ||
30 | 121 | ||
31 | /* | 122 | /* |
32 | * Note that the compare-and-swap is atomic, and the store is atomic, | ||
33 | -- | 123 | -- |
34 | 2.17.1 | 124 | 2.25.1 |
35 | 125 | ||
36 | 126 | diff view generated by jsdifflib |
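The left-hand do_csst() change is one instance of a general pattern: probe the destination for the full operation size before starting a sequence that must not fault midway. Condensed from the patch:

```c
/* Probe writability of the whole (1 << sc)-byte store target up
 * front; any exception is raised here, before the compare-and-swap
 * runs, so the subsequent store to a2 cannot fault. */
probe_write(env, a2, 1 << sc, mem_idx, ra);
```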
1 | Let the user-only watchpoint stubs resolve to empty inline functions. | 1 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
---|---|---|---|
2 | |||
3 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
4 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
6 | --- | 3 | --- |
7 | include/hw/core/cpu.h | 23 +++++++++++++++++++++++ | 4 | tcg/arm/tcg-target.h | 2 - |
8 | exec.c | 26 ++------------------------ | 5 | tcg/arm/tcg-target.c.inc | 83 +++++++++++++++++++++++++++++++++++++++- |
9 | 2 files changed, 25 insertions(+), 24 deletions(-) | 6 | 2 files changed, 81 insertions(+), 4 deletions(-) |
10 | 7 | ||
11 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | 8 | diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h |
12 | index XXXXXXX..XXXXXXX 100644 | 9 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/include/hw/core/cpu.h | 10 | --- a/tcg/arm/tcg-target.h |
14 | +++ b/include/hw/core/cpu.h | 11 | +++ b/tcg/arm/tcg-target.h |
15 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask) | 12 | @@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions; |
16 | return false; | 13 | /* not defined -- call should be eliminated at compile time */ |
14 | void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); | ||
15 | |||
16 | -#ifdef CONFIG_SOFTMMU | ||
17 | #define TCG_TARGET_NEED_LDST_LABELS | ||
18 | -#endif | ||
19 | #define TCG_TARGET_NEED_POOL_LABELS | ||
20 | |||
21 | #endif | ||
22 | diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/tcg/arm/tcg-target.c.inc | ||
25 | +++ b/tcg/arm/tcg-target.c.inc | ||
26 | @@ -XXX,XX +XXX,XX @@ | ||
27 | */ | ||
28 | |||
29 | #include "elf.h" | ||
30 | +#include "../tcg-ldst.c.inc" | ||
31 | #include "../tcg-pool.c.inc" | ||
32 | |||
33 | int arm_arch = __ARM_ARCH; | ||
34 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn, | ||
17 | } | 35 | } |
18 | 36 | ||
19 | +#ifdef CONFIG_USER_ONLY | 37 | #ifdef CONFIG_SOFTMMU |
20 | +static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | 38 | -#include "../tcg-ldst.c.inc" |
21 | + int flags, CPUWatchpoint **watchpoint) | 39 | - |
40 | /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, | ||
41 | * int mmu_idx, uintptr_t ra) | ||
42 | */ | ||
43 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | ||
44 | tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]); | ||
45 | return true; | ||
46 | } | ||
47 | +#else | ||
48 | + | ||
49 | +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, | ||
50 | + TCGReg addrhi, unsigned a_bits) | ||
22 | +{ | 51 | +{ |
23 | + return -ENOSYS; | 52 | + unsigned a_mask = (1 << a_bits) - 1; |
53 | + TCGLabelQemuLdst *label = new_ldst_label(s); | ||
54 | + | ||
55 | + label->is_ld = is_ld; | ||
56 | + label->addrlo_reg = addrlo; | ||
57 | + label->addrhi_reg = addrhi; | ||
58 | + | ||
59 | + /* We are expecting a_bits to max out at 7, and can easily support 8. */ | ||
60 | + tcg_debug_assert(a_mask <= 0xff); | ||
61 | + /* tst addr, #mask */ | ||
62 | + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); | ||
63 | + | ||
64 | + /* blne slow_path */ | ||
65 | + label->label_ptr[0] = s->code_ptr; | ||
66 | + tcg_out_bl_imm(s, COND_NE, 0); | ||
67 | + | ||
68 | + label->raddr = tcg_splitwx_to_rx(s->code_ptr); | ||
24 | +} | 69 | +} |
25 | + | 70 | + |
26 | +static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, | 71 | +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) |
27 | + vaddr len, int flags) | ||
28 | +{ | 72 | +{ |
29 | + return -ENOSYS; | 73 | + if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { |
74 | + return false; | ||
75 | + } | ||
76 | + | ||
77 | + if (TARGET_LONG_BITS == 64) { | ||
78 | + /* 64-bit target address is aligned into R2:R3. */ | ||
79 | + if (l->addrhi_reg != TCG_REG_R2) { | ||
80 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg); | ||
81 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg); | ||
82 | + } else if (l->addrlo_reg != TCG_REG_R3) { | ||
83 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg); | ||
84 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg); | ||
85 | + } else { | ||
86 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2); | ||
87 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3); | ||
88 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1); | ||
89 | + } | ||
90 | + } else { | ||
91 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg); | ||
92 | + } | ||
93 | + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0); | ||
94 | + | ||
95 | + /* | ||
96 | + * Tail call to the helper, with the return address back inline, | ||
97 | + * just for the clarity of the debugging traceback -- the helper | ||
98 | + * cannot return. We have used BLNE to arrive here, so LR is | ||
99 | + * already set. | ||
100 | + */ | ||
101 | + tcg_out_goto(s, COND_AL, (const void *) | ||
102 | + (l->is_ld ? helper_unaligned_ld : helper_unaligned_st)); | ||
103 | + return true; | ||
30 | +} | 104 | +} |
31 | + | 105 | + |
32 | +static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, | 106 | +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) |
33 | + CPUWatchpoint *wp) | ||
34 | +{ | 107 | +{ |
108 | + return tcg_out_fail_alignment(s, l); | ||
35 | +} | 109 | +} |
36 | + | 110 | + |
37 | +static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) | 111 | +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) |
38 | +{ | 112 | +{ |
113 | + return tcg_out_fail_alignment(s, l); | ||
39 | +} | 114 | +} |
115 | #endif /* SOFTMMU */ | ||
116 | |||
117 | static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, | ||
118 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
119 | int mem_index; | ||
120 | TCGReg addend; | ||
121 | tcg_insn_unit *label_ptr; | ||
40 | +#else | 122 | +#else |
41 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | 123 | + unsigned a_bits; |
42 | int flags, CPUWatchpoint **watchpoint); | ||
43 | int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, | ||
44 | vaddr len, int flags); | ||
45 | void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); | ||
46 | void cpu_watchpoint_remove_all(CPUState *cpu, int mask); | ||
47 | +#endif | ||
48 | |||
49 | /** | ||
50 | * cpu_get_address_space: | ||
51 | diff --git a/exec.c b/exec.c | ||
52 | index XXXXXXX..XXXXXXX 100644 | ||
53 | --- a/exec.c | ||
54 | +++ b/exec.c | ||
55 | @@ -XXX,XX +XXX,XX @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) | ||
56 | } | ||
57 | #endif | 124 | #endif |
58 | 125 | ||
59 | -#if defined(CONFIG_USER_ONLY) | 126 | datalo = *args++; |
60 | -void cpu_watchpoint_remove_all(CPUState *cpu, int mask) | 127 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) |
61 | - | 128 | add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, |
62 | -{ | 129 | s->code_ptr, label_ptr); |
63 | -} | 130 | #else /* !CONFIG_SOFTMMU */ |
64 | - | 131 | + a_bits = get_alignment_bits(opc); |
65 | -int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, | 132 | + if (a_bits) { |
66 | - int flags) | 133 | + tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits); |
67 | -{ | 134 | + } |
68 | - return -ENOSYS; | 135 | if (guest_base) { |
69 | -} | 136 | tcg_out_qemu_ld_index(s, opc, datalo, datahi, |
70 | - | 137 | addrlo, TCG_REG_GUEST_BASE, false); |
71 | -void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) | 138 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) |
72 | -{ | 139 | int mem_index; |
73 | -} | 140 | TCGReg addend; |
74 | - | 141 | tcg_insn_unit *label_ptr; |
75 | -int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | 142 | +#else |
76 | - int flags, CPUWatchpoint **watchpoint) | 143 | + unsigned a_bits; |
77 | -{ | 144 | #endif |
78 | - return -ENOSYS; | 145 | |
79 | -} | 146 | datalo = *args++; |
80 | -#else | 147 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) |
81 | +#ifndef CONFIG_USER_ONLY | 148 | add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, |
82 | /* Add a watchpoint. */ | 149 | s->code_ptr, label_ptr); |
83 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | 150 | #else /* !CONFIG_SOFTMMU */ |
84 | int flags, CPUWatchpoint **watchpoint) | 151 | + a_bits = get_alignment_bits(opc); |
85 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, | 152 | + if (a_bits) { |
86 | 153 | + tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits); | |
87 | return !(addr > wpend || wp->vaddr > addrend); | 154 | + } |
88 | } | 155 | if (guest_base) { |
89 | - | 156 | tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi, |
90 | -#endif | 157 | addrlo, TCG_REG_GUEST_BASE, false); |
91 | +#endif /* !CONFIG_USER_ONLY */ | ||
92 | |||
93 | /* Add a breakpoint. */ | ||
94 | int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, | ||
95 | -- | 158 | -- |
96 | 2.17.1 | 159 | 2.25.1 |
97 | 160 | ||
98 | 161 | diff view generated by jsdifflib |
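Condensing the right-hand patch's user-only scheme: the fast path tests the low address bits inline and conditionally calls an out-of-line stub, recorded through the ldst-label machinery, whose slow path tail-calls a helper that raises SIGBUS for the guest. A sketch of the inline part:

```c
/* Mirrors tcg_out_test_alignment() above; a_mask = (1 << a_bits) - 1. */
TCGLabelQemuLdst *label = new_ldst_label(s);
label->is_ld = is_ld;
label->addrlo_reg = addrlo;

tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); /* test bits */
label->label_ptr[0] = s->code_ptr;              /* branch patched later */
tcg_out_bl_imm(s, COND_NE, 0);                  /* blne slow_path */
label->raddr = tcg_splitwx_to_rx(s->code_ptr);  /* inline resume point */
```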
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | This is, in some sense, the opposite of the other tcg hosts, where
---|---|---|---|
2 | we get (normal) alignment checks for free with host SIGBUS and | ||
3 | need to add code to support unaligned accesses. | ||
2 | 4 | ||
3 | Preparation for collapsing the two byte swaps adjust_endianness and | 5 | Fortunately, the ISA contains pairs of instructions that are |
4 | handle_bswap into the former. | 6 | used to implement unaligned memory accesses. Use them. |
5 | 7 | ||
6 | Call memory_region_dispatch_{read|write} with endianness encoded into | 8 | Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com> |
7 | the "MemOp op" operand. | 9 | Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com> |
8 | 10 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | |
9 | This patch does not change any behaviour as | ||
10 | memory_region_dispatch_{read|write} is yet to handle the endianness. | ||
11 | |||
12 | Once it does handle endianness, callers with byte swaps can collapse | ||
13 | them into adjust_endianness. | ||
14 | |||
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
17 | Message-Id: <8066ab3eb037c0388dfadfe53c5118429dd1de3a.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
19 | --- | 12 | --- |
20 | include/exec/memory.h | 3 +++ | 13 | tcg/mips/tcg-target.h | 2 - |
21 | accel/tcg/cputlb.c | 8 ++++++-- | 14 | tcg/mips/tcg-target.c.inc | 334 +++++++++++++++++++++++++++++++++++++- |
22 | exec.c | 13 +++++++++++-- | 15 | 2 files changed, 328 insertions(+), 8 deletions(-) |
23 | hw/intc/armv7m_nvic.c | 15 ++++++++------- | ||
24 | hw/s390x/s390-pci-inst.c | 6 ++++-- | ||
25 | hw/vfio/pci-quirks.c | 5 +++-- | ||
26 | hw/virtio/virtio-pci.c | 6 ++++-- | ||
27 | memory.c | 18 ++++++++++++++++++ | ||
28 | memory_ldst.inc.c | 24 ++++++++++++++++++------ | ||
29 | 9 files changed, 75 insertions(+), 23 deletions(-) | ||
30 | 16 | ||
31 | diff --git a/include/exec/memory.h b/include/exec/memory.h | 17 | diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h |
32 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
33 | --- a/include/exec/memory.h | 19 | --- a/tcg/mips/tcg-target.h |
34 | +++ b/include/exec/memory.h | 20 | +++ b/tcg/mips/tcg-target.h |
35 | @@ -XXX,XX +XXX,XX @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, | 21 | @@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions; |
22 | void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t) | ||
23 | QEMU_ERROR("code path is reachable"); | ||
24 | |||
25 | -#ifdef CONFIG_SOFTMMU | ||
26 | #define TCG_TARGET_NEED_LDST_LABELS | ||
27 | -#endif | ||
28 | |||
29 | #endif | ||
30 | diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/tcg/mips/tcg-target.c.inc | ||
33 | +++ b/tcg/mips/tcg-target.c.inc | ||
34 | @@ -XXX,XX +XXX,XX @@ | ||
35 | * THE SOFTWARE. | ||
36 | */ | ||
37 | |||
38 | +#include "../tcg-ldst.c.inc" | ||
39 | + | ||
40 | #ifdef HOST_WORDS_BIGENDIAN | ||
41 | # define MIPS_BE 1 | ||
42 | #else | ||
43 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
44 | OPC_ORI = 015 << 26, | ||
45 | OPC_XORI = 016 << 26, | ||
46 | OPC_LUI = 017 << 26, | ||
47 | + OPC_BNEL = 025 << 26, | ||
48 | + OPC_BNEZALC_R6 = 030 << 26, | ||
49 | OPC_DADDIU = 031 << 26, | ||
50 | + OPC_LDL = 032 << 26, | ||
51 | + OPC_LDR = 033 << 26, | ||
52 | OPC_LB = 040 << 26, | ||
53 | OPC_LH = 041 << 26, | ||
54 | + OPC_LWL = 042 << 26, | ||
55 | OPC_LW = 043 << 26, | ||
56 | OPC_LBU = 044 << 26, | ||
57 | OPC_LHU = 045 << 26, | ||
58 | + OPC_LWR = 046 << 26, | ||
59 | OPC_LWU = 047 << 26, | ||
60 | OPC_SB = 050 << 26, | ||
61 | OPC_SH = 051 << 26, | ||
62 | + OPC_SWL = 052 << 26, | ||
63 | OPC_SW = 053 << 26, | ||
64 | + OPC_SDL = 054 << 26, | ||
65 | + OPC_SDR = 055 << 26, | ||
66 | + OPC_SWR = 056 << 26, | ||
67 | OPC_LD = 067 << 26, | ||
68 | OPC_SD = 077 << 26, | ||
69 | |||
70 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg) | ||
71 | } | ||
72 | |||
73 | #if defined(CONFIG_SOFTMMU) | ||
74 | -#include "../tcg-ldst.c.inc" | ||
75 | - | ||
76 | static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = { | ||
77 | [MO_UB] = helper_ret_ldub_mmu, | ||
78 | [MO_SB] = helper_ret_ldsb_mmu, | ||
79 | @@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
80 | tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); | ||
81 | return true; | ||
82 | } | ||
83 | -#endif | ||
84 | + | ||
85 | +#else | ||
86 | + | ||
87 | +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo, | ||
88 | + TCGReg addrhi, unsigned a_bits) | ||
89 | +{ | ||
90 | + unsigned a_mask = (1 << a_bits) - 1; | ||
91 | + TCGLabelQemuLdst *l = new_ldst_label(s); | ||
92 | + | ||
93 | + l->is_ld = is_ld; | ||
94 | + l->addrlo_reg = addrlo; | ||
95 | + l->addrhi_reg = addrhi; | ||
96 | + | ||
97 | + /* We are expecting a_bits to max out at 7, much lower than ANDI. */ | ||
98 | + tcg_debug_assert(a_bits < 16); | ||
99 | + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask); | ||
100 | + | ||
101 | + l->label_ptr[0] = s->code_ptr; | ||
102 | + if (use_mips32r6_instructions) { | ||
103 | + tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0); | ||
104 | + } else { | ||
105 | + tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO); | ||
106 | + tcg_out_nop(s); | ||
107 | + } | ||
108 | + | ||
109 | + l->raddr = tcg_splitwx_to_rx(s->code_ptr); | ||
110 | +} | ||
111 | + | ||
112 | +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) | ||
113 | +{ | ||
114 | + void *target; | ||
115 | + | ||
116 | + if (!reloc_pc16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { | ||
117 | + return false; | ||
118 | + } | ||
119 | + | ||
120 | + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { | ||
121 | + /* A0 is env, A1 is skipped, A2:A3 is the uint64_t address. */ | ||
122 | + TCGReg a2 = MIPS_BE ? l->addrhi_reg : l->addrlo_reg; | ||
123 | + TCGReg a3 = MIPS_BE ? l->addrlo_reg : l->addrhi_reg; | ||
124 | + | ||
125 | + if (a3 != TCG_REG_A2) { | ||
126 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2); | ||
127 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3); | ||
128 | + } else if (a2 != TCG_REG_A3) { | ||
129 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3); | ||
130 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2); | ||
131 | + } else { | ||
132 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A2); | ||
133 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, TCG_REG_A3); | ||
134 | + tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, TCG_TMP0); | ||
135 | + } | ||
136 | + } else { | ||
137 | + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); | ||
138 | + } | ||
139 | + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); | ||
140 | + | ||
141 | + /* | ||
142 | + * Tail call to the helper, with the return address back inline. | ||
143 | + * We have arrived here via BNEL, so $31 is already set. | ||
144 | + */ | ||
145 | + target = (l->is_ld ? helper_unaligned_ld : helper_unaligned_st); | ||
146 | + tcg_out_call_int(s, target, true); | ||
147 | + return true; | ||
148 | +} | ||
149 | + | ||
150 | +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
151 | +{ | ||
152 | + return tcg_out_fail_alignment(s, l); | ||
153 | +} | ||
154 | + | ||
155 | +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) | ||
156 | +{ | ||
157 | + return tcg_out_fail_alignment(s, l); | ||
158 | +} | ||
159 | +#endif /* SOFTMMU */ | ||
160 | |||
161 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
162 | TCGReg base, MemOp opc, bool is_64) | ||
163 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
36 | } | 164 | } |
37 | } | 165 | } |
38 | 166 | ||
39 | +/* enum device_endian to MemOp. */ | 167 | +static void __attribute__((unused)) |
40 | +MemOp devend_memop(enum device_endian end); | 168 | +tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, |
41 | + | 169 | + TCGReg base, MemOp opc, bool is_64) |
42 | #endif | 170 | +{ |
43 | 171 | + const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR; | |
44 | #endif | 172 | + const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL; |
45 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 173 | + const MIPSInsn ld1 = MIPS_BE ? OPC_LDL : OPC_LDR; |
46 | index XXXXXXX..XXXXXXX 100644 | 174 | + const MIPSInsn ld2 = MIPS_BE ? OPC_LDR : OPC_LDL; |
47 | --- a/accel/tcg/cputlb.c | 175 | + |
48 | +++ b/accel/tcg/cputlb.c | 176 | + bool sgn = (opc & MO_SIGN); |
49 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | 177 | + |
50 | qemu_mutex_lock_iothread(); | 178 | + switch (opc & (MO_SSIZE | MO_BSWAP)) { |
51 | locked = true; | 179 | + case MO_SW | MO_BE: |
52 | } | 180 | + case MO_UW | MO_BE: |
53 | - r = memory_region_dispatch_read(mr, mr_offset, &val, size_memop(size), | 181 | + tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 0); |
54 | + r = memory_region_dispatch_read(mr, mr_offset, &val, | 182 | + tcg_out_opc_imm(s, OPC_LBU, lo, base, 1); |
55 | + size_memop(size) | MO_TE, | 183 | + if (use_mips32r2_instructions) { |
56 | iotlbentry->attrs); | 184 | + tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8); |
57 | if (r != MEMTX_OK) { | 185 | + } else { |
58 | hwaddr physaddr = mr_offset + | 186 | + tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8); |
59 | @@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | 187 | + tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, lo);
60 | qemu_mutex_lock_iothread(); | 188 | + } |
61 | locked = true; | 189 | + break; |
62 | } | 190 | + |
63 | - r = memory_region_dispatch_write(mr, mr_offset, val, size_memop(size), | 191 | + case MO_SW | MO_LE: |
64 | + r = memory_region_dispatch_write(mr, mr_offset, val, | 192 | + case MO_UW | MO_LE: |
65 | + size_memop(size) | MO_TE, | 193 | + if (use_mips32r2_instructions && lo != base) { |
66 | iotlbentry->attrs); | 194 | + tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); |
67 | if (r != MEMTX_OK) { | 195 | + tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 1); |
68 | hwaddr physaddr = mr_offset + | 196 | + tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8); |
69 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | 197 | + } else { |
70 | } | 198 | + tcg_out_opc_imm(s, OPC_LBU, TCG_TMP0, base, 0); |
71 | } | 199 | + tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP1, base, 1); |
72 | 200 | + tcg_out_opc_sa(s, OPC_SLL, TCG_TMP1, TCG_TMP1, 8); | |
73 | + /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */ | 201 | + tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1); |
74 | res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | 202 | + } |
75 | mmu_idx, addr, retaddr, access_type, size); | 203 | + break; |
76 | return handle_bswap(res, size, big_endian); | 204 | + |
77 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | 205 | + case MO_SL: |
78 | } | 206 | + case MO_UL: |
79 | } | 207 | + tcg_out_opc_imm(s, lw1, lo, base, 0); |
80 | 208 | + tcg_out_opc_imm(s, lw2, lo, base, 3); | |
81 | + /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */ | 209 | + if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) { |
82 | io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, | 210 | + tcg_out_ext32u(s, lo, lo); |
83 | handle_bswap(val, size, big_endian), | 211 | + } |
84 | addr, retaddr, size); | 212 | + break; |
85 | diff --git a/exec.c b/exec.c | 213 | + |
86 | index XXXXXXX..XXXXXXX 100644 | 214 | + case MO_UL | MO_BSWAP: |
87 | --- a/exec.c | 215 | + case MO_SL | MO_BSWAP: |
88 | +++ b/exec.c | 216 | + if (use_mips32r2_instructions) { |
89 | @@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, | 217 | + tcg_out_opc_imm(s, lw1, lo, base, 0); |
90 | /* XXX: could force current_cpu to NULL to avoid | 218 | + tcg_out_opc_imm(s, lw2, lo, base, 3); |
91 | potential bugs */ | 219 | + tcg_out_bswap32(s, lo, lo, |
92 | val = ldn_p(buf, l); | 220 | + TCG_TARGET_REG_BITS == 64 && is_64 |
93 | + /* | 221 | + ? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0); |
94 | + * TODO: Merge bswap from ldn_p into memory_region_dispatch_write | 222 | + } else { |
95 | + * by using ldn_he_p and dropping MO_TE to get a host-endian value. | 223 | + const tcg_insn_unit *subr = |
96 | + */ | 224 | + (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn |
97 | result |= memory_region_dispatch_write(mr, addr1, val, | 225 | + ? bswap32u_addr : bswap32_addr); |
98 | - size_memop(l), attrs); | 226 | + |
99 | + size_memop(l) | MO_TE, | 227 | + tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0); |
100 | + attrs); | 228 | + tcg_out_bswap_subr(s, subr); |
101 | } else { | 229 | + /* delay slot */ |
102 | /* RAM case */ | 230 | + tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3); |
103 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); | 231 | + tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3); |
104 | @@ -XXX,XX +XXX,XX @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, | 232 | + } |
105 | /* I/O case */ | 233 | + break; |
106 | release_lock |= prepare_mmio_access(mr); | 234 | + |
107 | l = memory_access_size(mr, l, addr1); | 235 | + case MO_UQ: |
108 | + /* | 236 | + if (TCG_TARGET_REG_BITS == 64) { |
109 | + * TODO: Merge bswap from stn_p into memory_region_dispatch_read | 237 | + tcg_out_opc_imm(s, ld1, lo, base, 0); |
110 | + * by using stn_he_p and dropping MO_TE to get a host-endian value. | 238 | + tcg_out_opc_imm(s, ld2, lo, base, 7); |
111 | + */ | 239 | + } else { |
112 | result |= memory_region_dispatch_read(mr, addr1, &val, | 240 | + tcg_out_opc_imm(s, lw1, MIPS_BE ? hi : lo, base, 0 + 0); |
113 | - size_memop(l), attrs); | 241 | + tcg_out_opc_imm(s, lw2, MIPS_BE ? hi : lo, base, 0 + 3); |
114 | + size_memop(l) | MO_TE, attrs); | 242 | + tcg_out_opc_imm(s, lw1, MIPS_BE ? lo : hi, base, 4 + 0); |
115 | stn_p(buf, l, val); | 243 | + tcg_out_opc_imm(s, lw2, MIPS_BE ? lo : hi, base, 4 + 3); |
116 | } else { | 244 | + } |
117 | /* RAM case */ | 245 | + break; |
118 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | 246 | + |
119 | index XXXXXXX..XXXXXXX 100644 | 247 | + case MO_UQ | MO_BSWAP: |
120 | --- a/hw/intc/armv7m_nvic.c | 248 | + if (TCG_TARGET_REG_BITS == 64) { |
121 | +++ b/hw/intc/armv7m_nvic.c | 249 | + if (use_mips32r2_instructions) { |
122 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr, | 250 | + tcg_out_opc_imm(s, ld1, lo, base, 0); |
123 | if (attrs.secure) { | 251 | + tcg_out_opc_imm(s, ld2, lo, base, 7); |
124 | /* S accesses to the alias act like NS accesses to the real region */ | 252 | + tcg_out_bswap64(s, lo, lo); |
125 | attrs.secure = 0; | 253 | + } else { |
126 | - return memory_region_dispatch_write(mr, addr, value, size_memop(size), | 254 | + tcg_out_opc_imm(s, ld1, TCG_TMP0, base, 0); |
127 | - attrs); | 255 | + tcg_out_bswap_subr(s, bswap64_addr); |
128 | + return memory_region_dispatch_write(mr, addr, value, | 256 | + /* delay slot */ |
129 | + size_memop(size) | MO_TE, attrs); | 257 | + tcg_out_opc_imm(s, ld2, TCG_TMP0, base, 7); |
130 | } else { | 258 | + tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3); |
131 | /* NS attrs are RAZ/WI for privileged, and BusFault for user */ | 259 | + } |
132 | if (attrs.user) { | 260 | + } else if (use_mips32r2_instructions) { |
133 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr, | 261 | + tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0); |
134 | if (attrs.secure) { | 262 | + tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3); |
135 | /* S accesses to the alias act like NS accesses to the real region */ | 263 | + tcg_out_opc_imm(s, lw1, TCG_TMP1, base, 4 + 0); |
136 | attrs.secure = 0; | 264 | + tcg_out_opc_imm(s, lw2, TCG_TMP1, base, 4 + 3); |
137 | - return memory_region_dispatch_read(mr, addr, data, size_memop(size), | 265 | + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0); |
138 | - attrs); | 266 | + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1); |
139 | + return memory_region_dispatch_read(mr, addr, data, | 267 | + tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16); |
140 | + size_memop(size) | MO_TE, attrs); | 268 | + tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16); |
141 | } else { | 269 | + } else { |
142 | /* NS attrs are RAZ/WI for privileged, and BusFault for user */ | 270 | + tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0); |
143 | if (attrs.user) { | 271 | + tcg_out_bswap_subr(s, bswap32_addr); |
144 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_systick_write(void *opaque, hwaddr addr, | 272 | + /* delay slot */ |
145 | 273 | + tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3); | |
146 | /* Direct the access to the correct systick */ | 274 | + tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 4 + 0); |
147 | mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); | 275 | + tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3); |
148 | - return memory_region_dispatch_write(mr, addr, value, size_memop(size), | 276 | + tcg_out_bswap_subr(s, bswap32_addr); |
149 | - attrs); | 277 | + /* delay slot */ |
150 | + return memory_region_dispatch_write(mr, addr, value, | 278 | + tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 4 + 3); |
151 | + size_memop(size) | MO_TE, attrs); | 279 | + tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3); |
152 | } | 280 | + } |
153 | 281 | + break; | |
154 | static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, | 282 | + |
155 | @@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_systick_read(void *opaque, hwaddr addr, | ||
156 | |||
157 | /* Direct the access to the correct systick */ | ||
158 | mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); | ||
159 | - return memory_region_dispatch_read(mr, addr, data, size_memop(size), attrs); | ||
160 | + return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE, | ||
161 | + attrs); | ||
162 | } | ||
163 | |||
164 | static const MemoryRegionOps nvic_systick_ops = { | ||
165 | diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c | ||
166 | index XXXXXXX..XXXXXXX 100644 | ||
167 | --- a/hw/s390x/s390-pci-inst.c | ||
168 | +++ b/hw/s390x/s390-pci-inst.c | ||
169 | @@ -XXX,XX +XXX,XX @@ static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias, | ||
170 | mr = pbdev->pdev->io_regions[pcias].memory; | ||
171 | mr = s390_get_subregion(mr, offset, len); | ||
172 | offset -= mr->addr; | ||
173 | - return memory_region_dispatch_read(mr, offset, data, size_memop(len), | ||
174 | + return memory_region_dispatch_read(mr, offset, data, | ||
175 | + size_memop(len) | MO_BE, | ||
176 | MEMTXATTRS_UNSPECIFIED); | ||
177 | } | ||
178 | |||
179 | @@ -XXX,XX +XXX,XX @@ static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias, | ||
180 | mr = pbdev->pdev->io_regions[pcias].memory; | ||
181 | mr = s390_get_subregion(mr, offset, len); | ||
182 | offset -= mr->addr; | ||
183 | - return memory_region_dispatch_write(mr, offset, data, size_memop(len), | ||
184 | + return memory_region_dispatch_write(mr, offset, data, | ||
185 | + size_memop(len) | MO_BE, | ||
186 | MEMTXATTRS_UNSPECIFIED); | ||
187 | } | ||
188 | |||
189 | diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c | ||
190 | index XXXXXXX..XXXXXXX 100644 | ||
191 | --- a/hw/vfio/pci-quirks.c | ||
192 | +++ b/hw/vfio/pci-quirks.c | ||
193 | @@ -XXX,XX +XXX,XX @@ static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr, | ||
194 | |||
195 | /* Write to the proper guest MSI-X table instead */ | ||
196 | memory_region_dispatch_write(&vdev->pdev.msix_table_mmio, | ||
197 | - offset, val, size_memop(size), | ||
198 | + offset, val, | ||
199 | + size_memop(size) | MO_LE, | ||
200 | MEMTXATTRS_UNSPECIFIED); | ||
201 | } | ||
202 | return; /* Do not write guest MSI-X data to hardware */ | ||
203 | @@ -XXX,XX +XXX,XX @@ static uint64_t vfio_rtl8168_quirk_data_read(void *opaque, | ||
204 | if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) { | ||
205 | hwaddr offset = rtl->addr & 0xfff; | ||
206 | memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset, | ||
207 | - &data, size_memop(size), | ||
208 | + &data, size_memop(size) | MO_LE, | ||
209 | MEMTXATTRS_UNSPECIFIED); | ||
210 | trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data); | ||
211 | } | ||
212 | diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c | ||
213 | index XXXXXXX..XXXXXXX 100644 | ||
214 | --- a/hw/virtio/virtio-pci.c | ||
215 | +++ b/hw/virtio/virtio-pci.c | ||
216 | @@ -XXX,XX +XXX,XX @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, | ||
217 | /* As length is under guest control, handle illegal values. */ | ||
218 | return; | ||
219 | } | ||
220 | - memory_region_dispatch_write(mr, addr, val, size_memop(len), | ||
221 | + /* TODO: Merge bswap from cpu_to_leXX into memory_region_dispatch_write. */ | ||
222 | + memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE, | ||
223 | MEMTXATTRS_UNSPECIFIED); | ||
224 | } | ||
225 | |||
226 | @@ -XXX,XX +XXX,XX @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, | ||
227 | /* Make sure caller aligned buf properly */ | ||
228 | assert(!(((uintptr_t)buf) & (len - 1))); | ||
229 | |||
230 | - memory_region_dispatch_read(mr, addr, &val, size_memop(len), | ||
231 | + /* TODO: Merge bswap from leXX_to_cpu into memory_region_dispatch_read. */ | ||
232 | + memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE, | ||
233 | MEMTXATTRS_UNSPECIFIED); | ||
234 | switch (len) { | ||
235 | case 1: | ||
236 | diff --git a/memory.c b/memory.c | ||
237 | index XXXXXXX..XXXXXXX 100644 | ||
238 | --- a/memory.c | ||
239 | +++ b/memory.c | ||
240 | @@ -XXX,XX +XXX,XX @@ static void memory_register_types(void) | ||
241 | } | ||
242 | |||
243 | type_init(memory_register_types) | ||
244 | + | ||
245 | +MemOp devend_memop(enum device_endian end) | ||
246 | +{ | ||
247 | + static MemOp conv[] = { | ||
248 | + [DEVICE_LITTLE_ENDIAN] = MO_LE, | ||
249 | + [DEVICE_BIG_ENDIAN] = MO_BE, | ||
250 | + [DEVICE_NATIVE_ENDIAN] = MO_TE, | ||
251 | + [DEVICE_HOST_ENDIAN] = 0, | ||
252 | + }; | ||
253 | + switch (end) { | ||
254 | + case DEVICE_LITTLE_ENDIAN: | ||
255 | + case DEVICE_BIG_ENDIAN: | ||
256 | + case DEVICE_NATIVE_ENDIAN: | ||
257 | + return conv[end]; | ||
258 | + default: | 283 | + default: |
259 | + g_assert_not_reached(); | 284 | + g_assert_not_reached(); |
260 | + } | 285 | + } |
261 | +} | 286 | +} |
262 | diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c | 287 | + |
263 | index XXXXXXX..XXXXXXX 100644 | 288 | static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) |
264 | --- a/memory_ldst.inc.c | 289 | { |
265 | +++ b/memory_ldst.inc.c | 290 | TCGReg addr_regl, addr_regh __attribute__((unused)); |
266 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, | 291 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) |
267 | release_lock |= prepare_mmio_access(mr); | 292 | MemOp opc; |
268 | 293 | #if defined(CONFIG_SOFTMMU) | |
269 | /* I/O case */ | 294 | tcg_insn_unit *label_ptr[2]; |
270 | - r = memory_region_dispatch_read(mr, addr1, &val, MO_32, attrs); | 295 | +#else |
271 | + /* TODO: Merge bswap32 into memory_region_dispatch_read. */ | 296 | + unsigned a_bits, s_bits; |
272 | + r = memory_region_dispatch_read(mr, addr1, &val, | ||
273 | + MO_32 | devend_memop(endian), attrs); | ||
274 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
275 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
276 | val = bswap32(val); | ||
277 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, | ||
278 | release_lock |= prepare_mmio_access(mr); | ||
279 | |||
280 | /* I/O case */ | ||
281 | - r = memory_region_dispatch_read(mr, addr1, &val, MO_64, attrs); | ||
282 | + /* TODO: Merge bswap64 into memory_region_dispatch_read. */ | ||
283 | + r = memory_region_dispatch_read(mr, addr1, &val, | ||
284 | + MO_64 | devend_memop(endian), attrs); | ||
285 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
286 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
287 | val = bswap64(val); | ||
288 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, | ||
289 | release_lock |= prepare_mmio_access(mr); | ||
290 | |||
291 | /* I/O case */ | ||
292 | - r = memory_region_dispatch_read(mr, addr1, &val, MO_16, attrs); | ||
293 | + /* TODO: Merge bswap16 into memory_region_dispatch_read. */ | ||
294 | + r = memory_region_dispatch_read(mr, addr1, &val, | ||
295 | + MO_16 | devend_memop(endian), attrs); | ||
296 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
297 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
298 | val = bswap16(val); | ||
299 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, | ||
300 | val = bswap32(val); | ||
301 | } | ||
302 | #endif | 297 | #endif |
303 | - r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs); | 298 | TCGReg base = TCG_REG_A0; |
304 | + /* TODO: Merge bswap32 into memory_region_dispatch_write. */ | 299 | |
305 | + r = memory_region_dispatch_write(mr, addr1, val, | 300 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) |
306 | + MO_32 | devend_memop(endian), attrs); | ||
307 | } else { | 301 | } else { |
308 | /* RAM case */ | 302 | tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); |
309 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | 303 | } |
310 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, | 304 | - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); |
311 | val = bswap16(val); | 305 | + a_bits = get_alignment_bits(opc); |
312 | } | 306 | + s_bits = opc & MO_SIZE; |
307 | + /* | ||
308 | + * R6 removes the left/right instructions but requires the | ||
309 | + * system to support misaligned memory accesses. | ||
310 | + */ | ||
311 | + if (use_mips32r6_instructions) { | ||
312 | + if (a_bits) { | ||
313 | + tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); | ||
314 | + } | ||
315 | + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); | ||
316 | + } else { | ||
317 | + if (a_bits && a_bits != s_bits) { | ||
318 | + tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); | ||
319 | + } | ||
320 | + if (a_bits >= s_bits) { | ||
321 | + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); | ||
322 | + } else { | ||
323 | + tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64); | ||
324 | + } | ||
325 | + } | ||
313 | #endif | 326 | #endif |
314 | - r = memory_region_dispatch_write(mr, addr1, val, MO_16, attrs); | 327 | } |
315 | + /* TODO: Merge bswap16 into memory_region_dispatch_write. */ | 328 | |
316 | + r = memory_region_dispatch_write(mr, addr1, val, | 329 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, |
317 | + MO_16 | devend_memop(endian), attrs); | 330 | } |
331 | } | ||
332 | |||
333 | +static void __attribute__((unused)) | ||
334 | +tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi, | ||
335 | + TCGReg base, MemOp opc) | ||
336 | +{ | ||
337 | + const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR; | ||
338 | + const MIPSInsn sw2 = MIPS_BE ? OPC_SWR : OPC_SWL; | ||
339 | + const MIPSInsn sd1 = MIPS_BE ? OPC_SDL : OPC_SDR; | ||
340 | + const MIPSInsn sd2 = MIPS_BE ? OPC_SDR : OPC_SDL; | ||
341 | + | ||
342 | + /* Don't clutter the code below with checks to avoid bswapping ZERO. */ | ||
343 | + if ((lo | hi) == 0) { | ||
344 | + opc &= ~MO_BSWAP; | ||
345 | + } | ||
346 | + | ||
347 | + switch (opc & (MO_SIZE | MO_BSWAP)) { | ||
348 | + case MO_16 | MO_BE: | ||
349 | + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8); | ||
350 | + tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 0); | ||
351 | + tcg_out_opc_imm(s, OPC_SB, lo, base, 1); | ||
352 | + break; | ||
353 | + | ||
354 | + case MO_16 | MO_LE: | ||
355 | + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8); | ||
356 | + tcg_out_opc_imm(s, OPC_SB, lo, base, 0); | ||
357 | + tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 1); | ||
358 | + break; | ||
359 | + | ||
360 | + case MO_32 | MO_BSWAP: | ||
361 | + tcg_out_bswap32(s, TCG_TMP3, lo, 0); | ||
362 | + lo = TCG_TMP3; | ||
363 | + /* fall through */ | ||
364 | + case MO_32: | ||
365 | + tcg_out_opc_imm(s, sw1, lo, base, 0); | ||
366 | + tcg_out_opc_imm(s, sw2, lo, base, 3); | ||
367 | + break; | ||
368 | + | ||
369 | + case MO_64 | MO_BSWAP: | ||
370 | + if (TCG_TARGET_REG_BITS == 64) { | ||
371 | + tcg_out_bswap64(s, TCG_TMP3, lo); | ||
372 | + lo = TCG_TMP3; | ||
373 | + } else if (use_mips32r2_instructions) { | ||
374 | + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? hi : lo); | ||
375 | + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? lo : hi); | ||
376 | + tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16); | ||
377 | + tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16); | ||
378 | + hi = MIPS_BE ? TCG_TMP0 : TCG_TMP1; | ||
379 | + lo = MIPS_BE ? TCG_TMP1 : TCG_TMP0; | ||
380 | + } else { | ||
381 | + tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi, 0); | ||
382 | + tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 0 + 0); | ||
383 | + tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 0 + 3); | ||
384 | + tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo, 0); | ||
385 | + tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 4 + 0); | ||
386 | + tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 4 + 3); | ||
387 | + break; | ||
388 | + } | ||
389 | + /* fall through */ | ||
390 | + case MO_64: | ||
391 | + if (TCG_TARGET_REG_BITS == 64) { | ||
392 | + tcg_out_opc_imm(s, sd1, lo, base, 0); | ||
393 | + tcg_out_opc_imm(s, sd2, lo, base, 7); | ||
394 | + } else { | ||
395 | + tcg_out_opc_imm(s, sw1, MIPS_BE ? hi : lo, base, 0 + 0); | ||
396 | + tcg_out_opc_imm(s, sw2, MIPS_BE ? hi : lo, base, 0 + 3); | ||
397 | + tcg_out_opc_imm(s, sw1, MIPS_BE ? lo : hi, base, 4 + 0); | ||
398 | + tcg_out_opc_imm(s, sw2, MIPS_BE ? lo : hi, base, 4 + 3); | ||
399 | + } | ||
400 | + break; | ||
401 | + | ||
402 | + default: | ||
403 | + tcg_abort(); | ||
404 | + } | ||
405 | +} | ||
406 | static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
407 | { | ||
408 | TCGReg addr_regl, addr_regh __attribute__((unused)); | ||
409 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
410 | MemOp opc; | ||
411 | #if defined(CONFIG_SOFTMMU) | ||
412 | tcg_insn_unit *label_ptr[2]; | ||
413 | +#else | ||
414 | + unsigned a_bits, s_bits; | ||
415 | #endif | ||
416 | TCGReg base = TCG_REG_A0; | ||
417 | |||
418 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
419 | data_regl, data_regh, addr_regl, addr_regh, | ||
420 | s->code_ptr, label_ptr); | ||
421 | #else | ||
422 | - base = TCG_REG_A0; | ||
423 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { | ||
424 | tcg_out_ext32u(s, base, addr_regl); | ||
425 | addr_regl = base; | ||
426 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
318 | } else { | 427 | } else { |
319 | /* RAM case */ | 428 | tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); |
320 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | 429 | } |
321 | @@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, | 430 | - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); |
322 | val = bswap64(val); | 431 | + a_bits = get_alignment_bits(opc); |
323 | } | 432 | + s_bits = opc & MO_SIZE; |
433 | + /* | ||
434 | + * R6 removes the left/right instructions but requires the | ||
435 | + * system to support misaligned memory accesses. | ||
436 | + */ | ||
437 | + if (use_mips32r6_instructions) { | ||
438 | + if (a_bits) { | ||
439 | + tcg_out_test_alignment(s, false, addr_regl, addr_regh, a_bits); | ||
440 | + } | ||
441 | + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); | ||
442 | + } else { | ||
443 | + if (a_bits && a_bits != s_bits) { | ||
444 | + tcg_out_test_alignment(s, false, addr_regl, addr_regh, a_bits); | ||
445 | + } | ||
446 | + if (a_bits >= s_bits) { | ||
447 | + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); | ||
448 | + } else { | ||
449 | + tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc); | ||
450 | + } | ||
451 | + } | ||
324 | #endif | 452 | #endif |
325 | - r = memory_region_dispatch_write(mr, addr1, val, MO_64, attrs); | 453 | } |
326 | + /* TODO: Merge bswap64 into memory_region_dispatch_write. */ | 454 | |
327 | + r = memory_region_dispatch_write(mr, addr1, val, | ||
328 | + MO_64 | devend_memop(endian), attrs); | ||
329 | } else { | ||
330 | /* RAM case */ | ||
331 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
332 | -- | 455 | -- |
333 | 2.17.1 | 456 | 2.25.1 |
334 | 457 | ||
335 | 458 | diff view generated by jsdifflib |
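
As a reading aid for the MIPS patch above: the following minimal C sketch models the value that the big-endian LWL/LWR pair in tcg_out_qemu_ld_unalign() assembles for a 32-bit load from an address with no alignment guarantee. The function name is illustrative only; it is not part of QEMU.

    #include <stdint.h>

    /* Model of OPC_LWL lo,0(base) followed by OPC_LWR lo,3(base) on a
     * big-endian host: LWL fills the high-order bytes of the register
     * from the aligned word containing p[0], LWR fills the low-order
     * bytes from the aligned word containing p[3], so the pair loads
     * 4 bytes at any alignment without faulting. */
    static uint32_t model_lwl_lwr_be(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

On a little-endian host the two roles are mirrored, which is why the code above selects the lw1/lw2 (and sw1/sw2, ld1/ld2) opcodes via MIPS_BE.
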
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | We can use the routines just added for user-only to emit |
---|---|---|---|
2 | 2 | unaligned accesses in softmmu mode too. | |
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | 3 | |
4 | being converted into a "MemOp op". | 4 | Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com> |
5 | 5 | Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com> | |
6 | Convert interfaces by using no-op size_memop. | 6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
17 | Message-Id: <2f41da26201fb9b0339c2b7fde34df864f7f9ea8.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
19 | --- | 8 | --- |
20 | hw/s390x/s390-pci-inst.c | 8 +++++--- | 9 | tcg/mips/tcg-target.c.inc | 91 ++++++++++++++++++++++----------------- |
21 | 1 file changed, 5 insertions(+), 3 deletions(-) | 10 | 1 file changed, 51 insertions(+), 40 deletions(-) |
22 | 11 | ||
23 | diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c | 12 | diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc |
24 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/hw/s390x/s390-pci-inst.c | 14 | --- a/tcg/mips/tcg-target.c.inc |
26 | +++ b/hw/s390x/s390-pci-inst.c | 15 | +++ b/tcg/mips/tcg-target.c.inc |
27 | @@ -XXX,XX +XXX,XX @@ | 16 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, |
28 | #include "cpu.h" | 17 | tcg_insn_unit *label_ptr[2], bool is_load) |
29 | #include "s390-pci-inst.h" | 18 | { |
30 | #include "s390-pci-bus.h" | 19 | MemOp opc = get_memop(oi); |
31 | +#include "exec/memop.h" | 20 | - unsigned s_bits = opc & MO_SIZE; |
32 | #include "exec/memory-internal.h" | 21 | unsigned a_bits = get_alignment_bits(opc); |
33 | #include "qemu/error-report.h" | 22 | + unsigned s_bits = opc & MO_SIZE; |
34 | #include "sysemu/hw_accel.h" | 23 | + unsigned a_mask = (1 << a_bits) - 1; |
35 | @@ -XXX,XX +XXX,XX @@ static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias, | 24 | + unsigned s_mask = (1 << s_bits) - 1; |
36 | mr = pbdev->pdev->io_regions[pcias].memory; | 25 | int mem_index = get_mmuidx(oi); |
37 | mr = s390_get_subregion(mr, offset, len); | 26 | int fast_off = TLB_MASK_TABLE_OFS(mem_index); |
38 | offset -= mr->addr; | 27 | int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); |
39 | - return memory_region_dispatch_read(mr, offset, data, len, | 28 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, |
40 | + return memory_region_dispatch_read(mr, offset, data, size_memop(len), | 29 | int add_off = offsetof(CPUTLBEntry, addend); |
41 | MEMTXATTRS_UNSPECIFIED); | 30 | int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) |
31 | : offsetof(CPUTLBEntry, addr_write)); | ||
32 | - target_ulong mask; | ||
33 | + target_ulong tlb_mask; | ||
34 | |||
35 | /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ | ||
36 | tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off); | ||
37 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, | ||
38 | /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */ | ||
39 | tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); | ||
40 | |||
41 | - /* We don't currently support unaligned accesses. | ||
42 | - We could do so with mips32r6. */ | ||
43 | - if (a_bits < s_bits) { | ||
44 | - a_bits = s_bits; | ||
45 | - } | ||
46 | - | ||
47 | - /* Mask the page bits, keeping the alignment bits to compare against. */ | ||
48 | - mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1); | ||
49 | - | ||
50 | /* Load the (low-half) tlb comparator. */ | ||
51 | if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { | ||
52 | - tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF); | ||
53 | - tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask); | ||
54 | + tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF); | ||
55 | } else { | ||
56 | tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD | ||
57 | : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW), | ||
58 | TCG_TMP0, TCG_TMP3, cmp_off); | ||
59 | - tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask); | ||
60 | - /* No second compare is required here; | ||
61 | - load the tlb addend for the fast path. */ | ||
62 | - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); | ||
63 | } | ||
64 | |||
65 | /* Zero extend a 32-bit guest address for a 64-bit host. */ | ||
66 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, | ||
67 | tcg_out_ext32u(s, base, addrl); | ||
68 | addrl = base; | ||
69 | } | ||
70 | - tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl); | ||
71 | + | ||
72 | + /* | ||
73 | + * Mask the page bits, keeping the alignment bits to compare against. | ||
74 | + * For unaligned accesses, compare against the end of the access to | ||
75 | + * verify that it does not cross a page boundary. | ||
76 | + */ | ||
77 | + tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask; | ||
78 | + tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, tlb_mask); | ||
79 | + if (a_mask >= s_mask) { | ||
80 | + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl); | ||
81 | + } else { | ||
82 | + tcg_out_opc_imm(s, ALIAS_PADDI, TCG_TMP2, addrl, s_mask - a_mask); | ||
83 | + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2); | ||
84 | + } | ||
85 | + | ||
86 | + if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { | ||
87 | + /* Load the tlb addend for the fast path. */ | ||
88 | + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); | ||
89 | + } | ||
90 | |||
91 | label_ptr[0] = s->code_ptr; | ||
92 | tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); | ||
93 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, | ||
94 | /* Load and test the high half tlb comparator. */ | ||
95 | if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { | ||
96 | /* delay slot */ | ||
97 | - tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); | ||
98 | + tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); | ||
99 | |||
100 | /* Load the tlb addend for the fast path. */ | ||
101 | tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); | ||
102 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
103 | } | ||
42 | } | 104 | } |
43 | 105 | ||
44 | @@ -XXX,XX +XXX,XX @@ static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias, | 106 | -static void __attribute__((unused)) |
45 | mr = pbdev->pdev->io_regions[pcias].memory; | 107 | -tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, |
46 | mr = s390_get_subregion(mr, offset, len); | 108 | +static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, |
47 | offset -= mr->addr; | 109 | TCGReg base, MemOp opc, bool is_64) |
48 | - return memory_region_dispatch_write(mr, offset, data, len, | 110 | { |
49 | + return memory_region_dispatch_write(mr, offset, data, size_memop(len), | 111 | const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR; |
50 | MEMTXATTRS_UNSPECIFIED); | 112 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) |
113 | #if defined(CONFIG_SOFTMMU) | ||
114 | tcg_insn_unit *label_ptr[2]; | ||
115 | #else | ||
116 | - unsigned a_bits, s_bits; | ||
117 | #endif | ||
118 | + unsigned a_bits, s_bits; | ||
119 | TCGReg base = TCG_REG_A0; | ||
120 | |||
121 | data_regl = *args++; | ||
122 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
123 | addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); | ||
124 | oi = *args++; | ||
125 | opc = get_memop(oi); | ||
126 | + a_bits = get_alignment_bits(opc); | ||
127 | + s_bits = opc & MO_SIZE; | ||
128 | |||
129 | + /* | ||
130 | + * R6 removes the left/right instructions but requires the | ||
131 | + * system to support misaligned memory accesses. | ||
132 | + */ | ||
133 | #if defined(CONFIG_SOFTMMU) | ||
134 | tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1); | ||
135 | - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); | ||
136 | + if (use_mips32r6_instructions || a_bits >= s_bits) { | ||
137 | + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); | ||
138 | + } else { | ||
139 | + tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64); | ||
140 | + } | ||
141 | add_qemu_ldst_label(s, 1, oi, | ||
142 | (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), | ||
143 | data_regl, data_regh, addr_regl, addr_regh, | ||
144 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
145 | } else { | ||
146 | tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); | ||
147 | } | ||
148 | - a_bits = get_alignment_bits(opc); | ||
149 | - s_bits = opc & MO_SIZE; | ||
150 | - /* | ||
151 | - * R6 removes the left/right instructions but requires the | ||
152 | - * system to support misaligned memory accesses. | ||
153 | - */ | ||
154 | if (use_mips32r6_instructions) { | ||
155 | if (a_bits) { | ||
156 | tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits); | ||
157 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, | ||
158 | } | ||
51 | } | 159 | } |
52 | 160 | ||
53 | @@ -XXX,XX +XXX,XX @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, | 161 | -static void __attribute__((unused)) |
54 | 162 | -tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi, | |
55 | for (i = 0; i < len / 8; i++) { | 163 | +static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi, |
56 | result = memory_region_dispatch_write(mr, offset + i * 8, | 164 | TCGReg base, MemOp opc) |
57 | - ldq_p(buffer + i * 8), 8, | 165 | { |
58 | + ldq_p(buffer + i * 8), | 166 | const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR; |
59 | + size_memop(8), | 167 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) |
60 | MEMTXATTRS_UNSPECIFIED); | 168 | MemOp opc; |
61 | if (result != MEMTX_OK) { | 169 | #if defined(CONFIG_SOFTMMU) |
62 | s390_program_interrupt(env, PGM_OPERAND, 6, ra); | 170 | tcg_insn_unit *label_ptr[2]; |
171 | -#else | ||
172 | - unsigned a_bits, s_bits; | ||
173 | #endif | ||
174 | + unsigned a_bits, s_bits; | ||
175 | TCGReg base = TCG_REG_A0; | ||
176 | |||
177 | data_regl = *args++; | ||
178 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
179 | addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); | ||
180 | oi = *args++; | ||
181 | opc = get_memop(oi); | ||
182 | + a_bits = get_alignment_bits(opc); | ||
183 | + s_bits = opc & MO_SIZE; | ||
184 | |||
185 | + /* | ||
186 | + * R6 removes the left/right instructions but requires the | ||
187 | + * system to support misaligned memory accesses. | ||
188 | + */ | ||
189 | #if defined(CONFIG_SOFTMMU) | ||
190 | tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0); | ||
191 | - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); | ||
192 | + if (use_mips32r6_instructions || a_bits >= s_bits) { | ||
193 | + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); | ||
194 | + } else { | ||
195 | + tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc); | ||
196 | + } | ||
197 | add_qemu_ldst_label(s, 0, oi, | ||
198 | (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), | ||
199 | data_regl, data_regh, addr_regl, addr_regh, | ||
200 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
201 | } else { | ||
202 | tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); | ||
203 | } | ||
204 | - a_bits = get_alignment_bits(opc); | ||
205 | - s_bits = opc & MO_SIZE; | ||
206 | - /* | ||
207 | - * R6 removes the left/right instructions but requires the | ||
208 | - * system to support misaligned memory accesses. | ||
209 | - */ | ||
210 | if (use_mips32r6_instructions) { | ||
211 | if (a_bits) { | ||
212 | tcg_out_test_alignment(s, false, addr_regl, addr_regh, a_bits); | ||
63 | -- | 213 | -- |
64 | 2.17.1 | 214 | 2.25.1 |
65 | 215 | ||
66 | 216 | diff view generated by jsdifflib |
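
The key trick in the softmmu patch above is the adjusted TLB comparison emitted by tcg_out_tlb_load(). A rough C sketch of the fast-path test follows; the helper name and the 4 KiB page size are assumptions for illustration, not QEMU definitions.

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_BITS 12                          /* assumed 4 KiB target page */
    #define PAGE_MASK ((uint64_t)-1 << PAGE_BITS)

    static bool tlb_fast_path_hits(uint64_t addr, uint64_t tlb_addr,
                                   unsigned a_bits, unsigned s_bits)
    {
        uint64_t a_mask = (1ULL << a_bits) - 1;   /* alignment of access - 1 */
        uint64_t s_mask = (1ULL << s_bits) - 1;   /* size of access - 1 */
        uint64_t tlb_mask = PAGE_MASK | a_mask;
        /* When the access is wider than its guaranteed alignment, probe
         * its last byte so a page-crossing access fails the compare and
         * falls through to the slow path. */
        uint64_t probe = (a_mask >= s_mask) ? addr : addr + (s_mask - a_mask);
        return (probe & tlb_mask) == tlb_addr;
    }

Misalignment within a_mask still fails the compare, because tlb_mask keeps the low a_mask bits while the stored comparator has them clear.
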
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
16 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
17 | Message-Id: <3b042deef0a60dd49ae2320ece92120ba6027f2b.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
19 | --- | ||
20 | exec.c | 6 ++++-- | ||
21 | memory_ldst.inc.c | 18 +++++++++--------- | ||
22 | 2 files changed, 13 insertions(+), 11 deletions(-) | ||
23 | |||
24 | diff --git a/exec.c b/exec.c | ||
25 | index XXXXXXX..XXXXXXX 100644 | ||
26 | --- a/exec.c | ||
27 | +++ b/exec.c | ||
28 | @@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, | ||
29 | /* XXX: could force current_cpu to NULL to avoid | ||
30 | potential bugs */ | ||
31 | val = ldn_p(buf, l); | ||
32 | - result |= memory_region_dispatch_write(mr, addr1, val, l, attrs); | ||
33 | + result |= memory_region_dispatch_write(mr, addr1, val, | ||
34 | + size_memop(l), attrs); | ||
35 | } else { | ||
36 | /* RAM case */ | ||
37 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); | ||
38 | @@ -XXX,XX +XXX,XX @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, | ||
39 | /* I/O case */ | ||
40 | release_lock |= prepare_mmio_access(mr); | ||
41 | l = memory_access_size(mr, l, addr1); | ||
42 | - result |= memory_region_dispatch_read(mr, addr1, &val, l, attrs); | ||
43 | + result |= memory_region_dispatch_read(mr, addr1, &val, | ||
44 | + size_memop(l), attrs); | ||
45 | stn_p(buf, l, val); | ||
46 | } else { | ||
47 | /* RAM case */ | ||
48 | diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c | ||
49 | index XXXXXXX..XXXXXXX 100644 | ||
50 | --- a/memory_ldst.inc.c | ||
51 | +++ b/memory_ldst.inc.c | ||
52 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, | ||
53 | release_lock |= prepare_mmio_access(mr); | ||
54 | |||
55 | /* I/O case */ | ||
56 | - r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs); | ||
57 | + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(4), attrs); | ||
58 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
59 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
60 | val = bswap32(val); | ||
61 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, | ||
62 | release_lock |= prepare_mmio_access(mr); | ||
63 | |||
64 | /* I/O case */ | ||
65 | - r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs); | ||
66 | + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(8), attrs); | ||
67 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
68 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
69 | val = bswap64(val); | ||
70 | @@ -XXX,XX +XXX,XX @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, | ||
71 | release_lock |= prepare_mmio_access(mr); | ||
72 | |||
73 | /* I/O case */ | ||
74 | - r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs); | ||
75 | + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(1), attrs); | ||
76 | } else { | ||
77 | /* RAM case */ | ||
78 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
79 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, | ||
80 | release_lock |= prepare_mmio_access(mr); | ||
81 | |||
82 | /* I/O case */ | ||
83 | - r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs); | ||
84 | + r = memory_region_dispatch_read(mr, addr1, &val, size_memop(2), attrs); | ||
85 | #if defined(TARGET_WORDS_BIGENDIAN) | ||
86 | if (endian == DEVICE_LITTLE_ENDIAN) { | ||
87 | val = bswap16(val); | ||
88 | @@ -XXX,XX +XXX,XX @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, | ||
89 | if (l < 4 || !memory_access_is_direct(mr, true)) { | ||
90 | release_lock |= prepare_mmio_access(mr); | ||
91 | |||
92 | - r = memory_region_dispatch_write(mr, addr1, val, 4, attrs); | ||
93 | + r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); | ||
94 | } else { | ||
95 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
96 | stl_p(ptr, val); | ||
97 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, | ||
98 | val = bswap32(val); | ||
99 | } | ||
100 | #endif | ||
101 | - r = memory_region_dispatch_write(mr, addr1, val, 4, attrs); | ||
102 | + r = memory_region_dispatch_write(mr, addr1, val, size_memop(4), attrs); | ||
103 | } else { | ||
104 | /* RAM case */ | ||
105 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
106 | @@ -XXX,XX +XXX,XX @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL, | ||
107 | mr = TRANSLATE(addr, &addr1, &l, true, attrs); | ||
108 | if (!memory_access_is_direct(mr, true)) { | ||
109 | release_lock |= prepare_mmio_access(mr); | ||
110 | - r = memory_region_dispatch_write(mr, addr1, val, 1, attrs); | ||
111 | + r = memory_region_dispatch_write(mr, addr1, val, size_memop(1), attrs); | ||
112 | } else { | ||
113 | /* RAM case */ | ||
114 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
115 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, | ||
116 | val = bswap16(val); | ||
117 | } | ||
118 | #endif | ||
119 | - r = memory_region_dispatch_write(mr, addr1, val, 2, attrs); | ||
120 | + r = memory_region_dispatch_write(mr, addr1, val, size_memop(2), attrs); | ||
121 | } else { | ||
122 | /* RAM case */ | ||
123 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
124 | @@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, | ||
125 | val = bswap64(val); | ||
126 | } | ||
127 | #endif | ||
128 | - r = memory_region_dispatch_write(mr, addr1, val, 8, attrs); | ||
129 | + r = memory_region_dispatch_write(mr, addr1, val, size_memop(8), attrs); | ||
130 | } else { | ||
131 | /* RAM case */ | ||
132 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
133 | -- | ||
134 | 2.17.1 | ||
135 | |||
136 | diff view generated by jsdifflib |
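
The two-stage plan described in the deleted patch above can be summarized with a short sketch. Both definitions are illustrative reconstructions of the series' description, not quotes of the merged code.

    typedef unsigned MemOp;     /* stand-in for the type from exec/memop.h */

    /* Stage 1: size_memop is a deliberate no-op, so converted callers of
     * memory_region_dispatch_{read|write} keep passing the raw byte count
     * and behaviour is unchanged. */
    static inline MemOp size_memop_noop(unsigned size)
    {
        return size;
    }

    /* Stage 2: once every caller is converted, the byte count (1, 2, 4
     * or 8) maps onto the MO_* size encoding, i.e. its log2:
     * MO_8/MO_16/MO_32/MO_64. */
    static inline MemOp size_memop_converted(unsigned size)
    {
        return __builtin_ctz(size);   /* size must be a power of two */
    }
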
Deleted patch | |||
---|---|---|---|
1 | From: Tony Nguyen <tony.nguyen@bt.com> | ||
2 | 1 | ||
3 | The memory_region_dispatch_{read|write} operand "unsigned size" is | ||
4 | being converted into a "MemOp op". | ||
5 | |||
6 | Convert interfaces by using no-op size_memop. | ||
7 | |||
8 | After all interfaces are converted, size_memop will be implemented | ||
9 | and the memory_region_dispatch_{read|write} operand "unsigned size" | ||
10 | will be converted into a "MemOp op". | ||
11 | |||
12 | As size_memop is a no-op, this patch does not change any behaviour. | ||
13 | |||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
16 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
17 | Message-Id: <c4571c76467ade83660970f7ef9d7292297f1908.1566466906.git.tony.nguyen@bt.com> | ||
18 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
19 | --- | ||
20 | accel/tcg/cputlb.c | 8 ++++---- | ||
21 | 1 file changed, 4 insertions(+), 4 deletions(-) | ||
22 | |||
23 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/accel/tcg/cputlb.c | ||
26 | +++ b/accel/tcg/cputlb.c | ||
27 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
28 | qemu_mutex_lock_iothread(); | ||
29 | locked = true; | ||
30 | } | ||
31 | - r = memory_region_dispatch_read(mr, mr_offset, | ||
32 | - &val, size, iotlbentry->attrs); | ||
33 | + r = memory_region_dispatch_read(mr, mr_offset, &val, size_memop(size), | ||
34 | + iotlbentry->attrs); | ||
35 | if (r != MEMTX_OK) { | ||
36 | hwaddr physaddr = mr_offset + | ||
37 | section->offset_within_address_space - | ||
38 | @@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
39 | qemu_mutex_lock_iothread(); | ||
40 | locked = true; | ||
41 | } | ||
42 | - r = memory_region_dispatch_write(mr, mr_offset, | ||
43 | - val, size, iotlbentry->attrs); | ||
44 | + r = memory_region_dispatch_write(mr, mr_offset, val, size_memop(size), | ||
45 | + iotlbentry->attrs); | ||
46 | if (r != MEMTX_OK) { | ||
47 | hwaddr physaddr = mr_offset + | ||
48 | section->offset_within_address_space - | ||
49 | -- | ||
50 | 2.17.1 | ||
51 | |||
52 | diff view generated by jsdifflib |
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | When BH is constant, it is constrained to 11 bits for use in MOVCC. |
---|---|---|---|
2 | For the cases in which we must load the constant BH into a register, | ||
3 | we do not need the full logic of tcg_out_movi; we can use the simpler | ||
4 | function for emitting a 13 bit constant. | ||
2 | 5 | ||
3 | A temporarily no-op size_memop was introduced to aid the conversion of | 6 | This eliminates the only case in which TCG_REG_T2 was passed to |
4 | memory_region_dispatch_{read|write} operand "unsigned size" into | 7 | tcg_out_movi, which will shortly become invalid. |
5 | "MemOp op". | ||
6 | 8 | ||
7 | Now size_memop is implemented; the sizes are still hard coded, but as | 10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
8 | MO_{8|16|32|64}. This is more expressive and avoids size_memop calls. | ||
9 | |||
10 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com> | ||
13 | Message-Id: <99c4459d5c1dc9013820be3dbda9798165c15b99.1566466906.git.tony.nguyen@bt.com> | ||
14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
15 | --- | 11 | --- |
16 | target/mips/op_helper.c | 4 ++-- | 12 | tcg/sparc/tcg-target.c.inc | 10 +++++++--- |
17 | 1 file changed, 2 insertions(+), 2 deletions(-) | 13 | 1 file changed, 7 insertions(+), 3 deletions(-) |
18 | 14 | ||
19 | diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c | 15 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc |
20 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/target/mips/op_helper.c | 17 | --- a/tcg/sparc/tcg-target.c.inc |
22 | +++ b/target/mips/op_helper.c | 18 | +++ b/tcg/sparc/tcg-target.c.inc |
23 | @@ -XXX,XX +XXX,XX @@ void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op) | 19 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, |
24 | if (op == 9) { | 20 | if (use_vis3_instructions && !is_sub) { |
25 | /* Index Store Tag */ | 21 | /* Note that ADDXC doesn't accept immediates. */ |
26 | memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo, | 22 | if (bhconst && bh != 0) { |
27 | - size_memop(8), MEMTXATTRS_UNSPECIFIED); | 23 | - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh); |
28 | + MO_64, MEMTXATTRS_UNSPECIFIED); | 24 | + tcg_out_movi_imm13(s, TCG_REG_T2, bh); |
29 | } else if (op == 5) { | 25 | bh = TCG_REG_T2; |
30 | /* Index Load Tag */ | 26 | } |
31 | memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo, | 27 | tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC); |
32 | - size_memop(8), MEMTXATTRS_UNSPECIFIED); | 28 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, |
33 | + MO_64, MEMTXATTRS_UNSPECIFIED); | 29 | tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0); |
34 | } | 30 | } |
35 | #endif | 31 | } else { |
36 | } | 32 | - /* Otherwise adjust BH as if there is carry into T2 ... */ |
33 | + /* | ||
34 | + * Otherwise adjust BH as if there is carry into T2. | ||
35 | + * Note that constant BH is constrained to 11 bits for the MOVCC, | ||
36 | + * so the adjustment fits 12 bits. | ||
37 | + */ | ||
38 | if (bhconst) { | ||
39 | - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1)); | ||
40 | + tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1)); | ||
41 | } else { | ||
42 | tcg_out_arithi(s, TCG_REG_T2, bh, 1, | ||
43 | is_sub ? ARITH_SUB : ARITH_ADD); | ||
37 | -- | 44 | -- |
38 | 2.17.1 | 45 | 2.25.1 |
39 | 46 | ||
40 | 47 | diff view generated by jsdifflib |
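
The range argument in the sparc patch above is worth spelling out. A small C sketch, with an illustrative helper that is not part of tcg:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if val is representable as a signed immediate of `bits` bits
     * (assumes 0 < bits < 64). */
    static bool fits_signed_imm(int64_t val, unsigned bits)
    {
        int64_t lo = -((int64_t)1 << (bits - 1));
        int64_t hi =  ((int64_t)1 << (bits - 1)) - 1;
        return val >= lo && val <= hi;
    }

A constant accepted by the 11-bit MOVCC field lies in [-1024, 1023]; after the +/-1 carry adjustment it lies in [-1025, 1024], which still fits the 13-bit field ([-4096, 4095]) emitted by tcg_out_movi_imm13, so the full tcg_out_movi is unnecessary.
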
1 | From: David Hildenbrand <david@redhat.com> | 1 | Handle 32-bit constants with a separate function, so that |
---|---|---|---|
2 | tcg_out_movi_int does not need to recurse. This slightly | ||
3 | rearranges the order of tests for small constants, but | ||
4 | produces the same output. | ||
2 | 5 | ||
3 | Factor it out into common code. Similar to the !CONFIG_USER_ONLY variant, | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | let's not allow crossing page boundaries. | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
5 | |||
6 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <20190826075112.25637-4-david@redhat.com> | ||
9 | [rth: Move cpu & cc variables inside if block.] | ||
10 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
11 | --- | 8 | --- |
12 | include/exec/exec-all.h | 4 ++-- | 9 | tcg/sparc/tcg-target.c.inc | 36 +++++++++++++++++++++--------------- |
13 | accel/tcg/user-exec.c | 14 ++++++++++++++ | 10 | 1 file changed, 21 insertions(+), 15 deletions(-) |
14 | target/s390x/mem_helper.c | 7 ------- | ||
15 | 3 files changed, 16 insertions(+), 9 deletions(-) | ||
16 | 11 | ||
17 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | 12 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc |
18 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/include/exec/exec-all.h | 14 | --- a/tcg/sparc/tcg-target.c.inc |
20 | +++ b/include/exec/exec-all.h | 15 | +++ b/tcg/sparc/tcg-target.c.inc |
21 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | 16 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) |
22 | void tlb_set_page(CPUState *cpu, target_ulong vaddr, | 17 | tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); |
23 | hwaddr paddr, int prot, | ||
24 | int mmu_idx, target_ulong size); | ||
25 | -void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
26 | - uintptr_t retaddr); | ||
27 | #else | ||
28 | static inline void tlb_init(CPUState *cpu) | ||
29 | { | ||
30 | @@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, | ||
31 | { | ||
32 | } | 18 | } |
33 | #endif | 19 | |
34 | +void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | 20 | +static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg) |
35 | + uintptr_t retaddr); | ||
36 | |||
37 | #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ | ||
38 | |||
39 | diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/accel/tcg/user-exec.c | ||
42 | +++ b/accel/tcg/user-exec.c | ||
43 | @@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, | ||
44 | g_assert_not_reached(); | ||
45 | } | ||
46 | |||
47 | +void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, | ||
48 | + uintptr_t retaddr) | ||
49 | +{ | 21 | +{ |
50 | + if (!guest_addr_valid(addr) || | 22 | + if (check_fit_i32(arg, 13)) { |
51 | + page_check_range(addr, size, PAGE_WRITE) < 0) { | 23 | + /* A 13-bit constant sign-extended to 64-bits. */ |
52 | + CPUState *cpu = env_cpu(env); | 24 | + tcg_out_movi_imm13(s, ret, arg); |
53 | + CPUClass *cc = CPU_GET_CLASS(cpu); | 25 | + } else { |
54 | + | 26 | + /* A 32-bit constant zero-extended to 64 bits. */ |
55 | + cc->tlb_fill(cpu, addr, size, MMU_DATA_STORE, MMU_USER_IDX, false, | 27 | + tcg_out_sethi(s, ret, arg); |
56 | + retaddr); | 28 | + if (arg & 0x3ff) { |
57 | + g_assert_not_reached(); | 29 | + tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); |
30 | + } | ||
58 | + } | 31 | + } |
59 | +} | 32 | +} |
60 | + | 33 | + |
61 | #if defined(__i386__) | 34 | static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, |
62 | 35 | tcg_target_long arg, bool in_prologue) | |
63 | #if defined(__NetBSD__) | ||
64 | diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c | ||
65 | index XXXXXXX..XXXXXXX 100644 | ||
66 | --- a/target/s390x/mem_helper.c | ||
67 | +++ b/target/s390x/mem_helper.c | ||
68 | @@ -XXX,XX +XXX,XX @@ uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) | ||
69 | void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, | ||
70 | uintptr_t ra) | ||
71 | { | 36 | { |
72 | -#ifdef CONFIG_USER_ONLY | 37 | tcg_target_long hi, lo = (int32_t)arg; |
73 | - if (!guest_addr_valid(addr) || !guest_addr_valid(addr + len - 1) || | 38 | tcg_target_long test, lsb; |
74 | - page_check_range(addr, len, PAGE_WRITE) < 0) { | 39 | |
75 | - s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra); | 40 | - /* Make sure we test 32-bit constants for imm13 properly. */ |
41 | - if (type == TCG_TYPE_I32) { | ||
42 | - arg = lo; | ||
43 | + /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ | ||
44 | + if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { | ||
45 | + tcg_out_movi_imm32(s, ret, arg); | ||
46 | + return; | ||
47 | } | ||
48 | |||
49 | /* A 13-bit constant sign-extended to 64-bits. */ | ||
50 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, | ||
51 | } | ||
52 | } | ||
53 | |||
54 | - /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ | ||
55 | - if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { | ||
56 | - tcg_out_sethi(s, ret, arg); | ||
57 | - if (arg & 0x3ff) { | ||
58 | - tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); | ||
59 | - } | ||
60 | - return; | ||
76 | - } | 61 | - } |
77 | -#else | 62 | - |
78 | /* test the actual access, not just any access to the page due to LAP */ | 63 | /* A 32-bit constant sign-extended to 64-bits. */ |
79 | while (len) { | 64 | if (arg == lo) { |
80 | const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); | 65 | tcg_out_sethi(s, ret, ~arg); |
81 | @@ -XXX,XX +XXX,XX @@ void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, | 66 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, |
82 | addr = wrap_address(env, addr + curlen); | 67 | /* A 64-bit constant decomposed into 2 32-bit pieces. */ |
83 | len -= curlen; | 68 | if (check_fit_i32(lo, 13)) { |
69 | hi = (arg - lo) >> 32; | ||
70 | - tcg_out_movi(s, TCG_TYPE_I32, ret, hi); | ||
71 | + tcg_out_movi_imm32(s, ret, hi); | ||
72 | tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); | ||
73 | tcg_out_arithi(s, ret, ret, lo, ARITH_ADD); | ||
74 | } else { | ||
75 | hi = arg >> 32; | ||
76 | - tcg_out_movi(s, TCG_TYPE_I32, ret, hi); | ||
77 | - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo); | ||
78 | + tcg_out_movi_imm32(s, ret, hi); | ||
79 | + tcg_out_movi_imm32(s, TCG_REG_T2, lo); | ||
80 | tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); | ||
81 | tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR); | ||
84 | } | 82 | } |
85 | -#endif | ||
86 | } | ||
87 | |||
88 | void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len) | ||
89 | -- | 83 | -- |
90 | 2.17.1 | 84 | 2.25.1 |
91 | 85 | ||
92 | 86 | diff view generated by jsdifflib |
1 | From: David Hildenbrand <david@redhat.com> | 1 | This will allow us to control exactly what scratch register is |
---|---|---|---|
2 | used for loading the constant. | ||
2 | 3 | ||
3 | We want to perform the same checks in probe_write() to trigger a CPU | 4 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | exit before doing any modifications. We'll have to pass a PC. | ||
5 | |||
6 | Signed-off-by: David Hildenbrand <david@redhat.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <20190823100741.9621-9-david@redhat.com> | ||
9 | [rth: Use vaddr for len, like other watchpoint functions; | ||
10 | Move user-only stub to static inline.] | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | --- | 6 | --- |
13 | include/hw/core/cpu.h | 7 +++++++ | 7 | tcg/sparc/tcg-target.c.inc | 15 +++++++++------ |
14 | exec.c | 26 ++++++++++++++++++-------- | 8 | 1 file changed, 9 insertions(+), 6 deletions(-) |
15 | 2 files changed, 25 insertions(+), 8 deletions(-) | ||
16 | 9 | ||
17 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | 10 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc |
18 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/include/hw/core/cpu.h | 12 | --- a/tcg/sparc/tcg-target.c.inc |
20 | +++ b/include/hw/core/cpu.h | 13 | +++ b/tcg/sparc/tcg-target.c.inc |
21 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, | 14 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg) |
22 | static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) | 15 | } |
16 | |||
17 | static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, | ||
18 | - tcg_target_long arg, bool in_prologue) | ||
19 | + tcg_target_long arg, bool in_prologue, | ||
20 | + TCGReg scratch) | ||
23 | { | 21 | { |
24 | } | 22 | tcg_target_long hi, lo = (int32_t)arg; |
25 | + | 23 | tcg_target_long test, lsb; |
26 | +static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | 24 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, |
27 | + MemTxAttrs atr, int fl, uintptr_t ra) | 25 | } else { |
28 | +{ | 26 | hi = arg >> 32; |
29 | +} | 27 | tcg_out_movi_imm32(s, ret, hi); |
30 | #else | 28 | - tcg_out_movi_imm32(s, TCG_REG_T2, lo); |
31 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, | 29 | + tcg_out_movi_imm32(s, scratch, lo); |
32 | int flags, CPUWatchpoint **watchpoint); | 30 | tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); |
33 | @@ -XXX,XX +XXX,XX @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, | 31 | - tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR); |
34 | vaddr len, int flags); | 32 | + tcg_out_arith(s, ret, ret, scratch, ARITH_OR); |
35 | void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); | ||
36 | void cpu_watchpoint_remove_all(CPUState *cpu, int mask); | ||
37 | +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
38 | + MemTxAttrs attrs, int flags, uintptr_t ra); | ||
39 | #endif | ||
40 | |||
41 | /** | ||
42 | diff --git a/exec.c b/exec.c | ||
43 | index XXXXXXX..XXXXXXX 100644 | ||
44 | --- a/exec.c | ||
45 | +++ b/exec.c | ||
46 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps notdirty_mem_ops = { | ||
47 | }; | ||
48 | |||
49 | /* Generate a debug exception if a watchpoint has been hit. */ | ||
50 | -static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | ||
51 | +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, | ||
52 | + MemTxAttrs attrs, int flags, uintptr_t ra) | ||
53 | { | ||
54 | - CPUState *cpu = current_cpu; | ||
55 | CPUClass *cc = CPU_GET_CLASS(cpu); | ||
56 | - target_ulong vaddr; | ||
57 | CPUWatchpoint *wp; | ||
58 | |||
59 | assert(tcg_enabled()); | ||
60 | @@ -XXX,XX +XXX,XX @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | ||
61 | cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); | ||
62 | return; | ||
63 | } | ||
64 | - vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; | ||
65 | - vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len); | ||
66 | + | ||
67 | + addr = cc->adjust_watchpoint_address(cpu, addr, len); | ||
68 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { | ||
69 | - if (cpu_watchpoint_address_matches(wp, vaddr, len) | ||
70 | + if (cpu_watchpoint_address_matches(wp, addr, len) | ||
71 | && (wp->flags & flags)) { | ||
72 | if (flags == BP_MEM_READ) { | ||
73 | wp->flags |= BP_WATCHPOINT_HIT_READ; | ||
74 | } else { | ||
75 | wp->flags |= BP_WATCHPOINT_HIT_WRITE; | ||
76 | } | ||
77 | - wp->hitaddr = vaddr; | ||
78 | + wp->hitaddr = MAX(addr, wp->vaddr); | ||
79 | wp->hitattrs = attrs; | ||
80 | if (!cpu->watchpoint_hit) { | ||
81 | if (wp->flags & BP_CPU && | ||
82 | @@ -XXX,XX +XXX,XX @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | ||
83 | if (wp->flags & BP_STOP_BEFORE_ACCESS) { | ||
84 | cpu->exception_index = EXCP_DEBUG; | ||
85 | mmap_unlock(); | ||
86 | - cpu_loop_exit(cpu); | ||
87 | + cpu_loop_exit_restore(cpu, ra); | ||
88 | } else { | ||
89 | /* Force execution of one insn next time. */ | ||
90 | cpu->cflags_next_tb = 1 | curr_cflags(); | ||
91 | mmap_unlock(); | ||
92 | + if (ra) { | ||
93 | + cpu_restore_state(cpu, ra, true); | ||
94 | + } | ||
95 | cpu_loop_exit_noexc(cpu); | ||
96 | } | ||
97 | } | ||
98 | @@ -XXX,XX +XXX,XX @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | ||
99 | } | 33 | } |
100 | } | 34 | } |
101 | 35 | ||
102 | +static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) | 36 | static void tcg_out_movi(TCGContext *s, TCGType type, |
103 | +{ | 37 | TCGReg ret, tcg_target_long arg) |
104 | + CPUState *cpu = current_cpu; | 38 | { |
105 | + vaddr addr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; | 39 | - tcg_out_movi_int(s, type, ret, arg, false); |
106 | + | 40 | + tcg_debug_assert(ret != TCG_REG_T2); |
107 | + cpu_check_watchpoint(cpu, addr, len, attrs, flags, 0); | 41 | + tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2); |
108 | +} | 42 | } |
109 | + | 43 | |
110 | /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, | 44 | static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1, |
111 | so these check for a hit then pass through to the normal out-of-line | 45 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest, |
112 | phys routines. */ | 46 | } else { |
47 | uintptr_t desti = (uintptr_t)dest; | ||
48 | tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1, | ||
49 | - desti & ~0xfff, in_prologue); | ||
50 | + desti & ~0xfff, in_prologue, TCG_REG_O7); | ||
51 | tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL); | ||
52 | } | ||
53 | } | ||
54 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
55 | |||
56 | #ifndef CONFIG_SOFTMMU | ||
57 | if (guest_base != 0) { | ||
58 | - tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true); | ||
59 | + tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, | ||
60 | + guest_base, true, TCG_REG_T1); | ||
61 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); | ||
62 | } | ||
63 | #endif | ||
113 | -- | 64 | -- |
114 | 2.17.1 | 65 | 2.25.1 |
115 | 66 | ||
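The new tcg_out_movi_imm32() above builds a 32-bit constant from sethi plus or. A sketch of the bit split, assuming only that SPARC sethi writes the upper 22 bits of a 32-bit register and clears the rest, which is why the patch masks the remainder with 0x3ff:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t arg = 0xdeadbeef;
    uint32_t ret;

    ret = arg & ~0x3ffu;       /* sethi %hi(arg): upper 22 bits, low 10 zeroed */
    if (arg & 0x3ff) {
        ret |= arg & 0x3ff;    /* or ret, %lo(arg): fill in the low 10 bits */
    }
    assert(ret == arg);
    return 0;
}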
1 | We had two different mechanisms to force a recheck of the tlb. | 1 | We had code for checking for 13 and 21-bit shifted constants, |
---|---|---|---|
2 | but we can do better and allow 32-bit shifted constants. | ||
3 | This is still 2 insns shorter than the full 64-bit sequence. | ||
2 | 4 | ||
3 | Before TLB_RECHECK was introduced, we had a PAGE_WRITE_INV bit | 5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | that would immediately set TLB_INVALID_MASK, which automatically | 6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
5 | means that a second check of the tlb entry fails. | ||
6 | |||
7 | We can use the same mechanism to handle small pages. | ||
8 | Conserve TLB_* bits by removing TLB_RECHECK. | ||
9 | |||
10 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
12 | --- | 8 | --- |
13 | include/exec/cpu-all.h | 5 +-- | 9 | tcg/sparc/tcg-target.c.inc | 12 ++++++------ |
14 | accel/tcg/cputlb.c | 86 +++++++++++------------------------------- | 10 | 1 file changed, 6 insertions(+), 6 deletions(-) |
15 | 2 files changed, 24 insertions(+), 67 deletions(-) | ||
16 | 11 | ||
17 | diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h | 12 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc |
18 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/include/exec/cpu-all.h | 14 | --- a/tcg/sparc/tcg-target.c.inc |
20 | +++ b/include/exec/cpu-all.h | 15 | +++ b/tcg/sparc/tcg-target.c.inc |
21 | @@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env); | 16 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, |
22 | #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2)) | ||
23 | /* Set if TLB entry is an IO callback. */ | ||
24 | #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3)) | ||
25 | -/* Set if TLB entry must have MMU lookup repeated for every access */ | ||
26 | -#define TLB_RECHECK (1 << (TARGET_PAGE_BITS - 4)) | ||
27 | |||
28 | /* Use this mask to check interception with an alignment mask | ||
29 | * in a TCG backend. | ||
30 | */ | ||
31 | -#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \ | ||
32 | - | TLB_RECHECK) | ||
33 | +#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO) | ||
34 | |||
35 | /** | ||
36 | * tlb_hit_page: return true if page aligned @addr is a hit against the | ||
37 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
38 | index XXXXXXX..XXXXXXX 100644 | ||
39 | --- a/accel/tcg/cputlb.c | ||
40 | +++ b/accel/tcg/cputlb.c | ||
41 | @@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, | ||
42 | |||
43 | address = vaddr_page; | ||
44 | if (size < TARGET_PAGE_SIZE) { | ||
45 | - /* | ||
46 | - * Slow-path the TLB entries; we will repeat the MMU check and TLB | ||
47 | - * fill on every access. | ||
48 | - */ | ||
49 | - address |= TLB_RECHECK; | ||
50 | + /* Repeat the MMU check and TLB fill on every access. */ | ||
51 | + address |= TLB_INVALID_MASK; | ||
52 | } | ||
53 | if (attrs.byte_swap) { | ||
54 | /* Force the access through the I/O slow path. */ | ||
55 | @@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, | ||
56 | victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ | ||
57 | (ADDR) & TARGET_PAGE_MASK) | ||
58 | |||
59 | -/* NOTE: this function can trigger an exception */ | ||
60 | -/* NOTE2: the returned address is not exactly the physical address: it | ||
61 | - * is actually a ram_addr_t (in system mode; the user mode emulation | ||
62 | - * version of this function returns a guest virtual address). | ||
63 | +/* | ||
64 | + * Return a ram_addr_t for the virtual address for execution. | ||
65 | + * | ||
66 | + * Return -1 if we can't translate and execute from an entire page | ||
67 | + * of RAM. This will force us to execute by loading and translating | ||
68 | + * one insn at a time, without caching. | ||
69 | + * | ||
70 | + * NOTE: This function will trigger an exception if the page is | ||
71 | + * not executable. | ||
72 | */ | ||
73 | tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) | ||
74 | { | ||
75 | @@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) | ||
76 | tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); | ||
77 | index = tlb_index(env, mmu_idx, addr); | ||
78 | entry = tlb_entry(env, mmu_idx, addr); | ||
79 | + | ||
80 | + if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { | ||
81 | + /* | ||
82 | + * The MMU protection covers a smaller range than a target | ||
83 | + * page, so we must redo the MMU check for every insn. | ||
84 | + */ | ||
85 | + return -1; | ||
86 | + } | ||
87 | } | ||
88 | assert(tlb_hit(entry->addr_code, addr)); | ||
89 | } | ||
90 | |||
91 | - if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) { | ||
92 | - /* | ||
93 | - * Return -1 if we can't translate and execute from an entire | ||
94 | - * page of RAM here, which will cause us to execute by loading | ||
95 | - * and translating one insn at a time, without caching: | ||
96 | - * - TLB_RECHECK: means the MMU protection covers a smaller range | ||
97 | - * than a target page, so we must redo the MMU check every insn | ||
98 | - * - TLB_MMIO: region is not backed by RAM | ||
99 | - */ | ||
100 | + if (unlikely(entry->addr_code & TLB_MMIO)) { | ||
101 | + /* The region is not backed by RAM. */ | ||
102 | return -1; | ||
103 | } | ||
104 | |||
105 | @@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, | ||
106 | } | ||
107 | |||
108 | /* Notice an IO access or a needs-MMU-lookup access */ | ||
109 | - if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) { | ||
110 | + if (unlikely(tlb_addr & TLB_MMIO)) { | ||
111 | /* There's really nothing that can be done to | ||
112 | support this apart from stop-the-world. */ | ||
113 | goto stop_the_world; | ||
114 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
115 | entry = tlb_entry(env, mmu_idx, addr); | ||
116 | } | ||
117 | tlb_addr = code_read ? entry->addr_code : entry->addr_read; | ||
118 | + tlb_addr &= ~TLB_INVALID_MASK; | ||
119 | } | ||
120 | |||
121 | /* Handle an IO access. */ | ||
122 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
123 | if ((addr & (size - 1)) != 0) { | ||
124 | goto do_unaligned_access; | ||
125 | } | ||
126 | - | ||
127 | - if (tlb_addr & TLB_RECHECK) { | ||
128 | - /* | ||
129 | - * This is a TLB_RECHECK access, where the MMU protection | ||
130 | - * covers a smaller range than a target page, and we must | ||
131 | - * repeat the MMU check here. This tlb_fill() call might | ||
132 | - * longjump out if this access should cause a guest exception. | ||
133 | - */ | ||
134 | - tlb_fill(env_cpu(env), addr, size, | ||
135 | - access_type, mmu_idx, retaddr); | ||
136 | - index = tlb_index(env, mmu_idx, addr); | ||
137 | - entry = tlb_entry(env, mmu_idx, addr); | ||
138 | - | ||
139 | - tlb_addr = code_read ? entry->addr_code : entry->addr_read; | ||
140 | - tlb_addr &= ~TLB_RECHECK; | ||
141 | - if (!(tlb_addr & ~TARGET_PAGE_MASK)) { | ||
142 | - /* RAM access */ | ||
143 | - goto do_aligned_access; | ||
144 | - } | ||
145 | - } | ||
146 | - | ||
147 | return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | ||
148 | mmu_idx, addr, retaddr, access_type, op); | ||
149 | } | ||
150 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
151 | return res & MAKE_64BIT_MASK(0, size * 8); | ||
152 | } | ||
153 | |||
154 | - do_aligned_access: | ||
155 | haddr = (void *)((uintptr_t)addr + entry->addend); | ||
156 | switch (op) { | ||
157 | case MO_UB: | ||
158 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
159 | if ((addr & (size - 1)) != 0) { | ||
160 | goto do_unaligned_access; | ||
161 | } | ||
162 | - | ||
163 | - if (tlb_addr & TLB_RECHECK) { | ||
164 | - /* | ||
165 | - * This is a TLB_RECHECK access, where the MMU protection | ||
166 | - * covers a smaller range than a target page, and we must | ||
167 | - * repeat the MMU check here. This tlb_fill() call might | ||
168 | - * longjump out if this access should cause a guest exception. | ||
169 | - */ | ||
170 | - tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, | ||
171 | - mmu_idx, retaddr); | ||
172 | - index = tlb_index(env, mmu_idx, addr); | ||
173 | - entry = tlb_entry(env, mmu_idx, addr); | ||
174 | - | ||
175 | - tlb_addr = tlb_addr_write(entry); | ||
176 | - tlb_addr &= ~TLB_RECHECK; | ||
177 | - if (!(tlb_addr & ~TARGET_PAGE_MASK)) { | ||
178 | - /* RAM access */ | ||
179 | - goto do_aligned_access; | ||
180 | - } | ||
181 | - } | ||
182 | - | ||
183 | io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, | ||
184 | val, addr, retaddr, op); | ||
185 | return; | ||
186 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
187 | return; | 17 | return; |
188 | } | 18 | } |
189 | 19 | ||
190 | - do_aligned_access: | 20 | - /* A 21-bit constant, shifted. */ |
191 | haddr = (void *)((uintptr_t)addr + entry->addend); | 21 | + /* A 32-bit constant, shifted. */ |
192 | switch (op) { | 22 | lsb = ctz64(arg); |
193 | case MO_UB: | 23 | test = (tcg_target_long)arg >> lsb; |
24 | - if (check_fit_tl(test, 13)) { | ||
25 | - tcg_out_movi_imm13(s, ret, test); | ||
26 | - tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX); | ||
27 | - return; | ||
28 | - } else if (lsb > 10 && test == extract64(test, 0, 21)) { | ||
29 | + if (lsb > 10 && test == extract64(test, 0, 21)) { | ||
30 | tcg_out_sethi(s, ret, test << 10); | ||
31 | tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX); | ||
32 | return; | ||
33 | + } else if (test == (uint32_t)test || test == (int32_t)test) { | ||
34 | + tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch); | ||
35 | + tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX); | ||
36 | + return; | ||
37 | } | ||
38 | |||
39 | /* A 64-bit constant decomposed into 2 32-bit pieces. */ | ||
194 | -- | 40 | -- |
195 | 2.17.1 | 41 | 2.25.1 |
196 | 42 | ||
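The cputlb change above reuses TLB_INVALID_MASK for sub-page mappings: setting a flag bit in the TLB comparator makes the page-aligned compare fail, so every access goes back through tlb_fill. A sketch of that mechanism, with assumed values for TARGET_PAGE_BITS and for the flag's bit position:

#include <assert.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 12                               /* assumed */
#define TARGET_PAGE_MASK (~((1u << TARGET_PAGE_BITS) - 1))
#define TLB_INVALID_MASK (1u << (TARGET_PAGE_BITS - 1))   /* assumed slot */

/* tlb_hit(): an exact compare against the page-aligned address. */
static int tlb_hit(uint32_t tlb_addr, uint32_t addr)
{
    return tlb_addr == (addr & TARGET_PAGE_MASK);
}

int main(void)
{
    uint32_t addr = 0x12345678;
    uint32_t entry = addr & TARGET_PAGE_MASK;

    assert(tlb_hit(entry, addr));
    entry |= TLB_INVALID_MASK;      /* sub-page mapping: poison the compare */
    assert(!tlb_hit(entry, addr));  /* next access misses and refills */
    return 0;
}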
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Since 7ecd02a06f8, if patch_reloc fails we restart translation |
---|---|---|---|
2 | with a smaller TB. SPARC had its function signature changed, | ||
3 | but not the logic. Replace assert with return false. | ||
2 | 4 | ||
3 | Temporarily no-op size_memop was introduced to aid the conversion of | 5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
4 | memory_region_dispatch_{read|write} operand "unsigned size" into | 6 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
5 | "MemOp op". | ||
6 | |||
7 | Now that size_memop is implemented, the size is still hard coded, but | ||
8 | expressed as MO_{8|16|32|64}. This is more expressive and avoids size_memop calls. | ||
9 | |||
10 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> | ||
13 | Message-Id: <76dc97273a8eb5e10170ffc16526863df808f487.1566466906.git.tony.nguyen@bt.com> | ||
14 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 7 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
15 | --- | 8 | --- |
16 | hw/s390x/s390-pci-inst.c | 3 +-- | 9 | tcg/sparc/tcg-target.c.inc | 8 ++++++-- |
17 | 1 file changed, 1 insertion(+), 2 deletions(-) | 10 | 1 file changed, 6 insertions(+), 2 deletions(-) |
18 | 11 | ||
19 | diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c | 12 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc |
20 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/hw/s390x/s390-pci-inst.c | 14 | --- a/tcg/sparc/tcg-target.c.inc |
22 | +++ b/hw/s390x/s390-pci-inst.c | 15 | +++ b/tcg/sparc/tcg-target.c.inc |
23 | @@ -XXX,XX +XXX,XX @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, | 16 | @@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type, |
24 | for (i = 0; i < len / 8; i++) { | 17 | |
25 | result = memory_region_dispatch_write(mr, offset + i * 8, | 18 | switch (type) { |
26 | ldq_p(buffer + i * 8), | 19 | case R_SPARC_WDISP16: |
27 | - size_memop(8), | 20 | - assert(check_fit_ptr(pcrel >> 2, 16)); |
28 | - MEMTXATTRS_UNSPECIFIED); | 21 | + if (!check_fit_ptr(pcrel >> 2, 16)) { |
29 | + MO_64, MEMTXATTRS_UNSPECIFIED); | 22 | + return false; |
30 | if (result != MEMTX_OK) { | 23 | + } |
31 | s390_program_interrupt(env, PGM_OPERAND, 6, ra); | 24 | insn &= ~INSN_OFF16(-1); |
32 | return 0; | 25 | insn |= INSN_OFF16(pcrel); |
26 | break; | ||
27 | case R_SPARC_WDISP19: | ||
28 | - assert(check_fit_ptr(pcrel >> 2, 19)); | ||
29 | + if (!check_fit_ptr(pcrel >> 2, 19)) { | ||
30 | + return false; | ||
31 | + } | ||
32 | insn &= ~INSN_OFF19(-1); | ||
33 | insn |= INSN_OFF19(pcrel); | ||
34 | break; | ||
33 | -- | 35 | -- |
34 | 2.17.1 | 36 | 2.25.1 |
35 | 37 | ||
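The patch_reloc change above returns false when a displacement does not fit, letting translation restart with a smaller TB instead of aborting. The fit test itself is just a signed-range check; a sketch of the arithmetic behind a helper like check_fit_ptr(val, bits), with the helper's name and shape assumed for illustration:

#include <assert.h>
#include <stdint.h>

/* True iff val is representable as a signed immediate of `bits` bits. */
static int check_fit(int64_t val, unsigned bits)
{
    int64_t min = -((int64_t)1 << (bits - 1));
    int64_t max = ((int64_t)1 << (bits - 1)) - 1;
    return val >= min && val <= max;
}

int main(void)
{
    assert(check_fit(0x7fff, 16));
    assert(!check_fit(0x8000, 16));   /* one past the signed 16-bit range */
    assert(check_fit(-0x8000, 16));
    return 0;
}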
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
---|---|---|---|
2 | |||
3 | Preparation for collapsing the two byte swaps adjust_endianness and | ||
4 | handle_bswap into the former. | ||
5 | |||
6 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <755b7104410956b743e1f1e9c34ab87db113360f.1566466906.git.tony.nguyen@bt.com> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 2 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 3 | --- |
11 | include/exec/memop.h | 6 ++ | 4 | tcg/sparc/tcg-target.c.inc | 15 +++++++++++++++ |
12 | accel/tcg/cputlb.c | 170 +++++++++++++++++++++---------------------- | 5 | 1 file changed, 15 insertions(+) |
13 | 2 files changed, 87 insertions(+), 89 deletions(-) | ||
14 | 6 | ||
15 | diff --git a/include/exec/memop.h b/include/exec/memop.h | 7 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc |
16 | index XXXXXXX..XXXXXXX 100644 | 8 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/include/exec/memop.h | 9 | --- a/tcg/sparc/tcg-target.c.inc |
18 | +++ b/include/exec/memop.h | 10 | +++ b/tcg/sparc/tcg-target.c.inc |
19 | @@ -XXX,XX +XXX,XX @@ static inline MemOp size_memop(unsigned size) | 11 | @@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type, |
20 | return ctz32(size); | 12 | insn &= ~INSN_OFF19(-1); |
21 | } | 13 | insn |= INSN_OFF19(pcrel); |
22 | |||
23 | +/* Big endianness from MemOp. */ | ||
24 | +static inline bool memop_big_endian(MemOp op) | ||
25 | +{ | ||
26 | + return (op & MO_BSWAP) == MO_BE; | ||
27 | +} | ||
28 | + | ||
29 | #endif | ||
30 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/accel/tcg/cputlb.c | ||
33 | +++ b/accel/tcg/cputlb.c | ||
34 | @@ -XXX,XX +XXX,XX @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size, | ||
35 | |||
36 | static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
37 | int mmu_idx, target_ulong addr, uintptr_t retaddr, | ||
38 | - MMUAccessType access_type, int size) | ||
39 | + MMUAccessType access_type, MemOp op) | ||
40 | { | ||
41 | CPUState *cpu = env_cpu(env); | ||
42 | hwaddr mr_offset; | ||
43 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
44 | qemu_mutex_lock_iothread(); | ||
45 | locked = true; | ||
46 | } | ||
47 | - r = memory_region_dispatch_read(mr, mr_offset, &val, | ||
48 | - size_memop(size) | MO_TE, | ||
49 | - iotlbentry->attrs); | ||
50 | + r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); | ||
51 | if (r != MEMTX_OK) { | ||
52 | hwaddr physaddr = mr_offset + | ||
53 | section->offset_within_address_space - | ||
54 | section->offset_within_region; | ||
55 | |||
56 | - cpu_transaction_failed(cpu, physaddr, addr, size, access_type, | ||
57 | + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, | ||
58 | mmu_idx, iotlbentry->attrs, r, retaddr); | ||
59 | } | ||
60 | if (locked) { | ||
61 | @@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
62 | |||
63 | static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
64 | int mmu_idx, uint64_t val, target_ulong addr, | ||
65 | - uintptr_t retaddr, int size) | ||
66 | + uintptr_t retaddr, MemOp op) | ||
67 | { | ||
68 | CPUState *cpu = env_cpu(env); | ||
69 | hwaddr mr_offset; | ||
70 | @@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, | ||
71 | qemu_mutex_lock_iothread(); | ||
72 | locked = true; | ||
73 | } | ||
74 | - r = memory_region_dispatch_write(mr, mr_offset, val, | ||
75 | - size_memop(size) | MO_TE, | ||
76 | - iotlbentry->attrs); | ||
77 | + r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); | ||
78 | if (r != MEMTX_OK) { | ||
79 | hwaddr physaddr = mr_offset + | ||
80 | section->offset_within_address_space - | ||
81 | section->offset_within_region; | ||
82 | |||
83 | - cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE, | ||
84 | - mmu_idx, iotlbentry->attrs, r, retaddr); | ||
85 | + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), | ||
86 | + MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, | ||
87 | + retaddr); | ||
88 | } | ||
89 | if (locked) { | ||
90 | qemu_mutex_unlock_iothread(); | ||
91 | @@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, | ||
92 | * access type. | ||
93 | */ | ||
94 | |||
95 | -static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian) | ||
96 | +static inline uint64_t handle_bswap(uint64_t val, MemOp op) | ||
97 | { | ||
98 | - if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) { | ||
99 | - switch (size) { | ||
100 | - case 1: return val; | ||
101 | - case 2: return bswap16(val); | ||
102 | - case 4: return bswap32(val); | ||
103 | - case 8: return bswap64(val); | ||
104 | + if ((memop_big_endian(op) && NEED_BE_BSWAP) || | ||
105 | + (!memop_big_endian(op) && NEED_LE_BSWAP)) { | ||
106 | + switch (op & MO_SIZE) { | ||
107 | + case MO_8: return val; | ||
108 | + case MO_16: return bswap16(val); | ||
109 | + case MO_32: return bswap32(val); | ||
110 | + case MO_64: return bswap64(val); | ||
111 | default: | ||
112 | g_assert_not_reached(); | ||
113 | } | ||
114 | @@ -XXX,XX +XXX,XX @@ typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, | ||
115 | |||
116 | static inline uint64_t __attribute__((always_inline)) | ||
117 | load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
118 | - uintptr_t retaddr, size_t size, bool big_endian, bool code_read, | ||
119 | + uintptr_t retaddr, MemOp op, bool code_read, | ||
120 | FullLoadHelper *full_load) | ||
121 | { | ||
122 | uintptr_t mmu_idx = get_mmuidx(oi); | ||
123 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
124 | unsigned a_bits = get_alignment_bits(get_memop(oi)); | ||
125 | void *haddr; | ||
126 | uint64_t res; | ||
127 | + size_t size = memop_size(op); | ||
128 | |||
129 | /* Handle CPU specific unaligned behaviour */ | ||
130 | if (addr & ((1 << a_bits) - 1)) { | ||
131 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
132 | |||
133 | /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */ | ||
134 | res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | ||
135 | - mmu_idx, addr, retaddr, access_type, size); | ||
136 | - return handle_bswap(res, size, big_endian); | ||
137 | + mmu_idx, addr, retaddr, access_type, op); | ||
138 | + return handle_bswap(res, op); | ||
139 | } | ||
140 | |||
141 | /* Handle slow unaligned access (it spans two pages or IO). */ | ||
142 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
143 | r2 = full_load(env, addr2, oi, retaddr); | ||
144 | shift = (addr & (size - 1)) * 8; | ||
145 | |||
146 | - if (big_endian) { | ||
147 | + if (memop_big_endian(op)) { | ||
148 | /* Big-endian combine. */ | ||
149 | res = (r1 << shift) | (r2 >> ((size * 8) - shift)); | ||
150 | } else { | ||
151 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
152 | |||
153 | do_aligned_access: | ||
154 | haddr = (void *)((uintptr_t)addr + entry->addend); | ||
155 | - switch (size) { | ||
156 | - case 1: | ||
157 | + switch (op) { | ||
158 | + case MO_UB: | ||
159 | res = ldub_p(haddr); | ||
160 | break; | 14 | break; |
161 | - case 2: | 15 | + case R_SPARC_13: |
162 | - if (big_endian) { | 16 | + if (!check_fit_ptr(value, 13)) { |
163 | - res = lduw_be_p(haddr); | 17 | + return false; |
164 | - } else { | 18 | + } |
165 | - res = lduw_le_p(haddr); | 19 | + insn &= ~INSN_IMM13(-1); |
166 | - } | 20 | + insn |= INSN_IMM13(value); |
167 | + case MO_BEUW: | ||
168 | + res = lduw_be_p(haddr); | ||
169 | break; | ||
170 | - case 4: | ||
171 | - if (big_endian) { | ||
172 | - res = (uint32_t)ldl_be_p(haddr); | ||
173 | - } else { | ||
174 | - res = (uint32_t)ldl_le_p(haddr); | ||
175 | - } | ||
176 | + case MO_LEUW: | ||
177 | + res = lduw_le_p(haddr); | ||
178 | break; | ||
179 | - case 8: | ||
180 | - if (big_endian) { | ||
181 | - res = ldq_be_p(haddr); | ||
182 | - } else { | ||
183 | - res = ldq_le_p(haddr); | ||
184 | - } | ||
185 | + case MO_BEUL: | ||
186 | + res = (uint32_t)ldl_be_p(haddr); | ||
187 | + break; | 21 | + break; |
188 | + case MO_LEUL: | ||
189 | + res = (uint32_t)ldl_le_p(haddr); | ||
190 | + break; | ||
191 | + case MO_BEQ: | ||
192 | + res = ldq_be_p(haddr); | ||
193 | + break; | ||
194 | + case MO_LEQ: | ||
195 | + res = ldq_le_p(haddr); | ||
196 | break; | ||
197 | default: | 22 | default: |
198 | g_assert_not_reached(); | 23 | g_assert_not_reached(); |
199 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | 24 | } |
200 | static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, | 25 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, |
201 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
202 | { | ||
203 | - return load_helper(env, addr, oi, retaddr, 1, false, false, | ||
204 | - full_ldub_mmu); | ||
205 | + return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); | ||
206 | } | ||
207 | |||
208 | tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, | ||
209 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, | ||
210 | static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, | ||
211 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
212 | { | ||
213 | - return load_helper(env, addr, oi, retaddr, 2, false, false, | ||
214 | + return load_helper(env, addr, oi, retaddr, MO_LEUW, false, | ||
215 | full_le_lduw_mmu); | ||
216 | } | ||
217 | |||
218 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, | ||
219 | static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, | ||
220 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
221 | { | ||
222 | - return load_helper(env, addr, oi, retaddr, 2, true, false, | ||
223 | + return load_helper(env, addr, oi, retaddr, MO_BEUW, false, | ||
224 | full_be_lduw_mmu); | ||
225 | } | ||
226 | |||
227 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, | ||
228 | static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, | ||
229 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
230 | { | ||
231 | - return load_helper(env, addr, oi, retaddr, 4, false, false, | ||
232 | + return load_helper(env, addr, oi, retaddr, MO_LEUL, false, | ||
233 | full_le_ldul_mmu); | ||
234 | } | ||
235 | |||
236 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, | ||
237 | static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, | ||
238 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
239 | { | ||
240 | - return load_helper(env, addr, oi, retaddr, 4, true, false, | ||
241 | + return load_helper(env, addr, oi, retaddr, MO_BEUL, false, | ||
242 | full_be_ldul_mmu); | ||
243 | } | ||
244 | |||
245 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, | ||
246 | uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, | ||
247 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
248 | { | ||
249 | - return load_helper(env, addr, oi, retaddr, 8, false, false, | ||
250 | + return load_helper(env, addr, oi, retaddr, MO_LEQ, false, | ||
251 | helper_le_ldq_mmu); | ||
252 | } | ||
253 | |||
254 | uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, | ||
255 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
256 | { | ||
257 | - return load_helper(env, addr, oi, retaddr, 8, true, false, | ||
258 | + return load_helper(env, addr, oi, retaddr, MO_BEQ, false, | ||
259 | helper_be_ldq_mmu); | ||
260 | } | ||
261 | |||
262 | @@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, | ||
263 | |||
264 | static inline void __attribute__((always_inline)) | ||
265 | store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
266 | - TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian) | ||
267 | + TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) | ||
268 | { | ||
269 | uintptr_t mmu_idx = get_mmuidx(oi); | ||
270 | uintptr_t index = tlb_index(env, mmu_idx, addr); | ||
271 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
272 | const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); | ||
273 | unsigned a_bits = get_alignment_bits(get_memop(oi)); | ||
274 | void *haddr; | ||
275 | + size_t size = memop_size(op); | ||
276 | |||
277 | /* Handle CPU specific unaligned behaviour */ | ||
278 | if (addr & ((1 << a_bits) - 1)) { | ||
279 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
280 | |||
281 | /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */ | ||
282 | io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, | ||
283 | - handle_bswap(val, size, big_endian), | ||
284 | - addr, retaddr, size); | ||
285 | + handle_bswap(val, op), | ||
286 | + addr, retaddr, op); | ||
287 | return; | 26 | return; |
288 | } | 27 | } |
289 | 28 | ||
290 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | 29 | + /* Use the constant pool, if possible. */ |
291 | */ | 30 | + if (!in_prologue && USE_REG_TB) { |
292 | for (i = 0; i < size; ++i) { | 31 | + new_pool_label(s, arg, R_SPARC_13, s->code_ptr, |
293 | uint8_t val8; | 32 | + tcg_tbrel_diff(s, NULL)); |
294 | - if (big_endian) { | 33 | + tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB)); |
295 | + if (memop_big_endian(op)) { | 34 | + return; |
296 | /* Big-endian extract. */ | 35 | + } |
297 | val8 = val >> (((size - 1) * 8) - (i * 8)); | 36 | + |
298 | } else { | 37 | /* A 64-bit constant decomposed into 2 32-bit pieces. */ |
299 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | 38 | if (check_fit_i32(lo, 13)) { |
300 | 39 | hi = (arg - lo) >> 32; | |
301 | do_aligned_access: | ||
302 | haddr = (void *)((uintptr_t)addr + entry->addend); | ||
303 | - switch (size) { | ||
304 | - case 1: | ||
305 | + switch (op) { | ||
306 | + case MO_UB: | ||
307 | stb_p(haddr, val); | ||
308 | break; | ||
309 | - case 2: | ||
310 | - if (big_endian) { | ||
311 | - stw_be_p(haddr, val); | ||
312 | - } else { | ||
313 | - stw_le_p(haddr, val); | ||
314 | - } | ||
315 | + case MO_BEUW: | ||
316 | + stw_be_p(haddr, val); | ||
317 | break; | ||
318 | - case 4: | ||
319 | - if (big_endian) { | ||
320 | - stl_be_p(haddr, val); | ||
321 | - } else { | ||
322 | - stl_le_p(haddr, val); | ||
323 | - } | ||
324 | + case MO_LEUW: | ||
325 | + stw_le_p(haddr, val); | ||
326 | break; | ||
327 | - case 8: | ||
328 | - if (big_endian) { | ||
329 | - stq_be_p(haddr, val); | ||
330 | - } else { | ||
331 | - stq_le_p(haddr, val); | ||
332 | - } | ||
333 | + case MO_BEUL: | ||
334 | + stl_be_p(haddr, val); | ||
335 | + break; | ||
336 | + case MO_LEUL: | ||
337 | + stl_le_p(haddr, val); | ||
338 | + break; | ||
339 | + case MO_BEQ: | ||
340 | + stq_be_p(haddr, val); | ||
341 | + break; | ||
342 | + case MO_LEQ: | ||
343 | + stq_le_p(haddr, val); | ||
344 | break; | ||
345 | default: | ||
346 | g_assert_not_reached(); | ||
347 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
348 | void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, | ||
349 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
350 | { | ||
351 | - store_helper(env, addr, val, oi, retaddr, 1, false); | ||
352 | + store_helper(env, addr, val, oi, retaddr, MO_UB); | ||
353 | } | ||
354 | |||
355 | void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, | ||
356 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
357 | { | ||
358 | - store_helper(env, addr, val, oi, retaddr, 2, false); | ||
359 | + store_helper(env, addr, val, oi, retaddr, MO_LEUW); | ||
360 | } | ||
361 | |||
362 | void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, | ||
363 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
364 | { | ||
365 | - store_helper(env, addr, val, oi, retaddr, 2, true); | ||
366 | + store_helper(env, addr, val, oi, retaddr, MO_BEUW); | ||
367 | } | ||
368 | |||
369 | void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, | ||
370 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
371 | { | ||
372 | - store_helper(env, addr, val, oi, retaddr, 4, false); | ||
373 | + store_helper(env, addr, val, oi, retaddr, MO_LEUL); | ||
374 | } | ||
375 | |||
376 | void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, | ||
377 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
378 | { | ||
379 | - store_helper(env, addr, val, oi, retaddr, 4, true); | ||
380 | + store_helper(env, addr, val, oi, retaddr, MO_BEUL); | ||
381 | } | ||
382 | |||
383 | void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | ||
384 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
385 | { | ||
386 | - store_helper(env, addr, val, oi, retaddr, 8, false); | ||
387 | + store_helper(env, addr, val, oi, retaddr, MO_LEQ); | ||
388 | } | ||
389 | |||
390 | void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | ||
391 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
392 | { | ||
393 | - store_helper(env, addr, val, oi, retaddr, 8, true); | ||
394 | + store_helper(env, addr, val, oi, retaddr, MO_BEQ); | ||
395 | } | ||
396 | |||
397 | /* First set of helpers allows passing in of OI and RETADDR. This makes | ||
398 | @@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, | ||
399 | static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr, | ||
400 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
401 | { | ||
402 | - return load_helper(env, addr, oi, retaddr, 1, false, true, | ||
403 | - full_ldub_cmmu); | ||
404 | + return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu); | ||
405 | } | ||
406 | |||
407 | uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, | ||
408 | @@ -XXX,XX +XXX,XX @@ uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, | ||
409 | static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr, | ||
410 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
411 | { | ||
412 | - return load_helper(env, addr, oi, retaddr, 2, false, true, | ||
413 | + return load_helper(env, addr, oi, retaddr, MO_LEUW, true, | ||
414 | full_le_lduw_cmmu); | ||
415 | } | ||
416 | |||
417 | @@ -XXX,XX +XXX,XX @@ uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr, | ||
418 | static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr, | ||
419 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
420 | { | ||
421 | - return load_helper(env, addr, oi, retaddr, 2, true, true, | ||
422 | + return load_helper(env, addr, oi, retaddr, MO_BEUW, true, | ||
423 | full_be_lduw_cmmu); | ||
424 | } | ||
425 | |||
426 | @@ -XXX,XX +XXX,XX @@ uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr, | ||
427 | static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr, | ||
428 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
429 | { | ||
430 | - return load_helper(env, addr, oi, retaddr, 4, false, true, | ||
431 | + return load_helper(env, addr, oi, retaddr, MO_LEUL, true, | ||
432 | full_le_ldul_cmmu); | ||
433 | } | ||
434 | |||
435 | @@ -XXX,XX +XXX,XX @@ uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr, | ||
436 | static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr, | ||
437 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
438 | { | ||
439 | - return load_helper(env, addr, oi, retaddr, 4, true, true, | ||
440 | + return load_helper(env, addr, oi, retaddr, MO_BEUL, true, | ||
441 | full_be_ldul_cmmu); | ||
442 | } | ||
443 | |||
444 | @@ -XXX,XX +XXX,XX @@ uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr, | ||
445 | uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr, | ||
446 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
447 | { | ||
448 | - return load_helper(env, addr, oi, retaddr, 8, false, true, | ||
449 | + return load_helper(env, addr, oi, retaddr, MO_LEQ, true, | ||
450 | helper_le_ldq_cmmu); | ||
451 | } | ||
452 | |||
453 | uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, | ||
454 | TCGMemOpIdx oi, uintptr_t retaddr) | ||
455 | { | ||
456 | - return load_helper(env, addr, oi, retaddr, 8, true, true, | ||
457 | + return load_helper(env, addr, oi, retaddr, MO_BEQ, true, | ||
458 | helper_be_ldq_cmmu); | ||
459 | } | ||
460 | -- | 40 | -- |
461 | 2.17.1 | 41 | 2.25.1 |
462 | 42 | ||
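memop_big_endian() above is defined as (op & MO_BSWAP) == MO_BE. A sketch of how the MemOp bits compose into values like MO_BEUW, assuming a little-endian host (where MO_LE is 0 and MO_BE equals MO_BSWAP) and illustrative constant values rather than the real memop.h encoding:

#include <assert.h>

enum {
    MO_16    = 1,
    MO_BSWAP = 1 << 3,        /* assumed bit position, sketch only */
    MO_LE    = 0,             /* little-endian host */
    MO_BE    = MO_BSWAP,
    MO_BEUW  = MO_BE | MO_16,
    MO_LEUW  = MO_LE | MO_16,
};

/* memop_big_endian(op): (op & MO_BSWAP) == MO_BE, as in the patch. */
static int memop_big_endian(unsigned op)
{
    return (op & MO_BSWAP) == MO_BE;
}

int main(void)
{
    assert(memop_big_endian(MO_BEUW));
    assert(!memop_big_endian(MO_LEUW));
    return 0;
}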
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | Due to mapping changes, we now rarely place the code_gen_buffer |
---|---|---|---|
2 | near the main executable, which means that direct calls will | ||
3 | now rarely be in range. | ||
2 | 4 | ||
3 | Now that MemOp has been pushed down into the memory API, and | 5 | So, always use indirect calls for tail calls, which allows us to |
4 | callers are encoding endianness, we can collapse byte swaps | 6 | avoid clobbering %o7, and therefore we need not save and restore it. |
5 | along the I/O path into the accelerator and target independent | ||
6 | adjust_endianness. | ||
7 | 7 | ||
8 | Collapsing byte swaps along the I/O path enables additional endian | 8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | inversion logic, e.g. SPARC64 Invert Endian TTE bit, with redundant | ||
10 | byte swaps cancelling out. | ||
11 | |||
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | Suggested-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
15 | Message-Id: <911ff31af11922a9afba9b7ce128af8b8b80f316.1566466906.git.tony.nguyen@bt.com> | ||
16 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
17 | --- | 10 | --- |
18 | accel/tcg/cputlb.c | 42 ++-------------------------- | 11 | tcg/sparc/tcg-target.c.inc | 37 +++++++++++++++++++++++-------------- |
19 | exec.c | 17 +++--------- | 12 | 1 file changed, 23 insertions(+), 14 deletions(-) |
20 | hw/virtio/virtio-pci.c | 10 +++---- | ||
21 | memory.c | 33 ++++++++-------------- | ||
22 | memory_ldst.inc.c | 63 ------------------------------------------ | ||
23 | 5 files changed, 23 insertions(+), 142 deletions(-) | ||
24 | 13 | ||
25 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 14 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc |
26 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
27 | --- a/accel/tcg/cputlb.c | 16 | --- a/tcg/sparc/tcg-target.c.inc |
28 | +++ b/accel/tcg/cputlb.c | 17 | +++ b/tcg/sparc/tcg-target.c.inc |
29 | @@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, | 18 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, |
30 | cpu_loop_exit_atomic(env_cpu(env), retaddr); | 19 | tcg_out_mov(s, TCG_TYPE_I64, rl, tmp); |
31 | } | 20 | } |
32 | 21 | ||
33 | -#ifdef TARGET_WORDS_BIGENDIAN | 22 | +static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest, |
34 | -#define NEED_BE_BSWAP 0 | 23 | + bool in_prologue, bool tail_call) |
35 | -#define NEED_LE_BSWAP 1 | 24 | +{ |
36 | -#else | 25 | + uintptr_t desti = (uintptr_t)dest; |
37 | -#define NEED_BE_BSWAP 1 | 26 | + |
38 | -#define NEED_LE_BSWAP 0 | 27 | + /* Be careful not to clobber %o7 for a tail call. */ |
39 | -#endif | 28 | + tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1, |
40 | - | 29 | + desti & ~0xfff, in_prologue, |
41 | -/* | 30 | + tail_call ? TCG_REG_G2 : TCG_REG_O7); |
42 | - * Byte Swap Helper | 31 | + tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7, |
43 | - * | 32 | + TCG_REG_T1, desti & 0xfff, JMPL); |
44 | - * This should all dead code away depending on the build host and | 33 | +} |
45 | - * access type. | 34 | + |
46 | - */ | 35 | static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest, |
47 | - | 36 | bool in_prologue) |
48 | -static inline uint64_t handle_bswap(uint64_t val, MemOp op) | ||
49 | -{ | ||
50 | - if ((memop_big_endian(op) && NEED_BE_BSWAP) || | ||
51 | - (!memop_big_endian(op) && NEED_LE_BSWAP)) { | ||
52 | - switch (op & MO_SIZE) { | ||
53 | - case MO_8: return val; | ||
54 | - case MO_16: return bswap16(val); | ||
55 | - case MO_32: return bswap32(val); | ||
56 | - case MO_64: return bswap64(val); | ||
57 | - default: | ||
58 | - g_assert_not_reached(); | ||
59 | - } | ||
60 | - } else { | ||
61 | - return val; | ||
62 | - } | ||
63 | -} | ||
64 | - | ||
65 | /* | ||
66 | * Load Helpers | ||
67 | * | ||
68 | @@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, | ||
69 | } | ||
70 | } | ||
71 | |||
72 | - /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read. */ | ||
73 | - res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | ||
74 | - mmu_idx, addr, retaddr, access_type, op); | ||
75 | - return handle_bswap(res, op); | ||
76 | + return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], | ||
77 | + mmu_idx, addr, retaddr, access_type, op); | ||
78 | } | ||
79 | |||
80 | /* Handle slow unaligned access (it spans two pages or IO). */ | ||
81 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | ||
82 | } | ||
83 | } | ||
84 | |||
85 | - /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write. */ | ||
86 | io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, | ||
87 | - handle_bswap(val, op), | ||
88 | - addr, retaddr, op); | ||
89 | + val, addr, retaddr, op); | ||
90 | return; | ||
91 | } | ||
92 | |||
93 | diff --git a/exec.c b/exec.c | ||
94 | index XXXXXXX..XXXXXXX 100644 | ||
95 | --- a/exec.c | ||
96 | +++ b/exec.c | ||
97 | @@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, | ||
98 | l = memory_access_size(mr, l, addr1); | ||
99 | /* XXX: could force current_cpu to NULL to avoid | ||
100 | potential bugs */ | ||
101 | - val = ldn_p(buf, l); | ||
102 | - /* | ||
103 | - * TODO: Merge bswap from ldn_p into memory_region_dispatch_write | ||
104 | - * by using ldn_he_p and dropping MO_TE to get a host-endian value. | ||
105 | - */ | ||
106 | + val = ldn_he_p(buf, l); | ||
107 | result |= memory_region_dispatch_write(mr, addr1, val, | ||
108 | - size_memop(l) | MO_TE, | ||
109 | - attrs); | ||
110 | + size_memop(l), attrs); | ||
111 | } else { | ||
112 | /* RAM case */ | ||
113 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); | ||
114 | @@ -XXX,XX +XXX,XX @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, | ||
115 | /* I/O case */ | ||
116 | release_lock |= prepare_mmio_access(mr); | ||
117 | l = memory_access_size(mr, l, addr1); | ||
118 | - /* | ||
119 | - * TODO: Merge bswap from stn_p into memory_region_dispatch_read | ||
120 | - * by using stn_he_p and dropping MO_TE to get a host-endian value. | ||
121 | - */ | ||
122 | result |= memory_region_dispatch_read(mr, addr1, &val, | ||
123 | - size_memop(l) | MO_TE, attrs); | ||
124 | - stn_p(buf, l, val); | ||
125 | + size_memop(l), attrs); | ||
126 | + stn_he_p(buf, l, val); | ||
127 | } else { | ||
128 | /* RAM case */ | ||
129 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); | ||
130 | diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c | ||
131 | index XXXXXXX..XXXXXXX 100644 | ||
132 | --- a/hw/virtio/virtio-pci.c | ||
133 | +++ b/hw/virtio/virtio-pci.c | ||
134 | @@ -XXX,XX +XXX,XX @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, | ||
135 | val = pci_get_byte(buf); | ||
136 | break; | ||
137 | case 2: | ||
138 | - val = cpu_to_le16(pci_get_word(buf)); | ||
139 | + val = pci_get_word(buf); | ||
140 | break; | ||
141 | case 4: | ||
142 | - val = cpu_to_le32(pci_get_long(buf)); | ||
143 | + val = pci_get_long(buf); | ||
144 | break; | ||
145 | default: | ||
146 | /* As length is under guest control, handle illegal values. */ | ||
147 | return; | ||
148 | } | ||
149 | - /* TODO: Merge bswap from cpu_to_leXX into memory_region_dispatch_write. */ | ||
150 | memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE, | ||
151 | MEMTXATTRS_UNSPECIFIED); | ||
152 | } | ||
153 | @@ -XXX,XX +XXX,XX @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, | ||
154 | /* Make sure caller aligned buf properly */ | ||
155 | assert(!(((uintptr_t)buf) & (len - 1))); | ||
156 | |||
157 | - /* TODO: Merge bswap from leXX_to_cpu into memory_region_dispatch_read. */ | ||
158 | memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE, | ||
159 | MEMTXATTRS_UNSPECIFIED); | ||
160 | switch (len) { | ||
161 | @@ -XXX,XX +XXX,XX @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, | ||
162 | pci_set_byte(buf, val); | ||
163 | break; | ||
164 | case 2: | ||
165 | - pci_set_word(buf, le16_to_cpu(val)); | ||
166 | + pci_set_word(buf, val); | ||
167 | break; | ||
168 | case 4: | ||
169 | - pci_set_long(buf, le32_to_cpu(val)); | ||
170 | + pci_set_long(buf, val); | ||
171 | break; | ||
172 | default: | ||
173 | /* As length is under guest control, handle illegal values. */ | ||
174 | diff --git a/memory.c b/memory.c | ||
175 | index XXXXXXX..XXXXXXX 100644 | ||
176 | --- a/memory.c | ||
177 | +++ b/memory.c | ||
178 | @@ -XXX,XX +XXX,XX @@ static bool memory_region_big_endian(MemoryRegion *mr) | ||
179 | #endif | ||
180 | } | ||
181 | |||
182 | -static bool memory_region_wrong_endianness(MemoryRegion *mr) | ||
183 | +static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op) | ||
184 | { | 37 | { |
185 | -#ifdef TARGET_WORDS_BIGENDIAN | 38 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest, |
186 | - return mr->ops->endianness == DEVICE_LITTLE_ENDIAN; | 39 | if (disp == (int32_t)disp) { |
187 | -#else | 40 | tcg_out32(s, CALL | (uint32_t)disp >> 2); |
188 | - return mr->ops->endianness == DEVICE_BIG_ENDIAN; | 41 | } else { |
189 | -#endif | 42 | - uintptr_t desti = (uintptr_t)dest; |
190 | -} | 43 | - tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1, |
191 | - | 44 | - desti & ~0xfff, in_prologue, TCG_REG_O7); |
192 | -static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size) | 45 | - tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL); |
193 | -{ | 46 | + tcg_out_jmpl_const(s, dest, in_prologue, false); |
194 | - if (memory_region_wrong_endianness(mr)) { | ||
195 | - switch (size) { | ||
196 | - case 1: | ||
197 | + if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) { | ||
198 | + switch (op & MO_SIZE) { | ||
199 | + case MO_8: | ||
200 | break; | ||
201 | - case 2: | ||
202 | + case MO_16: | ||
203 | *data = bswap16(*data); | ||
204 | break; | ||
205 | - case 4: | ||
206 | + case MO_32: | ||
207 | *data = bswap32(*data); | ||
208 | break; | ||
209 | - case 8: | ||
210 | + case MO_64: | ||
211 | *data = bswap64(*data); | ||
212 | break; | ||
213 | default: | ||
214 | - abort(); | ||
215 | + g_assert_not_reached(); | ||
216 | } | ||
217 | } | 47 | } |
218 | } | 48 | } |
219 | @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, | 49 | |
50 | @@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s) | ||
51 | |||
52 | /* Set the retaddr operand. */ | ||
53 | tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); | ||
54 | - /* Set the env operand. */ | ||
55 | - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); | ||
56 | /* Tail call. */ | ||
57 | - tcg_out_call_nodelay(s, qemu_ld_helpers[i], true); | ||
58 | - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); | ||
59 | + tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true); | ||
60 | + /* delay slot -- set the env argument */ | ||
61 | + tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); | ||
220 | } | 62 | } |
221 | 63 | ||
222 | r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); | 64 | for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) { |
223 | - adjust_endianness(mr, pval, size); | 65 | @@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s) |
224 | + adjust_endianness(mr, pval, op); | 66 | if (ra >= TCG_REG_O6) { |
225 | return r; | 67 | tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK, |
68 | TCG_TARGET_CALL_STACK_OFFSET); | ||
69 | - ra = TCG_REG_G1; | ||
70 | + } else { | ||
71 | + tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); | ||
72 | } | ||
73 | - tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); | ||
74 | - /* Set the env operand. */ | ||
75 | - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); | ||
76 | + | ||
77 | /* Tail call. */ | ||
78 | - tcg_out_call_nodelay(s, qemu_st_helpers[i], true); | ||
79 | - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); | ||
80 | + tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true); | ||
81 | + /* delay slot -- set the env argument */ | ||
82 | + tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); | ||
83 | } | ||
226 | } | 84 | } |
227 | 85 | #endif | |
228 | @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, | ||
229 | return MEMTX_DECODE_ERROR; | ||
230 | } | ||
231 | |||
232 | - adjust_endianness(mr, &data, size); | ||
233 | + adjust_endianness(mr, &data, op); | ||
234 | |||
235 | if ((!kvm_eventfds_enabled()) && | ||
236 | memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { | ||
237 | @@ -XXX,XX +XXX,XX @@ void memory_region_add_eventfd(MemoryRegion *mr, | ||
238 | } | ||
239 | |||
240 | if (size) { | ||
241 | - adjust_endianness(mr, &mrfd.data, size); | ||
242 | + adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); | ||
243 | } | ||
244 | memory_region_transaction_begin(); | ||
245 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | ||
246 | @@ -XXX,XX +XXX,XX @@ void memory_region_del_eventfd(MemoryRegion *mr, | ||
247 | unsigned i; | ||
248 | |||
249 | if (size) { | ||
250 | - adjust_endianness(mr, &mrfd.data, size); | ||
251 | + adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); | ||
252 | } | ||
253 | memory_region_transaction_begin(); | ||
254 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | ||
255 | diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c | ||
256 | index XXXXXXX..XXXXXXX 100644 | ||
257 | --- a/memory_ldst.inc.c | ||
258 | +++ b/memory_ldst.inc.c | ||
259 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, | ||
260 | release_lock |= prepare_mmio_access(mr); | ||
261 | |||
262 | /* I/O case */ | ||
263 | - /* TODO: Merge bswap32 into memory_region_dispatch_read. */ | ||
264 | r = memory_region_dispatch_read(mr, addr1, &val, | ||
265 | MO_32 | devend_memop(endian), attrs); | ||
266 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
267 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
268 | - val = bswap32(val); | ||
269 | - } | ||
270 | -#else | ||
271 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
272 | - val = bswap32(val); | ||
273 | - } | ||
274 | -#endif | ||
275 | } else { | ||
276 | /* RAM case */ | ||
277 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
278 | @@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, | ||
279 | release_lock |= prepare_mmio_access(mr); | ||
280 | |||
281 | /* I/O case */ | ||
282 | - /* TODO: Merge bswap64 into memory_region_dispatch_read. */ | ||
283 | r = memory_region_dispatch_read(mr, addr1, &val, | ||
284 | MO_64 | devend_memop(endian), attrs); | ||
285 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
286 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
287 | - val = bswap64(val); | ||
288 | - } | ||
289 | -#else | ||
290 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
291 | - val = bswap64(val); | ||
292 | - } | ||
293 | -#endif | ||
294 | } else { | ||
295 | /* RAM case */ | ||
296 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
297 | @@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, | ||
298 | release_lock |= prepare_mmio_access(mr); | ||
299 | |||
300 | /* I/O case */ | ||
301 | - /* TODO: Merge bswap16 into memory_region_dispatch_read. */ | ||
302 | r = memory_region_dispatch_read(mr, addr1, &val, | ||
303 | MO_16 | devend_memop(endian), attrs); | ||
304 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
305 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
306 | - val = bswap16(val); | ||
307 | - } | ||
308 | -#else | ||
309 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
310 | - val = bswap16(val); | ||
311 | - } | ||
312 | -#endif | ||
313 | } else { | ||
314 | /* RAM case */ | ||
315 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); | ||
316 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, | ||
317 | mr = TRANSLATE(addr, &addr1, &l, true, attrs); | ||
318 | if (l < 4 || !memory_access_is_direct(mr, true)) { | ||
319 | release_lock |= prepare_mmio_access(mr); | ||
320 | - | ||
321 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
322 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
323 | - val = bswap32(val); | ||
324 | - } | ||
325 | -#else | ||
326 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
327 | - val = bswap32(val); | ||
328 | - } | ||
329 | -#endif | ||
330 | - /* TODO: Merge bswap32 into memory_region_dispatch_write. */ | ||
331 | r = memory_region_dispatch_write(mr, addr1, val, | ||
332 | MO_32 | devend_memop(endian), attrs); | ||
333 | } else { | ||
334 | @@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, | ||
335 | mr = TRANSLATE(addr, &addr1, &l, true, attrs); | ||
336 | if (l < 2 || !memory_access_is_direct(mr, true)) { | ||
337 | release_lock |= prepare_mmio_access(mr); | ||
338 | - | ||
339 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
340 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
341 | - val = bswap16(val); | ||
342 | - } | ||
343 | -#else | ||
344 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
345 | - val = bswap16(val); | ||
346 | - } | ||
347 | -#endif | ||
348 | - /* TODO: Merge bswap16 into memory_region_dispatch_write. */ | ||
349 | r = memory_region_dispatch_write(mr, addr1, val, | ||
350 | MO_16 | devend_memop(endian), attrs); | ||
351 | } else { | ||
352 | @@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, | ||
353 | mr = TRANSLATE(addr, &addr1, &l, true, attrs); | ||
354 | if (l < 8 || !memory_access_is_direct(mr, true)) { | ||
355 | release_lock |= prepare_mmio_access(mr); | ||
356 | - | ||
357 | -#if defined(TARGET_WORDS_BIGENDIAN) | ||
358 | - if (endian == DEVICE_LITTLE_ENDIAN) { | ||
359 | - val = bswap64(val); | ||
360 | - } | ||
361 | -#else | ||
362 | - if (endian == DEVICE_BIG_ENDIAN) { | ||
363 | - val = bswap64(val); | ||
364 | - } | ||
365 | -#endif | ||
366 | - /* TODO: Merge bswap64 into memory_region_dispatch_write. */ | ||
367 | r = memory_region_dispatch_write(mr, addr1, val, | ||
368 | MO_64 | devend_memop(endian), attrs); | ||
369 | } else { | ||
370 | -- | 86 | -- |
371 | 2.17.1 | 87 | 2.25.1 |
372 | 88 | ||
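To make the new calling convention concrete: after the hunks above, a
caller encodes size and endianness in a single MemOp and leaves any
byte swap to adjust_endianness(). A minimal sketch, assuming mr, addr,
endian and attrs are in scope as in the memory_ldst.inc.c hunks:

    uint64_t val;
    /* 32-bit read in the device's declared endianness; the value is
     * swapped only if the requested MemOp and the device disagree. */
    MemTxResult r = memory_region_dispatch_read(mr, addr, &val,
                                                MO_32 | devend_memop(endian),
                                                attrs);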
1 | We are currently passing the size of the full write to | 1 | This is roughly the opposite of the other tcg hosts: here
---|---|---|---|
2 | the tlb_fill for the second page. Instead pass the real | 2 | we get (normal) alignment checks for free via host SIGBUS and
3 | size of the write to that page. | 3 | must add code to support unaligned accesses.
4 | 4 | ||
5 | This argument is unused within all tlb_fill, except to be | 5 | This inline code expansion is somewhat large, but it takes quite |
6 | logged via tracing, so in practice this makes no difference. | 6 | a few instructions to make a function call to a helper anyway. |
7 | 7 | ||
8 | But in a moment we'll need the value of size2 for watchpoints, | 8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | and if we've computed the value we might as well use it. | ||
10 | |||
11 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
12 | Reviewed-by: David Hildenbrand <david@redhat.com> | ||
13 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
14 | --- | 10 | --- |
15 | accel/tcg/cputlb.c | 5 ++++- | 11 | tcg/sparc/tcg-target.c.inc | 219 +++++++++++++++++++++++++++++++++++-- |
16 | 1 file changed, 4 insertions(+), 1 deletion(-) | 12 | 1 file changed, 211 insertions(+), 8 deletions(-) |
17 | 13 | ||
18 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | 14 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc |
19 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/accel/tcg/cputlb.c | 16 | --- a/tcg/sparc/tcg-target.c.inc |
21 | +++ b/accel/tcg/cputlb.c | 17 | +++ b/tcg/sparc/tcg-target.c.inc |
22 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | 18 | @@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_oarg_regs[] = { |
23 | uintptr_t index2; | 19 | #define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00)) |
24 | CPUTLBEntry *entry2; | 20 | #define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10)) |
25 | target_ulong page2, tlb_addr2; | 21 | #define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01)) |
26 | + size_t size2; | 22 | +#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11)) |
27 | + | 23 | #define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05)) |
28 | do_unaligned_access: | 24 | #define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02)) |
29 | /* | 25 | #define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12)) |
30 | * Ensure the second page is in the TLB. Note that the first page | 26 | @@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s) |
31 | @@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, | 27 | tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); |
32 | * cannot evict the first. | 28 | } |
33 | */ | 29 | } |
34 | page2 = (addr + size) & TARGET_PAGE_MASK; | 30 | +#else |
35 | + size2 = (addr + size) & ~TARGET_PAGE_MASK; | 31 | +static const tcg_insn_unit *qemu_unalign_ld_trampoline; |
36 | index2 = tlb_index(env, mmu_idx, page2); | 32 | +static const tcg_insn_unit *qemu_unalign_st_trampoline; |
37 | entry2 = tlb_entry(env, mmu_idx, page2); | 33 | + |
38 | tlb_addr2 = tlb_addr_write(entry2); | 34 | +static void build_trampolines(TCGContext *s) |
39 | if (!tlb_hit_page(tlb_addr2, page2) | 35 | +{ |
40 | && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, | 36 | + for (int ld = 0; ld < 2; ++ld) { |
41 | page2 & TARGET_PAGE_MASK)) { | 37 | + void *helper; |
42 | - tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE, | 38 | + |
43 | + tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, | 39 | + while ((uintptr_t)s->code_ptr & 15) { |
44 | mmu_idx, retaddr); | 40 | + tcg_out_nop(s); |
45 | } | 41 | + } |
42 | + | ||
43 | + if (ld) { | ||
44 | + helper = helper_unaligned_ld; | ||
45 | + qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr); | ||
46 | + } else { | ||
47 | + helper = helper_unaligned_st; | ||
48 | + qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr); | ||
49 | + } | ||
50 | + | ||
51 | + if (!SPARC64 && TARGET_LONG_BITS == 64) { | ||
52 | + /* Install the high part of the address. */ | ||
53 | + tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX); | ||
54 | + } | ||
55 | + | ||
56 | + /* Tail call. */ | ||
57 | + tcg_out_jmpl_const(s, helper, true, true); | ||
58 | + /* delay slot -- set the env argument */ | ||
59 | + tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0); | ||
60 | + } | ||
61 | +} | ||
62 | #endif | ||
63 | |||
64 | /* Generate global QEMU prologue and epilogue code */ | ||
65 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
66 | /* delay slot */ | ||
67 | tcg_out_movi_imm13(s, TCG_REG_O0, 0); | ||
68 | |||
69 | -#ifdef CONFIG_SOFTMMU | ||
70 | build_trampolines(s); | ||
71 | -#endif | ||
72 | } | ||
73 | |||
74 | static void tcg_out_nop_fill(tcg_insn_unit *p, int count) | ||
75 | @@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, | ||
76 | static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = { | ||
77 | [MO_UB] = LDUB, | ||
78 | [MO_SB] = LDSB, | ||
79 | + [MO_UB | MO_LE] = LDUB, | ||
80 | + [MO_SB | MO_LE] = LDSB, | ||
81 | |||
82 | [MO_BEUW] = LDUH, | ||
83 | [MO_BESW] = LDSH, | ||
84 | [MO_BEUL] = LDUW, | ||
85 | [MO_BESL] = LDSW, | ||
86 | [MO_BEUQ] = LDX, | ||
87 | + [MO_BESQ] = LDX, | ||
88 | |||
89 | [MO_LEUW] = LDUH_LE, | ||
90 | [MO_LESW] = LDSH_LE, | ||
91 | [MO_LEUL] = LDUW_LE, | ||
92 | [MO_LESL] = LDSW_LE, | ||
93 | [MO_LEUQ] = LDX_LE, | ||
94 | + [MO_LESQ] = LDX_LE, | ||
95 | }; | ||
96 | |||
97 | static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = { | ||
98 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, | ||
99 | MemOpIdx oi, bool is_64) | ||
100 | { | ||
101 | MemOp memop = get_memop(oi); | ||
102 | + tcg_insn_unit *label_ptr; | ||
103 | + | ||
104 | #ifdef CONFIG_SOFTMMU | ||
105 | unsigned memi = get_mmuidx(oi); | ||
106 | TCGReg addrz, param; | ||
107 | const tcg_insn_unit *func; | ||
108 | - tcg_insn_unit *label_ptr; | ||
109 | |||
110 | addrz = tcg_out_tlb_load(s, addr, memi, memop, | ||
111 | offsetof(CPUTLBEntry, addr_read)); | ||
112 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, | ||
113 | |||
114 | *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); | ||
115 | #else | ||
116 | + TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0); | ||
117 | + unsigned a_bits = get_alignment_bits(memop); | ||
118 | + unsigned s_bits = memop & MO_SIZE; | ||
119 | + unsigned t_bits; | ||
120 | + | ||
121 | if (SPARC64 && TARGET_LONG_BITS == 32) { | ||
122 | tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); | ||
123 | addr = TCG_REG_T1; | ||
124 | } | ||
125 | - tcg_out_ldst_rr(s, data, addr, | ||
126 | - (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), | ||
127 | + | ||
128 | + /* | ||
129 | + * Normal case: alignment equal to access size. | ||
130 | + */ | ||
131 | + if (a_bits == s_bits) { | ||
132 | + tcg_out_ldst_rr(s, data, addr, index, | ||
133 | + qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); | ||
134 | + return; | ||
135 | + } | ||
136 | + | ||
137 | + /* | ||
138 | + * Test for at least natural alignment, and assume most accesses | ||
139 | + * will be aligned -- perform a straight load in the delay slot. | ||
140 | + * This is required to preserve atomicity for aligned accesses. | ||
141 | + */ | ||
142 | + t_bits = MAX(a_bits, s_bits); | ||
143 | + tcg_debug_assert(t_bits < 13); | ||
144 | + tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC); | ||
145 | + | ||
146 | + /* beq,a,pt %icc, label */ | ||
147 | + label_ptr = s->code_ptr; | ||
148 | + tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0); | ||
149 | + /* delay slot */ | ||
150 | + tcg_out_ldst_rr(s, data, addr, index, | ||
151 | qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); | ||
152 | + | ||
153 | + if (a_bits >= s_bits) { | ||
154 | + /* | ||
155 | + * Overalignment: A successful alignment test will perform the memory | ||
156 | + * operation in the delay slot, and failure need only invoke the | ||
157 | + * handler for SIGBUS. | ||
158 | + */ | ||
159 | + TCGReg arg_low = TCG_REG_O1 + (!SPARC64 && TARGET_LONG_BITS == 64); | ||
160 | + tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false); | ||
161 | + /* delay slot -- move to low part of argument reg */ | ||
162 | + tcg_out_mov_delay(s, arg_low, addr); | ||
163 | + } else { | ||
164 | + /* Underalignment: load by pieces of minimum alignment. */ | ||
165 | + int ld_opc, a_size, s_size, i; | ||
166 | + | ||
167 | + /* | ||
168 | + * Force full address into T1 early; avoids problems with | ||
169 | + * overlap between @addr and @data. | ||
170 | + */ | ||
171 | + tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD); | ||
172 | + | ||
173 | + a_size = 1 << a_bits; | ||
174 | + s_size = 1 << s_bits; | ||
175 | + if ((memop & MO_BSWAP) == MO_BE) { | ||
176 | + ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)]; | ||
177 | + tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc); | ||
178 | + ld_opc = qemu_ld_opc[a_bits | MO_BE]; | ||
179 | + for (i = a_size; i < s_size; i += a_size) { | ||
180 | + tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc); | ||
181 | + tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX); | ||
182 | + tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); | ||
183 | + } | ||
184 | + } else if (a_bits == 0) { | ||
185 | + ld_opc = LDUB; | ||
186 | + tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc); | ||
187 | + for (i = a_size; i < s_size; i += a_size) { | ||
188 | + if ((memop & MO_SIGN) && i == s_size - a_size) { | ||
189 | + ld_opc = LDSB; | ||
190 | + } | ||
191 | + tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc); | ||
192 | + tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX); | ||
193 | + tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); | ||
194 | + } | ||
195 | + } else { | ||
196 | + ld_opc = qemu_ld_opc[a_bits | MO_LE]; | ||
197 | + tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc); | ||
198 | + for (i = a_size; i < s_size; i += a_size) { | ||
199 | + tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD); | ||
200 | + if ((memop & MO_SIGN) && i == s_size - a_size) { | ||
201 | + ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN]; | ||
202 | + } | ||
203 | + tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc); | ||
204 | + tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX); | ||
205 | + tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR); | ||
206 | + } | ||
207 | + } | ||
208 | + } | ||
209 | + | ||
210 | + *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); | ||
211 | #endif /* CONFIG_SOFTMMU */ | ||
212 | } | ||
213 | |||
214 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, | ||
215 | MemOpIdx oi) | ||
216 | { | ||
217 | MemOp memop = get_memop(oi); | ||
218 | + tcg_insn_unit *label_ptr; | ||
219 | + | ||
220 | #ifdef CONFIG_SOFTMMU | ||
221 | unsigned memi = get_mmuidx(oi); | ||
222 | TCGReg addrz, param; | ||
223 | const tcg_insn_unit *func; | ||
224 | - tcg_insn_unit *label_ptr; | ||
225 | |||
226 | addrz = tcg_out_tlb_load(s, addr, memi, memop, | ||
227 | offsetof(CPUTLBEntry, addr_write)); | ||
228 | @@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, | ||
229 | |||
230 | *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); | ||
231 | #else | ||
232 | + TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0); | ||
233 | + unsigned a_bits = get_alignment_bits(memop); | ||
234 | + unsigned s_bits = memop & MO_SIZE; | ||
235 | + unsigned t_bits; | ||
236 | + | ||
237 | if (SPARC64 && TARGET_LONG_BITS == 32) { | ||
238 | tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); | ||
239 | addr = TCG_REG_T1; | ||
240 | } | ||
241 | - tcg_out_ldst_rr(s, data, addr, | ||
242 | - (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), | ||
243 | + | ||
244 | + /* | ||
245 | + * Normal case: alignment equal to access size. | ||
246 | + */ | ||
247 | + if (a_bits == s_bits) { | ||
248 | + tcg_out_ldst_rr(s, data, addr, index, | ||
249 | + qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); | ||
250 | + return; | ||
251 | + } | ||
252 | + | ||
253 | + /* | ||
254 | + * Test for at least natural alignment, and assume most accesses | ||
255 | + * will be aligned -- perform a straight store in the delay slot. | ||
256 | + * This is required to preserve atomicity for aligned accesses. | ||
257 | + */ | ||
258 | + t_bits = MAX(a_bits, s_bits); | ||
259 | + tcg_debug_assert(t_bits < 13); | ||
260 | + tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC); | ||
261 | + | ||
262 | + /* beq,a,pt %icc, label */ | ||
263 | + label_ptr = s->code_ptr; | ||
264 | + tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0); | ||
265 | + /* delay slot */ | ||
266 | + tcg_out_ldst_rr(s, data, addr, index, | ||
267 | qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); | ||
268 | + | ||
269 | + if (a_bits >= s_bits) { | ||
270 | + /* | ||
271 | + * Overalignment: A successful alignment test will perform the memory | ||
272 | + * operation in the delay slot, and failure need only invoke the | ||
273 | + * handler for SIGBUS. | ||
274 | + */ | ||
275 | + TCGReg arg_low = TCG_REG_O1 + (!SPARC64 && TARGET_LONG_BITS == 64); | ||
276 | + tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false); | ||
277 | + /* delay slot -- move to low part of argument reg */ | ||
278 | + tcg_out_mov_delay(s, arg_low, addr); | ||
279 | + } else { | ||
280 | + /* Underalignment: store by pieces of minimum alignment. */ | ||
281 | + int st_opc, a_size, s_size, i; | ||
282 | + | ||
283 | + /* | ||
284 | + * Force full address into T1 early; avoids problems with | ||
285 | + * overlap between @addr and @data. | ||
286 | + */ | ||
287 | + tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD); | ||
288 | + | ||
289 | + a_size = 1 << a_bits; | ||
290 | + s_size = 1 << s_bits; | ||
291 | + if ((memop & MO_BSWAP) == MO_BE) { | ||
292 | + st_opc = qemu_st_opc[a_bits | MO_BE]; | ||
293 | + for (i = 0; i < s_size; i += a_size) { | ||
294 | + TCGReg d = data; | ||
295 | + int shift = (s_size - a_size - i) * 8; | ||
296 | + if (shift) { | ||
297 | + d = TCG_REG_T2; | ||
298 | + tcg_out_arithi(s, d, data, shift, SHIFT_SRLX); | ||
299 | + } | ||
300 | + tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc); | ||
301 | + } | ||
302 | + } else if (a_bits == 0) { | ||
303 | + tcg_out_ldst(s, data, TCG_REG_T1, 0, STB); | ||
304 | + for (i = 1; i < s_size; i++) { | ||
305 | + tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX); | ||
306 | + tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB); | ||
307 | + } | ||
308 | + } else { | ||
309 | + /* Note that ST*A with immediate asi must use indexed address. */ | ||
310 | + st_opc = qemu_st_opc[a_bits + MO_LE]; | ||
311 | + tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc); | ||
312 | + for (i = a_size; i < s_size; i += a_size) { | ||
313 | + tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX); | ||
314 | + tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD); | ||
315 | + tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc); | ||
316 | + } | ||
317 | + } | ||
318 | + } | ||
319 | + | ||
320 | + *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); | ||
321 | #endif /* CONFIG_SOFTMMU */ | ||
322 | } | ||
46 | 323 | ||
47 | -- | 324 | -- |
48 | 2.17.1 | 325 | 2.25.1 |
49 | 326 | ||
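A worked instance of the size2 computation in the cputlb.c hunk above,
assuming a 4 KiB page (TARGET_PAGE_MASK == ~0xfff) purely for illustration:

    target_ulong addr = 0xffe;  /* store begins 2 bytes before page end */
    size_t size = 8;            /* 8-byte store crosses into the next page */
    target_ulong page2 = (addr + size) & TARGET_PAGE_MASK;  /* 0x1000 */
    size_t size2 = (addr + size) & ~TARGET_PAGE_MASK;       /* 6 */

so tlb_fill() for the second page is now passed the 6 bytes that actually
land on that page rather than the full 8-byte width of the store.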
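In C terms, the alignment test emitted by tcg_out_qemu_ld/st above has
this shape (a sketch reusing the patch's variable names; the real test,
branch and delay-slot scheduling happen in generated SPARC code):

    unsigned t_bits = MAX(a_bits, s_bits);  /* at least natural alignment */
    if (addr & ((1u << t_bits) - 1)) {
        /* unaligned: tail-call the SIGBUS trampoline (user-only) */
    } else {
        /* aligned: a single load/store, issued in the branch delay slot */
    }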
1 | From: Tony Nguyen <tony.nguyen@bt.com> | 1 | A mostly generic test for unaligned access raising SIGBUS. |
---|---|---|---|
2 | 2 | ||
3 | Convert memory_region_dispatch_{read|write} operand "unsigned size" | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | into a "MemOp op". | ||
5 | |||
6 | Signed-off-by: Tony Nguyen <tony.nguyen@bt.com> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-Id: <1dd82df5801866743f838f1d046475115a1d32da.1566466906.git.tony.nguyen@bt.com> | ||
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 4 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> |
10 | --- | 5 | --- |
11 | include/exec/memop.h | 22 +++++++++++++++------- | 6 | tests/tcg/multiarch/sigbus.c | 68 ++++++++++++++++++++++++++++++++++++ |
12 | include/exec/memory.h | 9 +++++---- | 7 | 1 file changed, 68 insertions(+) |
13 | memory.c | 7 +++++-- | 8 | create mode 100644 tests/tcg/multiarch/sigbus.c |
14 | 3 files changed, 25 insertions(+), 13 deletions(-) | ||
15 | 9 | ||
16 | diff --git a/include/exec/memop.h b/include/exec/memop.h | 10 | diff --git a/tests/tcg/multiarch/sigbus.c b/tests/tcg/multiarch/sigbus.c |
17 | index XXXXXXX..XXXXXXX 100644 | 11 | new file mode 100644 |
18 | --- a/include/exec/memop.h | 12 | index XXXXXXX..XXXXXXX |
19 | +++ b/include/exec/memop.h | 13 | --- /dev/null |
14 | +++ b/tests/tcg/multiarch/sigbus.c | ||
20 | @@ -XXX,XX +XXX,XX @@ | 15 | @@ -XXX,XX +XXX,XX @@ |
21 | #ifndef MEMOP_H | 16 | +#define _GNU_SOURCE 1 |
22 | #define MEMOP_H | ||
23 | |||
24 | +#include "qemu/host-utils.h" | ||
25 | + | 17 | + |
26 | typedef enum MemOp { | 18 | +#include <assert.h> |
27 | MO_8 = 0, | 19 | +#include <stdlib.h> |
28 | MO_16 = 1, | 20 | +#include <signal.h> |
29 | @@ -XXX,XX +XXX,XX @@ typedef enum MemOp { | 21 | +#include <endian.h> |
30 | MO_SSIZE = MO_SIZE | MO_SIGN, | 22 | + |
31 | } MemOp; | 23 | + |
32 | 24 | +unsigned long long x = 0x8877665544332211ull; | |
33 | -/* Size in bytes to MemOp. */ | 25 | +void * volatile p = (void *)&x + 1; |
34 | -static inline unsigned size_memop(unsigned size) | 26 | + |
35 | +/* MemOp to size in bytes. */ | 27 | +void sigbus(int sig, siginfo_t *info, void *uc) |
36 | +static inline unsigned memop_size(MemOp op) | 28 | +{ |
37 | { | 29 | + assert(sig == SIGBUS); |
38 | - /* | 30 | + assert(info->si_signo == SIGBUS); |
39 | - * FIXME: No-op to aid conversion of memory_region_dispatch_{read|write} | 31 | +#ifdef BUS_ADRALN |
40 | - * "unsigned size" operand into a "MemOp op". | 32 | + assert(info->si_code == BUS_ADRALN); |
41 | - */ | 33 | +#endif |
42 | - return size; | 34 | + assert(info->si_addr == p); |
43 | + return 1 << (op & MO_SIZE); | 35 | + exit(EXIT_SUCCESS); |
44 | +} | 36 | +} |
45 | + | 37 | + |
46 | +/* Size in bytes to MemOp. */ | 38 | +int main() |
47 | +static inline MemOp size_memop(unsigned size) | ||
48 | +{ | 39 | +{ |
49 | +#ifdef CONFIG_DEBUG_TCG | 40 | + struct sigaction sa = { |
50 | + /* Power of 2 up to 8. */ | 41 | + .sa_sigaction = sigbus, |
51 | + assert((size & (size - 1)) == 0 && size >= 1 && size <= 8); | 42 | + .sa_flags = SA_SIGINFO |
43 | + }; | ||
44 | + int allow_fail = 0; | ||
45 | + int tmp; | ||
46 | + | ||
47 | + tmp = sigaction(SIGBUS, &sa, NULL); | ||
48 | + assert(tmp == 0); | ||
49 | + | ||
50 | + /* | ||
51 | + * Select an operation that's likely to enforce alignment. | ||
52 | + * On many guests that support unaligned accesses by default, | ||
53 | + * this is often an atomic operation. | ||
54 | + */ | ||
55 | +#if defined(__aarch64__) | ||
56 | + asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory"); | ||
57 | +#elif defined(__alpha__) | ||
58 | + asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory"); | ||
59 | +#elif defined(__arm__) | ||
60 | + asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory"); | ||
61 | +#elif defined(__powerpc__) | ||
62 | + asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory"); | ||
63 | +#elif defined(__riscv_atomic) | ||
64 | + asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory"); | ||
65 | +#else | ||
66 | + /* No insn known to fault unaligned -- try for a straight load. */ | ||
67 | + allow_fail = 1; | ||
68 | + tmp = *(volatile int *)p; | ||
52 | +#endif | 69 | +#endif |
53 | + return ctz32(size); | ||
54 | } | ||
55 | |||
56 | #endif | ||
57 | diff --git a/include/exec/memory.h b/include/exec/memory.h | ||
58 | index XXXXXXX..XXXXXXX 100644 | ||
59 | --- a/include/exec/memory.h | ||
60 | +++ b/include/exec/memory.h | ||
61 | @@ -XXX,XX +XXX,XX @@ | ||
62 | #include "exec/cpu-common.h" | ||
63 | #include "exec/hwaddr.h" | ||
64 | #include "exec/memattrs.h" | ||
65 | +#include "exec/memop.h" | ||
66 | #include "exec/ramlist.h" | ||
67 | #include "qemu/bswap.h" | ||
68 | #include "qemu/queue.h" | ||
69 | @@ -XXX,XX +XXX,XX @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner); | ||
70 | * @mr: #MemoryRegion to access | ||
71 | * @addr: address within that region | ||
72 | * @pval: pointer to uint64_t which the data is written to | ||
73 | - * @size: size of the access in bytes | ||
74 | + * @op: size, sign, and endianness of the memory operation | ||
75 | * @attrs: memory transaction attributes to use for the access | ||
76 | */ | ||
77 | MemTxResult memory_region_dispatch_read(MemoryRegion *mr, | ||
78 | hwaddr addr, | ||
79 | uint64_t *pval, | ||
80 | - unsigned size, | ||
81 | + MemOp op, | ||
82 | MemTxAttrs attrs); | ||
83 | /** | ||
84 | * memory_region_dispatch_write: perform a write directly to the specified | ||
85 | @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, | ||
86 | * @mr: #MemoryRegion to access | ||
87 | * @addr: address within that region | ||
88 | * @data: data to write | ||
89 | - * @size: size of the access in bytes | ||
90 | + * @op: size, sign, and endianness of the memory operation | ||
91 | * @attrs: memory transaction attributes to use for the access | ||
92 | */ | ||
93 | MemTxResult memory_region_dispatch_write(MemoryRegion *mr, | ||
94 | hwaddr addr, | ||
95 | uint64_t data, | ||
96 | - unsigned size, | ||
97 | + MemOp op, | ||
98 | MemTxAttrs attrs); | ||
99 | |||
100 | /** | ||
101 | diff --git a/memory.c b/memory.c | ||
102 | index XXXXXXX..XXXXXXX 100644 | ||
103 | --- a/memory.c | ||
104 | +++ b/memory.c | ||
105 | @@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, | ||
106 | MemTxResult memory_region_dispatch_read(MemoryRegion *mr, | ||
107 | hwaddr addr, | ||
108 | uint64_t *pval, | ||
109 | - unsigned size, | ||
110 | + MemOp op, | ||
111 | MemTxAttrs attrs) | ||
112 | { | ||
113 | + unsigned size = memop_size(op); | ||
114 | MemTxResult r; | ||
115 | |||
116 | if (!memory_region_access_valid(mr, addr, size, false, attrs)) { | ||
117 | @@ -XXX,XX +XXX,XX @@ static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr, | ||
118 | MemTxResult memory_region_dispatch_write(MemoryRegion *mr, | ||
119 | hwaddr addr, | ||
120 | uint64_t data, | ||
121 | - unsigned size, | ||
122 | + MemOp op, | ||
123 | MemTxAttrs attrs) | ||
124 | { | ||
125 | + unsigned size = memop_size(op); | ||
126 | + | 70 | + |
127 | if (!memory_region_access_valid(mr, addr, size, true, attrs)) { | 71 | + assert(allow_fail); |
128 | unassigned_mem_write(mr, addr, data, size); | 72 | + |
129 | return MEMTX_DECODE_ERROR; | 73 | + /* |
74 | + * We didn't see a signal. | ||
75 | + * We might as well validate the unaligned load worked. | ||
76 | + */ | ||
77 | + if (BYTE_ORDER == LITTLE_ENDIAN) { | ||
78 | + assert(tmp == 0x55443322); | ||
79 | + } else { | ||
80 | + assert(tmp == 0x77665544); | ||
81 | + } | ||
82 | + return EXIT_SUCCESS; | ||
83 | +} | ||
130 | -- | 84 | -- |
131 | 2.17.1 | 85 | 2.25.1 |
132 | 86 | ||
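Two quick cross-checks on the hunks above. The new helpers round-trip a
power-of-two size as intended, e.g.:

    MemOp op = size_memop(4);      /* ctz32(4) == 2, i.e. MO_32 */
    unsigned sz = memop_size(op);  /* 1 << (MO_32 & MO_SIZE) == 4 */

And sigbus.c is self-contained, so it can also be tried natively with any
C compiler (a hedged example, outside the usual tests/tcg harness):

    cc -o sigbus tests/tcg/multiarch/sigbus.c && ./sigbus

On hosts where the chosen insn enforces alignment, the test exits through
the SIGBUS handler; elsewhere it falls back to checking the bytes the
unaligned load returned.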