The following changes since commit 36eae3a732a1f2aa81391e871ac0e9bb3233e7d7:

  Merge remote-tracking branch 'remotes/dgilbert-gitlab/tags/pull-migration-20220302b' into staging (2022-03-02 20:55:48 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20220303

for you to fetch changes up to f23e6de25c31cadd9a3b7122f9384e6b259ce37f:

  tcg/loongarch64: Support TCG_TARGET_SIGNED_ADDR32 (2022-03-03 10:47:20 -1000)

----------------------------------------------------------------
Reorder do_constant_folding_cond test to satisfy valgrind.
Fix value of MAX_OPC_PARAM_IARGS.
Add opcodes for vector nand, nor, eqv.
Support vector nand, nor, eqv on PPC and S390X hosts.
Support AVX512VL, AVX512BW, AVX512DQ, and AVX512VBMI2.
Support 32-bit guest addresses as signed values.

----------------------------------------------------------------
Alex Bennée (1):
      tcg/optimize: only read val after const check

Richard Henderson (28):
      tcg: Add opcodes for vector nand, nor, eqv
      tcg/ppc: Implement vector NAND, NOR, EQV
      tcg/s390x: Implement vector NAND, NOR, EQV
      tcg/i386: Detect AVX512
      tcg/i386: Add tcg_out_evex_opc
      tcg/i386: Use tcg_can_emit_vec_op in expand_vec_cmp_noinv
      tcg/i386: Implement avx512 variable shifts
      tcg/i386: Implement avx512 scalar shift
      tcg/i386: Implement avx512 immediate sari shift
      tcg/i386: Implement avx512 immediate rotate
      tcg/i386: Implement avx512 variable rotate
      tcg/i386: Support avx512vbmi2 vector shift-double instructions
      tcg/i386: Expand vector word rotate as avx512vbmi2 shift-double
      tcg/i386: Remove rotls_vec from tcg_target_op_def
      tcg/i386: Expand scalar rotate with avx512 insns
      tcg/i386: Implement avx512 min/max/abs
      tcg/i386: Implement avx512 multiply
      tcg/i386: Implement more logical operations for avx512
      tcg/i386: Implement bitsel for avx512
      tcg: Add TCG_TARGET_SIGNED_ADDR32
      accel/tcg: Split out g2h_tlbe
      accel/tcg: Support TCG_TARGET_SIGNED_ADDR32 for softmmu
      accel/tcg: Add guest_base_signed_addr32 for user-only
      linux-user: Support TCG_TARGET_SIGNED_ADDR32
      tcg/aarch64: Support TCG_TARGET_SIGNED_ADDR32
      tcg/mips: Support TCG_TARGET_SIGNED_ADDR32
      tcg/riscv: Support TCG_TARGET_SIGNED_ADDR32
      tcg/loongarch64: Support TCG_TARGET_SIGNED_ADDR32

Ziqiao Kong (1):
      tcg: Set MAX_OPC_PARAM_IARGS to 7

 include/exec/cpu-all.h            |  20 +-
 include/exec/cpu_ldst.h           |   3 +-
 include/qemu/cpuid.h              |  20 +-
 include/tcg/tcg-opc.h             |   3 +
 include/tcg/tcg.h                 |   5 +-
 tcg/aarch64/tcg-target-sa32.h     |   7 +
 tcg/aarch64/tcg-target.h          |   3 +
 tcg/arm/tcg-target-sa32.h         |   1 +
 tcg/arm/tcg-target.h              |   3 +
 tcg/i386/tcg-target-con-set.h     |   1 +
 tcg/i386/tcg-target-sa32.h        |   1 +
 tcg/i386/tcg-target.h             |  17 +-
 tcg/i386/tcg-target.opc.h         |   3 +
 tcg/loongarch64/tcg-target-sa32.h |   1 +
 tcg/mips/tcg-target-sa32.h        |   9 +
 tcg/ppc/tcg-target-sa32.h         |   1 +
 tcg/ppc/tcg-target.h              |   3 +
 tcg/riscv/tcg-target-sa32.h       |   5 +
 tcg/s390x/tcg-target-sa32.h       |   1 +
 tcg/s390x/tcg-target.h            |   3 +
 tcg/sparc/tcg-target-sa32.h       |   1 +
 tcg/tci/tcg-target-sa32.h         |   1 +
 accel/tcg/cputlb.c                |  36 ++--
 bsd-user/main.c                   |   4 +
 linux-user/elfload.c              |  62 ++++--
 linux-user/main.c                 |   3 +
 tcg/optimize.c                    |  20 +-
 tcg/tcg-op-vec.c                  |  27 ++-
 tcg/tcg.c                         |  10 +
 tcg/aarch64/tcg-target.c.inc      |  81 +++++---
 tcg/i386/tcg-target.c.inc         | 387 +++++++++++++++++++++++++++++-------
 tcg/loongarch64/tcg-target.c.inc  |  15 +-
 tcg/mips/tcg-target.c.inc         |  10 +-
 tcg/ppc/tcg-target.c.inc          |  15 ++
 tcg/riscv/tcg-target.c.inc        |   8 +-
 tcg/s390x/tcg-target.c.inc        |  17 ++
 tcg/tci/tcg-target.c.inc          |   2 +-
 37 files changed, 640 insertions(+), 169 deletions(-)
 create mode 100644 tcg/aarch64/tcg-target-sa32.h
 create mode 100644 tcg/arm/tcg-target-sa32.h
 create mode 100644 tcg/i386/tcg-target-sa32.h
 create mode 100644 tcg/loongarch64/tcg-target-sa32.h
 create mode 100644 tcg/mips/tcg-target-sa32.h
 create mode 100644 tcg/ppc/tcg-target-sa32.h
 create mode 100644 tcg/riscv/tcg-target-sa32.h
 create mode 100644 tcg/s390x/tcg-target-sa32.h
 create mode 100644 tcg/sparc/tcg-target-sa32.h
 create mode 100644 tcg/tci/tcg-target-sa32.h

The following changes since commit 813bac3d8d70d85cb7835f7945eb9eed84c2d8d0:

  Merge tag '2023q3-bsd-user-pull-request' of https://gitlab.com/bsdimp/qemu into staging (2023-08-29 08:58:00 -0400)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230829

for you to fetch changes up to dad2f2f5afbaf58d6056f31dfd4b9edd0854b8ab:

  tcg/sparc64: Disable TCG_TARGET_HAS_extr_i64_i32 (2023-08-29 09:57:39 -0700)

----------------------------------------------------------------
softmmu: Use async_run_on_cpu in tcg_commit
tcg: Remove vecop_list check from tcg_gen_not_vec
tcg/sparc64: Disable TCG_TARGET_HAS_extr_i64_i32

----------------------------------------------------------------
Richard Henderson (4):
      softmmu: Assert data in bounds in iotlb_to_section
      softmmu: Use async_run_on_cpu in tcg_commit
      tcg: Remove vecop_list check from tcg_gen_not_vec
      tcg/sparc64: Disable TCG_TARGET_HAS_extr_i64_i32

 include/exec/cpu-common.h    |  1 -
 tcg/sparc64/tcg-target.h     |  2 +-
 accel/tcg/cpu-exec-common.c  | 30 --------------------------
 softmmu/physmem.c            | 50 ++++++++++++++++++++++++++++++----------
 tcg/tcg-op-vec.c             |  7 +++----
 tcg/sparc64/tcg-target.c.inc | 11 ----------
 6 files changed, 41 insertions(+), 60 deletions(-)
Deleted patch

From: Alex Bennée <alex.bennee@linaro.org>

valgrind pointed out that arg_info()->val can be undefined which will
be the case if the arguments are not constant. The ordering of the
checks will have ensured we never relied on an undefined value but for
the sake of completeness re-order the code to be clear.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20220209112142.3367525-1-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
 static int do_constant_folding_cond(TCGType type, TCGArg x,
                                     TCGArg y, TCGCond c)
 {
-    uint64_t xv = arg_info(x)->val;
-    uint64_t yv = arg_info(y)->val;
-
     if (arg_is_const(x) && arg_is_const(y)) {
+        uint64_t xv = arg_info(x)->val;
+        uint64_t yv = arg_info(y)->val;
+
         switch (type) {
         case TCG_TYPE_I32:
             return do_constant_folding_cond_32(xv, yv, c);
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond(TCGType type, TCGArg x,
         }
     } else if (args_are_copies(x, y)) {
         return do_constant_folding_cond_eq(c);
-    } else if (arg_is_const(y) && yv == 0) {
+    } else if (arg_is_const(y) && arg_info(y)->val == 0) {
         switch (c) {
         case TCG_COND_LTU:
             return 0;
--
2.25.1
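A minimal sketch of the pattern valgrind objects to (illustrative only;
these are not the actual TCG structures):

    struct ti { bool is_const; uint64_t val; } *info;  /* val defined only if is_const */

    uint64_t yv = info->val;           /* copies possibly-undefined bits */
    if (info->is_const && yv == 0) {   /* the && should protect yv ...   */
        /* ... */
    }

The short-circuit means the undefined bits are never semantically used,
but valgrind tracks definedness through the copy, and a compiler is free
to evaluate the conjunction branchlessly, at which point the comparison
itself touches undefined data. Moving the reads under the arg_is_const()
checks removes the report without changing behavior.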
Deleted patch

From: Ziqiao Kong <ziqiaokong@gmail.com>

The last entry of DEF_HELPERS_FLAGS_n is DEF_HELPER_FLAGS_7 and
thus the MAX_OPC_PARAM_IARGS should be 7.

Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
Signed-off-by: Ziqiao Kong <ziqiaokong@gmail.com>
Message-Id: <20220227113127.414533-2-ziqiaokong@gmail.com>
Fixes: e6cadf49c3d ("tcg: Add support for a helper with 7 arguments")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h        | 2 +-
 tcg/tci/tcg-target.c.inc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@
 #else
 #define MAX_OPC_PARAM_PER_ARG 1
 #endif
-#define MAX_OPC_PARAM_IARGS 6
+#define MAX_OPC_PARAM_IARGS 7
 #define MAX_OPC_PARAM_OARGS 1
 #define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_R0,
 };

-#if MAX_OPC_PARAM_IARGS != 6
+#if MAX_OPC_PARAM_IARGS != 7
 # error Fix needed, number of supported input arguments changed!
 #endif

--
2.25.1
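For context, the widest helper shape this constant must cover -- a
hypothetical declaration (the macro is real, but the helper name and
signature here are made up):

    /* One return value, seven inputs: MAX_OPC_PARAM_IARGS must be >= 7. */
    DEF_HELPER_FLAGS_7(example_op, TCG_CALL_NO_RWG, void,
                       env, ptr, ptr, ptr, ptr, ptr, i32)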
Acked-by: Alex Bennée <alex.bennee@linaro.org>
Suggested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 softmmu/physmem.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -XXX,XX +XXX,XX @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
     AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
-    MemoryRegionSection *sections = d->map.sections;
+    int section_index = index & ~TARGET_PAGE_MASK;
+    MemoryRegionSection *ret;

-    return &sections[index & ~TARGET_PAGE_MASK];
+    assert(section_index < d->map.sections_nb);
+    ret = d->map.sections + section_index;
+    assert(ret->mr);
+    assert(ret->mr->ops);
+
+    return ret;
 }

 static void io_mem_init(void)
--
2.34.1

All 32-bit LoongArch operations sign-extend the output, so we are easily
able to keep TCG_TYPE_I32 values sign-extended in host registers.

Cc: WANG Xuerui <git@xen0n.name>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-sa32.h |  2 +-
 tcg/loongarch64/tcg-target.c.inc  | 15 ++++++---------
 2 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/tcg/loongarch64/tcg-target-sa32.h b/tcg/loongarch64/tcg-target-sa32.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-sa32.h
+++ b/tcg/loongarch64/tcg-target-sa32.h
@@ -1 +1 @@
-#define TCG_TARGET_SIGNED_ADDR32 0
+#define TCG_TARGET_SIGNED_ADDR32 1
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     return tcg_out_fail_alignment(s, l);
 }

-#endif /* CONFIG_SOFTMMU */
-
 /*
  * `ext32u` the address register into the temp register given,
  * if target is 32-bit, no-op otherwise.
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
                                           TCGReg addr, TCGReg tmp)
 {
-    if (TARGET_LONG_BITS == 32) {
+    if (TARGET_LONG_BITS == 32 && !guest_base_signed_addr32) {
         tcg_out_ext32u(s, tmp, addr);
         return tmp;
     }
     return addr;
 }
+#endif /* CONFIG_SOFTMMU */

 static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
                                     TCGReg rk, MemOp opc, TCGType type)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
     tcg_insn_unit *label_ptr[1];
 #else
     unsigned a_bits;
-#endif
     TCGReg base;
+#endif

     data_regl = *args++;
     addr_regl = *args++;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)

 #if defined(CONFIG_SOFTMMU)
     tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
-    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
-    tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
+    tcg_out_qemu_ld_indexed(s, data_regl, addr_regl, TCG_REG_TMP2, opc, type);
     add_qemu_ldst_label(s, 1, oi, type,
                         data_regl, addr_regl,
                         s->code_ptr, label_ptr);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
     tcg_insn_unit *label_ptr[1];
 #else
     unsigned a_bits;
-#endif
     TCGReg base;
+#endif

     data_regl = *args++;
     addr_regl = *args++;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)

 #if defined(CONFIG_SOFTMMU)
     tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
-    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
-    tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
+    tcg_out_qemu_st_indexed(s, data_regl, addr_regl, TCG_REG_TMP2, opc);
     add_qemu_ldst_label(s, 0, oi,
                        0, /* type param is unused for stores */
                        data_regl, addr_regl,
--
2.25.1
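Why no explicit extension is needed -- an illustration (an assumption
drawn from the LoongArch ISA manual, not from the patch itself):

    /* All 32-bit ALU ops write sign-extended results, e.g.:           */
    /*   add.w $a0, $a1, $a2  =>  $a0 = sext32($a1[31:0] + $a2[31:0])  */
    /* so a TCG_TYPE_I32 guest address is already held in precisely    */
    /* the form that TCG_TARGET_SIGNED_ADDR32 addressing expects.      */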
The evex encoding is added here, for use in a subsequent patch.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 51 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 50 insertions(+), 1 deletion(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
 #define P_SIMDF3        0x20000         /* 0xf3 opcode prefix */
 #define P_SIMDF2        0x40000         /* 0xf2 opcode prefix */
 #define P_VEXL          0x80000         /* Set VEX.L = 1 */
+#define P_EVEX          0x100000        /* Requires EVEX encoding */

 #define OPC_ARITH_EvIz  (0x81)
 #define OPC_ARITH_EvIb  (0x83)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
     tcg_out8(s, opc);
 }

+static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v,
+                             int rm, int index)
+{
+    /* The entire 4-byte evex prefix; with R' and V' set. */
+    uint32_t p = 0x08041062;
+    int mm, pp;
+
+    tcg_debug_assert(have_avx512vl);
+
+    /* EVEX.mm */
+    if (opc & P_EXT3A) {
+        mm = 3;
+    } else if (opc & P_EXT38) {
+        mm = 2;
+    } else if (opc & P_EXT) {
+        mm = 1;
+    } else {
+        g_assert_not_reached();
+    }
+
+    /* EVEX.pp */
+    if (opc & P_DATA16) {
+        pp = 1;                          /* 0x66 */
+    } else if (opc & P_SIMDF3) {
+        pp = 2;                          /* 0xf3 */
+    } else if (opc & P_SIMDF2) {
+        pp = 3;                          /* 0xf2 */
+    } else {
+        pp = 0;
+    }
+
+    p = deposit32(p, 8, 2, mm);
+    p = deposit32(p, 13, 1, (rm & 8) == 0);     /* EVEX.RXB.B */
+    p = deposit32(p, 14, 1, (index & 8) == 0);  /* EVEX.RXB.X */
+    p = deposit32(p, 15, 1, (r & 8) == 0);      /* EVEX.RXB.R */
+    p = deposit32(p, 16, 2, pp);
+    p = deposit32(p, 19, 4, ~v);
+    p = deposit32(p, 23, 1, (opc & P_VEXW) != 0);
+    p = deposit32(p, 29, 2, (opc & P_VEXL) != 0);
+
+    tcg_out32(s, p);
+    tcg_out8(s, opc);
+}
+
 static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
 {
-    tcg_out_vex_opc(s, opc, r, v, rm, 0);
+    if (opc & P_EVEX) {
+        tcg_out_evex_opc(s, opc, r, v, rm, 0);
+    } else {
+        tcg_out_vex_opc(s, opc, r, v, rm, 0);
+    }
     tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
 }

--
2.25.1
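A reading aid for the magic constant (field layout per the Intel SDM;
not part of the patch itself):

    /* 0x08041062, written little-endian by tcg_out32(), emits 62 10 04 08:
     *   0x62  the EVEX escape byte
     *   0x10  P1: inverted R' pre-set; mm and inverted R/X/B filled in
     *   0x04  P2: the must-be-one bit; pp, inverted vvvv and W filled in
     *   0x08  P3: inverted V' pre-set; L'L filled in by the last deposit32()
     */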
After system startup, run the update to memory_dispatch
and the tlb_flush on the cpu. This eliminates a race,
wherein a running cpu sees the memory_dispatch change
but has not yet seen the tlb_flush.

Since the update now happens on the cpu, we need not use
qatomic_rcu_read to protect the read of memory_dispatch.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1826
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1834
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1846
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-common.h   |  1 -
 accel/tcg/cpu-exec-common.c | 30 ----------------------------
 softmmu/physmem.c           | 40 +++++++++++++++++++++++++++----------
 3 files changed, 29 insertions(+), 42 deletions(-)

diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_physical_memory_write(hwaddr addr,
 {
     cpu_physical_memory_rw(addr, (void *)buf, len, true);
 }
-void cpu_reloading_memory_map(void);
 void *cpu_physical_memory_map(hwaddr addr,
                               hwaddr *plen,
                               bool is_write);
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -XXX,XX +XXX,XX @@ void cpu_loop_exit_noexc(CPUState *cpu)
     cpu_loop_exit(cpu);
 }

-#if defined(CONFIG_SOFTMMU)
-void cpu_reloading_memory_map(void)
-{
-    if (qemu_in_vcpu_thread() && current_cpu->running) {
-        /* The guest can in theory prolong the RCU critical section as long
-         * as it feels like. The major problem with this is that because it
-         * can do multiple reconfigurations of the memory map within the
-         * critical section, we could potentially accumulate an unbounded
-         * collection of memory data structures awaiting reclamation.
-         *
-         * Because the only thing we're currently protecting with RCU is the
-         * memory data structures, it's sufficient to break the critical section
-         * in this callback, which we know will get called every time the
-         * memory map is rearranged.
-         *
-         * (If we add anything else in the system that uses RCU to protect
-         * its data structures, we will need to implement some other mechanism
-         * to force TCG CPUs to exit the critical section, at which point this
-         * part of this callback might become unnecessary.)
-         *
-         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
-         * only protects cpu->as->dispatch. Since we know our caller is about
-         * to reload it, it's safe to split the critical section.
-         */
-        rcu_read_unlock();
-        rcu_read_lock();
-    }
-}
-#endif
-
 void cpu_loop_exit(CPUState *cpu)
 {
     /* Undo the setting in cpu_tb_exec.  */
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -XXX,XX +XXX,XX @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
     IOMMUTLBEntry iotlb;
     int iommu_idx;
     hwaddr addr = orig_addr;
-    AddressSpaceDispatch *d =
-        qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

     for (;;) {
         section = address_space_translate_internal(d, addr, &addr, plen, false);
@@ -XXX,XX +XXX,XX @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
 {
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
-    AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
+    AddressSpaceDispatch *d = cpuas->memory_dispatch;
     int section_index = index & ~TARGET_PAGE_MASK;
     MemoryRegionSection *ret;

@@ -XXX,XX +XXX,XX @@ static void tcg_log_global_after_sync(MemoryListener *listener)
     }
 }

+static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
+{
+    CPUAddressSpace *cpuas = data.host_ptr;
+
+    cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
+    tlb_flush(cpu);
+}
+
 static void tcg_commit(MemoryListener *listener)
 {
     CPUAddressSpace *cpuas;
-    AddressSpaceDispatch *d;
+    CPUState *cpu;

     assert(tcg_enabled());
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
-    cpu_reloading_memory_map();
-    /* The CPU and TLB are protected by the iothread lock.
-     * We reload the dispatch pointer now because cpu_reloading_memory_map()
-     * may have split the RCU critical section.
+    cpu = cpuas->cpu;
+
+    /*
+     * Defer changes to as->memory_dispatch until the cpu is quiescent.
+     * Otherwise we race between (1) other cpu threads and (2) ongoing
+     * i/o for the current cpu thread, with data cached by mmu_lookup().
+     *
+     * In addition, queueing the work function will kick the cpu back to
+     * the main loop, which will end the RCU critical section and reclaim
+     * the memory data structures.
+     *
+     * That said, the listener is also called during realize, before
+     * all of the tcg machinery for run-on is initialized: thus halt_cond.
      */
-    d = address_space_to_dispatch(cpuas->as);
-    qatomic_rcu_set(&cpuas->memory_dispatch, d);
-    tlb_flush(cpuas->cpu);
+    if (cpu->halt_cond) {
+        async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+    } else {
+        tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+    }
 }

 static void memory_map_init(void)
--
2.34.1
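The race, sketched as a timeline (an illustration inferred from the
commit message above, not a quote):

    /* Before this patch:
     *   thread A: commits a memory-map change
     *             -> cpuas->memory_dispatch updated, visible at once
     *             -> tlb_flush(B) merely *queues* work for vcpu B
     *   vcpu B:   still executing, combines stale iotlb values cached
     *             by its mmu lookups with the *new* dispatch, e.g. a
     *             stale section index into a reallocated sections map.
     * With async_run_on_cpu(), vcpu B observes the new dispatch and its
     * tlb flush together, at a quiescent point.
     */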
We've had placeholders for these opcodes for a while,
and should have support on ppc, s390x and avx512 hosts.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +++
 include/tcg/tcg.h        |  3 +++
 tcg/aarch64/tcg-target.h |  3 +++
 tcg/arm/tcg-target.h     |  3 +++
 tcg/i386/tcg-target.h    |  3 +++
 tcg/ppc/tcg-target.h     |  3 +++
 tcg/s390x/tcg-target.h   |  3 +++
 tcg/optimize.c           | 12 ++++++------
 tcg/tcg-op-vec.c         | 27 ++++++++++++++++++---------
 tcg/tcg.c                |  6 ++++++
 10 files changed, 51 insertions(+), 15 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(or_vec, 1, 2, 0, IMPLVEC)
 DEF(xor_vec, 1, 2, 0, IMPLVEC)
 DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec))
 DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec))
+DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec))
+DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec))
+DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec))
 DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))

 DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
 #define TCG_TARGET_HAS_not_vec          0
 #define TCG_TARGET_HAS_andc_vec         0
 #define TCG_TARGET_HAS_orc_vec          0
+#define TCG_TARGET_HAS_nand_vec         0
+#define TCG_TARGET_HAS_nor_vec          0
+#define TCG_TARGET_HAS_eqv_vec          0
 #define TCG_TARGET_HAS_roti_vec         0
 #define TCG_TARGET_HAS_rots_vec         0
 #define TCG_TARGET_HAS_rotv_vec         0
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {

 #define TCG_TARGET_HAS_andc_vec         1
 #define TCG_TARGET_HAS_orc_vec          1
+#define TCG_TARGET_HAS_nand_vec         0
+#define TCG_TARGET_HAS_nor_vec          0
+#define TCG_TARGET_HAS_eqv_vec          0
 #define TCG_TARGET_HAS_not_vec          1
 #define TCG_TARGET_HAS_neg_vec          1
 #define TCG_TARGET_HAS_abs_vec          1
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;

 #define TCG_TARGET_HAS_andc_vec         1
 #define TCG_TARGET_HAS_orc_vec          1
+#define TCG_TARGET_HAS_nand_vec         0
+#define TCG_TARGET_HAS_nor_vec          0
+#define TCG_TARGET_HAS_eqv_vec          0
 #define TCG_TARGET_HAS_not_vec          1
 #define TCG_TARGET_HAS_neg_vec          1
 #define TCG_TARGET_HAS_abs_vec          1
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;

 #define TCG_TARGET_HAS_andc_vec         1
 #define TCG_TARGET_HAS_orc_vec          0
+#define TCG_TARGET_HAS_nand_vec         0
+#define TCG_TARGET_HAS_nor_vec          0
+#define TCG_TARGET_HAS_eqv_vec          0
 #define TCG_TARGET_HAS_not_vec          0
 #define TCG_TARGET_HAS_neg_vec          0
 #define TCG_TARGET_HAS_abs_vec          1
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;

 #define TCG_TARGET_HAS_andc_vec         1
 #define TCG_TARGET_HAS_orc_vec          have_isa_2_07
+#define TCG_TARGET_HAS_nand_vec         0
+#define TCG_TARGET_HAS_nor_vec          0
+#define TCG_TARGET_HAS_eqv_vec          0
 #define TCG_TARGET_HAS_not_vec          1
 #define TCG_TARGET_HAS_neg_vec          have_isa_3_00
 #define TCG_TARGET_HAS_abs_vec          0
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.h
+++ b/tcg/s390x/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];

 #define TCG_TARGET_HAS_andc_vec       1
 #define TCG_TARGET_HAS_orc_vec        HAVE_FACILITY(VECTOR_ENH1)
+#define TCG_TARGET_HAS_nand_vec       0
+#define TCG_TARGET_HAS_nor_vec        0
+#define TCG_TARGET_HAS_eqv_vec        0
 #define TCG_TARGET_HAS_not_vec        1
 #define TCG_TARGET_HAS_neg_vec        1
 #define TCG_TARGET_HAS_abs_vec        1
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     CASE_OP_32_64_VEC(orc):
         return x | ~y;

-    CASE_OP_32_64(eqv):
+    CASE_OP_32_64_VEC(eqv):
         return ~(x ^ y);

-    CASE_OP_32_64(nand):
+    CASE_OP_32_64_VEC(nand):
         return ~(x & y);

-    CASE_OP_32_64(nor):
+    CASE_OP_32_64_VEC(nor):
         return ~(x | y);

     case INDEX_op_clz_i32:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_dup2_vec:
             done = fold_dup2(&ctx, op);
             break;
-        CASE_OP_32_64(eqv):
+        CASE_OP_32_64_VEC(eqv):
             done = fold_eqv(&ctx, op);
             break;
         CASE_OP_32_64(extract):
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(mulu2):
             done = fold_multiply2(&ctx, op);
             break;
-        CASE_OP_32_64(nand):
+        CASE_OP_32_64_VEC(nand):
             done = fold_nand(&ctx, op);
             break;
         CASE_OP_32_64(neg):
             done = fold_neg(&ctx, op);
             break;
-        CASE_OP_32_64(nor):
+        CASE_OP_32_64_VEC(nor):
             done = fold_nor(&ctx, op);
             break;
         CASE_OP_32_64_VEC(not):
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)

 void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
 {
-    /* TODO: Add TCG_TARGET_HAS_nand_vec when adding a backend supports it. */
-    tcg_gen_and_vec(0, r, a, b);
-    tcg_gen_not_vec(0, r, r);
+    if (TCG_TARGET_HAS_nand_vec) {
+        vec_gen_op3(INDEX_op_nand_vec, 0, r, a, b);
+    } else {
+        tcg_gen_and_vec(0, r, a, b);
+        tcg_gen_not_vec(0, r, r);
+    }
 }

 void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
 {
-    /* TODO: Add TCG_TARGET_HAS_nor_vec when adding a backend supports it. */
-    tcg_gen_or_vec(0, r, a, b);
-    tcg_gen_not_vec(0, r, r);
+    if (TCG_TARGET_HAS_nor_vec) {
+        vec_gen_op3(INDEX_op_nor_vec, 0, r, a, b);
+    } else {
+        tcg_gen_or_vec(0, r, a, b);
+        tcg_gen_not_vec(0, r, r);
+    }
 }

 void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
 {
-    /* TODO: Add TCG_TARGET_HAS_eqv_vec when adding a backend supports it. */
-    tcg_gen_xor_vec(0, r, a, b);
-    tcg_gen_not_vec(0, r, r);
+    if (TCG_TARGET_HAS_eqv_vec) {
+        vec_gen_op3(INDEX_op_eqv_vec, 0, r, a, b);
+    } else {
+        tcg_gen_xor_vec(0, r, a, b);
+        tcg_gen_not_vec(0, r, r);
+    }
 }

 static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
         return have_vec && TCG_TARGET_HAS_andc_vec;
     case INDEX_op_orc_vec:
         return have_vec && TCG_TARGET_HAS_orc_vec;
+    case INDEX_op_nand_vec:
+        return have_vec && TCG_TARGET_HAS_nand_vec;
+    case INDEX_op_nor_vec:
+        return have_vec && TCG_TARGET_HAS_nor_vec;
+    case INDEX_op_eqv_vec:
+        return have_vec && TCG_TARGET_HAS_eqv_vec;
     case INDEX_op_mul_vec:
         return have_vec && TCG_TARGET_HAS_mul_vec;
     case INDEX_op_shli_vec:
--
2.25.1
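Usage, for orientation (hypothetical frontend call; the fallback path is
the one shown above):

    /* r = ~(a & b): a single host instruction where the backend sets
     * TCG_TARGET_HAS_nand_vec, otherwise the same and+not expansion
     * that was emitted unconditionally before this patch. */
    tcg_gen_nand_vec(MO_64, r, a, b);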
The not pattern is always available via generic expansion.
See debug block in tcg_can_emit_vecop_list.

Fixes: 11978f6f58 ("tcg: Fix expansion of INDEX_op_not_vec")
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op-vec.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -XXX,XX +XXX,XX @@ static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)

 void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
 {
-    const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
-
-    if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
+    if (TCG_TARGET_HAS_not_vec) {
+        vec_gen_op2(INDEX_op_not_vec, 0, r, a);
+    } else {
         tcg_gen_xor_vec(0, r, a, tcg_constant_vec_matching(r, 0, -1));
     }
-    tcg_swap_vecop_list(hold_list);
 }

 void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
--
2.34.1
Deleted patch

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.h     |  6 +++---
 tcg/ppc/tcg-target.c.inc | 15 +++++++++++++++
 2 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;

 #define TCG_TARGET_HAS_andc_vec         1
 #define TCG_TARGET_HAS_orc_vec          have_isa_2_07
-#define TCG_TARGET_HAS_nand_vec         0
-#define TCG_TARGET_HAS_nor_vec          0
-#define TCG_TARGET_HAS_eqv_vec          0
+#define TCG_TARGET_HAS_nand_vec         have_isa_2_07
+#define TCG_TARGET_HAS_nor_vec          1
+#define TCG_TARGET_HAS_eqv_vec          have_isa_2_07
 #define TCG_TARGET_HAS_not_vec          1
 #define TCG_TARGET_HAS_neg_vec          have_isa_3_00
 #define TCG_TARGET_HAS_abs_vec          0
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_xor_vec:
     case INDEX_op_andc_vec:
     case INDEX_op_not_vec:
+    case INDEX_op_nor_vec:
+    case INDEX_op_eqv_vec:
+    case INDEX_op_nand_vec:
         return 1;
     case INDEX_op_orc_vec:
         return have_isa_2_07;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_orc_vec:
         insn = VORC;
         break;
+    case INDEX_op_nand_vec:
+        insn = VNAND;
+        break;
+    case INDEX_op_nor_vec:
+        insn = VNOR;
+        break;
+    case INDEX_op_eqv_vec:
+        insn = VEQV;
+        break;

     case INDEX_op_cmp_vec:
         switch (args[3]) {
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_xor_vec:
     case INDEX_op_andc_vec:
     case INDEX_op_orc_vec:
+    case INDEX_op_nor_vec:
+    case INDEX_op_eqv_vec:
+    case INDEX_op_nand_vec:
     case INDEX_op_cmp_vec:
     case INDEX_op_ssadd_vec:
     case INDEX_op_sssub_vec:
--
2.25.1
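The uneven guards mirror ISA history (an assumption from the Power ISA
documents, not stated in the patch): vnor dates back to the original VMX
set, hence unconditional, while vnand and veqv were added in ISA 2.07.

    /* vnand vT,vA,vB => vT = ~(vA & vB), replacing a two-insn and+not pair */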
Deleted patch

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/s390x/tcg-target.h     |  6 +++---
 tcg/s390x/tcg-target.c.inc | 17 +++++++++++++++++
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.h
+++ b/tcg/s390x/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];

 #define TCG_TARGET_HAS_andc_vec       1
 #define TCG_TARGET_HAS_orc_vec        HAVE_FACILITY(VECTOR_ENH1)
-#define TCG_TARGET_HAS_nand_vec       0
-#define TCG_TARGET_HAS_nor_vec        0
-#define TCG_TARGET_HAS_eqv_vec        0
+#define TCG_TARGET_HAS_nand_vec       HAVE_FACILITY(VECTOR_ENH1)
+#define TCG_TARGET_HAS_nor_vec        1
+#define TCG_TARGET_HAS_eqv_vec        HAVE_FACILITY(VECTOR_ENH1)
 #define TCG_TARGET_HAS_not_vec        1
 #define TCG_TARGET_HAS_neg_vec        1
 #define TCG_TARGET_HAS_abs_vec        1
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
     VRRc_VMXL   = 0xe7fd,
     VRRc_VN     = 0xe768,
     VRRc_VNC    = 0xe769,
+    VRRc_VNN    = 0xe76e,
     VRRc_VNO    = 0xe76b,
+    VRRc_VNX    = 0xe76c,
     VRRc_VO     = 0xe76a,
     VRRc_VOC    = 0xe76f,
     VRRc_VPKS   = 0xe797,  /* we leave the m5 cs field 0 */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_xor_vec:
         tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
         break;
+    case INDEX_op_nand_vec:
+        tcg_out_insn(s, VRRc, VNN, a0, a1, a2, 0);
+        break;
+    case INDEX_op_nor_vec:
+        tcg_out_insn(s, VRRc, VNO, a0, a1, a2, 0);
+        break;
+    case INDEX_op_eqv_vec:
+        tcg_out_insn(s, VRRc, VNX, a0, a1, a2, 0);
+        break;

     case INDEX_op_shli_vec:
         tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_and_vec:
     case INDEX_op_andc_vec:
     case INDEX_op_bitsel_vec:
+    case INDEX_op_eqv_vec:
+    case INDEX_op_nand_vec:
     case INDEX_op_neg_vec:
+    case INDEX_op_nor_vec:
     case INDEX_op_not_vec:
     case INDEX_op_or_vec:
     case INDEX_op_orc_vec:
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_or_vec:
     case INDEX_op_orc_vec:
     case INDEX_op_xor_vec:
+    case INDEX_op_nand_vec:
+    case INDEX_op_nor_vec:
+    case INDEX_op_eqv_vec:
     case INDEX_op_cmp_vec:
     case INDEX_op_mul_vec:
     case INDEX_op_rotlv_vec:
--
2.25.1
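Likewise on s390x (again an assumption from facility history, not from
the patch): VNO is part of the base vector facility, while VNN and VNX
("not exclusive or", i.e. eqv) arrived with vector-enhancements
facility 1 -- hence nor is unconditional and nand/eqv test
HAVE_FACILITY(VECTOR_ENH1).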
Deleted patch

There are some operation sizes in some subsets of AVX512 that
are missing from previous iterations of AVX. Detect them.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/cpuid.h      | 20 +++++++++++++++++---
 tcg/i386/tcg-target.h     |  4 ++++
 tcg/i386/tcg-target.c.inc | 24 ++++++++++++++++++++++--
 3 files changed, 43 insertions(+), 5 deletions(-)

diff --git a/include/qemu/cpuid.h b/include/qemu/cpuid.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/cpuid.h
+++ b/include/qemu/cpuid.h
@@ -XXX,XX +XXX,XX @@
 #ifndef bit_AVX2
 #define bit_AVX2        (1 << 5)
 #endif
-#ifndef bit_AVX512F
-#define bit_AVX512F     (1 << 16)
-#endif
 #ifndef bit_BMI2
 #define bit_BMI2        (1 << 8)
 #endif
+#ifndef bit_AVX512F
+#define bit_AVX512F     (1 << 16)
+#endif
+#ifndef bit_AVX512DQ
+#define bit_AVX512DQ    (1 << 17)
+#endif
+#ifndef bit_AVX512BW
+#define bit_AVX512BW    (1 << 30)
+#endif
+#ifndef bit_AVX512VL
+#define bit_AVX512VL    (1u << 31)
+#endif
+
+/* Leaf 7, %ecx */
+#ifndef bit_AVX512VBMI2
+#define bit_AVX512VBMI2 (1 << 6)
+#endif

 /* Leaf 0x80000001, %ecx */
 #ifndef bit_LZCNT
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_bmi1;
 extern bool have_popcnt;
 extern bool have_avx1;
 extern bool have_avx2;
+extern bool have_avx512bw;
+extern bool have_avx512dq;
+extern bool have_avx512vbmi2;
+extern bool have_avx512vl;
 extern bool have_movbe;

 /* optional instructions */
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ bool have_bmi1;
 bool have_popcnt;
 bool have_avx1;
 bool have_avx2;
+bool have_avx512bw;
+bool have_avx512dq;
+bool have_avx512vbmi2;
+bool have_avx512vl;
 bool have_movbe;

 #ifdef CONFIG_CPUID_H
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
 static void tcg_target_init(TCGContext *s)
 {
 #ifdef CONFIG_CPUID_H
-    unsigned a, b, c, d, b7 = 0;
+    unsigned a, b, c, d, b7 = 0, c7 = 0;
     unsigned max = __get_cpuid_max(0, 0);

     if (max >= 7) {
         /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs.  */
-        __cpuid_count(7, 0, a, b7, c, d);
+        __cpuid_count(7, 0, a, b7, c7, d);
         have_bmi1 = (b7 & bit_BMI) != 0;
         have_bmi2 = (b7 & bit_BMI2) != 0;
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
         if ((xcrl & 6) == 6) {
             have_avx1 = (c & bit_AVX) != 0;
             have_avx2 = (b7 & bit_AVX2) != 0;
+
+            /*
+             * There are interesting instructions in AVX512, so long
+             * as we have AVX512VL, which indicates support for EVEX
+             * on sizes smaller than 512 bits.  We are required to
+             * check that OPMASK and all extended ZMM state are enabled
+             * even if we're not using them -- the insns will fault.
+             */
+            if ((xcrl & 0xe0) == 0xe0
+                && (b7 & bit_AVX512F)
+                && (b7 & bit_AVX512VL)) {
+                have_avx512vl = true;
+                have_avx512bw = (b7 & bit_AVX512BW) != 0;
+                have_avx512dq = (b7 & bit_AVX512DQ) != 0;
+                have_avx512vbmi2 = (c7 & bit_AVX512VBMI2) != 0;
+            }
         }
     }
 }
--
2.25.1
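The XCR0 bits being tested, for reference (values from the Intel SDM;
the patch relies on them without spelling them out):

    /* xcrl = low word of XCR0, read via xgetbv:
     *   bit 1  SSE (XMM) state   \  (xcrl & 6) == 6
     *   bit 2  AVX (YMM) state   /
     *   bit 5  opmask k0-k7          \
     *   bit 6  upper 256 of ZMM0-15   > (xcrl & 0xe0) == 0xe0
     *   bit 7  ZMM16-31               /
     */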
Deleted patch

The condition for UMIN/UMAX availability is about to change;
use the canonical version.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
         fixup = NEED_SWAP | NEED_INV;
         break;
     case TCG_COND_LEU:
-        if (vece <= MO_32) {
+        if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) {
             fixup = NEED_UMIN;
         } else {
             fixup = NEED_BIAS | NEED_INV;
         }
         break;
     case TCG_COND_GTU:
-        if (vece <= MO_32) {
+        if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) {
             fixup = NEED_UMIN | NEED_INV;
         } else {
             fixup = NEED_BIAS;
         }
         break;
     case TCG_COND_GEU:
-        if (vece <= MO_32) {
+        if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) {
             fixup = NEED_UMAX;
         } else {
             fixup = NEED_BIAS | NEED_SWAP | NEED_INV;
         }
         break;
     case TCG_COND_LTU:
-        if (vece <= MO_32) {
+        if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) {
             fixup = NEED_UMAX | NEED_INV;
         } else {
             fixup = NEED_BIAS | NEED_SWAP;
--
2.25.1
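The identity behind the NEED_UMIN/NEED_UMAX fixups, spelled out (a
reading aid, not from the patch):

    /* x <=u y  <=>  umin(x, y) == x, so LEU(a, b) can be emitted as  */
    /*   t = umin(a, b); cmpeq(t, a);                                 */
    /* GEU uses umax analogously; GTU/LTU are the NEED_INV variants.  */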
Deleted patch

AVX512VL has VPSRAVQ, and
AVX512BW has VPSLLVW, VPSRAVW, VPSRLVW.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 32 ++++++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
 #define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
 #define OPC_VPERMQ      (0x00 | P_EXT3A | P_DATA16 | P_VEXW)
 #define OPC_VPERM2I128  (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
+#define OPC_VPSLLVW     (0x12 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
 #define OPC_VPSLLVD     (0x47 | P_EXT38 | P_DATA16)
 #define OPC_VPSLLVQ     (0x47 | P_EXT38 | P_DATA16 | P_VEXW)
+#define OPC_VPSRAVW     (0x11 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
 #define OPC_VPSRAVD     (0x46 | P_EXT38 | P_DATA16)
+#define OPC_VPSRAVQ     (0x46 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPSRLVW     (0x10 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
 #define OPC_VPSRLVD     (0x45 | P_EXT38 | P_DATA16)
 #define OPC_VPSRLVQ     (0x45 | P_EXT38 | P_DATA16 | P_VEXW)
 #define OPC_VZEROUPPER  (0x77 | P_EXT)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2
     };
     static int const shlv_insn[4] = {
-        /* TODO: AVX512 adds support for MO_16.  */
-        OPC_UD2, OPC_UD2, OPC_VPSLLVD, OPC_VPSLLVQ
+        OPC_UD2, OPC_VPSLLVW, OPC_VPSLLVD, OPC_VPSLLVQ
     };
     static int const shrv_insn[4] = {
-        /* TODO: AVX512 adds support for MO_16.  */
-        OPC_UD2, OPC_UD2, OPC_VPSRLVD, OPC_VPSRLVQ
+        OPC_UD2, OPC_VPSRLVW, OPC_VPSRLVD, OPC_VPSRLVQ
     };
     static int const sarv_insn[4] = {
-        /* TODO: AVX512 adds support for MO_16, MO_64.  */
-        OPC_UD2, OPC_UD2, OPC_VPSRAVD, OPC_UD2
+        OPC_UD2, OPC_VPSRAVW, OPC_VPSRAVD, OPC_VPSRAVQ
     };
     static int const shls_insn[4] = {
         OPC_UD2, OPC_PSLLW, OPC_PSLLD, OPC_PSLLQ
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)

     case INDEX_op_shlv_vec:
     case INDEX_op_shrv_vec:
-        return have_avx2 && vece >= MO_32;
+        switch (vece) {
+        case MO_16:
+            return have_avx512bw;
+        case MO_32:
+        case MO_64:
+            return have_avx2;
+        }
+        return 0;
     case INDEX_op_sarv_vec:
-        return have_avx2 && vece == MO_32;
+        switch (vece) {
+        case MO_16:
+            return have_avx512bw;
+        case MO_32:
+            return have_avx2;
+        case MO_64:
+            return have_avx512vl;
+        }
+        return 0;
     case INDEX_op_rotlv_vec:
     case INDEX_op_rotrv_vec:
         return have_avx2 && vece >= MO_32 ? -1 : 0;
--
2.25.1
Deleted patch

AVX512VL has VPSRAQ.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
 #define OPC_PSLLQ       (0xf3 | P_EXT | P_DATA16)
 #define OPC_PSRAW       (0xe1 | P_EXT | P_DATA16)
 #define OPC_PSRAD       (0xe2 | P_EXT | P_DATA16)
+#define OPC_VPSRAQ      (0xe2 | P_EXT | P_DATA16 | P_VEXW | P_EVEX)
 #define OPC_PSRLW       (0xd1 | P_EXT | P_DATA16)
 #define OPC_PSRLD       (0xd2 | P_EXT | P_DATA16)
 #define OPC_PSRLQ       (0xd3 | P_EXT | P_DATA16)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         OPC_UD2, OPC_PSRLW, OPC_PSRLD, OPC_PSRLQ
     };
     static int const sars_insn[4] = {
-        OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_UD2
+        OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_VPSRAQ
     };
     static int const abs_insn[4] = {
         /* TODO: AVX512 adds support for MO_64.  */
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_shrs_vec:
         return vece >= MO_16;
     case INDEX_op_sars_vec:
-        return vece >= MO_16 && vece <= MO_32;
+        switch (vece) {
+        case MO_16:
+        case MO_32:
+            return 1;
+        case MO_64:
+            return have_avx512vl;
+        }
+        return 0;
     case INDEX_op_rotls_vec:
         return vece >= MO_16 ? -1 : 0;

--
2.25.1
Deleted patch

AVX512 has VPSRAQ with immediate operand, in the same form as
with AVX, but requires EVEX encoding and W1.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 30 +++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         break;

     case INDEX_op_shli_vec:
+        insn = shift_imm_insn[vece];
         sub = 6;
         goto gen_shift;
     case INDEX_op_shri_vec:
+        insn = shift_imm_insn[vece];
         sub = 2;
         goto gen_shift;
     case INDEX_op_sari_vec:
-        tcg_debug_assert(vece != MO_64);
+        if (vece == MO_64) {
+            insn = OPC_PSHIFTD_Ib | P_VEXW | P_EVEX;
+        } else {
+            insn = shift_imm_insn[vece];
+        }
         sub = 4;
     gen_shift:
         tcg_debug_assert(vece != MO_8);
-        insn = shift_imm_insn[vece];
         if (type == TCG_TYPE_V256) {
             insn |= P_VEXL;
         }
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
         return vece == MO_8 ? -1 : 1;

     case INDEX_op_sari_vec:
-        /* We must expand the operation for MO_8.  */
-        if (vece == MO_8) {
+        switch (vece) {
+        case MO_8:
             return -1;
-        }
-        /* We can emulate this for MO_64, but it does not pay off
-           unless we're producing at least 4 values.  */
-        if (vece == MO_64) {
+        case MO_16:
+        case MO_32:
+            return 1;
+        case MO_64:
+            if (have_avx512vl) {
+                return 1;
+            }
+            /*
+             * We can emulate this for MO_64, but it does not pay off
+             * unless we're producing at least 4 values.
+             */
             return type >= TCG_TYPE_V256 ? -1 : 0;
         }
-        return 1;
+        return 0;

     case INDEX_op_shls_vec:
     case INDEX_op_shrs_vec:
--
2.25.1
Deleted patch

AVX512VL has VPROLD and VPROLQ, layered onto the same
opcode as PSHIFTD, but requires EVEX encoding and W1.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.h     |  2 +-
 tcg/i386/tcg-target.c.inc | 15 +++++++++++++--
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
 #define TCG_TARGET_HAS_not_vec          0
 #define TCG_TARGET_HAS_neg_vec          0
 #define TCG_TARGET_HAS_abs_vec          1
-#define TCG_TARGET_HAS_roti_vec         0
+#define TCG_TARGET_HAS_roti_vec         have_avx512vl
 #define TCG_TARGET_HAS_rots_vec         0
 #define TCG_TARGET_HAS_rotv_vec         0
 #define TCG_TARGET_HAS_shi_vec          1
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
 #define OPC_PSHUFLW     (0x70 | P_EXT | P_SIMDF2)
 #define OPC_PSHUFHW     (0x70 | P_EXT | P_SIMDF3)
 #define OPC_PSHIFTW_Ib  (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */
-#define OPC_PSHIFTD_Ib  (0x72 | P_EXT | P_DATA16) /* /2 /6 /4 */
+#define OPC_PSHIFTD_Ib  (0x72 | P_EXT | P_DATA16) /* /1 /2 /6 /4 */
 #define OPC_PSHIFTQ_Ib  (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */
 #define OPC_PSLLW       (0xf1 | P_EXT | P_DATA16)
 #define OPC_PSLLD       (0xf2 | P_EXT | P_DATA16)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
             insn = shift_imm_insn[vece];
         }
         sub = 4;
+        goto gen_shift;
+    case INDEX_op_rotli_vec:
+        insn = OPC_PSHIFTD_Ib | P_EVEX;  /* VPROL[DQ] */
+        if (vece == MO_64) {
+            insn |= P_VEXW;
+        }
+        sub = 1;
+        goto gen_shift;
     gen_shift:
         tcg_debug_assert(vece != MO_8);
         if (type == TCG_TYPE_V256) {
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_shli_vec:
     case INDEX_op_shri_vec:
     case INDEX_op_sari_vec:
+    case INDEX_op_rotli_vec:
    case INDEX_op_x86_psrldq_vec:
         return C_O1_I1(x, x);

@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_xor_vec:
     case INDEX_op_andc_vec:
         return 1;
-    case INDEX_op_rotli_vec:
     case INDEX_op_cmp_vec:
     case INDEX_op_cmpsel_vec:
         return -1;

+    case INDEX_op_rotli_vec:
+        return have_avx512vl && vece >= MO_32 ? 1 : -1;
+
     case INDEX_op_shli_vec:
     case INDEX_op_shri_vec:
         /* We must expand the operation for MO_8.  */
--
2.25.1
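Why three instructions share one #define: 0x72 is a ModRM-group opcode,
with the operation selected by the ModRM.reg field ('sub' in this code).
The group assignments, per the SDM (only /1 is new here; /0 would be
VPROR[DQ], unused by this patch):

    /*   /1  VPROLD / VPROLQ  (EVEX only)   */
    /*   /2  PSRLD   -- shri                */
    /*   /4  PSRAD   -- sari                */
    /*   /6  PSLLD   -- shli                */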
Deleted patch

AVX512VL has VPROLVD and VPRORVQ.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.h     |  2 +-
 tcg/i386/tcg-target.c.inc | 25 ++++++++++++++++++++++++-
 2 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
 #define TCG_TARGET_HAS_abs_vec          1
 #define TCG_TARGET_HAS_roti_vec         have_avx512vl
 #define TCG_TARGET_HAS_rots_vec         0
-#define TCG_TARGET_HAS_rotv_vec         0
+#define TCG_TARGET_HAS_rotv_vec         have_avx512vl
 #define TCG_TARGET_HAS_shi_vec          1
 #define TCG_TARGET_HAS_shs_vec          1
 #define TCG_TARGET_HAS_shv_vec          have_avx2
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
 #define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
 #define OPC_VPERMQ      (0x00 | P_EXT3A | P_DATA16 | P_VEXW)
 #define OPC_VPERM2I128  (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
+#define OPC_VPROLVD     (0x15 | P_EXT38 | P_DATA16 | P_EVEX)
+#define OPC_VPROLVQ     (0x15 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPRORVD     (0x14 | P_EXT38 | P_DATA16 | P_EVEX)
+#define OPC_VPRORVQ     (0x14 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
 #define OPC_VPSLLVW     (0x12 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
 #define OPC_VPSLLVD     (0x47 | P_EXT38 | P_DATA16)
 #define OPC_VPSLLVQ     (0x47 | P_EXT38 | P_DATA16 | P_VEXW)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     static int const umax_insn[4] = {
         OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2
     };
+    static int const rotlv_insn[4] = {
+        OPC_UD2, OPC_UD2, OPC_VPROLVD, OPC_VPROLVQ
+    };
+    static int const rotrv_insn[4] = {
+        OPC_UD2, OPC_UD2, OPC_VPRORVD, OPC_VPRORVQ
+    };
     static int const shlv_insn[4] = {
         OPC_UD2, OPC_VPSLLVW, OPC_VPSLLVD, OPC_VPSLLVQ
     };
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_sarv_vec:
         insn = sarv_insn[vece];
         goto gen_simd;
+    case INDEX_op_rotlv_vec:
+        insn = rotlv_insn[vece];
+        goto gen_simd;
+    case INDEX_op_rotrv_vec:
+        insn = rotrv_insn[vece];
+        goto gen_simd;
     case INDEX_op_shls_vec:
         insn = shls_insn[vece];
         goto gen_simd;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_shlv_vec:
     case INDEX_op_shrv_vec:
     case INDEX_op_sarv_vec:
+    case INDEX_op_rotlv_vec:
+    case INDEX_op_rotrv_vec:
     case INDEX_op_shls_vec:
     case INDEX_op_shrs_vec:
     case INDEX_op_sars_vec:
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
         return 0;
     case INDEX_op_rotlv_vec:
     case INDEX_op_rotrv_vec:
-        return have_avx2 && vece >= MO_32 ? -1 : 0;
+        switch (vece) {
+        case MO_32:
+        case MO_64:
+            return have_avx512vl ? 1 : have_avx2 ? -1 : 0;
+        }
+        return 0;

     case INDEX_op_mul_vec:
         if (vece == MO_8) {
--
2.25.1
Deleted patch
We will use VPSHLD, VPSHLDV and VPSHRDV for 16-bit rotates.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target-con-set.h | 1 +
tcg/i386/tcg-target.opc.h | 3 +++
tcg/i386/tcg-target.c.inc | 38 +++++++++++++++++++++++++++++++++
3 files changed, 42 insertions(+)

diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-con-set.h
+++ b/tcg/i386/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rI)
C_O1_I2(x, x, x)
C_N1_I2(r, r, r)
C_N1_I2(r, r, rW)
+C_O1_I3(x, 0, x, x)
C_O1_I3(x, x, x, x)
C_O1_I4(r, r, re, r, 0)
C_O1_I4(r, r, r, ri, ri)
diff --git a/tcg/i386/tcg-target.opc.h b/tcg/i386/tcg-target.opc.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.opc.h
+++ b/tcg/i386/tcg-target.opc.h
@@ -XXX,XX +XXX,XX @@ DEF(x86_psrldq_vec, 1, 1, 1, IMPLVEC)
DEF(x86_vperm2i128_vec, 1, 2, 1, IMPLVEC)
DEF(x86_punpckl_vec, 1, 2, 0, IMPLVEC)
DEF(x86_punpckh_vec, 1, 2, 0, IMPLVEC)
+DEF(x86_vpshldi_vec, 1, 2, 1, IMPLVEC)
+DEF(x86_vpshldv_vec, 1, 3, 0, IMPLVEC)
+DEF(x86_vpshrdv_vec, 1, 3, 0, IMPLVEC)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define OPC_VPROLVQ (0x15 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPRORVD (0x14 | P_EXT38 | P_DATA16 | P_EVEX)
#define OPC_VPRORVQ (0x14 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPSHLDW (0x70 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPSHLDD (0x71 | P_EXT3A | P_DATA16 | P_EVEX)
+#define OPC_VPSHLDQ (0x71 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPSHLDVW (0x70 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPSHLDVD (0x71 | P_EXT38 | P_DATA16 | P_EVEX)
+#define OPC_VPSHLDVQ (0x71 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPSHRDVW (0x72 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
+#define OPC_VPSHRDVD (0x73 | P_EXT38 | P_DATA16 | P_EVEX)
+#define OPC_VPSHRDVQ (0x73 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSLLVW (0x12 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16)
#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_VEXW)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
static int const sars_insn[4] = {
OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_VPSRAQ
};
+ static int const vpshldi_insn[4] = {
+ OPC_UD2, OPC_VPSHLDW, OPC_VPSHLDD, OPC_VPSHLDQ
+ };
+ static int const vpshldv_insn[4] = {
+ OPC_UD2, OPC_VPSHLDVW, OPC_VPSHLDVD, OPC_VPSHLDVQ
+ };
+ static int const vpshrdv_insn[4] = {
+ OPC_UD2, OPC_VPSHRDVW, OPC_VPSHRDVD, OPC_VPSHRDVQ
+ };
static int const abs_insn[4] = {
/* TODO: AVX512 adds support for MO_64. */
OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_UD2
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_x86_packus_vec:
insn = packus_insn[vece];
goto gen_simd;
+ case INDEX_op_x86_vpshldv_vec:
+ insn = vpshldv_insn[vece];
+ a1 = a2;
+ a2 = args[3];
+ goto gen_simd;
+ case INDEX_op_x86_vpshrdv_vec:
+ insn = vpshrdv_insn[vece];
+ a1 = a2;
+ a2 = args[3];
+ goto gen_simd;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_dup2_vec:
/* First merge the two 32-bit inputs to a single 64-bit element. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
insn = OPC_VPERM2I128;
sub = args[3];
goto gen_simd_imm8;
+ case INDEX_op_x86_vpshldi_vec:
+ insn = vpshldi_insn[vece];
+ sub = args[3];
+ goto gen_simd_imm8;
gen_simd_imm8:
+ tcg_debug_assert(insn != OPC_UD2);
if (type == TCG_TYPE_V256) {
insn |= P_VEXL;
}
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_x86_vperm2i128_vec:
case INDEX_op_x86_punpckl_vec:
case INDEX_op_x86_punpckh_vec:
+ case INDEX_op_x86_vpshldi_vec:
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_dup2_vec:
#endif
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_x86_psrldq_vec:
return C_O1_I1(x, x);

+ case INDEX_op_x86_vpshldv_vec:
+ case INDEX_op_x86_vpshrdv_vec:
+ return C_O1_I3(x, 0, x, x);
+
case INDEX_op_x86_vpblendvb_vec:
return C_O1_I3(x, x, x, x);

--
2.25.1
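A scalar model of the 16-bit shift-double semantics these opcodes wrap, as the SDM describes them: the two inputs are treated as one 32-bit value and the half that receives the shifted-in bits is kept. Helper names here are made up for illustration, not QEMU or Intel API:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t shld16(uint16_t a, uint16_t b, unsigned n)
    {
        uint32_t t = ((uint32_t)a << 16) | b;      /* a:b, a in the high half */
        return (uint16_t)((t << (n & 15)) >> 16);  /* keep the high half */
    }

    static uint16_t shrd16(uint16_t a, uint16_t b, unsigned n)
    {
        uint32_t t = ((uint32_t)b << 16) | a;      /* b:a, a in the low half */
        return (uint16_t)(t >> (n & 15));          /* keep the low half */
    }

    int main(void)
    {
        assert(shld16(0x1234, 0x8000, 1) == 0x2469); /* top bit of b shifts in */
        assert(shrd16(0x0001, 0x8000, 1) == 0x0000);
        return 0;
    }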
Deleted patch
While there are no specific 16-bit rotate instructions, there
are double-word shifts, which can perform the same operation.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_rotlv_vec:
case INDEX_op_rotrv_vec:
switch (vece) {
+ case MO_16:
+ return have_avx512vbmi2 ? -1 : 0;
case MO_32:
case MO_64:
return have_avx512vl ? 1 : have_avx2 ? -1 : 0;
@@ -XXX,XX +XXX,XX @@ static void expand_vec_rotli(TCGType type, unsigned vece,
return;
}

+ if (have_avx512vbmi2) {
+ vec_gen_4(INDEX_op_x86_vpshldi_vec, type, vece,
+ tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v1), imm);
+ return;
+ }
+
t = tcg_temp_new_vec(type);
tcg_gen_shli_vec(vece, t, v1, imm);
tcg_gen_shri_vec(vece, v0, v1, (8 << vece) - imm);
@@ -XXX,XX +XXX,XX @@ static void expand_vec_rotls(TCGType type, unsigned vece,
static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
TCGv_vec v1, TCGv_vec sh, bool right)
{
- TCGv_vec t = tcg_temp_new_vec(type);
+ TCGv_vec t;

+ if (have_avx512vbmi2) {
+ vec_gen_4(right ? INDEX_op_x86_vpshrdv_vec : INDEX_op_x86_vpshldv_vec,
+ type, vece, tcgv_vec_arg(v0), tcgv_vec_arg(v1),
+ tcgv_vec_arg(v1), tcgv_vec_arg(sh));
+ return;
+ }
+
+ t = tcg_temp_new_vec(type);
tcg_gen_dupi_vec(vece, t, 8 << vece);
tcg_gen_sub_vec(vece, t, t, sh);
if (right) {
--
2.25.1
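Why a shift-double gives a rotate: with the same register supplied as both halves (note v1 passed twice to vec_gen_4 above), the bits shifted out of the top re-enter from the identical low half. A quick scalar check, illustrative only:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t shld16(uint16_t hi, uint16_t lo, unsigned n)
    {
        return (uint16_t)(((((uint32_t)hi << 16) | lo) << (n & 15)) >> 16);
    }

    int main(void)
    {
        uint16_t x = 0xbeef;
        for (unsigned n = 0; n < 16; n++) {
            uint16_t rot = n ? (uint16_t)((x << n) | (x >> (16 - n))) : x;
            assert(shld16(x, x, n) == rot);  /* shift-double(v1, v1) == rotli */
        }
        return 0;
    }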
Deleted patch
There is no such instruction on x86, so we should
not be pretending it has arguments.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 1 -
1 file changed, 1 deletion(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_shls_vec:
case INDEX_op_shrs_vec:
case INDEX_op_sars_vec:
- case INDEX_op_rotls_vec:
case INDEX_op_cmp_vec:
case INDEX_op_x86_shufps_vec:
case INDEX_op_x86_blend_vec:
--
2.25.1
Deleted patch
Expand 32-bit and 64-bit scalar rotate with VPRO[LR]V;
expand 16-bit scalar rotate with VPSHLDV.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 49 +++++++++++++++++++++++----------------
1 file changed, 29 insertions(+), 20 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void expand_vec_rotli(TCGType type, unsigned vece,
tcg_temp_free_vec(t);
}

-static void expand_vec_rotls(TCGType type, unsigned vece,
- TCGv_vec v0, TCGv_vec v1, TCGv_i32 lsh)
-{
- TCGv_i32 rsh;
- TCGv_vec t;
-
- tcg_debug_assert(vece != MO_8);
-
- t = tcg_temp_new_vec(type);
- rsh = tcg_temp_new_i32();
-
- tcg_gen_neg_i32(rsh, lsh);
- tcg_gen_andi_i32(rsh, rsh, (8 << vece) - 1);
- tcg_gen_shls_vec(vece, t, v1, lsh);
- tcg_gen_shrs_vec(vece, v0, v1, rsh);
- tcg_gen_or_vec(vece, v0, v0, t);
- tcg_temp_free_vec(t);
- tcg_temp_free_i32(rsh);
-}
-
static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
TCGv_vec v1, TCGv_vec sh, bool right)
{
@@ -XXX,XX +XXX,XX @@ static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
tcg_temp_free_vec(t);
}

+static void expand_vec_rotls(TCGType type, unsigned vece,
+ TCGv_vec v0, TCGv_vec v1, TCGv_i32 lsh)
+{
+ TCGv_vec t = tcg_temp_new_vec(type);
+
+ tcg_debug_assert(vece != MO_8);
+
+ if (vece >= MO_32 ? have_avx512vl : have_avx512vbmi2) {
+ tcg_gen_dup_i32_vec(vece, t, lsh);
+ if (vece >= MO_32) {
+ tcg_gen_rotlv_vec(vece, v0, v1, t);
+ } else {
+ expand_vec_rotv(type, vece, v0, v1, t, false);
+ }
+ } else {
+ TCGv_i32 rsh = tcg_temp_new_i32();
+
+ tcg_gen_neg_i32(rsh, lsh);
+ tcg_gen_andi_i32(rsh, rsh, (8 << vece) - 1);
+ tcg_gen_shls_vec(vece, t, v1, lsh);
+ tcg_gen_shrs_vec(vece, v0, v1, rsh);
+ tcg_gen_or_vec(vece, v0, v0, t);
+
+ tcg_temp_free_i32(rsh);
+ }
+
+ tcg_temp_free_vec(t);
+}
+
static void expand_vec_mul(TCGType type, unsigned vece,
TCGv_vec v0, TCGv_vec v1, TCGv_vec v2)
{
--
2.25.1

Since a59a29312660 ("tcg/sparc64: Remove sparc32plus constraints")
we no longer distinguish registers with 32 vs 64 bits.
Therefore we can remove support for the backend-specific
type change opcodes.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/sparc64/tcg-target.h | 2 +-
tcg/sparc64/tcg-target.c.inc | 11 -----------
2 files changed, 1 insertion(+), 12 deletions(-)

diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_extr_i64_i32 1
+#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_rot_i64 0
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
tcg_out_ext32u(s, rd, rs);
}

-static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
-{
- tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
-}
-
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
return false;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_divu_i64:
c = ARITH_UDIVX;
goto gen_arith;
- case INDEX_op_extrh_i64_i32:
- tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
- break;

case INDEX_op_brcond_i64:
tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
default:
g_assert_not_reached();
}
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- case INDEX_op_extrh_i64_i32:
case INDEX_op_qemu_ld_a32_i32:
case INDEX_op_qemu_ld_a64_i32:
case INDEX_op_qemu_ld_a32_i64:
--
2.34.1
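For reference, the semantics of the two opcodes the sparc64 patch stops claiming (a sketch; with the flag at 0 the middle-end falls back to, roughly, a plain move for the low part and a 64-bit right shift for the high part instead of calling into the backend):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t extrl_i64_i32(uint64_t x) { return (uint32_t)x; }
    static uint32_t extrh_i64_i32(uint64_t x) { return (uint32_t)(x >> 32); }

    int main(void)
    {
        assert(extrl_i64_i32(0x1122334455667788ull) == 0x55667788u);
        assert(extrh_i64_i32(0x1122334455667788ull) == 0x11223344u);
        return 0;
    }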
Deleted patch
AVX512VL has VPABSQ, VPMAXSQ, VPMAXUQ, VPMINSQ, VPMINUQ.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define OPC_PABSB (0x1c | P_EXT38 | P_DATA16)
#define OPC_PABSW (0x1d | P_EXT38 | P_DATA16)
#define OPC_PABSD (0x1e | P_EXT38 | P_DATA16)
+#define OPC_VPABSQ (0x1f | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PACKSSDW (0x6b | P_EXT | P_DATA16)
#define OPC_PACKSSWB (0x63 | P_EXT | P_DATA16)
#define OPC_PACKUSDW (0x2b | P_EXT38 | P_DATA16)
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define OPC_PMAXSB (0x3c | P_EXT38 | P_DATA16)
#define OPC_PMAXSW (0xee | P_EXT | P_DATA16)
#define OPC_PMAXSD (0x3d | P_EXT38 | P_DATA16)
+#define OPC_VPMAXSQ (0x3d | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PMAXUB (0xde | P_EXT | P_DATA16)
#define OPC_PMAXUW (0x3e | P_EXT38 | P_DATA16)
#define OPC_PMAXUD (0x3f | P_EXT38 | P_DATA16)
+#define OPC_VPMAXUQ (0x3f | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PMINSB (0x38 | P_EXT38 | P_DATA16)
#define OPC_PMINSW (0xea | P_EXT | P_DATA16)
#define OPC_PMINSD (0x39 | P_EXT38 | P_DATA16)
+#define OPC_VPMINSQ (0x39 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PMINUB (0xda | P_EXT | P_DATA16)
#define OPC_PMINUW (0x3a | P_EXT38 | P_DATA16)
#define OPC_PMINUD (0x3b | P_EXT38 | P_DATA16)
+#define OPC_VPMINUQ (0x3b | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PMOVSXBW (0x20 | P_EXT38 | P_DATA16)
#define OPC_PMOVSXWD (0x23 | P_EXT38 | P_DATA16)
#define OPC_PMOVSXDQ (0x25 | P_EXT38 | P_DATA16)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2
};
static int const smin_insn[4] = {
- OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_UD2
+ OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_VPMINSQ
};
static int const smax_insn[4] = {
- OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_UD2
+ OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_VPMAXSQ
};
static int const umin_insn[4] = {
- OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_UD2
+ OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_VPMINUQ
};
static int const umax_insn[4] = {
- OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2
+ OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_VPMAXUQ
};
static int const rotlv_insn[4] = {
OPC_UD2, OPC_UD2, OPC_VPROLVD, OPC_VPROLVQ
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
OPC_UD2, OPC_VPSHRDVW, OPC_VPSHRDVD, OPC_VPSHRDVQ
};
static int const abs_insn[4] = {
- /* TODO: AVX512 adds support for MO_64. */
- OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_UD2
+ OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_VPABSQ
};

TCGType type = vecl + TCG_TYPE_V64;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_umin_vec:
case INDEX_op_umax_vec:
case INDEX_op_abs_vec:
- return vece <= MO_32;
+ return vece <= MO_32 || have_avx512vl;

default:
return 0;
--
2.25.1
Deleted patch
AVX512DQ has VPMULLQ.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define OPC_PMOVZXDQ (0x35 | P_EXT38 | P_DATA16)
#define OPC_PMULLW (0xd5 | P_EXT | P_DATA16)
#define OPC_PMULLD (0x40 | P_EXT38 | P_DATA16)
+#define OPC_VPMULLQ (0x40 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_POR (0xeb | P_EXT | P_DATA16)
#define OPC_PSHUFB (0x00 | P_EXT38 | P_DATA16)
#define OPC_PSHUFD (0x70 | P_EXT | P_DATA16)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
OPC_PSUBUB, OPC_PSUBUW, OPC_UD2, OPC_UD2
};
static int const mul_insn[4] = {
- OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_UD2
+ OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_VPMULLQ
};
static int const shift_imm_insn[4] = {
OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
return 0;

case INDEX_op_mul_vec:
- if (vece == MO_8) {
- /* We can expand the operation for MO_8. */
+ switch (vece) {
+ case MO_8:
return -1;
- }
- if (vece == MO_64) {
- return 0;
+ case MO_64:
+ return have_avx512dq;
}
return 1;

--
2.25.1
Deleted patch
AVX512VL has a general ternary logic operation, VPTERNLOGQ,
which can implement NOT, ORC, NAND, NOR, EQV.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.h | 10 +++++-----
tcg/i386/tcg-target.c.inc | 34 ++++++++++++++++++++++++++++++++++
2 files changed, 39 insertions(+), 5 deletions(-)

diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
#define TCG_TARGET_HAS_v256 have_avx2

#define TCG_TARGET_HAS_andc_vec 1
-#define TCG_TARGET_HAS_orc_vec 0
-#define TCG_TARGET_HAS_nand_vec 0
-#define TCG_TARGET_HAS_nor_vec 0
-#define TCG_TARGET_HAS_eqv_vec 0
-#define TCG_TARGET_HAS_not_vec 0
+#define TCG_TARGET_HAS_orc_vec have_avx512vl
+#define TCG_TARGET_HAS_nand_vec have_avx512vl
+#define TCG_TARGET_HAS_nor_vec have_avx512vl
+#define TCG_TARGET_HAS_eqv_vec have_avx512vl
+#define TCG_TARGET_HAS_not_vec have_avx512vl
#define TCG_TARGET_HAS_neg_vec 0
#define TCG_TARGET_HAS_abs_vec 1
#define TCG_TARGET_HAS_roti_vec have_avx512vl
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define OPC_VPSRLVW (0x10 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16)
#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_VEXW)
+#define OPC_VPTERNLOGQ (0x25 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VZEROUPPER (0x77 | P_EXT)
#define OPC_XCHG_ax_r32    (0x90)

@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
insn = vpshldi_insn[vece];
sub = args[3];
goto gen_simd_imm8;
+
+ case INDEX_op_not_vec:
+ insn = OPC_VPTERNLOGQ;
+ a2 = a1;
+ sub = 0x33; /* !B */
+ goto gen_simd_imm8;
+ case INDEX_op_nor_vec:
+ insn = OPC_VPTERNLOGQ;
+ sub = 0x11; /* norCB */
+ goto gen_simd_imm8;
+ case INDEX_op_nand_vec:
+ insn = OPC_VPTERNLOGQ;
+ sub = 0x77; /* nandCB */
+ goto gen_simd_imm8;
+ case INDEX_op_eqv_vec:
+ insn = OPC_VPTERNLOGQ;
+ sub = 0x99; /* xnorCB */
+ goto gen_simd_imm8;
+ case INDEX_op_orc_vec:
+ insn = OPC_VPTERNLOGQ;
+ sub = 0xdd; /* orB!C */
+ goto gen_simd_imm8;
+
gen_simd_imm8:
tcg_debug_assert(insn != OPC_UD2);
if (type == TCG_TYPE_V256) {
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_or_vec:
case INDEX_op_xor_vec:
case INDEX_op_andc_vec:
+ case INDEX_op_orc_vec:
+ case INDEX_op_nand_vec:
+ case INDEX_op_nor_vec:
+ case INDEX_op_eqv_vec:
case INDEX_op_ssadd_vec:
case INDEX_op_usadd_vec:
case INDEX_op_sssub_vec:
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)

case INDEX_op_abs_vec:
case INDEX_op_dup_vec:
+ case INDEX_op_not_vec:
case INDEX_op_shli_vec:
case INDEX_op_shri_vec:
case INDEX_op_sari_vec:
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_or_vec:
case INDEX_op_xor_vec:
case INDEX_op_andc_vec:
+ case INDEX_op_orc_vec:
+ case INDEX_op_nand_vec:
+ case INDEX_op_nor_vec:
+ case INDEX_op_eqv_vec:
+ case INDEX_op_not_vec:
return 1;
case INDEX_op_cmp_vec:
case INDEX_op_cmpsel_vec:
--
2.25.1
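The sub immediates above follow the usual VPTERNLOG recipe: the imm8 is the truth table of the desired function, obtained by evaluating it on the selector constants A = 0xf0, B = 0xcc, C = 0xaa. A (the destination operand) is not consumed by these two-input functions, which is why not_vec can simply duplicate a1 into a2. Checking the constants used in the patch (illustrative C, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t B = 0xcc, C = 0xaa;

        assert((uint8_t)~B       == 0x33);   /* !B       -> not_vec */
        assert((uint8_t)~(C | B) == 0x11);   /* norCB    -> nor_vec */
        assert((uint8_t)~(C & B) == 0x77);   /* nandCB   -> nand_vec */
        assert((uint8_t)~(C ^ B) == 0x99);   /* xnorCB   -> eqv_vec */
        assert((uint8_t)(B | ~C) == 0xdd);   /* orB!C    -> orc_vec */
        return 0;
    }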
Deleted patch
The general ternary logic operation can implement BITSEL.
Funnel the 4-operand operation into three variants of the
3-operand instruction, depending on input operand overlap.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.h | 2 +-
tcg/i386/tcg-target.c.inc | 20 +++++++++++++++++++-
2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
#define TCG_TARGET_HAS_mul_vec 1
#define TCG_TARGET_HAS_sat_vec 1
#define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec 0
+#define TCG_TARGET_HAS_bitsel_vec have_avx512vl
#define TCG_TARGET_HAS_cmpsel_vec -1

#define TCG_TARGET_deposit_i32_valid(ofs, len) \
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,

TCGType type = vecl + TCG_TYPE_V64;
int insn, sub;
- TCGArg a0, a1, a2;
+ TCGArg a0, a1, a2, a3;

a0 = args[0];
a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
sub = 0xdd; /* orB!C */
goto gen_simd_imm8;

+ case INDEX_op_bitsel_vec:
+ insn = OPC_VPTERNLOGQ;
+ a3 = args[3];
+ if (a0 == a1) {
+ a1 = a2;
+ a2 = a3;
+ sub = 0xca; /* A?B:C */
+ } else if (a0 == a2) {
+ a2 = a3;
+ sub = 0xe2; /* B?A:C */
+ } else {
+ tcg_out_mov(s, type, a0, a3);
+ sub = 0xb8; /* B?C:A */
+ }
+ goto gen_simd_imm8;
+
gen_simd_imm8:
tcg_debug_assert(insn != OPC_UD2);
if (type == TCG_TYPE_V256) {
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_x86_vpshrdv_vec:
return C_O1_I3(x, 0, x, x);

+ case INDEX_op_bitsel_vec:
case INDEX_op_x86_vpblendvb_vec:
return C_O1_I3(x, x, x, x);

@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_nor_vec:
case INDEX_op_eqv_vec:
case INDEX_op_not_vec:
+ case INDEX_op_bitsel_vec:
return 1;
case INDEX_op_cmp_vec:
case INDEX_op_cmpsel_vec:
--
2.25.1
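The three bitsel immediates come out of the same truth-table recipe, evaluated on the selectors A = 0xf0, B = 0xcc, C = 0xaa, with the selector operand picking bits from the second operand when set. A quick illustrative check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t A = 0xf0, B = 0xcc, C = 0xaa;

        assert((uint8_t)((A & B) | (~A & C)) == 0xca);   /* A ? B : C */
        assert((uint8_t)((B & A) | (~B & C)) == 0xe2);   /* B ? A : C */
        assert((uint8_t)((B & C) | (~B & A)) == 0xb8);   /* B ? C : A */
        return 0;
    }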
Deleted patch
Define as 0 for all tcg hosts. Put this in a separate header,
because we'll want this in places that do not ordinarily have
access to all of tcg/tcg.h.

Reviewed-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-sa32.h | 1 +
tcg/arm/tcg-target-sa32.h | 1 +
tcg/i386/tcg-target-sa32.h | 1 +
tcg/loongarch64/tcg-target-sa32.h | 1 +
tcg/mips/tcg-target-sa32.h | 1 +
tcg/ppc/tcg-target-sa32.h | 1 +
tcg/riscv/tcg-target-sa32.h | 1 +
tcg/s390x/tcg-target-sa32.h | 1 +
tcg/sparc/tcg-target-sa32.h | 1 +
tcg/tci/tcg-target-sa32.h | 1 +
tcg/tcg.c | 4 ++++
11 files changed, 14 insertions(+)
create mode 100644 tcg/aarch64/tcg-target-sa32.h
create mode 100644 tcg/arm/tcg-target-sa32.h
create mode 100644 tcg/i386/tcg-target-sa32.h
create mode 100644 tcg/loongarch64/tcg-target-sa32.h
create mode 100644 tcg/mips/tcg-target-sa32.h
create mode 100644 tcg/ppc/tcg-target-sa32.h
create mode 100644 tcg/riscv/tcg-target-sa32.h
create mode 100644 tcg/s390x/tcg-target-sa32.h
create mode 100644 tcg/sparc/tcg-target-sa32.h
create mode 100644 tcg/tci/tcg-target-sa32.h

diff --git a/tcg/aarch64/tcg-target-sa32.h b/tcg/aarch64/tcg-target-sa32.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/aarch64/tcg-target-sa32.h
@@ -0,0 +1 @@
+#define TCG_TARGET_SIGNED_ADDR32 0
diff --git a/tcg/arm/tcg-target-sa32.h b/tcg/arm/tcg-target-sa32.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/arm/tcg-target-sa32.h
@@ -0,0 +1 @@
+#define TCG_TARGET_SIGNED_ADDR32 0
diff --git a/tcg/i386/tcg-target-sa32.h b/tcg/i386/tcg-target-sa32.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/i386/tcg-target-sa32.h
@@ -0,0 +1 @@
+#define TCG_TARGET_SIGNED_ADDR32 0
diff --git a/tcg/loongarch64/tcg-target-sa32.h b/tcg/loongarch64/tcg-target-sa32.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/loongarch64/tcg-target-sa32.h
@@ -0,0 +1 @@
+#define TCG_TARGET_SIGNED_ADDR32 0
diff --git a/tcg/mips/tcg-target-sa32.h b/tcg/mips/tcg-target-sa32.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/mips/tcg-target-sa32.h
@@ -0,0 +1 @@
+#define TCG_TARGET_SIGNED_ADDR32 0
diff --git a/tcg/ppc/tcg-target-sa32.h b/tcg/ppc/tcg-target-sa32.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/ppc/tcg-target-sa32.h
@@ -0,0 +1 @@
+#define TCG_TARGET_SIGNED_ADDR32 0
diff --git a/tcg/riscv/tcg-target-sa32.h b/tcg/riscv/tcg-target-sa32.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/riscv/tcg-target-sa32.h
@@ -0,0 +1 @@
+#define TCG_TARGET_SIGNED_ADDR32 0
diff --git a/tcg/s390x/tcg-target-sa32.h b/tcg/s390x/tcg-target-sa32.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/s390x/tcg-target-sa32.h
@@ -0,0 +1 @@
+#define TCG_TARGET_SIGNED_ADDR32 0
diff --git a/tcg/sparc/tcg-target-sa32.h b/tcg/sparc/tcg-target-sa32.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/sparc/tcg-target-sa32.h
@@ -0,0 +1 @@
+#define TCG_TARGET_SIGNED_ADDR32 0
diff --git a/tcg/tci/tcg-target-sa32.h b/tcg/tci/tcg-target-sa32.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/tci/tcg-target-sa32.h
@@ -0,0 +1 @@
+#define TCG_TARGET_SIGNED_ADDR32 0
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg-internal.h"
+#include "tcg-target-sa32.h"
+
+/* Sanity check for TCG_TARGET_SIGNED_ADDR32. */
+QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS == 32 && TCG_TARGET_SIGNED_ADDR32);

#ifdef CONFIG_TCG_INTERPRETER
#include <ffi.h>
--
2.25.1
Deleted patch
Create a new function to combine a CPUTLBEntry addend
with the guest address to form a host address.

Reviewed-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

+static inline uintptr_t g2h_tlbe(const CPUTLBEntry *tlb, target_ulong gaddr)
+{
+ return tlb->addend + (uintptr_t)gaddr;
+}
+
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
size_t max_entries)
{
@@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,

if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
- addr &= TARGET_PAGE_MASK;
- addr += tlb_entry->addend;
+ addr = g2h_tlbe(tlb_entry, addr & TARGET_PAGE_MASK);
if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
tlb_entry->addr_write |= TLB_NOTDIRTY;
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
return -1;
}

- p = (void *)((uintptr_t)addr + entry->addend);
+ p = (void *)g2h_tlbe(entry, addr);
if (hostp) {
*hostp = p;
}
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
}

/* Everything else is RAM. */
- *phost = (void *)((uintptr_t)addr + entry->addend);
+ *phost = (void *)g2h_tlbe(entry, addr);
return flags;
}

@@ -XXX,XX +XXX,XX @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
} else {
data->is_io = false;
- data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
+ data->v.ram.hostaddr = (void *)g2h_tlbe(tlbe, addr);
}
return true;
} else {
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
goto stop_the_world;
}

- hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
+ hostaddr = (void *)g2h_tlbe(tlbe, addr);

if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
notdirty_write(env_cpu(env), addr, size,
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
access_type, op ^ (need_swap * MO_BSWAP));
}

- haddr = (void *)((uintptr_t)addr + entry->addend);
+ haddr = (void *)g2h_tlbe(entry, addr);

/*
* Keep these two load_memop separate to ensure that the compiler
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
return res & MAKE_64BIT_MASK(0, size * 8);
}

- haddr = (void *)((uintptr_t)addr + entry->addend);
+ haddr = (void *)g2h_tlbe(entry, addr);
return load_memop(haddr, op);
}

@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
}

- haddr = (void *)((uintptr_t)addr + entry->addend);
+ haddr = (void *)g2h_tlbe(entry, addr);

/*
* Keep these two store_memop separate to ensure that the compiler
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
return;
}

- haddr = (void *)((uintptr_t)addr + entry->addend);
+ haddr = (void *)g2h_tlbe(entry, addr);
store_memop(haddr, val, op);
}

--
2.25.1
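The addend is precomputed at TLB fill time as host page address minus guest page address, so g2h_tlbe is a single add at lookup time. A worked example with made-up addresses, assuming a 64-bit host (illustrative C only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t host_page  = 0x7f0000342000ull;  /* hypothetical mapping */
        uint32_t  guest_page = 0x00042000u;        /* 32-bit guest */
        uintptr_t addend = host_page - (uintptr_t)guest_page;

        uint32_t  gaddr = 0x00042a18u;             /* anywhere in the page */
        assert(addend + (uintptr_t)gaddr == 0x7f0000342a18ull);
        return 0;
    }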
Deleted patch
When TCG_TARGET_SIGNED_ADDR32 is set, adjust the tlb addend to
allow the 32-bit guest address to be sign extended within the
64-bit host register instead of zero extended.

This will simplify tcg hosts like MIPS, RISC-V, and LoongArch,
which naturally sign-extend 32-bit values, in contrast to x86_64
and AArch64 which zero-extend them.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
+#include "tcg-target-sa32.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@@ -XXX,XX +XXX,XX @@ static inline size_t sizeof_tlb(CPUTLBDescFast *fast)

static inline uintptr_t g2h_tlbe(const CPUTLBEntry *tlb, target_ulong gaddr)
{
+ if (TCG_TARGET_SIGNED_ADDR32 && TARGET_LONG_BITS == 32) {
+ return tlb->addend + (int32_t)gaddr;
+ }
return tlb->addend + (uintptr_t)gaddr;
}

@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
desc->iotlb[index].attrs = attrs;

/* Now calculate the new entry */
- tn.addend = addend - vaddr_page;
+
+ if (TCG_TARGET_SIGNED_ADDR32 && TARGET_LONG_BITS == 32) {
+ tn.addend = addend - (int32_t)vaddr_page;
+ } else {
+ tn.addend = addend - vaddr_page;
+ }
+
if (prot & PAGE_READ) {
tn.addr_read = address;
if (wp_flags & BP_MEM_READ) {
--
2.25.1
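Both conventions reach the same host byte as long as the addend and the lookup extend the guest address the same way, because the two sign-extension offsets cancel. A quick check, assuming a 64-bit host (illustrative C only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t host_page  = 0x7f0000342000ull;
        uint32_t  guest_page = 0x80042000u;        /* "negative" as int32_t */

        /* zero-extended scheme */
        uintptr_t add_u = host_page - (uintptr_t)guest_page;
        /* sign-extended scheme: addend and lookup both use (int32_t) */
        uintptr_t add_s = host_page - (uintptr_t)(int32_t)guest_page;

        uint32_t gaddr = 0x80042a18u;
        assert(add_u + (uintptr_t)gaddr ==
               add_s + (uintptr_t)(int32_t)gaddr);  /* same host address */
        return 0;
    }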
Deleted patch
While the host may prefer to treat 32-bit addresses as signed,
there are edge cases of guests that cannot be implemented with
addresses 0x7fff_ffff and 0x8000_0000 being non-consecutive.

Therefore, default to guest_base_signed_addr32 false, and allow
probe_guest_base to determine whether it is possible to set it
to true. A tcg backend which sets TCG_TARGET_SIGNED_ADDR32 will
have to cope with either setting for user-only.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 16 ++++++++++++++++
include/exec/cpu_ldst.h | 3 ++-
bsd-user/main.c | 4 ++++
linux-user/main.c | 3 +++
4 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ static inline void tswap64s(uint64_t *s)

#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"
+#include "tcg-target-sa32.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
@@ -XXX,XX +XXX,XX @@ extern uintptr_t guest_base;
extern bool have_guest_base;
extern unsigned long reserved_va;

+#if TCG_TARGET_SIGNED_ADDR32 && TARGET_LONG_BITS == 32
+extern bool guest_base_signed_addr32;
+#else
+#define guest_base_signed_addr32 false
+#endif
+
+static inline void set_guest_base_signed_addr32(void)
+{
+#ifdef guest_base_signed_addr32
+ qemu_build_not_reached();
+#else
+ guest_base_signed_addr32 = true;
+#endif
+}
+
/*
 * Limit the guest addresses as best we can.
 *
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -XXX,XX +XXX,XX @@ static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
static inline void *g2h_untagged(abi_ptr x)
{
- return (void *)((uintptr_t)(x) + guest_base);
+ uintptr_t hx = guest_base_signed_addr32 ? (int32_t)x : (uintptr_t)x;
+ return (void *)(guest_base + hx);
}

static inline void *g2h(CPUState *cs, abi_ptr x)
diff --git a/bsd-user/main.c b/bsd-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -XXX,XX +XXX,XX @@
int singlestep;
uintptr_t guest_base;
bool have_guest_base;
+#ifndef guest_base_signed_addr32
+bool guest_base_signed_addr32;
+#endif
+
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
diff --git a/linux-user/main.c b/linux-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -XXX,XX +XXX,XX @@ static const char *seed_optarg;
unsigned long mmap_min_addr;
uintptr_t guest_base;
bool have_guest_base;
+#ifndef guest_base_signed_addr32
+bool guest_base_signed_addr32;
+#endif

/*
 * Used to implement backwards-compatibility for the `-strace`, and
--
2.25.1
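The edge case is easy to see numerically: under sign-extension the two guest addresses either side of the 2**31 boundary land 4GiB apart in host memory. A sketch, 64-bit host assumed (illustrative C only):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t guest_base = 0x100000000ull;      /* hypothetical */
        uint32_t lo = 0x7fffffffu, hi = 0x80000000u;

        uint64_t h_lo = guest_base + (uint64_t)(int32_t)lo;  /* 0x17fffffff */
        uint64_t h_hi = guest_base + (uint64_t)(int32_t)hi;  /* 0x080000000 */

        /* h_hi is 4GiB below h_lo + 1, not adjacent to it. */
        printf("0x%" PRIx64 " vs 0x%" PRIx64 "\n", h_lo, h_hi);
        return 0;
    }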
Deleted patch
When using reserved_va, which is the default for a 64-bit host
and a 32-bit guest, set guest_base_signed_addr32 if requested
by TCG_TARGET_SIGNED_ADDR32, and the executable layout allows.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 4 ---
linux-user/elfload.c | 62 ++++++++++++++++++++++++++++++++++--------
2 files changed, 50 insertions(+), 16 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ extern const TargetPageBits target_page;
#define PAGE_RESET 0x0040
/* For linux-user, indicates that the page is MAP_ANON. */
#define PAGE_ANON 0x0080
-
-#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
-/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED 0x0100
-#endif
/* Target-specific bits that will be used via page_get_flags(). */
#define PAGE_TARGET_1 0x0200
#define PAGE_TARGET_2 0x0400
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static void pgb_dynamic(const char *image_name, long align)
static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
abi_ulong guest_hiaddr, long align)
{
- int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
+ int flags = (MAP_ANONYMOUS | MAP_PRIVATE |
+ MAP_NORESERVE | MAP_FIXED_NOREPLACE);
+ unsigned long local_rva = reserved_va;
+ bool protect_wrap = false;
void *addr, *test;

- if (guest_hiaddr > reserved_va) {
+ if (guest_hiaddr > local_rva) {
error_report("%s: requires more than reserved virtual "
"address space (0x%" PRIx64 " > 0x%lx)",
- image_name, (uint64_t)guest_hiaddr, reserved_va);
+ image_name, (uint64_t)guest_hiaddr, local_rva);
exit(EXIT_FAILURE);
}

- /* Widen the "image" to the entire reserved address space. */
- pgb_static(image_name, 0, reserved_va, align);
+ if (TCG_TARGET_SIGNED_ADDR32 && TARGET_LONG_BITS == 32) {
+ if (guest_loaddr < 0x80000000u && guest_hiaddr > 0x80000000u) {
+ /*
+ * The executable itself wraps on signed addresses.
+ * Without per-page translation, we must keep the
+ * guest address 0x7fff_ffff adjacent to 0x8000_0000
+ * consecutive in host memory: unsigned addresses.
+ */
+ } else {
+ set_guest_base_signed_addr32();
+ if (local_rva <= 0x80000000u) {
+ /* No guest addresses are "negative": win! */
+ } else {
+ /* Begin by allocating the entire address space. */
+ local_rva = 0xfffffffful + 1;
+ protect_wrap = true;
+ }
+ }
+ }

- /* osdep.h defines this as 0 if it's missing */
- flags |= MAP_FIXED_NOREPLACE;
+ /* Widen the "image" to the entire reserved address space. */
+ pgb_static(image_name, 0, local_rva, align);
+ assert(guest_base != 0);

/* Reserve the memory on the host. */
- assert(guest_base != 0);
test = g2h_untagged(0);
- addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
+ addr = mmap(test, local_rva, PROT_NONE, flags, -1, 0);
if (addr == MAP_FAILED || addr != test) {
+ /*
+ * If protect_wrap, we could try again with the original reserved_va
+ * setting, but the edge case of low ulimit vm setting on a 64-bit
+ * host is probably useless.
+ */
error_report("Unable to reserve 0x%lx bytes of virtual address "
- "space at %p (%s) for use as guest address space (check your"
- "virtual memory ulimit setting, min_mmap_addr or reserve less "
- "using -R option)", reserved_va, test, strerror(errno));
+ "space at %p (%s) for use as guest address space "
+ "(check your virtual memory ulimit setting, "
+ "min_mmap_addr or reserve less using -R option)",
+ local_rva, test, strerror(errno));
exit(EXIT_FAILURE);
}

+ if (protect_wrap) {
+ /*
+ * Prevent the page just before 0x80000000 from being allocated.
+ * This prevents a single guest object/allocation from crossing
+ * the signed wrap, and thus being discontiguous in host memory.
+ */
+ page_set_flags(0x7fffffff & TARGET_PAGE_MASK, 0x80000000u,
+ PAGE_RESERVED);
+ /* Adjust guest_base so that 0 is in the middle of the reservation. */
+ guest_base += 0x80000000ul;
+ }
+
qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
__func__, addr, reserved_va);
}
--
2.25.1
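The effect of that final guest_base adjustment, with R the start of the 4GiB reservation (a sketch with hypothetical numbers, not code from the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t g2h_signed(uint64_t guest_base, uint32_t g)
    {
        return guest_base + (uint64_t)(int32_t)g;
    }

    int main(void)
    {
        uint64_t R = 0x200000000ull;              /* hypothetical reservation */
        uint64_t guest_base = R + 0x80000000ull;  /* guest 0 -> the middle */

        assert(g2h_signed(guest_base, 0x00000000u) == R + 0x80000000ull);
        assert(g2h_signed(guest_base, 0x7fffffffu) == R + 0xffffffffull); /* top */
        assert(g2h_signed(guest_base, 0x80000000u) == R);             /* bottom */
        return 0;
    }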
Deleted patch
AArch64 has both sign and zero-extending addressing modes, which
means that either treatment of guest addresses is equally efficient.
Enabling this for AArch64 gives us testing of the feature in CI.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-sa32.h | 8 +++-
tcg/aarch64/tcg-target.c.inc | 81 ++++++++++++++++++++++++-----------
2 files changed, 64 insertions(+), 25 deletions(-)

diff --git a/tcg/aarch64/tcg-target-sa32.h b/tcg/aarch64/tcg-target-sa32.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-sa32.h
+++ b/tcg/aarch64/tcg-target-sa32.h
@@ -1 +1,7 @@
-#define TCG_TARGET_SIGNED_ADDR32 0
+/*
+ * AArch64 has both SXTW and UXTW addressing modes, which means that
+ * it is agnostic to how guest addresses should be represented.
+ * Because aarch64 is more common than the other hosts that will
+ * want to use this feature, enable it for continuous testing.
+ */
+#define TCG_TARGET_SIGNED_ADDR32 1
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
LDST_LD_S_W = 3, /* load and sign-extend into Wt */
} AArch64LdstType;

+/*
+ * See aarch64/instrs/extendreg/DecodeRegExtend
+ * But note that option<1> == 0 is UNDEFINED for LDR/STR.
+ */
+typedef enum {
+ LDST_EXT_UXTW = 2, /* zero-extend from uint32_t */
+ LDST_EXT_UXTX = 3, /* zero-extend from uint64_t (i.e. no extension) */
+ LDST_EXT_SXTW = 6, /* sign-extend from int32_t */
+} AArch64LdstExt;
+
/* We encode the format of the insn into the beginning of the name, so that
 we can have the preprocessor help "typecheck" the insn vs the output
 function. Arm didn't provide us with nice names for the formats, so we
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_3617(TCGContext *s, AArch64Insn insn, bool q,
}

static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn,
- TCGReg rd, TCGReg base, TCGType ext,
+ TCGReg rd, TCGReg base, AArch64LdstExt option,
TCGReg regoff)
{
/* Note the AArch64Insn constants above are for C3.3.12. Adjust. */
tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 |
- 0x4000 | ext << 13 | base << 5 | (rd & 0x1f));
+ option << 13 | base << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, TCGReg rd,

/* Worst-case scenario, move offset to temp register, use reg offset. */
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
- tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP);
+ tcg_out_ldst_r(s, insn, rd, rn, LDST_EXT_UXTX, TCG_REG_TMP);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)

static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
TCGReg data_r, TCGReg addr_r,
- TCGType otype, TCGReg off_r)
+ AArch64LdstExt option, TCGReg off_r)
{
switch (memop & MO_SSIZE) {
case MO_UB:
- tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, option, off_r);
break;
case MO_SB:
tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
- data_r, addr_r, otype, off_r);
+ data_r, addr_r, option, off_r);
break;
case MO_UW:
- tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, option, off_r);
break;
case MO_SW:
tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
- data_r, addr_r, otype, off_r);
+ data_r, addr_r, option, off_r);
break;
case MO_UL:
- tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, option, off_r);
break;
case MO_SL:
- tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, option, off_r);
break;
case MO_UQ:
- tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, option, off_r);
break;
default:
tcg_abort();
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,

static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
TCGReg data_r, TCGReg addr_r,
- TCGType otype, TCGReg off_r)
+ AArch64LdstExt option, TCGReg off_r)
{
switch (memop & MO_SIZE) {
case MO_8:
- tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, option, off_r);
break;
case MO_16:
- tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, option, off_r);
break;
case MO_32:
- tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, option, off_r);
break;
case MO_64:
- tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
+ tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, option, off_r);
break;
default:
tcg_abort();
}
}

+/*
+ * Bits for the option field of LDR/STR (register),
+ * for application to a guest address.
+ */
+static AArch64LdstExt ldst_ext_option(void)
+{
+#ifdef CONFIG_USER_ONLY
+ bool signed_addr32 = guest_base_signed_addr32;
+#else
+ bool signed_addr32 = TCG_TARGET_SIGNED_ADDR32;
+#endif
+
+ if (TARGET_LONG_BITS == 64) {
+ return LDST_EXT_UXTX;
+ } else if (signed_addr32) {
+ return LDST_EXT_SXTW;
+ } else {
+ return LDST_EXT_UXTW;
+ }
+}
+
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi, TCGType ext)
{
MemOp memop = get_memop(oi);
- const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ AArch64LdstExt option = ldst_ext_option();

/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((memop & MO_BSWAP) == 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,

tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
- TCG_REG_X1, otype, addr_reg);
+ TCG_REG_X1, option, addr_reg);
add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
}
if (USE_GUEST_BASE) {
tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
- TCG_REG_GUEST_BASE, otype, addr_reg);
+ TCG_REG_GUEST_BASE, option, addr_reg);
} else {
+ /* This case is always a 64-bit guest with no extension. */
tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
- addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+ addr_reg, LDST_EXT_UXTX, TCG_REG_XZR);
}
#endif /* CONFIG_SOFTMMU */
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi)
{
MemOp memop = get_memop(oi);
- const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ AArch64LdstExt option = ldst_ext_option();

/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((memop & MO_BSWAP) == 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,

tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
tcg_out_qemu_st_direct(s, memop, data_reg,
- TCG_REG_X1, otype, addr_reg);
+ TCG_REG_X1, option, addr_reg);
add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64,
data_reg, addr_reg, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
}
if (USE_GUEST_BASE) {
tcg_out_qemu_st_direct(s, memop, data_reg,
- TCG_REG_GUEST_BASE, otype, addr_reg);
+ TCG_REG_GUEST_BASE, option, addr_reg);
} else {
+ /* This case is always a 64-bit guest with no extension. */
tcg_out_qemu_st_direct(s, memop, data_reg,
- addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+ addr_reg, LDST_EXT_UXTX, TCG_REG_XZR);
}
#endif /* CONFIG_SOFTMMU */
}
--
2.25.1
Deleted patch
All 32-bit mips operations sign-extend the output, so we are easily
able to keep TCG_TYPE_I32 values sign-extended in host registers.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/mips/tcg-target-sa32.h | 8 ++++++++
tcg/mips/tcg-target.c.inc | 10 ++--------
2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/tcg/mips/tcg-target-sa32.h b/tcg/mips/tcg-target-sa32.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-sa32.h
+++ b/tcg/mips/tcg-target-sa32.h
@@ -1 +1,9 @@
+/*
+ * Do not set TCG_TARGET_SIGNED_ADDR32 for mips32;
+ * TCG expects this to only be set for 64-bit hosts.
+ */
+#ifdef __mips64
+#define TCG_TARGET_SIGNED_ADDR32 1
+#else
#define TCG_TARGET_SIGNED_ADDR32 0
+#endif
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
TCG_TMP0, TCG_TMP3, cmp_off);
}

- /* Zero extend a 32-bit guest address for a 64-bit host. */
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
- tcg_out_ext32u(s, base, addrl);
- addrl = base;
- }
-
/*
 * Mask the page bits, keeping the alignment bits to compare against.
 * For unaligned accesses, compare against the end of the access to
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
data_regl, data_regh, addr_regl, addr_regh,
s->code_ptr, label_ptr);
#else
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS && !guest_base_signed_addr32) {
tcg_out_ext32u(s, base, addr_regl);
addr_regl = base;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
data_regl, data_regh, addr_regl, addr_regh,
s->code_ptr, label_ptr);
#else
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS && !guest_base_signed_addr32) {
tcg_out_ext32u(s, base, addr_regl);
addr_regl = base;
}
--
2.25.1
Deleted patch
All RV64 32-bit operations sign-extend the output, so we are easily
able to keep TCG_TYPE_I32 values sign-extended in host registers.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/riscv/tcg-target-sa32.h | 6 +++++-
tcg/riscv/tcg-target.c.inc | 8 ++------
2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/tcg/riscv/tcg-target-sa32.h b/tcg/riscv/tcg-target-sa32.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-sa32.h
+++ b/tcg/riscv/tcg-target-sa32.h
@@ -1 +1,5 @@
-#define TCG_TARGET_SIGNED_ADDR32 0
+/*
+ * Do not set TCG_TARGET_SIGNED_ADDR32 for RV32;
+ * TCG expects this to only be set for 64-bit hosts.
+ */
+#define TCG_TARGET_SIGNED_ADDR32 (__riscv_xlen == 64)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

/* TLB Hit - translate address using addend. */
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
- tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
- addrl = TCG_REG_TMP0;
- }
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
}

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
data_regl, data_regh, addr_regl, addr_regh,
s->code_ptr, label_ptr);
#else
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS && !guest_base_signed_addr32) {
tcg_out_ext32u(s, base, addr_regl);
addr_regl = base;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
data_regl, data_regh, addr_regl, addr_regh,
s->code_ptr, label_ptr);
#else
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS && !guest_base_signed_addr32) {
tcg_out_ext32u(s, base, addr_regl);
addr_regl = base;
}
--
2.25.1