The following changes since commit d1181d29370a4318a9f11ea92065bea6bb159f83:

  Merge tag 'pull-nbd-2023-07-19' of https://repo.or.cz/qemu/ericb into staging (2023-07-20 09:54:07 +0100)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230724

for you to fetch changes up to 32b120394c578bc824f1db4835b3bffbeca88fae:

  accel/tcg: Fix type of 'last' for pageflags_{find,next} (2023-07-24 09:48:49 +0100)

----------------------------------------------------------------
accel/tcg: Zero-pad vaddr in tlb debug output
accel/tcg: Fix type of 'last' for pageflags_{find,next}
accel/tcg: Fix sense of read-only probes in ldst_atomicity
accel/tcg: Take mmap_lock in load_atomic*_or_exit
tcg: Add earlyclobber to op_add2 for x86 and s390x
tcg/ppc: Fix race in goto_tb implementation

----------------------------------------------------------------
Anton Johansson (1):
      accel/tcg: Zero-pad vaddr in tlb_debug output

Ilya Leoshkevich (1):
      tcg/{i386, s390x}: Add earlyclobber to the op_add2's first output

Jordan Niethe (1):
      tcg/ppc: Fix race in goto_tb implementation

Luca Bonissi (1):
      accel/tcg: Fix type of 'last' for pageflags_{find,next}

Richard Henderson (3):
      include/exec: Add WITH_MMAP_LOCK_GUARD
      accel/tcg: Fix sense of read-only probes in ldst_atomicity
      accel/tcg: Take mmap_lock in load_atomic*_or_exit

 include/exec/exec-all.h | 10 ++++++++++
 tcg/i386/tcg-target-con-set.h | 5 ++++-
 tcg/s390x/tcg-target-con-set.h | 8 +++++---
 accel/tcg/cputlb.c | 20 ++++++++++----------
 accel/tcg/user-exec.c | 4 ++--
 bsd-user/mmap.c | 1 +
 linux-user/mmap.c | 1 +
 tcg/tcg.c | 8 +++++++-
 accel/tcg/ldst_atomicity.c.inc | 32 ++++++++++++++++++--------------
 tcg/i386/tcg-target.c.inc | 2 +-
 tcg/ppc/tcg-target.c.inc | 9 +++++----
 tcg/s390x/tcg-target.c.inc | 4 ++--
 12 files changed, 66 insertions(+), 38 deletions(-)


I have not been able to prod reviews of all of the rotate patches
in 4 weeks, but let's not let that block ARM work forever.

r~


The following changes since commit cccdd8c7971896c339d59c9c5d4647d4ffd9568a:

  Merge remote-tracking branch 'remotes/ehabkost/tags/machine-next-pull-request' into staging (2020-06-02 10:25:55 +0100)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20200602

for you to fetch changes up to 71b04329c4f7d5824a289ca5225e1883a278cf3b:

  accel/tcg: Provide a NetBSD specific aarch64 cpu_signal_handler (2020-06-02 08:42:37 -0700)

----------------------------------------------------------------
Vector rotate support
Signal handling support for NetBSD arm/aarch64

----------------------------------------------------------------
Nick Hudson (2):
      accel/tcg: Adjust cpu_signal_handler for NetBSD/arm
      accel/tcg: Provide a NetBSD specific aarch64 cpu_signal_handler

Richard Henderson (10):
      tcg: Implement gvec support for rotate by immediate
      tcg: Implement gvec support for rotate by vector
      tcg: Remove expansion to shift by vector from do_shifts
      tcg: Implement gvec support for rotate by scalar
      tcg/i386: Implement INDEX_op_rotl{i,s,v}_vec
      tcg/aarch64: Implement INDEX_op_rotl{i,v}_vec
      tcg/ppc: Implement INDEX_op_rot[lr]v_vec
      target/ppc: Use tcg_gen_gvec_rotlv
      target/s390x: Use tcg_gen_gvec_rotl{i,s,v}
      tcg: Improve move ops in liveness_pass_2

 accel/tcg/tcg-runtime.h | 15 +++
 include/tcg/tcg-op-gvec.h | 12 ++
 include/tcg/tcg-op.h | 5 +
 include/tcg/tcg-opc.h | 4 +
 include/tcg/tcg.h | 3 +
 target/ppc/helper.h | 4 -
 target/s390x/helper.h | 4 -
 tcg/aarch64/tcg-target.h | 3 +
 tcg/aarch64/tcg-target.opc.h | 1 +
 tcg/i386/tcg-target.h | 3 +
 tcg/ppc/tcg-target.h | 3 +
 tcg/ppc/tcg-target.opc.h | 1 -
 accel/tcg/tcg-runtime-gvec.c | 144 ++++++++++++++++++++++++
 accel/tcg/user-exec.c | 43 +++++++-
 target/ppc/int_helper.c | 17 ---
 target/ppc/translate/vmx-impl.inc.c | 8 +-
 target/s390x/translate_vx.inc.c | 66 ++---------
 target/s390x/vec_int_helper.c | 31 ------
 tcg/aarch64/tcg-target.inc.c | 53 ++++++++-
 tcg/i386/tcg-target.inc.c | 116 +++++++++++++++++---
 tcg/ppc/tcg-target.inc.c | 23 +++-
 tcg/tcg-op-gvec.c | 212 ++++++++++++++++++++++++++++++++++++
 tcg/tcg-op-vec.c | 62 +++++++----
 tcg/tcg.c | 85 +++++++++++----
 target/s390x/insn-data.def | 4 +-
 tcg/README | 7 +-
 26 files changed, 736 insertions(+), 193 deletions(-)
From: Jordan Niethe <jniethe5@gmail.com>

Commit 20b6643324 ("tcg/ppc: Reorg goto_tb implementation") modified
goto_tb to ensure only a single instruction was patched to prevent
incorrect behavior if a thread was in the middle of multiple
instructions when they were replaced. However this introduced a race
between loading the jmp target into TCG_REG_TB and patching and
executing the direct branch.

The relevant part of the goto_tb implementation:

    ld TCG_REG_TB, TARGET_ADDR_LOCATION(TCG_REG_TB)
  patch_location:
    mtctr TCG_REG_TB
    bctr

tb_target_set_jmp_target() will replace 'patch_location' with a direct
branch if the target is in range. The direct branch now relies on
TCG_REG_TB being set up correctly by the ld. Prior to this commit
multiple instructions were patched in for the direct branch case; these
instructions would initialize TCG_REG_TB to the same value as the branch
target.

Imagine the following sequence:

1) Thread A is executing the goto_tb sequence and loads the jmp
   target into TCG_REG_TB.

2) Thread B updates the jmp target address and calls
   tb_target_set_jmp_target(). This patches a new direct branch into the
   goto_tb sequence.

3) Thread A executes the newly patched direct branch. The value in
   TCG_REG_TB still contains the old jmp target.

TCG_REG_TB MUST contain the translation block's tc.ptr. Execution will
eventually crash after performing memory accesses generated from a
faulty value in TCG_REG_TB.

This presents as segfaults or illegal instruction exceptions.

Do not revert commit 20b6643324 as it did fix a different race
condition. Instead remove the direct branch optimization and always use
indirect branches.

The direct branch optimization can be re-added later with a race free
sequence.

Fixes: 20b6643324 ("tcg/ppc: Reorg goto_tb implementation")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1726
Reported-by: Anushree Mathur <anushree.mathur@linux.vnet.ibm.com>
Tested-by: Anushree Mathur <anushree.mathur@linux.vnet.ibm.com>
Tested-by: Michael Tokarev <mjt@tls.msk.ru>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Co-developed-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Message-Id: <20230717093001.13167-1-jniethe5@gmail.com>
---
 tcg/ppc/tcg-target.c.inc | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)


For immediates, we must continue the special casing of 8-bit
elements. The other element sizes and shift types are trivially
implemented with shifts.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.inc.c | 116 ++++++++++++++++++++++++++++++++------
 1 file changed, 100 insertions(+), 16 deletions(-)
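As an illustration of the 8-bit special case mentioned above, here is a
scalar sketch of the idea (not the vector code from the patch): duplicate
the byte into both halves of a 16-bit temporary, shift once, and pick out
the byte-wide window where the rotated value lands, which loosely mirrors
the punpck*bw / 16-bit shift / packuswb sequence used for MO_8 vectors.

    #include <stdint.h>

    /* rotl8(b, n) for n in [0,7]: shift the duplicated 16-bit value so the
     * rotated byte ends up in the low 8 bits of the result. */
    static inline uint8_t rotl8_sketch(uint8_t b, unsigned n)
    {
        uint16_t dup = (uint16_t)(b * 0x0101u);   /* byte duplicated: b|b<<8 */
        return (uint8_t)(dup >> (8 - (n & 7)));
    }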
9
62
10
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
63
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
64
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.inc.c
65
--- a/tcg/ppc/tcg-target.c.inc
13
+++ b/tcg/i386/tcg-target.inc.c
66
+++ b/tcg/ppc/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
67
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
15
case INDEX_op_shls_vec:
68
ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
16
case INDEX_op_shrs_vec:
69
tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
17
case INDEX_op_sars_vec:
70
18
+ case INDEX_op_rotls_vec:
71
- /* Direct branch will be patched by tb_target_set_jmp_target. */
19
case INDEX_op_cmp_vec:
72
+ /* TODO: Use direct branches when possible. */
20
case INDEX_op_x86_shufps_vec:
73
set_jmp_insn_offset(s, which);
21
case INDEX_op_x86_blend_vec:
74
tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
22
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
75
23
case INDEX_op_xor_vec:
76
- /* When branch is out of range, fall through to indirect. */
24
case INDEX_op_andc_vec:
77
tcg_out32(s, BCCTR | BO_ALWAYS);
25
return 1;
78
26
+ case INDEX_op_rotli_vec:
79
/* For the unlinked case, need to reset TCG_REG_TB. */
27
case INDEX_op_cmp_vec:
80
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
28
case INDEX_op_cmpsel_vec:
81
intptr_t diff = addr - jmp_rx;
29
return -1;
82
tcg_insn_unit insn;
30
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
83
31
return vece >= MO_16;
84
+ if (USE_REG_TB) {
32
case INDEX_op_sars_vec:
33
return vece >= MO_16 && vece <= MO_32;
34
+ case INDEX_op_rotls_vec:
35
+ return vece >= MO_16 ? -1 : 0;
36
37
case INDEX_op_shlv_vec:
38
case INDEX_op_shrv_vec:
39
return have_avx2 && vece >= MO_32;
40
case INDEX_op_sarv_vec:
41
return have_avx2 && vece == MO_32;
42
+ case INDEX_op_rotlv_vec:
43
+ case INDEX_op_rotrv_vec:
44
+ return have_avx2 && vece >= MO_32 ? -1 : 0;
45
46
case INDEX_op_mul_vec:
47
if (vece == MO_8) {
48
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
49
}
50
}
51
52
-static void expand_vec_shi(TCGType type, unsigned vece, bool shr,
53
+static void expand_vec_shi(TCGType type, unsigned vece, TCGOpcode opc,
54
TCGv_vec v0, TCGv_vec v1, TCGArg imm)
55
{
56
TCGv_vec t1, t2;
57
@@ -XXX,XX +XXX,XX @@ static void expand_vec_shi(TCGType type, unsigned vece, bool shr,
58
t1 = tcg_temp_new_vec(type);
59
t2 = tcg_temp_new_vec(type);
60
61
- /* Unpack to W, shift, and repack. Tricky bits:
62
- (1) Use punpck*bw x,x to produce DDCCBBAA,
63
- i.e. duplicate in other half of the 16-bit lane.
64
- (2) For right-shift, add 8 so that the high half of
65
- the lane becomes zero. For left-shift, we must
66
- shift up and down again.
67
- (3) Step 2 leaves high half zero such that PACKUSWB
68
- (pack with unsigned saturation) does not modify
69
- the quantity. */
70
+ /*
71
+ * Unpack to W, shift, and repack. Tricky bits:
72
+ * (1) Use punpck*bw x,x to produce DDCCBBAA,
73
+ * i.e. duplicate in other half of the 16-bit lane.
74
+ * (2) For right-shift, add 8 so that the high half of the lane
75
+ * becomes zero. For left-shift, and left-rotate, we must
76
+ * shift up and down again.
77
+ * (3) Step 2 leaves high half zero such that PACKUSWB
78
+ * (pack with unsigned saturation) does not modify
79
+ * the quantity.
80
+ */
81
vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
82
tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
83
vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
84
tcgv_vec_arg(t2), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
85
86
- if (shr) {
87
- tcg_gen_shri_vec(MO_16, t1, t1, imm + 8);
88
- tcg_gen_shri_vec(MO_16, t2, t2, imm + 8);
89
+ if (opc != INDEX_op_rotli_vec) {
90
+ imm += 8;
91
+ }
92
+ if (opc == INDEX_op_shri_vec) {
93
+ tcg_gen_shri_vec(MO_16, t1, t1, imm);
94
+ tcg_gen_shri_vec(MO_16, t2, t2, imm);
95
} else {
96
- tcg_gen_shli_vec(MO_16, t1, t1, imm + 8);
97
- tcg_gen_shli_vec(MO_16, t2, t2, imm + 8);
98
+ tcg_gen_shli_vec(MO_16, t1, t1, imm);
99
+ tcg_gen_shli_vec(MO_16, t2, t2, imm);
100
tcg_gen_shri_vec(MO_16, t1, t1, 8);
101
tcg_gen_shri_vec(MO_16, t2, t2, 8);
102
}
103
@@ -XXX,XX +XXX,XX @@ static void expand_vec_sari(TCGType type, unsigned vece,
104
}
105
}
106
107
+static void expand_vec_rotli(TCGType type, unsigned vece,
108
+ TCGv_vec v0, TCGv_vec v1, TCGArg imm)
109
+{
110
+ TCGv_vec t;
111
+
112
+ if (vece == MO_8) {
113
+ expand_vec_shi(type, vece, INDEX_op_rotli_vec, v0, v1, imm);
114
+ return;
85
+ return;
115
+ }
86
+ }
116
+
87
+
117
+ t = tcg_temp_new_vec(type);
88
if (in_range_b(diff)) {
118
+ tcg_gen_shli_vec(vece, t, v1, imm);
89
insn = B | (diff & 0x3fffffc);
119
+ tcg_gen_shri_vec(vece, v0, v1, (8 << vece) - imm);
90
- } else if (USE_REG_TB) {
120
+ tcg_gen_or_vec(vece, v0, v0, t);
91
- insn = MTSPR | RS(TCG_REG_TB) | CTR;
121
+ tcg_temp_free_vec(t);
92
} else {
122
+}
93
insn = NOP;
123
+
94
}
124
+static void expand_vec_rotls(TCGType type, unsigned vece,
125
+ TCGv_vec v0, TCGv_vec v1, TCGv_i32 lsh)
126
+{
127
+ TCGv_i32 rsh;
128
+ TCGv_vec t;
129
+
130
+ tcg_debug_assert(vece != MO_8);
131
+
132
+ t = tcg_temp_new_vec(type);
133
+ rsh = tcg_temp_new_i32();
134
+
135
+ tcg_gen_neg_i32(rsh, lsh);
136
+ tcg_gen_andi_i32(rsh, rsh, (8 << vece) - 1);
137
+ tcg_gen_shls_vec(vece, t, v1, lsh);
138
+ tcg_gen_shrs_vec(vece, v0, v1, rsh);
139
+ tcg_gen_or_vec(vece, v0, v0, t);
140
+ tcg_temp_free_vec(t);
141
+ tcg_temp_free_i32(rsh);
142
+}
143
+
144
+static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
145
+ TCGv_vec v1, TCGv_vec sh, bool right)
146
+{
147
+ TCGv_vec t = tcg_temp_new_vec(type);
148
+
149
+ tcg_gen_dupi_vec(vece, t, 8 << vece);
150
+ tcg_gen_sub_vec(vece, t, t, sh);
151
+ if (right) {
152
+ tcg_gen_shlv_vec(vece, t, v1, t);
153
+ tcg_gen_shrv_vec(vece, v0, v1, sh);
154
+ } else {
155
+ tcg_gen_shrv_vec(vece, t, v1, t);
156
+ tcg_gen_shlv_vec(vece, v0, v1, sh);
157
+ }
158
+ tcg_gen_or_vec(vece, v0, v0, t);
159
+ tcg_temp_free_vec(t);
160
+}
161
+
162
static void expand_vec_mul(TCGType type, unsigned vece,
163
TCGv_vec v0, TCGv_vec v1, TCGv_vec v2)
164
{
165
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
166
switch (opc) {
167
case INDEX_op_shli_vec:
168
case INDEX_op_shri_vec:
169
- expand_vec_shi(type, vece, opc == INDEX_op_shri_vec, v0, v1, a2);
170
+ expand_vec_shi(type, vece, opc, v0, v1, a2);
171
break;
172
173
case INDEX_op_sari_vec:
174
expand_vec_sari(type, vece, v0, v1, a2);
175
break;
176
177
+ case INDEX_op_rotli_vec:
178
+ expand_vec_rotli(type, vece, v0, v1, a2);
179
+ break;
180
+
181
+ case INDEX_op_rotls_vec:
182
+ expand_vec_rotls(type, vece, v0, v1, temp_tcgv_i32(arg_temp(a2)));
183
+ break;
184
+
185
+ case INDEX_op_rotlv_vec:
186
+ v2 = temp_tcgv_vec(arg_temp(a2));
187
+ expand_vec_rotv(type, vece, v0, v1, v2, false);
188
+ break;
189
+ case INDEX_op_rotrv_vec:
190
+ v2 = temp_tcgv_vec(arg_temp(a2));
191
+ expand_vec_rotv(type, vece, v0, v1, v2, true);
192
+ break;
193
+
194
case INDEX_op_mul_vec:
195
v2 = temp_tcgv_vec(arg_temp(a2));
196
expand_vec_mul(type, vece, v0, v1, v2);
197
--
95
--
198
2.25.1
96
2.34.1
199
200
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h | 10 ++++++++++
 bsd-user/mmap.c | 1 +
 linux-user/mmap.c | 1 +
 3 files changed, 12 insertions(+)


If the output of the move is dead, then the last use is in
the store. If we propagate the input to the store, then we
can remove the move opcode entirely.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 78 +++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 56 insertions(+), 22 deletions(-)
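Roughly, the transformation described in the liveness_pass_2 message above,
written as a made-up TCG opcode sequence (the temporaries and the offset are
purely illustrative): when the moved value is dead after the syncing store,
the move's input can feed the store directly and the move disappears.

    mov_i32  tmp1, tmp0
    st_i32   tmp1, env, $0x10

becomes simply

    st_i32   tmp0, env, $0x10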
9
8
10
diff --git a/tcg/tcg.c b/tcg/tcg.c
9
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tcg.c
11
--- a/include/exec/exec-all.h
13
+++ b/tcg/tcg.c
12
+++ b/include/exec/exec-all.h
14
@@ -XXX,XX +XXX,XX @@ static bool liveness_pass_2(TCGContext *s)
13
@@ -XXX,XX +XXX,XX @@ void TSA_NO_TSA mmap_lock(void);
15
}
14
void TSA_NO_TSA mmap_unlock(void);
16
15
bool have_mmap_lock(void);
17
/* Outputs become available. */
16
18
- for (i = 0; i < nb_oargs; i++) {
17
+static inline void mmap_unlock_guard(void *unused)
19
- arg_ts = arg_temp(op->args[i]);
18
+{
20
+ if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
19
+ mmap_unlock();
21
+ arg_ts = arg_temp(op->args[0]);
20
+}
22
dir_ts = arg_ts->state_ptr;
23
- if (!dir_ts) {
24
- continue;
25
+ if (dir_ts) {
26
+ op->args[0] = temp_arg(dir_ts);
27
+ changes = true;
28
+
21
+
29
+ /* The output is now live and modified. */
22
+#define WITH_MMAP_LOCK_GUARD() \
30
+ arg_ts->state = 0;
23
+ for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \
24
+ = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
31
+
25
+
32
+ if (NEED_SYNC_ARG(0)) {
26
/**
33
+ TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
27
* adjust_signal_pc:
34
+ ? INDEX_op_st_i32
28
* @pc: raw pc from the host signal ucontext_t.
35
+ : INDEX_op_st_i64);
29
@@ -XXX,XX +XXX,XX @@ G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
36
+ TCGOp *sop = tcg_op_insert_after(s, op, sopc);
30
#else
37
+ TCGTemp *out_ts = dir_ts;
31
static inline void mmap_lock(void) {}
38
+
32
static inline void mmap_unlock(void) {}
39
+ if (IS_DEAD_ARG(0)) {
33
+#define WITH_MMAP_LOCK_GUARD()
40
+ out_ts = arg_temp(op->args[1]);
34
41
+ arg_ts->state = TS_DEAD;
35
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
42
+ tcg_op_remove(s, op);
36
void tlb_set_dirty(CPUState *cpu, vaddr addr);
43
+ } else {
37
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
44
+ arg_ts->state = TS_MEM;
38
index XXXXXXX..XXXXXXX 100644
45
+ }
39
--- a/bsd-user/mmap.c
46
+
40
+++ b/bsd-user/mmap.c
47
+ sop->args[0] = temp_arg(out_ts);
41
@@ -XXX,XX +XXX,XX @@ void mmap_lock(void)
48
+ sop->args[1] = temp_arg(arg_ts->mem_base);
42
49
+ sop->args[2] = arg_ts->mem_offset;
43
void mmap_unlock(void)
50
+ } else {
44
{
51
+ tcg_debug_assert(!IS_DEAD_ARG(0));
45
+ assert(mmap_lock_count > 0);
52
+ }
46
if (--mmap_lock_count == 0) {
53
}
47
pthread_mutex_unlock(&mmap_mutex);
54
- op->args[i] = temp_arg(dir_ts);
48
}
55
- changes = true;
49
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
56
+ } else {
50
index XXXXXXX..XXXXXXX 100644
57
+ for (i = 0; i < nb_oargs; i++) {
51
--- a/linux-user/mmap.c
58
+ arg_ts = arg_temp(op->args[i]);
52
+++ b/linux-user/mmap.c
59
+ dir_ts = arg_ts->state_ptr;
53
@@ -XXX,XX +XXX,XX @@ void mmap_lock(void)
60
+ if (!dir_ts) {
54
61
+ continue;
55
void mmap_unlock(void)
62
+ }
56
{
63
+ op->args[i] = temp_arg(dir_ts);
57
+ assert(mmap_lock_count > 0);
64
+ changes = true;
58
if (--mmap_lock_count == 0) {
65
59
pthread_mutex_unlock(&mmap_mutex);
66
- /* The output is now live and modified. */
67
- arg_ts->state = 0;
68
+ /* The output is now live and modified. */
69
+ arg_ts->state = 0;
70
71
- /* Sync outputs upon their last write. */
72
- if (NEED_SYNC_ARG(i)) {
73
- TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
74
- ? INDEX_op_st_i32
75
- : INDEX_op_st_i64);
76
- TCGOp *sop = tcg_op_insert_after(s, op, sopc);
77
+ /* Sync outputs upon their last write. */
78
+ if (NEED_SYNC_ARG(i)) {
79
+ TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
80
+ ? INDEX_op_st_i32
81
+ : INDEX_op_st_i64);
82
+ TCGOp *sop = tcg_op_insert_after(s, op, sopc);
83
84
- sop->args[0] = temp_arg(dir_ts);
85
- sop->args[1] = temp_arg(arg_ts->mem_base);
86
- sop->args[2] = arg_ts->mem_offset;
87
+ sop->args[0] = temp_arg(dir_ts);
88
+ sop->args[1] = temp_arg(arg_ts->mem_base);
89
+ sop->args[2] = arg_ts->mem_offset;
90
91
- arg_ts->state = TS_MEM;
92
- }
93
- /* Drop outputs that are dead. */
94
- if (IS_DEAD_ARG(i)) {
95
- arg_ts->state = TS_DEAD;
96
+ arg_ts->state = TS_MEM;
97
+ }
98
+ /* Drop outputs that are dead. */
99
+ if (IS_DEAD_ARG(i)) {
100
+ arg_ts->state = TS_DEAD;
101
+ }
102
}
103
}
104
}
60
}
105
--
61
--
106
2.25.1
62
2.34.1
107
108
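For reference, the WITH_MMAP_LOCK_GUARD() macro added in the exec-all.h hunk
above uses the common "scoped guard" pattern: a one-iteration for loop whose
control variable carries __attribute__((cleanup(...))), so the unlock runs on
any exit from the block. A standalone sketch of the same pattern, using a
pthread mutex instead of QEMU's mmap lock (illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void unlock_guard(void *unused)
    {
        (void)unused;
        pthread_mutex_unlock(&lock);
    }

    /* The cleanup attribute calls unlock_guard() when the loop variable goes
     * out of scope, so the lock is released on normal exit and on an early
     * 'return' from inside the block alike. */
    #define WITH_LOCK_GUARD() \
        for (int _iter __attribute__((cleanup(unlock_guard))) \
                 = (pthread_mutex_lock(&lock), 0); \
             _iter == 0; _iter = 1)

    int lookup(int key)
    {
        WITH_LOCK_GUARD() {
            if (key < 0) {
                return -1;              /* still unlocks via cleanup */
            }
            printf("key %d is valid\n", key);
        }
        return 0;
    }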
In the initial commit, cdfac37be0d, the sense of the test is incorrect,
as the -1/0 return was confusing. In bef6f008b981, we mechanically
invert all callers while changing to false/true return, preserving the
incorrectness of the test.

Now that the return sense is sane, it's easy to see that if !write,
then the page is not modifiable (i.e. most likely read-only, with
PROT_NONE handled via SIGSEGV).

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/ldst_atomicity.c.inc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)


Merge VERLL and VERLLV into op_vesv and op_ves, alongside
all of the other vector shift operations.

Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/helper.h | 4 --
 target/s390x/translate_vx.inc.c | 66 ++----------------
 target/s390x/vec_int_helper.c | 31 ------
 target/s390x/insn-data.def | 4 +-
 4 files changed, 11 insertions(+), 94 deletions(-)
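To make the "!write means not modifiable" reasoning in the ldst_atomicity
message above concrete, a rough sketch of the corrected load path follows.
page_is_writable() and load_8_via_cmpxchg() are hypothetical stand-ins for
QEMU's page_check_range(..., PAGE_WRITE_ORG) probe and its cmpxchg fallback;
they are not real APIs.

    #include <stdbool.h>
    #include <stdint.h>

    bool page_is_writable(const void *p);        /* hypothetical probe */
    uint64_t load_8_via_cmpxchg(uint64_t *p);    /* hypothetical atomic path */

    static uint64_t load_atomic8_sketch(void *pv)
    {
        uint64_t *p = __builtin_assume_aligned(pv, 8);

        if (!page_is_writable(p)) {
            /* Read-only page: nothing can change it under us, so a plain
             * aligned load already returns a consistent value. */
            return *p;
        }
        /* Writable page: fall back to a genuinely atomic access. */
        return load_8_via_cmpxchg(p);
    }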
12
15
13
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
16
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
14
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
15
--- a/target/s390x/helper.h
18
--- a/accel/tcg/ldst_atomicity.c.inc
16
+++ b/target/s390x/helper.h
19
+++ b/accel/tcg/ldst_atomicity.c.inc
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_vmlo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
20
@@ -XXX,XX +XXX,XX @@ static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
18
DEF_HELPER_FLAGS_4(gvec_vmlo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
21
* another process, because the fallback start_exclusive solution
19
DEF_HELPER_FLAGS_3(gvec_vpopct8, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
22
* provides no protection across processes.
20
DEF_HELPER_FLAGS_3(gvec_vpopct16, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
23
*/
21
-DEF_HELPER_FLAGS_4(gvec_verllv8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
24
- if (page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
22
-DEF_HELPER_FLAGS_4(gvec_verllv16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
25
+ if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
23
-DEF_HELPER_FLAGS_4(gvec_verll8, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32)
26
uint64_t *p = __builtin_assume_aligned(pv, 8);
24
-DEF_HELPER_FLAGS_4(gvec_verll16, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32)
27
return *p;
25
DEF_HELPER_FLAGS_4(gvec_verim8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
28
}
26
DEF_HELPER_FLAGS_4(gvec_verim16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
29
@@ -XXX,XX +XXX,XX @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
27
DEF_HELPER_FLAGS_4(gvec_vsl, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32)
30
* another process, because the fallback start_exclusive solution
28
diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c
31
* provides no protection across processes.
29
index XXXXXXX..XXXXXXX 100644
32
*/
30
--- a/target/s390x/translate_vx.inc.c
33
- if (page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
31
+++ b/target/s390x/translate_vx.inc.c
34
+ if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
32
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o)
35
return *p;
33
return DISAS_NEXT;
36
}
34
}
37
#endif
35
36
-static void gen_rll_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
37
-{
38
- TCGv_i32 t0 = tcg_temp_new_i32();
39
-
40
- tcg_gen_andi_i32(t0, b, 31);
41
- tcg_gen_rotl_i32(d, a, t0);
42
- tcg_temp_free_i32(t0);
43
-}
44
-
45
-static void gen_rll_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
46
-{
47
- TCGv_i64 t0 = tcg_temp_new_i64();
48
-
49
- tcg_gen_andi_i64(t0, b, 63);
50
- tcg_gen_rotl_i64(d, a, t0);
51
- tcg_temp_free_i64(t0);
52
-}
53
-
54
-static DisasJumpType op_verllv(DisasContext *s, DisasOps *o)
55
-{
56
- const uint8_t es = get_field(s, m4);
57
- static const GVecGen3 g[4] = {
58
- { .fno = gen_helper_gvec_verllv8, },
59
- { .fno = gen_helper_gvec_verllv16, },
60
- { .fni4 = gen_rll_i32, },
61
- { .fni8 = gen_rll_i64, },
62
- };
63
-
64
- if (es > ES_64) {
65
- gen_program_exception(s, PGM_SPECIFICATION);
66
- return DISAS_NORETURN;
67
- }
68
-
69
- gen_gvec_3(get_field(s, v1), get_field(s, v2),
70
- get_field(s, v3), &g[es]);
71
- return DISAS_NEXT;
72
-}
73
-
74
-static DisasJumpType op_verll(DisasContext *s, DisasOps *o)
75
-{
76
- const uint8_t es = get_field(s, m4);
77
- static const GVecGen2s g[4] = {
78
- { .fno = gen_helper_gvec_verll8, },
79
- { .fno = gen_helper_gvec_verll16, },
80
- { .fni4 = gen_rll_i32, },
81
- { .fni8 = gen_rll_i64, },
82
- };
83
-
84
- if (es > ES_64) {
85
- gen_program_exception(s, PGM_SPECIFICATION);
86
- return DISAS_NORETURN;
87
- }
88
- gen_gvec_2s(get_field(s, v1), get_field(s, v3), o->addr1,
89
- &g[es]);
90
- return DISAS_NEXT;
91
-}
92
-
93
static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c)
94
{
95
TCGv_i32 t = tcg_temp_new_i32();
96
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_vesv(DisasContext *s, DisasOps *o)
97
case 0x70:
98
gen_gvec_fn_3(shlv, es, v1, v2, v3);
99
break;
100
+ case 0x73:
101
+ gen_gvec_fn_3(rotlv, es, v1, v2, v3);
102
+ break;
103
case 0x7a:
104
gen_gvec_fn_3(sarv, es, v1, v2, v3);
105
break;
106
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_ves(DisasContext *s, DisasOps *o)
107
case 0x30:
108
gen_gvec_fn_2i(shli, es, v1, v3, d2);
109
break;
110
+ case 0x33:
111
+ gen_gvec_fn_2i(rotli, es, v1, v3, d2);
112
+ break;
113
case 0x3a:
114
gen_gvec_fn_2i(sari, es, v1, v3, d2);
115
break;
116
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_ves(DisasContext *s, DisasOps *o)
117
case 0x30:
118
gen_gvec_fn_2s(shls, es, v1, v3, shift);
119
break;
120
+ case 0x33:
121
+ gen_gvec_fn_2s(rotls, es, v1, v3, shift);
122
+ break;
123
case 0x3a:
124
gen_gvec_fn_2s(sars, es, v1, v3, shift);
125
break;
126
diff --git a/target/s390x/vec_int_helper.c b/target/s390x/vec_int_helper.c
127
index XXXXXXX..XXXXXXX 100644
128
--- a/target/s390x/vec_int_helper.c
129
+++ b/target/s390x/vec_int_helper.c
130
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_vpopct##BITS)(void *v1, const void *v2, uint32_t desc) \
131
DEF_VPOPCT(8)
132
DEF_VPOPCT(16)
133
134
-#define DEF_VERLLV(BITS) \
135
-void HELPER(gvec_verllv##BITS)(void *v1, const void *v2, const void *v3, \
136
- uint32_t desc) \
137
-{ \
138
- int i; \
139
- \
140
- for (i = 0; i < (128 / BITS); i++) { \
141
- const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \
142
- const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \
143
- \
144
- s390_vec_write_element##BITS(v1, i, rol##BITS(a, b)); \
145
- } \
146
-}
147
-DEF_VERLLV(8)
148
-DEF_VERLLV(16)
149
-
150
-#define DEF_VERLL(BITS) \
151
-void HELPER(gvec_verll##BITS)(void *v1, const void *v2, uint64_t count, \
152
- uint32_t desc) \
153
-{ \
154
- int i; \
155
- \
156
- for (i = 0; i < (128 / BITS); i++) { \
157
- const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \
158
- \
159
- s390_vec_write_element##BITS(v1, i, rol##BITS(a, count)); \
160
- } \
161
-}
162
-DEF_VERLL(8)
163
-DEF_VERLL(16)
164
-
165
#define DEF_VERIM(BITS) \
166
void HELPER(gvec_verim##BITS)(void *v1, const void *v2, const void *v3, \
167
uint32_t desc) \
168
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
169
index XXXXXXX..XXXXXXX 100644
170
--- a/target/s390x/insn-data.def
171
+++ b/target/s390x/insn-data.def
172
@@ -XXX,XX +XXX,XX @@
173
/* VECTOR POPULATION COUNT */
174
F(0xe750, VPOPCT, VRR_a, V, 0, 0, 0, 0, vpopct, 0, IF_VEC)
175
/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
176
- F(0xe773, VERLLV, VRR_c, V, 0, 0, 0, 0, verllv, 0, IF_VEC)
177
- F(0xe733, VERLL, VRS_a, V, la2, 0, 0, 0, verll, 0, IF_VEC)
178
+ F(0xe773, VERLLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC)
179
+ F(0xe733, VERLL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC)
180
/* VECTOR ELEMENT ROTATE AND INSERT UNDER MASK */
181
F(0xe772, VERIM, VRI_d, V, 0, 0, 0, 0, verim, 0, IF_VEC)
182
/* VECTOR ELEMENT SHIFT LEFT */
183
--
38
--
184
2.25.1
39
2.34.1
185
186
For user-only, the probe for page writability may race with another
thread's mprotect. Take the mmap_lock around the operation. This
is still faster than the start/end_exclusive fallback.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/ldst_atomicity.c.inc | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)


From: Nick Hudson <skrll@netbsd.org>

Fix building on NetBSD/arm by extracting the FSR value from the
correct siginfo_t field.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Nick Hudson <skrll@netbsd.org>
Message-Id: <20200516154147.24842-1-skrll@netbsd.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/user-exec.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
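For context on the FSR handling in the user-exec.c diff below: the patch uses
extract32(fsr, 11, 1) to decide whether the fault was a write, because bit 11
of the ARM fault status register is the WnR ("write, not read") flag on ARMv6
and later. A trivial standalone equivalent of that bit extraction:

    #include <stdbool.h>
    #include <stdint.h>

    /* Bit 11 of the ARM FSR is WnR on v6+: 1 means the faulting access
     * was a write, 0 means it was a read. */
    static bool fsr_is_write_fault(uint32_t fsr)
    {
        return (fsr >> 11) & 1;
    }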
13
10
14
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
11
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
15
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
16
--- a/accel/tcg/user-exec.c
13
--- a/accel/tcg/ldst_atomicity.c.inc
17
+++ b/accel/tcg/user-exec.c
14
+++ b/accel/tcg/ldst_atomicity.c.inc
18
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
15
@@ -XXX,XX +XXX,XX @@ static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
19
16
* another process, because the fallback start_exclusive solution
20
#if defined(__NetBSD__)
17
* provides no protection across processes.
21
#include <ucontext.h>
18
*/
22
+#include <sys/siginfo.h>
19
- if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
20
- uint64_t *p = __builtin_assume_aligned(pv, 8);
21
- return *p;
22
+ WITH_MMAP_LOCK_GUARD() {
23
+ if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
24
+ uint64_t *p = __builtin_assume_aligned(pv, 8);
25
+ return *p;
26
+ }
27
}
23
#endif
28
#endif
24
29
25
int cpu_signal_handler(int host_signum, void *pinfo,
30
@@ -XXX,XX +XXX,XX @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
26
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
31
return atomic16_read_ro(p);
27
siginfo_t *info = pinfo;
32
}
28
#if defined(__NetBSD__)
33
29
ucontext_t *uc = puc;
34
-#ifdef CONFIG_USER_ONLY
30
+ siginfo_t *si = pinfo;
35
/*
31
#else
36
* We can only use cmpxchg to emulate a load if the page is writable.
32
ucontext_t *uc = puc;
37
* If the page is not writable, then assume the value is immutable
38
* and requires no locking. This ignores the case of MAP_SHARED with
39
* another process, because the fallback start_exclusive solution
40
* provides no protection across processes.
41
+ *
42
+ * In system mode all guest pages are writable. For user mode,
43
+ * we must take mmap_lock so that the query remains valid until
44
+ * the write is complete -- tests/tcg/multiarch/munmap-pthread.c
45
+ * is an example that can race.
46
*/
47
- if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
48
- return *p;
49
- }
50
+ WITH_MMAP_LOCK_GUARD() {
51
+#ifdef CONFIG_USER_ONLY
52
+ if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
53
+ return *p;
54
+ }
33
#endif
55
#endif
34
unsigned long pc;
56
-
35
+ uint32_t fsr;
57
- /*
36
int is_write;
58
- * In system mode all guest pages are writable, and for user-only
37
59
- * we have just checked writability. Try cmpxchg.
38
#if defined(__NetBSD__)
60
- */
39
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
61
- if (HAVE_ATOMIC128_RW) {
40
pc = uc->uc_mcontext.arm_pc;
62
- return atomic16_read_rw(p);
41
#endif
63
+ if (HAVE_ATOMIC128_RW) {
42
64
+ return atomic16_read_rw(p);
43
- /* error_code is the FSR value, in which bit 11 is WnR (assuming a v6 or
65
+ }
44
- * later processor; on v5 we will always report this as a read).
66
}
45
+#ifdef __NetBSD__
67
46
+ fsr = si->si_trap;
68
/* Ultimate fallback: re-execute in serial context. */
47
+#else
48
+ fsr = uc->uc_mcontext.error_code;
49
+#endif
50
+ /*
51
+ * In the FSR, bit 11 is WnR, assuming a v6 or
52
+ * later processor. On v5 we will always report
53
+ * this as a read, which will fail later.
54
*/
55
- is_write = extract32(uc->uc_mcontext.error_code, 11, 1);
56
+ is_write = extract32(fsr, 11, 1);
57
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
58
}
59
60
--
69
--
61
2.25.1
70
2.34.1
62
63
From: Ilya Leoshkevich <iii@linux.ibm.com>

i386 and s390x implementations of op_add2 require an earlyclobber,
which is currently missing. This breaks VCKSM in s390x guests. E.g., on
x86_64 the following op:

  add2_i32 tmp2,tmp3,tmp2,tmp3,tmp3,tmp2 dead: 0 2 3 4 5 pref=none,0xffff

is translated to:

  addl %ebx, %r12d
  adcl %r12d, %ebx

Introduce a new C_N1_O1_I4 constraint, and make sure that earlyclobber
of aliased outputs is honored.

Cc: qemu-stable@nongnu.org
Fixes: 82790a870992 ("tcg: Add markup for output requires new register")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230719221310.1968845-7-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target-con-set.h | 5 ++++-
 tcg/s390x/tcg-target-con-set.h | 8 +++++---
 tcg/tcg.c | 8 +++++++-
 tcg/i386/tcg-target.c.inc | 2 +-
 tcg/s390x/tcg-target.c.inc | 4 ++--
 5 files changed, 19 insertions(+), 8 deletions(-)


No host backend support yet, but the interfaces for rotlv
and rotrv are in place.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v3: Drop the generic expansion from rot to shift; we can do better
for each backend, and then this code becomes unused.
---
 accel/tcg/tcg-runtime.h | 10 +++
 include/tcg/tcg-op-gvec.h | 4 ++
 include/tcg/tcg-op.h | 2 +
 include/tcg/tcg-opc.h | 2 +
 include/tcg/tcg.h | 1 +
 tcg/aarch64/tcg-target.h | 1 +
 tcg/i386/tcg-target.h | 1 +
 tcg/ppc/tcg-target.h | 1 +
 accel/tcg/tcg-runtime-gvec.c | 96 +++++++++++++++++++++++
 tcg/tcg-op-gvec.c | 122 +++++++++++++++++++++++++++++++++++
 tcg/tcg-op-vec.c | 10 +++
 tcg/tcg.c | 3 +
 tcg/README | 4 +-
 13 files changed, 256 insertions(+), 1 deletion(-)
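A small C model of the aliasing hazard described in the earlyclobber patch
above (illustrative only; the register reuse is spelled out as an explicit
assignment). add2 computes a double-word sum, and in the quoted x86 sequence
the low-part result is written into the register that still holds bh, so the
following adc adds a clobbered value.

    #include <stdint.h>

    /* Intended result: (hi:lo) = (ah:al) + (bh:bl) as a 64-bit sum. */
    static void add2_aliased_output(uint32_t *lo, uint32_t *hi,
                                    uint32_t al, uint32_t ah,
                                    uint32_t bl, uint32_t bh)
    {
        uint32_t out_lo = al + bl;      /* addl: low half of the sum */
        uint32_t carry  = out_lo < bl;

        bh = out_lo;                    /* the clobber: output aliases input bh */

        *hi = ah + bh + carry;          /* adcl: wrong, bh no longer holds bh */
        *lo = out_lo;
        /* With an earlyclobber (new) register for out_lo, bh would survive
         * and *hi would be computed from the original inputs. */
    }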
24
30
25
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
31
diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
26
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
27
--- a/accel/tcg/tcg-runtime.h
33
--- a/tcg/i386/tcg-target-con-set.h
28
+++ b/accel/tcg/tcg-runtime.h
34
+++ b/tcg/i386/tcg-target-con-set.h
29
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_sar16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
35
@@ -XXX,XX +XXX,XX @@
30
DEF_HELPER_FLAGS_4(gvec_sar32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
36
*
31
DEF_HELPER_FLAGS_4(gvec_sar64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
37
* C_N1_Im(...) defines a constraint set with 1 output and <m> inputs,
32
38
* except that the output must use a new register.
33
+DEF_HELPER_FLAGS_4(gvec_rotl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
39
+ *
34
+DEF_HELPER_FLAGS_4(gvec_rotl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
40
+ * C_Nn_Om_Ik(...) defines a constraint set with <n + m> outputs and <k>
35
+DEF_HELPER_FLAGS_4(gvec_rotl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
41
+ * inputs, except that the first <n> outputs must use new registers.
36
+DEF_HELPER_FLAGS_4(gvec_rotl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
42
*/
37
+
43
C_O0_I1(r)
38
+DEF_HELPER_FLAGS_4(gvec_rotr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
44
C_O0_I2(L, L)
39
+DEF_HELPER_FLAGS_4(gvec_rotr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
45
@@ -XXX,XX +XXX,XX @@ C_O2_I1(r, r, L)
40
+DEF_HELPER_FLAGS_4(gvec_rotr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
46
C_O2_I2(a, d, a, r)
41
+DEF_HELPER_FLAGS_4(gvec_rotr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
47
C_O2_I2(r, r, L, L)
42
+
48
C_O2_I3(a, d, 0, 1, r)
43
DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
49
-C_O2_I4(r, r, 0, 1, re, re)
44
DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
50
+C_N1_O1_I4(r, r, 0, 1, re, re)
45
DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
51
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
46
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
47
index XXXXXXX..XXXXXXX 100644
52
index XXXXXXX..XXXXXXX 100644
48
--- a/include/tcg/tcg-op-gvec.h
53
--- a/tcg/s390x/tcg-target-con-set.h
49
+++ b/include/tcg/tcg-op-gvec.h
54
+++ b/tcg/s390x/tcg-target-con-set.h
50
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
55
@@ -XXX,XX +XXX,XX @@
51
uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
56
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
52
void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
57
* Each operand should be a sequence of constraint letters as defined by
53
uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
58
* tcg-target-con-str.h; the constraint combination is inclusive or.
54
+void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
59
+ *
55
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
60
+ * C_Nn_Om_Ik(...) defines a constraint set with <n + m> outputs and <k>
56
+void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
61
+ * inputs, except that the first <n> outputs must use new registers.
57
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
62
*/
58
63
C_O0_I1(r)
59
void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
64
C_O0_I2(r, r)
60
uint32_t aofs, uint32_t bofs,
65
@@ -XXX,XX +XXX,XX @@ C_O2_I1(o, m, r)
61
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
66
C_O2_I2(o, m, 0, r)
62
index XXXXXXX..XXXXXXX 100644
67
C_O2_I2(o, m, r, r)
63
--- a/include/tcg/tcg-op.h
68
C_O2_I3(o, m, 0, 1, r)
64
+++ b/include/tcg/tcg-op.h
69
-C_O2_I4(r, r, 0, 1, rA, r)
65
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
70
-C_O2_I4(r, r, 0, 1, ri, r)
66
void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
71
-C_O2_I4(r, r, 0, 1, r, r)
67
void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
72
+C_N1_O1_I4(r, r, 0, 1, ri, r)
68
void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
73
+C_N1_O1_I4(r, r, 0, 1, rA, r)
69
+void tcg_gen_rotlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
70
+void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
71
72
void tcg_gen_cmp_vec(TCGCond cond, unsigned vece, TCGv_vec r,
73
TCGv_vec a, TCGv_vec b);
74
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
75
index XXXXXXX..XXXXXXX 100644
76
--- a/include/tcg/tcg-opc.h
77
+++ b/include/tcg/tcg-opc.h
78
@@ -XXX,XX +XXX,XX @@ DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
79
DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
80
DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
81
DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
82
+DEF(rotlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
83
+DEF(rotrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
84
85
DEF(cmp_vec, 1, 2, 1, IMPLVEC)
86
87
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
88
index XXXXXXX..XXXXXXX 100644
89
--- a/include/tcg/tcg.h
90
+++ b/include/tcg/tcg.h
91
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
92
#define TCG_TARGET_HAS_andc_vec 0
93
#define TCG_TARGET_HAS_orc_vec 0
94
#define TCG_TARGET_HAS_roti_vec 0
95
+#define TCG_TARGET_HAS_rotv_vec 0
96
#define TCG_TARGET_HAS_shi_vec 0
97
#define TCG_TARGET_HAS_shs_vec 0
98
#define TCG_TARGET_HAS_shv_vec 0
99
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
100
index XXXXXXX..XXXXXXX 100644
101
--- a/tcg/aarch64/tcg-target.h
102
+++ b/tcg/aarch64/tcg-target.h
103
@@ -XXX,XX +XXX,XX @@ typedef enum {
104
#define TCG_TARGET_HAS_neg_vec 1
105
#define TCG_TARGET_HAS_abs_vec 1
106
#define TCG_TARGET_HAS_roti_vec 0
107
+#define TCG_TARGET_HAS_rotv_vec 0
108
#define TCG_TARGET_HAS_shi_vec 1
109
#define TCG_TARGET_HAS_shs_vec 0
110
#define TCG_TARGET_HAS_shv_vec 1
111
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
112
index XXXXXXX..XXXXXXX 100644
113
--- a/tcg/i386/tcg-target.h
114
+++ b/tcg/i386/tcg-target.h
115
@@ -XXX,XX +XXX,XX @@ extern bool have_avx2;
116
#define TCG_TARGET_HAS_neg_vec 0
117
#define TCG_TARGET_HAS_abs_vec 1
118
#define TCG_TARGET_HAS_roti_vec 0
119
+#define TCG_TARGET_HAS_rotv_vec 0
120
#define TCG_TARGET_HAS_shi_vec 1
121
#define TCG_TARGET_HAS_shs_vec 1
122
#define TCG_TARGET_HAS_shv_vec have_avx2
123
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
124
index XXXXXXX..XXXXXXX 100644
125
--- a/tcg/ppc/tcg-target.h
126
+++ b/tcg/ppc/tcg-target.h
127
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
128
#define TCG_TARGET_HAS_neg_vec have_isa_3_00
129
#define TCG_TARGET_HAS_abs_vec 0
130
#define TCG_TARGET_HAS_roti_vec 0
131
+#define TCG_TARGET_HAS_rotv_vec 0
132
#define TCG_TARGET_HAS_shi_vec 0
133
#define TCG_TARGET_HAS_shs_vec 0
134
#define TCG_TARGET_HAS_shv_vec 1
135
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
136
index XXXXXXX..XXXXXXX 100644
137
--- a/accel/tcg/tcg-runtime-gvec.c
138
+++ b/accel/tcg/tcg-runtime-gvec.c
139
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc)
140
clear_high(d, oprsz, desc);
141
}
142
143
+void HELPER(gvec_rotl8v)(void *d, void *a, void *b, uint32_t desc)
144
+{
145
+ intptr_t oprsz = simd_oprsz(desc);
146
+ intptr_t i;
147
+
148
+ for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
149
+ uint8_t sh = *(uint8_t *)(b + i) & 7;
150
+ *(uint8_t *)(d + i) = rol8(*(uint8_t *)(a + i), sh);
151
+ }
152
+ clear_high(d, oprsz, desc);
153
+}
154
+
155
+void HELPER(gvec_rotl16v)(void *d, void *a, void *b, uint32_t desc)
156
+{
157
+ intptr_t oprsz = simd_oprsz(desc);
158
+ intptr_t i;
159
+
160
+ for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
161
+ uint8_t sh = *(uint16_t *)(b + i) & 15;
162
+ *(uint16_t *)(d + i) = rol16(*(uint16_t *)(a + i), sh);
163
+ }
164
+ clear_high(d, oprsz, desc);
165
+}
166
+
167
+void HELPER(gvec_rotl32v)(void *d, void *a, void *b, uint32_t desc)
168
+{
169
+ intptr_t oprsz = simd_oprsz(desc);
170
+ intptr_t i;
171
+
172
+ for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
173
+ uint8_t sh = *(uint32_t *)(b + i) & 31;
174
+ *(uint32_t *)(d + i) = rol32(*(uint32_t *)(a + i), sh);
175
+ }
176
+ clear_high(d, oprsz, desc);
177
+}
178
+
179
+void HELPER(gvec_rotl64v)(void *d, void *a, void *b, uint32_t desc)
180
+{
181
+ intptr_t oprsz = simd_oprsz(desc);
182
+ intptr_t i;
183
+
184
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
185
+ uint8_t sh = *(uint64_t *)(b + i) & 63;
186
+ *(uint64_t *)(d + i) = rol64(*(uint64_t *)(a + i), sh);
187
+ }
188
+ clear_high(d, oprsz, desc);
189
+}
190
+
191
+void HELPER(gvec_rotr8v)(void *d, void *a, void *b, uint32_t desc)
192
+{
193
+ intptr_t oprsz = simd_oprsz(desc);
194
+ intptr_t i;
195
+
196
+ for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
197
+ uint8_t sh = *(uint8_t *)(b + i) & 7;
198
+ *(uint8_t *)(d + i) = ror8(*(uint8_t *)(a + i), sh);
199
+ }
200
+ clear_high(d, oprsz, desc);
201
+}
202
+
203
+void HELPER(gvec_rotr16v)(void *d, void *a, void *b, uint32_t desc)
204
+{
205
+ intptr_t oprsz = simd_oprsz(desc);
206
+ intptr_t i;
207
+
208
+ for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
209
+ uint8_t sh = *(uint16_t *)(b + i) & 15;
210
+ *(uint16_t *)(d + i) = ror16(*(uint16_t *)(a + i), sh);
211
+ }
212
+ clear_high(d, oprsz, desc);
213
+}
214
+
215
+void HELPER(gvec_rotr32v)(void *d, void *a, void *b, uint32_t desc)
216
+{
217
+ intptr_t oprsz = simd_oprsz(desc);
218
+ intptr_t i;
219
+
220
+ for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
221
+ uint8_t sh = *(uint32_t *)(b + i) & 31;
222
+ *(uint32_t *)(d + i) = ror32(*(uint32_t *)(a + i), sh);
223
+ }
224
+ clear_high(d, oprsz, desc);
225
+}
226
+
227
+void HELPER(gvec_rotr64v)(void *d, void *a, void *b, uint32_t desc)
228
+{
229
+ intptr_t oprsz = simd_oprsz(desc);
230
+ intptr_t i;
231
+
232
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
233
+ uint8_t sh = *(uint64_t *)(b + i) & 63;
234
+ *(uint64_t *)(d + i) = ror64(*(uint64_t *)(a + i), sh);
235
+ }
236
+ clear_high(d, oprsz, desc);
237
+}
238
+
239
#define DO_CMP1(NAME, TYPE, OP) \
240
void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc) \
241
{ \
242
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
243
index XXXXXXX..XXXXXXX 100644
244
--- a/tcg/tcg-op-gvec.c
245
+++ b/tcg/tcg-op-gvec.c
246
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
247
tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
248
}
249
250
+/*
251
+ * Similarly for rotates.
252
+ */
253
+
254
+static void tcg_gen_rotlv_mod_vec(unsigned vece, TCGv_vec d,
255
+ TCGv_vec a, TCGv_vec b)
256
+{
257
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
258
+
259
+ tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
260
+ tcg_gen_and_vec(vece, t, t, b);
261
+ tcg_gen_rotlv_vec(vece, d, a, t);
262
+ tcg_temp_free_vec(t);
263
+}
264
+
265
+static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
266
+{
267
+ TCGv_i32 t = tcg_temp_new_i32();
268
+
269
+ tcg_gen_andi_i32(t, b, 31);
270
+ tcg_gen_rotl_i32(d, a, t);
271
+ tcg_temp_free_i32(t);
272
+}
273
+
274
+static void tcg_gen_rotl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
275
+{
276
+ TCGv_i64 t = tcg_temp_new_i64();
277
+
278
+ tcg_gen_andi_i64(t, b, 63);
279
+ tcg_gen_rotl_i64(d, a, t);
280
+ tcg_temp_free_i64(t);
281
+}
282
+
283
+void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
284
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
285
+{
286
+ static const TCGOpcode vecop_list[] = { INDEX_op_rotlv_vec, 0 };
287
+ static const GVecGen3 g[4] = {
288
+ { .fniv = tcg_gen_rotlv_mod_vec,
289
+ .fno = gen_helper_gvec_rotl8v,
290
+ .opt_opc = vecop_list,
291
+ .vece = MO_8 },
292
+ { .fniv = tcg_gen_rotlv_mod_vec,
293
+ .fno = gen_helper_gvec_rotl16v,
294
+ .opt_opc = vecop_list,
295
+ .vece = MO_16 },
296
+ { .fni4 = tcg_gen_rotl_mod_i32,
297
+ .fniv = tcg_gen_rotlv_mod_vec,
298
+ .fno = gen_helper_gvec_rotl32v,
299
+ .opt_opc = vecop_list,
300
+ .vece = MO_32 },
301
+ { .fni8 = tcg_gen_rotl_mod_i64,
302
+ .fniv = tcg_gen_rotlv_mod_vec,
303
+ .fno = gen_helper_gvec_rotl64v,
304
+ .opt_opc = vecop_list,
305
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
306
+ .vece = MO_64 },
307
+ };
308
+
309
+ tcg_debug_assert(vece <= MO_64);
310
+ tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
311
+}
312
+
313
+static void tcg_gen_rotrv_mod_vec(unsigned vece, TCGv_vec d,
314
+ TCGv_vec a, TCGv_vec b)
315
+{
316
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
317
+
318
+ tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
319
+ tcg_gen_and_vec(vece, t, t, b);
320
+ tcg_gen_rotrv_vec(vece, d, a, t);
321
+ tcg_temp_free_vec(t);
322
+}
323
+
324
+static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
325
+{
326
+ TCGv_i32 t = tcg_temp_new_i32();
327
+
328
+ tcg_gen_andi_i32(t, b, 31);
329
+ tcg_gen_rotr_i32(d, a, t);
330
+ tcg_temp_free_i32(t);
331
+}
332
+
333
+static void tcg_gen_rotr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
334
+{
335
+ TCGv_i64 t = tcg_temp_new_i64();
336
+
337
+ tcg_gen_andi_i64(t, b, 63);
338
+ tcg_gen_rotr_i64(d, a, t);
339
+ tcg_temp_free_i64(t);
340
+}
341
+
342
+void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
343
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
344
+{
345
+ static const TCGOpcode vecop_list[] = { INDEX_op_rotrv_vec, 0 };
346
+ static const GVecGen3 g[4] = {
347
+ { .fniv = tcg_gen_rotrv_mod_vec,
348
+ .fno = gen_helper_gvec_rotr8v,
349
+ .opt_opc = vecop_list,
350
+ .vece = MO_8 },
351
+ { .fniv = tcg_gen_rotrv_mod_vec,
352
+ .fno = gen_helper_gvec_rotr16v,
353
+ .opt_opc = vecop_list,
354
+ .vece = MO_16 },
355
+ { .fni4 = tcg_gen_rotr_mod_i32,
356
+ .fniv = tcg_gen_rotrv_mod_vec,
357
+ .fno = gen_helper_gvec_rotr32v,
358
+ .opt_opc = vecop_list,
359
+ .vece = MO_32 },
360
+ { .fni8 = tcg_gen_rotr_mod_i64,
361
+ .fniv = tcg_gen_rotrv_mod_vec,
362
+ .fno = gen_helper_gvec_rotr64v,
363
+ .opt_opc = vecop_list,
364
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
365
+ .vece = MO_64 },
366
+ };
367
+
368
+ tcg_debug_assert(vece <= MO_64);
369
+ tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
370
+}
371
+
372
/* Expand OPSZ bytes worth of three-operand operations using i32 elements. */
373
static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
374
uint32_t oprsz, TCGCond cond)
375
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
376
index XXXXXXX..XXXXXXX 100644
377
--- a/tcg/tcg-op-vec.c
378
+++ b/tcg/tcg-op-vec.c
379
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
380
do_op3_nofail(vece, r, a, b, INDEX_op_sarv_vec);
381
}
382
383
+void tcg_gen_rotlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
384
+{
385
+ do_op3_nofail(vece, r, a, b, INDEX_op_rotlv_vec);
386
+}
387
+
388
+void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
389
+{
390
+ do_op3_nofail(vece, r, a, b, INDEX_op_rotrv_vec);
391
+}
392
+
393
static void do_shifts(unsigned vece, TCGv_vec r, TCGv_vec a,
394
TCGv_i32 s, TCGOpcode opc_s, TCGOpcode opc_v)
395
{
396
diff --git a/tcg/tcg.c b/tcg/tcg.c
74
diff --git a/tcg/tcg.c b/tcg/tcg.c
397
index XXXXXXX..XXXXXXX 100644
75
index XXXXXXX..XXXXXXX 100644
398
--- a/tcg/tcg.c
76
--- a/tcg/tcg.c
399
+++ b/tcg/tcg.c
77
+++ b/tcg/tcg.c
400
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
78
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
401
return have_vec && TCG_TARGET_HAS_shv_vec;
79
#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2),
402
case INDEX_op_rotli_vec:
80
#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
403
return have_vec && TCG_TARGET_HAS_roti_vec;
81
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
404
+ case INDEX_op_rotlv_vec:
82
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),
405
+ case INDEX_op_rotrv_vec:
83
406
+ return have_vec && TCG_TARGET_HAS_rotv_vec;
84
typedef enum {
407
case INDEX_op_ssadd_vec:
85
#include "tcg-target-con-set.h"
408
case INDEX_op_usadd_vec:
86
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
409
case INDEX_op_sssub_vec:
87
#undef C_O2_I2
410
diff --git a/tcg/README b/tcg/README
88
#undef C_O2_I3
89
#undef C_O2_I4
90
+#undef C_N1_O1_I4
91
92
/* Put all of the constraint sets into an array, indexed by the enum. */
93
94
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
95
#define C_O2_I2(O1, O2, I1, I2) { .args_ct_str = { #O1, #O2, #I1, #I2 } },
96
#define C_O2_I3(O1, O2, I1, I2, I3) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
97
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
98
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },
99
100
static const TCGTargetOpDef constraint_sets[] = {
101
#include "tcg-target-con-set.h"
102
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef constraint_sets[] = {
103
#undef C_O2_I2
104
#undef C_O2_I3
105
#undef C_O2_I4
106
+#undef C_N1_O1_I4
107
108
/* Expand the enumerator to be returned from tcg_target_op_def(). */
109
110
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef constraint_sets[] = {
111
#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2)
112
#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
113
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
114
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)
115
116
#include "tcg-target.c.inc"
117
118
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
119
* dead after the instruction, we must allocate a new
120
* register and move it.
121
*/
122
- if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
123
+ if (temp_readonly(ts) || !IS_DEAD_ARG(i)
124
+ || def->args_ct[arg_ct->alias_index].newreg) {
125
allocate_new_reg = true;
126
} else if (ts->val_type == TEMP_VAL_REG) {
127
/*
128
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
411
index XXXXXXX..XXXXXXX 100644
129
index XXXXXXX..XXXXXXX 100644
412
--- a/tcg/README
130
--- a/tcg/i386/tcg-target.c.inc
413
+++ b/tcg/README
131
+++ b/tcg/i386/tcg-target.c.inc
414
@@ -XXX,XX +XXX,XX @@ E.g. VECL=1 -> 64 << 1 -> v128, and VECE=2 -> 1 << 2 -> i32.
132
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
415
133
case INDEX_op_add2_i64:
416
* shrv_vec v0, v1, v2
134
case INDEX_op_sub2_i32:
417
* sarv_vec v0, v1, v2
135
case INDEX_op_sub2_i64:
418
+* rotlv_vec v0, v1, v2
136
- return C_O2_I4(r, r, 0, 1, re, re);
419
+* rotrv_vec v0, v1, v2
137
+ return C_N1_O1_I4(r, r, 0, 1, re, re);
420
138
421
- Similarly for logical and arithmetic right shift.
139
case INDEX_op_ctz_i32:
422
+ Similarly for logical and arithmetic right shift, and rotates.
140
case INDEX_op_ctz_i64:
423
141
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
424
* cmp_vec v0, v1, v2, cond
142
index XXXXXXX..XXXXXXX 100644
425
143
--- a/tcg/s390x/tcg-target.c.inc
144
+++ b/tcg/s390x/tcg-target.c.inc
145
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
146
147
case INDEX_op_add2_i32:
148
case INDEX_op_sub2_i32:
149
- return C_O2_I4(r, r, 0, 1, ri, r);
150
+ return C_N1_O1_I4(r, r, 0, 1, ri, r);
151
152
case INDEX_op_add2_i64:
153
case INDEX_op_sub2_i64:
154
- return C_O2_I4(r, r, 0, 1, rA, r);
155
+ return C_N1_O1_I4(r, r, 0, 1, rA, r);
156
157
case INDEX_op_st_vec:
158
return C_O0_I2(v, r);
426
--
159
--
427
2.25.1
160
2.34.1
428
429
From: Anton Johansson <anjo@rev.ng>

In replacing target_ulong with vaddr and TARGET_FMT_lx with VADDR_PRIx,
the zero-padding of TARGET_FMT_lx got lost. Readd 16-wide zero-padding
for logging consistency.

Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230713120746.26897-1-anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)


No host backend support yet, but the interfaces for rotli
are in place. Canonicalize immediate rotate to the left,
based on a survey of architectures, but provide both left
and right shift interfaces to the translators.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tcg-runtime.h | 5 +++
 include/tcg/tcg-op-gvec.h | 6 ++++
 include/tcg/tcg-op.h | 2 ++
 include/tcg/tcg-opc.h | 1 +
 include/tcg/tcg.h | 1 +
 tcg/aarch64/tcg-target.h | 1 +
 tcg/i386/tcg-target.h | 1 +
 tcg/ppc/tcg-target.h | 1 +
 accel/tcg/tcg-runtime-gvec.c | 48 +++++++++++++++++++++++++
 tcg/tcg-op-gvec.c | 68 ++++++++++++++++++++++++++++++++++++
 tcg/tcg-op-vec.c | 12 +++++++
 tcg/tcg.c | 2 ++
 tcg/README | 3 +-
 13 files changed, 150 insertions(+), 1 deletion(-)
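Since the rotate-by-immediate interface above is canonicalized on rotate-left,
a right rotate by an immediate can always be expressed as a left rotate by the
complementary amount. A scalar sketch for 32-bit elements (illustrative only,
not the TCG implementation):

    #include <stdint.h>

    static inline uint32_t rol32_sketch(uint32_t x, unsigned n)
    {
        n &= 31;
        return n ? (x << n) | (x >> (32 - n)) : x;
    }

    /* ror32(x, n) == rol32(x, (32 - n) & 31) */
    static inline uint32_t ror32_sketch(uint32_t x, unsigned n)
    {
        return rol32_sketch(x, 32 - (n & 31));
    }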
23
15
24
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
16
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
25
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
26
--- a/accel/tcg/tcg-runtime.h
18
--- a/accel/tcg/cputlb.c
27
+++ b/accel/tcg/tcg-runtime.h
19
+++ b/accel/tcg/cputlb.c
28
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(gvec_sar16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
20
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page)
29
DEF_HELPER_FLAGS_3(gvec_sar32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
21
30
DEF_HELPER_FLAGS_3(gvec_sar64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
22
/* Check if we need to flush due to large pages. */
31
23
if ((page & lp_mask) == lp_addr) {
32
+DEF_HELPER_FLAGS_3(gvec_rotl8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
24
- tlb_debug("forcing full flush midx %d (%"
33
+DEF_HELPER_FLAGS_3(gvec_rotl16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
25
- VADDR_PRIx "/%" VADDR_PRIx ")\n",
34
+DEF_HELPER_FLAGS_3(gvec_rotl32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
26
+ tlb_debug("forcing full flush midx %d (%016"
35
+DEF_HELPER_FLAGS_3(gvec_rotl64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
27
+ VADDR_PRIx "/%016" VADDR_PRIx ")\n",
36
+
28
midx, lp_addr, lp_mask);
37
DEF_HELPER_FLAGS_4(gvec_shl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
29
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
38
DEF_HELPER_FLAGS_4(gvec_shl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
30
} else {
39
DEF_HELPER_FLAGS_4(gvec_shl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
31
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
40
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
32
41
index XXXXXXX..XXXXXXX 100644
33
assert_cpu_is_self(cpu);
42
--- a/include/tcg/tcg-op-gvec.h
34
43
+++ b/include/tcg/tcg-op-gvec.h
35
- tlb_debug("page addr: %" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
44
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
36
+ tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
45
int64_t shift, uint32_t oprsz, uint32_t maxsz);
37
46
void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
38
qemu_spin_lock(&env_tlb(env)->c.lock);
47
int64_t shift, uint32_t oprsz, uint32_t maxsz);
39
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
48
+void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
40
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
49
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
41
50
+void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
42
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
51
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
52
53
void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
54
TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
55
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
56
void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
57
void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
58
void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
59
+void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
60
+void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
61
62
#endif
63
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
64
index XXXXXXX..XXXXXXX 100644
65
--- a/include/tcg/tcg-op.h
66
+++ b/include/tcg/tcg-op.h
67
@@ -XXX,XX +XXX,XX @@ void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
68
void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
69
void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
70
void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
71
+void tcg_gen_rotli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
72
+void tcg_gen_rotri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
73
74
void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
75
void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
76
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
77
index XXXXXXX..XXXXXXX 100644
78
--- a/include/tcg/tcg-opc.h
79
+++ b/include/tcg/tcg-opc.h
80
@@ -XXX,XX +XXX,XX @@ DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))
81
DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
82
DEF(shri_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
83
DEF(sari_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
84
+DEF(rotli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_roti_vec))
85
86
DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
87
DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
88
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
89
index XXXXXXX..XXXXXXX 100644
90
--- a/include/tcg/tcg.h
91
+++ b/include/tcg/tcg.h
92
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
93
#define TCG_TARGET_HAS_not_vec 0
94
#define TCG_TARGET_HAS_andc_vec 0
95
#define TCG_TARGET_HAS_orc_vec 0
96
+#define TCG_TARGET_HAS_roti_vec 0
97
#define TCG_TARGET_HAS_shi_vec 0
98
#define TCG_TARGET_HAS_shs_vec 0
99
#define TCG_TARGET_HAS_shv_vec 0
100
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
101
index XXXXXXX..XXXXXXX 100644
102
--- a/tcg/aarch64/tcg-target.h
103
+++ b/tcg/aarch64/tcg-target.h
104
@@ -XXX,XX +XXX,XX @@ typedef enum {
105
#define TCG_TARGET_HAS_not_vec 1
106
#define TCG_TARGET_HAS_neg_vec 1
107
#define TCG_TARGET_HAS_abs_vec 1
108
+#define TCG_TARGET_HAS_roti_vec 0
109
#define TCG_TARGET_HAS_shi_vec 1
110
#define TCG_TARGET_HAS_shs_vec 0
111
#define TCG_TARGET_HAS_shv_vec 1
112
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
113
index XXXXXXX..XXXXXXX 100644
114
--- a/tcg/i386/tcg-target.h
115
+++ b/tcg/i386/tcg-target.h
116
@@ -XXX,XX +XXX,XX @@ extern bool have_avx2;
117
#define TCG_TARGET_HAS_not_vec 0
118
#define TCG_TARGET_HAS_neg_vec 0
119
#define TCG_TARGET_HAS_abs_vec 1
120
+#define TCG_TARGET_HAS_roti_vec 0
121
#define TCG_TARGET_HAS_shi_vec 1
122
#define TCG_TARGET_HAS_shs_vec 1
123
#define TCG_TARGET_HAS_shv_vec have_avx2
124
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
125
index XXXXXXX..XXXXXXX 100644
126
--- a/tcg/ppc/tcg-target.h
127
+++ b/tcg/ppc/tcg-target.h
128
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
129
#define TCG_TARGET_HAS_not_vec 1
130
#define TCG_TARGET_HAS_neg_vec have_isa_3_00
131
#define TCG_TARGET_HAS_abs_vec 0
132
+#define TCG_TARGET_HAS_roti_vec 0
133
#define TCG_TARGET_HAS_shi_vec 0
134
#define TCG_TARGET_HAS_shs_vec 0
135
#define TCG_TARGET_HAS_shv_vec 1
136
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
137
index XXXXXXX..XXXXXXX 100644
138
--- a/accel/tcg/tcg-runtime-gvec.c
139
+++ b/accel/tcg/tcg-runtime-gvec.c
140
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sar64i)(void *d, void *a, uint32_t desc)
141
clear_high(d, oprsz, desc);
142
}
143
144
+void HELPER(gvec_rotl8i)(void *d, void *a, uint32_t desc)
145
+{
146
+ intptr_t oprsz = simd_oprsz(desc);
147
+ int shift = simd_data(desc);
148
+ intptr_t i;
149
+
150
+ for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
151
+ *(uint8_t *)(d + i) = rol8(*(uint8_t *)(a + i), shift);
152
+ }
153
+ clear_high(d, oprsz, desc);
154
+}
155
+
156
+void HELPER(gvec_rotl16i)(void *d, void *a, uint32_t desc)
157
+{
158
+ intptr_t oprsz = simd_oprsz(desc);
159
+ int shift = simd_data(desc);
160
+ intptr_t i;
161
+
162
+ for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
163
+ *(uint16_t *)(d + i) = rol16(*(uint16_t *)(a + i), shift);
164
+ }
165
+ clear_high(d, oprsz, desc);
166
+}
167
+
168
+void HELPER(gvec_rotl32i)(void *d, void *a, uint32_t desc)
169
+{
170
+ intptr_t oprsz = simd_oprsz(desc);
171
+ int shift = simd_data(desc);
172
+ intptr_t i;
173
+
174
+ for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
175
+ *(uint32_t *)(d + i) = rol32(*(uint32_t *)(a + i), shift);
176
+ }
177
+ clear_high(d, oprsz, desc);
178
+}
179
+
180
+void HELPER(gvec_rotl64i)(void *d, void *a, uint32_t desc)
181
+{
182
+ intptr_t oprsz = simd_oprsz(desc);
183
+ int shift = simd_data(desc);
184
+ intptr_t i;
185
+
186
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
187
+ *(uint64_t *)(d + i) = rol64(*(uint64_t *)(a + i), shift);
188
+ }
189
+ clear_high(d, oprsz, desc);
190
+}
191
+
192
void HELPER(gvec_shl8v)(void *d, void *a, void *b, uint32_t desc)
193
{
43
{
194
intptr_t oprsz = simd_oprsz(desc);
44
- tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
195
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
45
+ tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
196
index XXXXXXX..XXXXXXX 100644
46
197
--- a/tcg/tcg-op-gvec.c
47
/* This should already be page aligned */
198
+++ b/tcg/tcg-op-gvec.c
48
addr &= TARGET_PAGE_MASK;
199
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
49
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page(CPUState *cpu, vaddr addr)
200
}
50
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
201
}
51
uint16_t idxmap)
202
203
+void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
204
+{
205
+ uint64_t mask = dup_const(MO_8, 0xff << c);
206
+
207
+ tcg_gen_shli_i64(d, a, c);
208
+ tcg_gen_shri_i64(a, a, 8 - c);
209
+ tcg_gen_andi_i64(d, d, mask);
210
+ tcg_gen_andi_i64(a, a, ~mask);
211
+ tcg_gen_or_i64(d, d, a);
212
+}
213
+
214
+void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
215
+{
216
+ uint64_t mask = dup_const(MO_16, 0xffff << c);
217
+
218
+ tcg_gen_shli_i64(d, a, c);
219
+ tcg_gen_shri_i64(a, a, 16 - c);
220
+ tcg_gen_andi_i64(d, d, mask);
221
+ tcg_gen_andi_i64(a, a, ~mask);
222
+ tcg_gen_or_i64(d, d, a);
223
+}
224
+
225
+void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
226
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
227
+{
228
+ static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
229
+ static const GVecGen2i g[4] = {
230
+ { .fni8 = tcg_gen_vec_rotl8i_i64,
231
+ .fniv = tcg_gen_rotli_vec,
232
+ .fno = gen_helper_gvec_rotl8i,
233
+ .opt_opc = vecop_list,
234
+ .vece = MO_8 },
235
+ { .fni8 = tcg_gen_vec_rotl16i_i64,
236
+ .fniv = tcg_gen_rotli_vec,
237
+ .fno = gen_helper_gvec_rotl16i,
238
+ .opt_opc = vecop_list,
239
+ .vece = MO_16 },
240
+ { .fni4 = tcg_gen_rotli_i32,
241
+ .fniv = tcg_gen_rotli_vec,
242
+ .fno = gen_helper_gvec_rotl32i,
243
+ .opt_opc = vecop_list,
244
+ .vece = MO_32 },
245
+ { .fni8 = tcg_gen_rotli_i64,
246
+ .fniv = tcg_gen_rotli_vec,
247
+ .fno = gen_helper_gvec_rotl64i,
248
+ .opt_opc = vecop_list,
249
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
250
+ .vece = MO_64 },
251
+ };
252
+
253
+ tcg_debug_assert(vece <= MO_64);
254
+ tcg_debug_assert(shift >= 0 && shift < (8 << vece));
255
+ if (shift == 0) {
256
+ tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
257
+ } else {
258
+ tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
259
+ }
260
+}
261
+
262
+void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
263
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
264
+{
265
+ tcg_debug_assert(vece <= MO_64);
266
+ tcg_debug_assert(shift >= 0 && shift < (8 << vece));
267
+ tcg_gen_gvec_rotli(vece, dofs, aofs, -shift & ((8 << vece) - 1),
268
+ oprsz, maxsz);
269
+}
270
+
271
/*
272
* Specialized generation vector shifts by a non-constant scalar.
273
*/
274
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
275
index XXXXXXX..XXXXXXX 100644
276
--- a/tcg/tcg-op-vec.c
277
+++ b/tcg/tcg-op-vec.c
278
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
279
do_shifti(INDEX_op_sari_vec, vece, r, a, i);
280
}
281
282
+void tcg_gen_rotli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
283
+{
284
+ do_shifti(INDEX_op_rotli_vec, vece, r, a, i);
285
+}
286
+
287
+void tcg_gen_rotri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
288
+{
289
+ int bits = 8 << vece;
290
+ tcg_debug_assert(i >= 0 && i < bits);
291
+ do_shifti(INDEX_op_rotli_vec, vece, r, a, -i & (bits - 1));
292
+}
293
+
294
void tcg_gen_cmp_vec(TCGCond cond, unsigned vece,
295
TCGv_vec r, TCGv_vec a, TCGv_vec b)
296
{
52
{
297
diff --git a/tcg/tcg.c b/tcg/tcg.c
53
- tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
298
index XXXXXXX..XXXXXXX 100644
54
+ tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
299
--- a/tcg/tcg.c
55
300
+++ b/tcg/tcg.c
56
/* This should already be page aligned */
301
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
57
addr &= TARGET_PAGE_MASK;
302
case INDEX_op_shrv_vec:
58
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
303
case INDEX_op_sarv_vec:
59
vaddr addr,
304
return have_vec && TCG_TARGET_HAS_shv_vec;
60
uint16_t idxmap)
305
+ case INDEX_op_rotli_vec:
61
{
306
+ return have_vec && TCG_TARGET_HAS_roti_vec;
62
- tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
307
case INDEX_op_ssadd_vec:
63
+ tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
308
case INDEX_op_usadd_vec:
64
309
case INDEX_op_sssub_vec:
65
/* This should already be page aligned */
310
diff --git a/tcg/README b/tcg/README
66
addr &= TARGET_PAGE_MASK;
311
index XXXXXXX..XXXXXXX 100644
67
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_locked(CPUArchState *env, int midx,
312
--- a/tcg/README
68
*/
313
+++ b/tcg/README
69
if (mask < f->mask || len > f->mask) {
314
@@ -XXX,XX +XXX,XX @@ E.g. VECL=1 -> 64 << 1 -> v128, and VECE=2 -> 1 << 2 -> i32.
70
tlb_debug("forcing full flush midx %d ("
315
71
- "%" VADDR_PRIx "/%" VADDR_PRIx "+%" VADDR_PRIx ")\n",
316
* shri_vec v0, v1, i2
72
+ "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
317
* sari_vec v0, v1, i2
73
midx, addr, mask, len);
318
+* rotli_vec v0, v1, i2
74
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
319
* shrs_vec v0, v1, s2
75
return;
320
* sars_vec v0, v1, s2
76
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_locked(CPUArchState *env, int midx,
321
77
*/
322
- Similarly for logical and arithmetic right shift.
78
if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
323
+ Similarly for logical and arithmetic right shift, and left rotate.
79
tlb_debug("forcing full flush midx %d ("
324
80
- "%" VADDR_PRIx "/%" VADDR_PRIx ")\n",
325
* shlv_vec v0, v1, v2
81
+ "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
82
midx, d->large_page_addr, d->large_page_mask);
83
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
84
return;
85
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
86
87
assert_cpu_is_self(cpu);
88
89
- tlb_debug("range: %" VADDR_PRIx "/%u+%" VADDR_PRIx " mmu_map:0x%x\n",
90
+ tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
91
d.addr, d.bits, d.len, d.idxmap);
92
93
qemu_spin_lock(&env_tlb(env)->c.lock);
94
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
95
&xlat, &sz, full->attrs, &prot);
96
assert(sz >= TARGET_PAGE_SIZE);
97
98
- tlb_debug("vaddr=%" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
99
+ tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
100
" prot=%x idx=%d\n",
101
addr, full->phys_addr, prot, mmu_idx);
326
102
327
--
103
--
328
2.25.1
104
2.34.1
329
330
diff view generated by jsdifflib
Deleted patch
1
We do not reflect this expansion in tcg_can_emit_vecop_list,
2
so it is unused and unusable. However, we actually perform
3
the same expansion in do_gvec_shifts, so it is also unneeded.
4
1
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/tcg-op-vec.c | 35 +++++++++++------------------------
8
1 file changed, 11 insertions(+), 24 deletions(-)
9
10
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tcg-op-vec.c
13
+++ b/tcg/tcg-op-vec.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
15
}
16
17
static void do_shifts(unsigned vece, TCGv_vec r, TCGv_vec a,
18
- TCGv_i32 s, TCGOpcode opc_s, TCGOpcode opc_v)
19
+ TCGv_i32 s, TCGOpcode opc)
20
{
21
TCGTemp *rt = tcgv_vec_temp(r);
22
TCGTemp *at = tcgv_vec_temp(a);
23
@@ -XXX,XX +XXX,XX @@ static void do_shifts(unsigned vece, TCGv_vec r, TCGv_vec a,
24
TCGArg ai = temp_arg(at);
25
TCGArg si = temp_arg(st);
26
TCGType type = rt->base_type;
27
- const TCGOpcode *hold_list;
28
int can;
29
30
tcg_debug_assert(at->base_type >= type);
31
- tcg_assert_listed_vecop(opc_s);
32
- hold_list = tcg_swap_vecop_list(NULL);
33
-
34
- can = tcg_can_emit_vec_op(opc_s, type, vece);
35
+ tcg_assert_listed_vecop(opc);
36
+ can = tcg_can_emit_vec_op(opc, type, vece);
37
if (can > 0) {
38
- vec_gen_3(opc_s, type, vece, ri, ai, si);
39
+ vec_gen_3(opc, type, vece, ri, ai, si);
40
} else if (can < 0) {
41
- tcg_expand_vec_op(opc_s, type, vece, ri, ai, si);
42
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
43
+ tcg_expand_vec_op(opc, type, vece, ri, ai, si);
44
+ tcg_swap_vecop_list(hold_list);
45
} else {
46
- TCGv_vec vec_s = tcg_temp_new_vec(type);
47
-
48
- if (vece == MO_64) {
49
- TCGv_i64 s64 = tcg_temp_new_i64();
50
- tcg_gen_extu_i32_i64(s64, s);
51
- tcg_gen_dup_i64_vec(MO_64, vec_s, s64);
52
- tcg_temp_free_i64(s64);
53
- } else {
54
- tcg_gen_dup_i32_vec(vece, vec_s, s);
55
- }
56
- do_op3_nofail(vece, r, a, vec_s, opc_v);
57
- tcg_temp_free_vec(vec_s);
58
+ g_assert_not_reached();
59
}
60
- tcg_swap_vecop_list(hold_list);
61
}
62
63
void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
64
{
65
- do_shifts(vece, r, a, b, INDEX_op_shls_vec, INDEX_op_shlv_vec);
66
+ do_shifts(vece, r, a, b, INDEX_op_shls_vec);
67
}
68
69
void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
70
{
71
- do_shifts(vece, r, a, b, INDEX_op_shrs_vec, INDEX_op_shrv_vec);
72
+ do_shifts(vece, r, a, b, INDEX_op_shrs_vec);
73
}
74
75
void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
76
{
77
- do_shifts(vece, r, a, b, INDEX_op_sars_vec, INDEX_op_sarv_vec);
78
+ do_shifts(vece, r, a, b, INDEX_op_sars_vec);
79
}
80
81
void tcg_gen_bitsel_vec(unsigned vece, TCGv_vec r, TCGv_vec a,
82
--
83
2.25.1
84
85
diff view generated by jsdifflib
Deleted patch
1
No host backend support yet, but the interfaces for rotls
2
are in place. Only implement left-rotate for now, as the
3
only known use of vector rotate by scalar is s390x, so any
4
right-rotate would be unused and untestable.
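
A rough scalar model of the semantics the new interface provides (illustration
only; the function name is invented here): one non-constant count, applied to
every element, taken modulo the element width.

    #include <stddef.h>
    #include <stdint.h>

    /* Rotate every 32-bit element of a[] left by the same run-time count. */
    static void rotls32_model(uint32_t *d, const uint32_t *a,
                              size_t n, unsigned count)
    {
        count &= 31;    /* the count is taken modulo the element width */
        for (size_t i = 0; i < n; i++) {
            d[i] = count ? (a[i] << count) | (a[i] >> (32 - count)) : a[i];
        }
    }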
5
1
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
include/tcg/tcg-op-gvec.h | 2 ++
10
include/tcg/tcg-op.h | 1 +
11
include/tcg/tcg-opc.h | 1 +
12
include/tcg/tcg.h | 1 +
13
tcg/aarch64/tcg-target.h | 1 +
14
tcg/i386/tcg-target.h | 1 +
15
tcg/ppc/tcg-target.h | 1 +
16
tcg/tcg-op-gvec.c | 22 ++++++++++++++++++++++
17
tcg/tcg-op-vec.c | 5 +++++
18
tcg/tcg.c | 2 ++
19
10 files changed, 37 insertions(+)
20
21
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
22
index XXXXXXX..XXXXXXX 100644
23
--- a/include/tcg/tcg-op-gvec.h
24
+++ b/include/tcg/tcg-op-gvec.h
25
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
26
TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
27
void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
28
TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
29
+void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs,
30
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
31
32
/*
33
* Perform vector shift by vector element, modulo the element size.
34
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
35
index XXXXXXX..XXXXXXX 100644
36
--- a/include/tcg/tcg-op.h
37
+++ b/include/tcg/tcg-op.h
38
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
39
void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
40
void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
41
void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
42
+void tcg_gen_rotls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
43
44
void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
45
void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
46
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
47
index XXXXXXX..XXXXXXX 100644
48
--- a/include/tcg/tcg-opc.h
49
+++ b/include/tcg/tcg-opc.h
50
@@ -XXX,XX +XXX,XX @@ DEF(rotli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_roti_vec))
51
DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
52
DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
53
DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
54
+DEF(rotls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rots_vec))
55
56
DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
57
DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
58
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
59
index XXXXXXX..XXXXXXX 100644
60
--- a/include/tcg/tcg.h
61
+++ b/include/tcg/tcg.h
62
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
63
#define TCG_TARGET_HAS_andc_vec 0
64
#define TCG_TARGET_HAS_orc_vec 0
65
#define TCG_TARGET_HAS_roti_vec 0
66
+#define TCG_TARGET_HAS_rots_vec 0
67
#define TCG_TARGET_HAS_rotv_vec 0
68
#define TCG_TARGET_HAS_shi_vec 0
69
#define TCG_TARGET_HAS_shs_vec 0
70
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
71
index XXXXXXX..XXXXXXX 100644
72
--- a/tcg/aarch64/tcg-target.h
73
+++ b/tcg/aarch64/tcg-target.h
74
@@ -XXX,XX +XXX,XX @@ typedef enum {
75
#define TCG_TARGET_HAS_neg_vec 1
76
#define TCG_TARGET_HAS_abs_vec 1
77
#define TCG_TARGET_HAS_roti_vec 0
78
+#define TCG_TARGET_HAS_rots_vec 0
79
#define TCG_TARGET_HAS_rotv_vec 0
80
#define TCG_TARGET_HAS_shi_vec 1
81
#define TCG_TARGET_HAS_shs_vec 0
82
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
83
index XXXXXXX..XXXXXXX 100644
84
--- a/tcg/i386/tcg-target.h
85
+++ b/tcg/i386/tcg-target.h
86
@@ -XXX,XX +XXX,XX @@ extern bool have_avx2;
87
#define TCG_TARGET_HAS_neg_vec 0
88
#define TCG_TARGET_HAS_abs_vec 1
89
#define TCG_TARGET_HAS_roti_vec 0
90
+#define TCG_TARGET_HAS_rots_vec 0
91
#define TCG_TARGET_HAS_rotv_vec 0
92
#define TCG_TARGET_HAS_shi_vec 1
93
#define TCG_TARGET_HAS_shs_vec 1
94
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
95
index XXXXXXX..XXXXXXX 100644
96
--- a/tcg/ppc/tcg-target.h
97
+++ b/tcg/ppc/tcg-target.h
98
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
99
#define TCG_TARGET_HAS_neg_vec have_isa_3_00
100
#define TCG_TARGET_HAS_abs_vec 0
101
#define TCG_TARGET_HAS_roti_vec 0
102
+#define TCG_TARGET_HAS_rots_vec 0
103
#define TCG_TARGET_HAS_rotv_vec 0
104
#define TCG_TARGET_HAS_shi_vec 0
105
#define TCG_TARGET_HAS_shs_vec 0
106
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
107
index XXXXXXX..XXXXXXX 100644
108
--- a/tcg/tcg-op-gvec.c
109
+++ b/tcg/tcg-op-gvec.c
110
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
111
do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
112
}
113
114
+void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs,
115
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
116
+{
117
+ static const GVecGen2sh g = {
118
+ .fni4 = tcg_gen_rotl_i32,
119
+ .fni8 = tcg_gen_rotl_i64,
120
+ .fniv_s = tcg_gen_rotls_vec,
121
+ .fniv_v = tcg_gen_rotlv_vec,
122
+ .fno = {
123
+ gen_helper_gvec_rotl8i,
124
+ gen_helper_gvec_rotl16i,
125
+ gen_helper_gvec_rotl32i,
126
+ gen_helper_gvec_rotl64i,
127
+ },
128
+ .s_list = { INDEX_op_rotls_vec, 0 },
129
+ .v_list = { INDEX_op_rotlv_vec, 0 },
130
+ };
131
+
132
+ tcg_debug_assert(vece <= MO_64);
133
+ do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
134
+}
135
+
136
/*
137
* Expand D = A << (B % element bits)
138
*
139
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
140
index XXXXXXX..XXXXXXX 100644
141
--- a/tcg/tcg-op-vec.c
142
+++ b/tcg/tcg-op-vec.c
143
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
144
do_shifts(vece, r, a, b, INDEX_op_sars_vec);
145
}
146
147
+void tcg_gen_rotls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s)
148
+{
149
+ do_shifts(vece, r, a, s, INDEX_op_rotls_vec);
150
+}
151
+
152
void tcg_gen_bitsel_vec(unsigned vece, TCGv_vec r, TCGv_vec a,
153
TCGv_vec b, TCGv_vec c)
154
{
155
diff --git a/tcg/tcg.c b/tcg/tcg.c
156
index XXXXXXX..XXXXXXX 100644
157
--- a/tcg/tcg.c
158
+++ b/tcg/tcg.c
159
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
160
return have_vec && TCG_TARGET_HAS_shv_vec;
161
case INDEX_op_rotli_vec:
162
return have_vec && TCG_TARGET_HAS_roti_vec;
163
+ case INDEX_op_rotls_vec:
164
+ return have_vec && TCG_TARGET_HAS_rots_vec;
165
case INDEX_op_rotlv_vec:
166
case INDEX_op_rotrv_vec:
167
return have_vec && TCG_TARGET_HAS_rotv_vec;
168
--
169
2.25.1
170
171
diff view generated by jsdifflib
Deleted patch
1
For immediate rotate, we can implement this in two instructions,
2
using SLI. For variable rotate, the fact that AArch64 implements
3
right shifts as negative left shifts means a backend-specific expansion works best.
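
An informal model of the two-instruction expansion (hypothetical helper names,
not the backend code): an unsigned right shift brings the rotated-out bits down
to the bottom, and SLI then shifts the source left and inserts it above them,
keeping those low bits.

    #include <stdint.h>

    /* Model of SLI: shift 'src' left by i and insert it into 'dst',
     * preserving the low i bits of 'dst'.  Assumes 0 <= i < 32. */
    static inline uint32_t sli32_model(uint32_t dst, uint32_t src, unsigned i)
    {
        uint32_t low_mask = (1u << i) - 1;
        return (src << i) | (dst & low_mask);
    }

    /* Immediate rotate-left in two steps: shift right by (32 - i), SLI by i. */
    static inline uint32_t rotli32_via_sli(uint32_t x, unsigned i)
    {
        uint32_t t = x >> (-i & 31);    /* the bits rotated in from the top */
        return sli32_model(t, x, i);
    }

This mirrors the expansion below: tcg_gen_shri_vec by the complemented count,
followed by the new aa64_sli_vec opcode.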
4
1
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/aarch64/tcg-target.opc.h | 1 +
8
tcg/aarch64/tcg-target.inc.c | 53 ++++++++++++++++++++++++++++++++++--
9
2 files changed, 52 insertions(+), 2 deletions(-)
10
11
diff --git a/tcg/aarch64/tcg-target.opc.h b/tcg/aarch64/tcg-target.opc.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/aarch64/tcg-target.opc.h
14
+++ b/tcg/aarch64/tcg-target.opc.h
15
@@ -XXX,XX +XXX,XX @@
16
*/
17
18
DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC)
19
+DEF(aa64_sli_vec, 1, 2, 1, IMPLVEC)
20
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/aarch64/tcg-target.inc.c
23
+++ b/tcg/aarch64/tcg-target.inc.c
24
@@ -XXX,XX +XXX,XX @@ typedef enum {
25
I3614_SSHR = 0x0f000400,
26
I3614_SSRA = 0x0f001400,
27
I3614_SHL = 0x0f005400,
28
+ I3614_SLI = 0x2f005400,
29
I3614_USHR = 0x2f000400,
30
I3614_USRA = 0x2f001400,
31
32
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
33
case INDEX_op_sari_vec:
34
tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
35
break;
36
+ case INDEX_op_aa64_sli_vec:
37
+ tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
38
+ break;
39
case INDEX_op_shlv_vec:
40
tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
41
break;
42
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
43
case INDEX_op_shlv_vec:
44
case INDEX_op_bitsel_vec:
45
return 1;
46
+ case INDEX_op_rotli_vec:
47
case INDEX_op_shrv_vec:
48
case INDEX_op_sarv_vec:
49
+ case INDEX_op_rotlv_vec:
50
+ case INDEX_op_rotrv_vec:
51
return -1;
52
case INDEX_op_mul_vec:
53
case INDEX_op_smax_vec:
54
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
55
TCGArg a0, ...)
56
{
57
va_list va;
58
- TCGv_vec v0, v1, v2, t1;
59
+ TCGv_vec v0, v1, v2, t1, t2;
60
+ TCGArg a2;
61
62
va_start(va, a0);
63
v0 = temp_tcgv_vec(arg_temp(a0));
64
v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
65
- v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
66
+ a2 = va_arg(va, TCGArg);
67
+ v2 = temp_tcgv_vec(arg_temp(a2));
68
69
switch (opc) {
70
+ case INDEX_op_rotli_vec:
71
+ t1 = tcg_temp_new_vec(type);
72
+ tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
73
+ vec_gen_4(INDEX_op_aa64_sli_vec, type, vece,
74
+ tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
75
+ tcg_temp_free_vec(t1);
76
+ break;
77
+
78
case INDEX_op_shrv_vec:
79
case INDEX_op_sarv_vec:
80
/* Right shifts are negative left shifts for AArch64. */
81
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
82
tcg_temp_free_vec(t1);
83
break;
84
85
+ case INDEX_op_rotlv_vec:
86
+ t1 = tcg_temp_new_vec(type);
87
+ tcg_gen_dupi_vec(vece, t1, 8 << vece);
88
+ tcg_gen_sub_vec(vece, t1, v2, t1);
89
+ /* Right shifts are negative left shifts for AArch64. */
90
+ vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
91
+ tcgv_vec_arg(v1), tcgv_vec_arg(t1));
92
+ vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(v0),
93
+ tcgv_vec_arg(v1), tcgv_vec_arg(v2));
94
+ tcg_gen_or_vec(vece, v0, v0, t1);
95
+ tcg_temp_free_vec(t1);
96
+ break;
97
+
98
+ case INDEX_op_rotrv_vec:
99
+ t1 = tcg_temp_new_vec(type);
100
+ t2 = tcg_temp_new_vec(type);
101
+ tcg_gen_neg_vec(vece, t1, v2);
102
+ tcg_gen_dupi_vec(vece, t2, 8 << vece);
103
+ tcg_gen_add_vec(vece, t2, t1, t2);
104
+ /* Right shifts are negative left shifts for AArch64. */
105
+ vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
106
+ tcgv_vec_arg(v1), tcgv_vec_arg(t1));
107
+ vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t2),
108
+ tcgv_vec_arg(v1), tcgv_vec_arg(t2));
109
+ tcg_gen_or_vec(vece, v0, t1, t2);
110
+ tcg_temp_free_vec(t1);
111
+ tcg_temp_free_vec(t2);
112
+ break;
113
+
114
default:
115
g_assert_not_reached();
116
}
117
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
118
static const TCGTargetOpDef lZ_l = { .args_ct_str = { "lZ", "l" } };
119
static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
120
static const TCGTargetOpDef w_w_w = { .args_ct_str = { "w", "w", "w" } };
121
+ static const TCGTargetOpDef w_0_w = { .args_ct_str = { "w", "0", "w" } };
122
static const TCGTargetOpDef w_w_wO = { .args_ct_str = { "w", "w", "wO" } };
123
static const TCGTargetOpDef w_w_wN = { .args_ct_str = { "w", "w", "wN" } };
124
static const TCGTargetOpDef w_w_wZ = { .args_ct_str = { "w", "w", "wZ" } };
125
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
126
return &w_w_wZ;
127
case INDEX_op_bitsel_vec:
128
return &w_w_w_w;
129
+ case INDEX_op_aa64_sli_vec:
130
+ return &w_0_w;
131
132
default:
133
return NULL;
134
--
135
2.25.1
136
137
diff view generated by jsdifflib
Deleted patch
1
We already had support for rotlv, using a target-specific opcode;
2
convert to use the generic opcode. Handle rotrv via simple negation.
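
Sketched per element (illustrative helper only, not the backend code), the
negation works because rotate counts are taken modulo the element width:

    #include <stdint.h>

    /* Per-element rotate right, expressed as a rotate left by the negated
     * count; counts are modulo the element width (32 bits here). */
    static inline uint32_t rotrv32_elem_model(uint32_t x, uint32_t count)
    {
        unsigned s = -count & 31;
        return s ? (x << s) | (x >> (32 - s)) : x;
    }

which is exactly what the expansion below does: tcg_gen_neg_vec on the count
vector, then tcg_gen_rotlv_vec.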
3
1
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/ppc/tcg-target.h | 2 +-
7
tcg/ppc/tcg-target.opc.h | 1 -
8
tcg/ppc/tcg-target.inc.c | 23 +++++++++++++++++++----
9
3 files changed, 20 insertions(+), 6 deletions(-)
10
11
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/ppc/tcg-target.h
14
+++ b/tcg/ppc/tcg-target.h
15
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
16
#define TCG_TARGET_HAS_abs_vec 0
17
#define TCG_TARGET_HAS_roti_vec 0
18
#define TCG_TARGET_HAS_rots_vec 0
19
-#define TCG_TARGET_HAS_rotv_vec 0
20
+#define TCG_TARGET_HAS_rotv_vec 1
21
#define TCG_TARGET_HAS_shi_vec 0
22
#define TCG_TARGET_HAS_shs_vec 0
23
#define TCG_TARGET_HAS_shv_vec 1
24
diff --git a/tcg/ppc/tcg-target.opc.h b/tcg/ppc/tcg-target.opc.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/tcg/ppc/tcg-target.opc.h
27
+++ b/tcg/ppc/tcg-target.opc.h
28
@@ -XXX,XX +XXX,XX @@ DEF(ppc_msum_vec, 1, 3, 0, IMPLVEC)
29
DEF(ppc_muleu_vec, 1, 2, 0, IMPLVEC)
30
DEF(ppc_mulou_vec, 1, 2, 0, IMPLVEC)
31
DEF(ppc_pkum_vec, 1, 2, 0, IMPLVEC)
32
-DEF(ppc_rotl_vec, 1, 2, 0, IMPLVEC)
33
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/tcg/ppc/tcg-target.inc.c
36
+++ b/tcg/ppc/tcg-target.inc.c
37
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
38
case INDEX_op_shlv_vec:
39
case INDEX_op_shrv_vec:
40
case INDEX_op_sarv_vec:
41
+ case INDEX_op_rotlv_vec:
42
return vece <= MO_32 || have_isa_2_07;
43
case INDEX_op_ssadd_vec:
44
case INDEX_op_sssub_vec:
45
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
46
case INDEX_op_shli_vec:
47
case INDEX_op_shri_vec:
48
case INDEX_op_sari_vec:
49
+ case INDEX_op_rotli_vec:
50
return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
51
case INDEX_op_neg_vec:
52
return vece >= MO_32 && have_isa_3_00;
53
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
54
return 0;
55
case INDEX_op_bitsel_vec:
56
return have_vsx;
57
+ case INDEX_op_rotrv_vec:
58
+ return -1;
59
default:
60
return 0;
61
}
62
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
63
case INDEX_op_ppc_pkum_vec:
64
insn = pkum_op[vece];
65
break;
66
- case INDEX_op_ppc_rotl_vec:
67
+ case INDEX_op_rotlv_vec:
68
insn = rotl_op[vece];
69
break;
70
case INDEX_op_ppc_msum_vec:
71
@@ -XXX,XX +XXX,XX @@ static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
72
t3 = tcg_temp_new_vec(type);
73
t4 = tcg_temp_new_vec(type);
74
tcg_gen_dupi_vec(MO_8, t4, -16);
75
- vec_gen_3(INDEX_op_ppc_rotl_vec, type, MO_32, tcgv_vec_arg(t1),
76
+ vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
77
tcgv_vec_arg(v2), tcgv_vec_arg(t4));
78
vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
79
tcgv_vec_arg(v1), tcgv_vec_arg(v2));
80
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
81
TCGArg a0, ...)
82
{
83
va_list va;
84
- TCGv_vec v0, v1, v2;
85
+ TCGv_vec v0, v1, v2, t0;
86
TCGArg a2;
87
88
va_start(va, a0);
89
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
90
case INDEX_op_sari_vec:
91
expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
92
break;
93
+ case INDEX_op_rotli_vec:
94
+ expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
95
+ break;
96
case INDEX_op_cmp_vec:
97
v2 = temp_tcgv_vec(arg_temp(a2));
98
expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
99
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
100
v2 = temp_tcgv_vec(arg_temp(a2));
101
expand_vec_mul(type, vece, v0, v1, v2);
102
break;
103
+ case INDEX_op_rotlv_vec:
104
+ v2 = temp_tcgv_vec(arg_temp(a2));
105
+ t0 = tcg_temp_new_vec(type);
106
+ tcg_gen_neg_vec(vece, t0, v2);
107
+ tcg_gen_rotlv_vec(vece, v0, v1, t0);
108
+ tcg_temp_free_vec(t0);
109
+ break;
110
default:
111
g_assert_not_reached();
112
}
113
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
114
case INDEX_op_shlv_vec:
115
case INDEX_op_shrv_vec:
116
case INDEX_op_sarv_vec:
117
+ case INDEX_op_rotlv_vec:
118
+ case INDEX_op_rotrv_vec:
119
case INDEX_op_ppc_mrgh_vec:
120
case INDEX_op_ppc_mrgl_vec:
121
case INDEX_op_ppc_muleu_vec:
122
case INDEX_op_ppc_mulou_vec:
123
case INDEX_op_ppc_pkum_vec:
124
- case INDEX_op_ppc_rotl_vec:
125
case INDEX_op_dup2_vec:
126
return &v_v_v;
127
case INDEX_op_not_vec:
128
--
129
2.25.1
130
131
diff view generated by jsdifflib
Deleted patch
1
Acked-by: David Gibson <david@gibson.dropbear.id.au>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
target/ppc/helper.h | 4 ----
5
target/ppc/int_helper.c | 17 -----------------
6
target/ppc/translate/vmx-impl.inc.c | 8 ++++----
7
3 files changed, 4 insertions(+), 25 deletions(-)
8
1
9
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/ppc/helper.h
12
+++ b/target/ppc/helper.h
13
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(vsubuqm, void, avr, avr, avr)
14
DEF_HELPER_4(vsubecuq, void, avr, avr, avr, avr)
15
DEF_HELPER_4(vsubeuqm, void, avr, avr, avr, avr)
16
DEF_HELPER_3(vsubcuq, void, avr, avr, avr)
17
-DEF_HELPER_3(vrlb, void, avr, avr, avr)
18
-DEF_HELPER_3(vrlh, void, avr, avr, avr)
19
-DEF_HELPER_3(vrlw, void, avr, avr, avr)
20
-DEF_HELPER_3(vrld, void, avr, avr, avr)
21
DEF_HELPER_4(vsldoi, void, avr, avr, avr, i32)
22
DEF_HELPER_3(vextractub, void, avr, avr, i32)
23
DEF_HELPER_3(vextractuh, void, avr, avr, i32)
24
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/ppc/int_helper.c
27
+++ b/target/ppc/int_helper.c
28
@@ -XXX,XX +XXX,XX @@ VRFI(p, float_round_up)
29
VRFI(z, float_round_to_zero)
30
#undef VRFI
31
32
-#define VROTATE(suffix, element, mask) \
33
- void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
34
- { \
35
- int i; \
36
- \
37
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
38
- unsigned int shift = b->element[i] & mask; \
39
- r->element[i] = (a->element[i] << shift) | \
40
- (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
41
- } \
42
- }
43
-VROTATE(b, u8, 0x7)
44
-VROTATE(h, u16, 0xF)
45
-VROTATE(w, u32, 0x1F)
46
-VROTATE(d, u64, 0x3F)
47
-#undef VROTATE
48
-
49
void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
50
{
51
int i;
52
diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/ppc/translate/vmx-impl.inc.c
55
+++ b/target/ppc/translate/vmx-impl.inc.c
56
@@ -XXX,XX +XXX,XX @@ GEN_VXFORM3(vsubeuqm, 31, 0);
57
GEN_VXFORM3(vsubecuq, 31, 0);
58
GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
59
vsubecuq, PPC_NONE, PPC2_ALTIVEC_207)
60
-GEN_VXFORM(vrlb, 2, 0);
61
-GEN_VXFORM(vrlh, 2, 1);
62
-GEN_VXFORM(vrlw, 2, 2);
63
+GEN_VXFORM_V(vrlb, MO_8, tcg_gen_gvec_rotlv, 2, 0);
64
+GEN_VXFORM_V(vrlh, MO_16, tcg_gen_gvec_rotlv, 2, 1);
65
+GEN_VXFORM_V(vrlw, MO_32, tcg_gen_gvec_rotlv, 2, 2);
66
GEN_VXFORM(vrlwmi, 2, 2);
67
GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \
68
vrlwmi, PPC_NONE, PPC2_ISA300)
69
-GEN_VXFORM(vrld, 2, 3);
70
+GEN_VXFORM_V(vrld, MO_64, tcg_gen_gvec_rotlv, 2, 3);
71
GEN_VXFORM(vrldmi, 2, 3);
72
GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \
73
vrldmi, PPC_NONE, PPC2_ISA300)
74
--
75
2.25.1
76
77
diff view generated by jsdifflib
1
From: Nick Hudson <skrll@netbsd.org>
1
From: Luca Bonissi <qemu@bonslack.org>
2
2
3
Fix qemu build on NetBSD/evbarm-aarch64 by providing a NetBSD specific
3
These should match 'start' as target_ulong, not target_long.
4
cpu_signal_handler.
5
4
5
On 32-bit targets, the parameter was sign-extended to uint64_t,
6
so only the first mmap within the upper 2 GB of memory can succeed.
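
A standalone illustration (not QEMU code; the address is an arbitrary example)
of why the signed parameter type breaks lookups: any 'last' value at or above
0x80000000 is sign-extended when widened to 64 bits, so it can no longer match
the unsigned bounds stored in the interval tree.

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* A page in the upper 2 GB of a 32-bit guest address space. */
        uint32_t last = 0x9000ffffu;

        uint64_t as_unsigned = last;           /* 000000009000ffff */
        uint64_t as_signed = (int32_t)last;    /* ffffffff9000ffff (two's complement) */

        printf("%016" PRIx64 "\n%016" PRIx64 "\n", as_unsigned, as_signed);
        return 0;
    }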
7
8
Signed-off-by: Luca Bonissi <qemu@bonslack.org>
9
Message-Id: <327460e2-0ebd-9edb-426b-1df80d16c32a@bonslack.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Nick Hudson <skrll@netbsd.org>
8
Message-Id: <20200517101529.5367-1-skrll@netbsd.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
12
---
11
accel/tcg/user-exec.c | 27 +++++++++++++++++++++++++++
13
accel/tcg/user-exec.c | 4 ++--
12
1 file changed, 27 insertions(+)
14
1 file changed, 2 insertions(+), 2 deletions(-)
13
15
14
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
16
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/accel/tcg/user-exec.c
18
--- a/accel/tcg/user-exec.c
17
+++ b/accel/tcg/user-exec.c
19
+++ b/accel/tcg/user-exec.c
18
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
20
@@ -XXX,XX +XXX,XX @@ typedef struct PageFlagsNode {
19
21
20
#elif defined(__aarch64__)
22
static IntervalTreeRoot pageflags_root;
21
23
22
+#if defined(__NetBSD__)
24
-static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
23
+
25
+static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
24
+#include <ucontext.h>
26
{
25
+#include <sys/siginfo.h>
27
IntervalTreeNode *n;
26
+
28
27
+int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
29
@@ -XXX,XX +XXX,XX @@ static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
28
+{
29
+ ucontext_t *uc = puc;
30
+ siginfo_t *si = pinfo;
31
+ unsigned long pc;
32
+ int is_write;
33
+ uint32_t esr;
34
+
35
+ pc = uc->uc_mcontext.__gregs[_REG_PC];
36
+ esr = si->si_trap;
37
+
38
+ /*
39
+ * siginfo_t::si_trap is the ESR value, for data aborts ESR.EC
40
+ * is 0b10010x: then bit 6 is the WnR bit
41
+ */
42
+ is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
43
+ return handle_cpu_signal(pc, si, is_write, &uc->uc_sigmask);
44
+}
45
+
46
+#else
47
+
48
#ifndef ESR_MAGIC
49
/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */
50
#define ESR_MAGIC 0x45535201
51
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
52
}
53
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
54
}
30
}
55
+#endif
31
56
32
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
57
#elif defined(__s390__)
33
- target_long last)
34
+ target_ulong last)
35
{
36
IntervalTreeNode *n;
58
37
59
--
38
--
60
2.25.1
39
2.34.1
61
62
diff view generated by jsdifflib