The following changes since commit eb6490f544388dd24c0d054a96dd304bc7284450:

  Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20200703' into staging (2020-07-04 16:08:41 +0100)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20200706

for you to fetch changes up to 852f933e482518797f7785a2e017a215b88df815:

  tcg: Fix do_nonatomic_op_* vs signed operations (2020-07-06 10:58:19 -0700)

----------------------------------------------------------------
Fix for ppc shifts
Fix for non-parallel atomic ops

----------------------------------------------------------------
Catherine A. Frederick (1):
      tcg/ppc: Sanitize immediate shifts

Richard Henderson (1):
      tcg: Fix do_nonatomic_op_* vs signed operations

 tcg/ppc/tcg-target.inc.c | 15 ++++++++++-----
 tcg/tcg-op.c             | 10 ++++++----
 2 files changed, 16 insertions(+), 9 deletions(-)
Now include a patch to address

  qemu-system-x86_64: /home/pm215/qemu/tcg/sparc/tcg-target.inc.c:319:
  patch_reloc: Assertion `check_fit_ptr(value, 13)' failed.

I believe it was the change to bswap64 to use the constant pool more
often that exposed the latent problem.
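(For context: check_fit_ptr(value, 13) tests whether a displacement still
fits in a signed 13-bit immediate, the limit for SPARC ADD/LDX immediates.
A minimal standalone sketch of that kind of range check, illustrative
only and not the actual QEMU helper:

#include <stdbool.h>
#include <stdint.h>

/* A value fits in a signed N-bit immediate iff -2^(N-1) <= v <= 2^(N-1)-1;
   for N = 13 that is -4096 <= v <= 4095.  A displacement that grows past
   this range can no longer be encoded in a single instruction. */
static bool fits_signed_imm(intptr_t v, unsigned bits)
{
    intptr_t lo = -((intptr_t)1 << (bits - 1));
    intptr_t hi = ((intptr_t)1 << (bits - 1)) - 1;
    return v >= lo && v <= hi;
}

So once a constant-pool entry sits more than 4 KiB away from the
instruction being patched, the assertion fires.)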
Only re-posting the new patch.

r~

The following changes since commit 58b1f0f21edcab13f78a376b1d90267626be1275:

  Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging (2018-12-16 12:49:06 +0000)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20181216

for you to fetch changes up to b7c2cd08a6f68010ad27c9c0bf2fde02fb743a0e:

  xxhash: match output against the original xxhash32 (2018-12-17 06:04:44 +0300)

----------------------------------------------------------------
- Remove retranslation remnants
- Return success from patch_reloc
- Preserve 32-bit values as zero-extended on x86_64
- Make bswap during memory ops optional
- Clean up xxhash
- Revert constant pooling for tcg/sparc

----------------------------------------------------------------
Alistair Francis (1):
      tcg/mips: Improve the add2/sub2 command to use TCG_TARGET_REG_BITS

Emilio G. Cota (5):
      tcg: Drop nargs from tcg_op_insert_{before,after}
      qht-bench: document -p flag
      exec: introduce qemu_xxhash{2,4,5,6,7}
      include: move exec/tb-hash-xx.h to qemu/xxhash.h
      xxhash: match output against the original xxhash32

Richard Henderson (27):
      target/sparc: Remove the constant pool
      tcg/i386: Always use %ebp for TCG_AREG0
      tcg/i386: Move TCG_REG_CALL_STACK from define to enum
      tcg/aarch64: Remove reloc_pc26_atomic
      tcg/aarch64: Fold away "noaddr" branch routines
      tcg/arm: Remove reloc_pc24_atomic
      tcg/arm: Fold away "noaddr" branch routines
      tcg/ppc: Fold away "noaddr" branch routines
      tcg/s390: Remove retranslation code
      tcg/sparc: Remove retranslation code
      tcg/mips: Remove retranslation code
      tcg: Return success from patch_reloc
      tcg/i386: Return false on failure from patch_reloc
      tcg/aarch64: Return false on failure from patch_reloc
      tcg/arm: Return false on failure from patch_reloc
      tcg/ppc: Return false on failure from patch_reloc
      tcg/s390: Return false on failure from patch_reloc
      tcg/i386: Propagate is64 to tcg_out_qemu_ld_direct
      tcg/i386: Propagate is64 to tcg_out_qemu_ld_slow_path
      tcg/i386: Implement INDEX_op_extr{lh}_i64_i32 for 32-bit guests
      tcg/i386: Assume 32-bit values are zero-extended
      tcg/i386: Precompute all guest_base parameters
      tcg/i386: Add setup_guest_base_seg for FreeBSD
      tcg: Clean up generic bswap32
      tcg: Clean up generic bswap64
      tcg/optimize: Optimize bswap
      tcg: Add TCG_TARGET_HAS_MEMORY_BSWAP

 include/exec/tb-hash.h                       |   4 +-
 include/{exec/tb-hash-xx.h => qemu/xxhash.h} |  47 ++++--
 tcg/aarch64/tcg-target.h                     |   1 +
 tcg/arm/tcg-target.h                         |   1 +
 tcg/i386/tcg-target.h                        |  17 +--
 tcg/mips/tcg-target.h                        |   1 +
 tcg/ppc/tcg-target.h                         |   1 +
 tcg/s390/tcg-target.h                        |   1 +
 tcg/sparc/tcg-target.h                       |   1 +
 tcg/tcg.h                                    |   4 +-
 tcg/tci/tcg-target.h                         |   2 +
 tcg/aarch64/tcg-target.inc.c                 |  71 +++------
 tcg/arm/tcg-target.inc.c                     |  55 +++----
 tcg/i386/tcg-target.inc.c                    | 208 ++++++++++++--------------
 tcg/mips/tcg-target.inc.c                    |  12 +-
 tcg/optimize.c                               |  16 +-
 tcg/ppc/tcg-target.inc.c                     |  60 ++++----
 tcg/s390/tcg-target.inc.c                    |  45 +++---
 tcg/sparc/tcg-target.inc.c                   |  58 ++------
 tcg/tcg-op.c                                 | 215 ++++++++++++++++++++-------
 tcg/tcg.c                                    |  18 +--
 tcg/tci/tcg-target.inc.c                     |   3 +-
 tests/qht-bench.c                            |   5 +-
 util/qsp.c                                   |  14 +-
 24 files changed, 460 insertions(+), 400 deletions(-)
 rename include/{exec/tb-hash-xx.h => qemu/xxhash.h} (73%)
From: "Catherine A. Frederick" <chocola@animebitch.es>

Sanitize shift constants so that shift operations with
large constants don't generate invalid instructions.

Signed-off-by: Catherine A. Frederick <chocola@animebitch.es>
Message-Id: <20200607211100.22858-1-agrecascino123@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.inc.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
 
     case INDEX_op_shl_i32:
         if (const_args[2]) {
-            tcg_out_shli32(s, args[0], args[1], args[2]);
+            /* Limit immediate shift count lest we create an illegal insn. */
+            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
         } else {
             tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
         }
         break;
     case INDEX_op_shr_i32:
         if (const_args[2]) {
-            tcg_out_shri32(s, args[0], args[1], args[2]);
+            /* Limit immediate shift count lest we create an illegal insn. */
+            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
         } else {
             tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
         }
         break;
     case INDEX_op_sar_i32:
         if (const_args[2]) {
-            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
+            /* Limit immediate shift count lest we create an illegal insn. */
+            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
         } else {
             tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
         }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
 
     case INDEX_op_shl_i64:
         if (const_args[2]) {
-            tcg_out_shli64(s, args[0], args[1], args[2]);
+            /* Limit immediate shift count lest we create an illegal insn. */
+            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
         } else {
             tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
         }
         break;
     case INDEX_op_shr_i64:
         if (const_args[2]) {
-            tcg_out_shri64(s, args[0], args[1], args[2]);
+            /* Limit immediate shift count lest we create an illegal insn. */
+            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
         } else {
             tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
         }
--
2.25.1
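A note on why the masking above is safe: TCG leaves shifts by amounts
greater than or equal to the word size unspecified, and the PowerPC shift
encodings only carry a 5-bit count for 32-bit operations (6 bits for
64-bit ones), so reducing the constant modulo the word size changes
nothing for well-defined guest code while keeping the encoder from
emitting an out-of-range field.  A hypothetical standalone illustration,
with names invented here rather than taken from QEMU:

#include <stdint.h>
#include <stdio.h>

/* Clamp a constant shift count to what the target can encode:
   a 5-bit field (0-31) for 32-bit shifts, 6 bits (0-63) for 64-bit. */
static unsigned sanitize_shift_count(uint64_t count, unsigned word_bits)
{
    return (unsigned)(count & (word_bits - 1));
}

int main(void)
{
    /* A constant count of 35 would overflow SRAWI's 5-bit SH field;
       masked, it encodes as 3, matching the "& 31" in the patch above. */
    printf("%u\n", sanitize_shift_count(35, 32));   /* prints 3 */
    printf("%u\n", sanitize_shift_count(35, 64));   /* prints 35 */
    return 0;
}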
Partially reverts ab20bdc1162.  The 14-bit displacement that we
allowed to reach the constant pool is not always sufficient.
Retain the tb-relative addressing, as that is how most return
values from the tb are computed.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc/tcg-target.inc.c | 47 ++++++++------------------------------
 1 file changed, 9 insertions(+), 38 deletions(-)

diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc/tcg-target.inc.c
+++ b/tcg/sparc/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
         insn &= ~INSN_OFF19(-1);
         insn |= INSN_OFF19(pcrel);
         break;
-    case R_SPARC_13:
-        /* Note that we're abusing this reloc type for our own needs. */
-        if (!check_fit_ptr(value, 13)) {
-            int adj = (value > 0 ? 0xff8 : -0x1000);
-            value -= adj;
-            assert(check_fit_ptr(value, 13));
-            *code_ptr++ = (ARITH_ADD | INSN_RD(TCG_REG_T2)
-                           | INSN_RS1(TCG_REG_TB) | INSN_IMM13(adj));
-            insn ^= INSN_RS1(TCG_REG_TB) ^ INSN_RS1(TCG_REG_T2);
-        }
-        insn &= ~INSN_IMM13(-1);
-        insn |= INSN_IMM13(value);
-        break;
-    case R_SPARC_32:
-        /* Note that we're abusing this reloc type for our own needs. */
-        code_ptr[0] = deposit32(code_ptr[0], 0, 22, value >> 10);
-        code_ptr[1] = deposit32(code_ptr[1], 0, 10, value);
-        return;
     default:
         g_assert_not_reached();
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
         return;
     }
 
+    /* A 13-bit constant relative to the TB. */
+    if (!in_prologue && USE_REG_TB) {
+        test = arg - (uintptr_t)s->code_gen_ptr;
+        if (check_fit_ptr(test, 13)) {
+            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
+            return;
+        }
+    }
+
     /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
     if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
         tcg_out_sethi(s, ret, arg);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
         return;
     }
 
-    if (!in_prologue) {
-        if (USE_REG_TB) {
-            intptr_t diff = arg - (uintptr_t)s->code_gen_ptr;
-            if (check_fit_ptr(diff, 13)) {
-                tcg_out_arithi(s, ret, TCG_REG_TB, diff, ARITH_ADD);
-            } else {
-                new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
-                               -(intptr_t)s->code_gen_ptr);
-                tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
-                /* May be used to extend the 13-bit range in patch_reloc. */
-                tcg_out32(s, NOP);
-            }
-        } else {
-            new_pool_label(s, arg, R_SPARC_32, s->code_ptr, 0);
-            tcg_out_sethi(s, ret, 0);
-            tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(ret) | INSN_IMM13(0));
-        }
-        return;
-    }
-
     /* A 64-bit constant decomposed into 2 32-bit pieces. */
     if (check_fit_i32(lo, 13)) {
         hi = (arg - lo) >> 32;
--
2.17.2
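After this revert, the shape of tcg_out_movi_int is: try the cheap
encodings first, use the tb-relative ADD when the value lands within a
signed 13-bit displacement of the TB start, and otherwise build the
constant from 32-bit pieces instead of loading it from a pool.  A rough
sketch of that selection order, with invented names (the real logic is
in tcg/sparc/tcg-target.inc.c):

#include <stdbool.h>
#include <stdint.h>

enum movi_strategy {
    MOVI_SMALL_IMM,     /* fits a signed 13-bit immediate directly */
    MOVI_TB_RELATIVE,   /* TCG_REG_TB plus a 13-bit displacement */
    MOVI_DECOMPOSE,     /* sethi/arithmetic on 32-bit pieces */
};

static bool fits13(intptr_t v)
{
    return v >= -4096 && v <= 4095;
}

static enum movi_strategy choose_movi(uint64_t arg, uintptr_t tb_start,
                                      bool use_reg_tb)
{
    if (fits13((intptr_t)arg)) {
        return MOVI_SMALL_IMM;
    }
    if (use_reg_tb && fits13((intptr_t)(arg - tb_start))) {
        return MOVI_TB_RELATIVE;
    }
    return MOVI_DECOMPOSE;
}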
The smin/smax/umin/umax operations require the operands to be
properly sign extended.  Do not drop the MO_SIGN bit from the
load, and additionally extend the val input.

Reviewed-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
Reported-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20200701165646.1901320-1-richard.henderson@linaro.org>
---
 tcg/tcg-op.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
 
     memop = tcg_canonicalize_memop(memop, 0, 0);
 
-    tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
-    gen(t2, t1, val);
+    tcg_gen_qemu_ld_i32(t1, addr, idx, memop);
+    tcg_gen_ext_i32(t2, val, memop);
+    gen(t2, t1, t2);
     tcg_gen_qemu_st_i32(t2, addr, idx, memop);
 
     tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
@@ -XXX,XX +XXX,XX @@ static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
 
     memop = tcg_canonicalize_memop(memop, 1, 0);
 
-    tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
-    gen(t2, t1, val);
+    tcg_gen_qemu_ld_i64(t1, addr, idx, memop);
+    tcg_gen_ext_i64(t2, val, memop);
+    gen(t2, t1, t2);
     tcg_gen_qemu_st_i64(t2, addr, idx, memop);
 
     tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
--
2.25.1
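A worked example of the bug being fixed: take an 8-bit atomic smin with
-128 (0x80) in memory and 5 as the operand.  With the MO_SIGN bit
dropped, the load zero-extends 0x80 to 128 and smin(128, 5) wrongly
yields 5; with the signed load (and the val input extended the same
way), the comparison sees -128 and picks it.  A standalone demonstration
in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t mem = 0x80;             /* -128 as a signed byte */
    int32_t val = 5;

    int32_t zext = (int32_t)(uint32_t)mem;  /* buggy: zero-extended, 128 */
    int32_t sext = (int32_t)(int8_t)mem;    /* fixed: sign-extended, -128 */

    printf("zero-extended smin: %d\n", zext < val ? zext : val);  /* 5 */
    printf("sign-extended smin: %d\n", sext < val ? sext : val);  /* -128 */
    return 0;
}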