Hi; here's a target-arm pullreq. Mostly this is some decodetree
conversion patches from me, plus a scattering of other bug fixes.

thanks
-- PMM

The following changes since commit e3660cc1e3cb136af50c0eaaeac27943c2438d1d:

  Merge tag 'pull-loongarch-20230616' of https://gitlab.com/gaosong/qemu into staging (2023-06-16 12:30:16 +0200)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230619

for you to fetch changes up to 074259c0f2ac40042dce766d870318cc22f388eb:

  hw/misc/bcm2835_property: Handle CORE_CLK_ID firmware property (2023-06-19 15:27:21 +0100)

----------------------------------------------------------------
target-arm queue:
 * Fix return value from LDSMIN/LDSMAX 8/16 bit atomics
 * Return correct result for LDG when ATA=0
 * Conversion of system insns, loads and stores to decodetree
 * hw/intc/allwinner-a10-pic: Handle IRQ levels other than 0 or 1
 * hw/sd/allwinner-sdhost: Don't send non-boolean IRQ line levels
 * hw/timer/nrf51_timer: Don't lose time when timer is queried in tight loop
 * hw/arm/Kconfig: sbsa-ref uses Bochs display
 * imx_serial: set wake bit when we receive a data byte
 * docs: sbsa: document board to firmware interface
 * hw/misc/bcm2835_property: avoid hard-coded constants

----------------------------------------------------------------
Marcin Juszkiewicz (2):
      hw/arm/Kconfig: sbsa-ref uses Bochs display
      docs: sbsa: document board to firmware interface

Martin Kaiser (1):
      imx_serial: set wake bit when we receive a data byte

Peter Maydell (26):
      target/arm: Fix return value from LDSMIN/LDSMAX 8/16 bit atomics
      target/arm: Return correct result for LDG when ATA=0
      target/arm: Pass memop to gen_mte_check1_mmuidx() in reg_imm9 decode
      target/arm: Consistently use finalize_memop_asimd() for ASIMD loads/stores
      target/arm: Convert hint instruction space to decodetree
      target/arm: Convert barrier insns to decodetree
      target/arm: Convert CFINV, XAFLAG and AXFLAG to decodetree
      target/arm: Convert MSR (immediate) to decodetree
      target/arm: Convert MSR (reg), MRS, SYS, SYSL to decodetree
      target/arm: Convert exception generation instructions to decodetree
      target/arm: Convert load/store exclusive and ordered to decodetree
      target/arm: Convert LDXP, STXP, CASP, CAS to decodetree
      target/arm: Convert load reg (literal) group to decodetree
      target/arm: Convert load/store-pair to decodetree
      target/arm: Convert ld/st reg+imm9 insns to decodetree
      target/arm: Convert LDR/STR with 12-bit immediate to decodetree
      target/arm: Convert LDR/STR reg+reg to decodetree
      target/arm: Convert atomic memory ops to decodetree
      target/arm: Convert load (pointer auth) insns to decodetree
      target/arm: Convert LDAPR/STLR (imm) to decodetree
      target/arm: Convert load/store (multiple structures) to decodetree
      target/arm: Convert load/store single structure to decodetree
      target/arm: Convert load/store tags insns to decodetree
      hw/intc/allwinner-a10-pic: Handle IRQ levels other than 0 or 1
      hw/sd/allwinner-sdhost: Don't send non-boolean IRQ line levels
      hw/timer/nrf51_timer: Don't lose time when timer is queried in tight loop

Sergey Kambalin (4):
      hw/arm/raspi: Import Linux raspi definitions as 'raspberrypi-fw-defs.h'
      hw/misc/bcm2835_property: Use 'raspberrypi-fw-defs.h' definitions
      hw/misc/bcm2835_property: Replace magic frequency values by definitions
      hw/misc/bcm2835_property: Handle CORE_CLK_ID firmware property

 docs/system/arm/sbsa.rst              |   38 +-
 include/hw/arm/raspi_platform.h       |   10 +
 include/hw/char/imx_serial.h          |    1 +
 include/hw/misc/raspberrypi-fw-defs.h |  163 ++
 target/arm/tcg/a64.decode             |  403 ++++
 hw/char/imx_serial.c                  |    5 +-
 hw/intc/allwinner-a10-pic.c           |    2 +-
 hw/misc/bcm2835_property.c            |  112 +-
 hw/sd/allwinner-sdhost.c              |    2 +-
 hw/timer/nrf51_timer.c                |    7 +-
 target/arm/tcg/translate-a64.c        | 3319 +++++++++++++++------------------
 hw/arm/Kconfig                        |    1 +
 12 files changed, 2157 insertions(+), 1906 deletions(-)
 create mode 100644 include/hw/misc/raspberrypi-fw-defs.h
The atomic memory operations are supposed to return the old memory
data value in the destination register.  This value is not
sign-extended, even if the operation is the signed minimum or
maximum.  (In the pseudocode for the instructions the returned data
value is passed to ZeroExtend() to create the value in the register.)

We got this wrong because we were doing a 32-to-64 zero extend on the
result for 8 and 16 bit data values, rather than the correct amount
of zero extension.

Fix the bug by using ext8u and ext16u for the MO_8 and MO_16 data
sizes rather than ext32u.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-2-peter.maydell@linaro.org
---
 target/arm/tcg/translate-a64.c | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
      */
     fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);
 
-    if ((mop & MO_SIGN) && size != MO_64) {
-        tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
+    if (mop & MO_SIGN) {
+        switch (size) {
+        case MO_8:
+            tcg_gen_ext8u_i64(tcg_rt, tcg_rt);
+            break;
+        case MO_16:
+            tcg_gen_ext16u_i64(tcg_rt, tcg_rt);
+            break;
+        case MO_32:
+            tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
+            break;
+        case MO_64:
+            break;
+        default:
+            g_assert_not_reached();
+        }
     }
 }
 
-- 
2.34.1
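As a quick illustration of the fix above, here is a minimal standalone C
sketch (not QEMU code; extend_result() and the sample values are invented
for this note) of the required behaviour: the old data value written back
to the destination register is zero-extended to the access size, even
when the min/max comparison itself was signed.

#include <stdint.h>
#include <stdio.h>

/* Model of the fixed tail of disas_ldst_atomic(): always zero-extend
 * the loaded value to the access size, like tcg_gen_ext{8,16,32}u_i64. */
static uint64_t extend_result(uint64_t loaded, unsigned size_log2)
{
    switch (size_log2) {
    case 0:  return (uint8_t)loaded;
    case 1:  return (uint16_t)loaded;
    case 2:  return (uint32_t)loaded;
    default: return loaded;           /* 64-bit: nothing to do */
    }
}

int main(void)
{
    /* An 8-bit LDSMIN that loaded 0x80 (-128 as int8_t): the old code's
     * 32-bit zero-extend could leave stray bits from the signed
     * comparison path; the destination must read back as exactly 0x80. */
    printf("%#llx\n", (unsigned long long)extend_result(0xffffff80, 0));
    return 0;
}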
The LDG instruction loads the tag from a memory address (identified
by [Xn + offset]), and then merges that tag into the destination
register Xt.  We implemented this correctly for the case when
allocation tags are enabled, but didn't get it right when ATA=0:
instead of merging the tag bits into Xt, we merged them into the
memory address [Xn + offset] and then set Xt to that.

Merge the tag bits into the old Xt value, as they should be.

Cc: qemu-stable@nongnu.org
Fixes: c15294c1e36a7dd9b25 ("target/arm: Implement LDG, STG, ST2G instructions")
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/tcg/translate-a64.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn)
         if (s->ata) {
             gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
         } else {
+            /*
+             * Tag access disabled: we must check for aborts on the load
+             * load from [rn+offset], and then insert a 0 tag into rt.
+             */
             clean_addr = clean_data_tbi(s, addr);
             gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
-            gen_address_with_allocation_tag0(tcg_rt, addr);
+            gen_address_with_allocation_tag0(tcg_rt, tcg_rt);
         }
     } else {
         tcg_rt = cpu_reg_sp(s, rt);
-- 
2.34.1
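To see the difference the one-line fix makes, here is a hedged C sketch
(with_tag0() is a stand-in for gen_address_with_allocation_tag0(), and
the assumption that the allocation-tag field lives in bits [59:56] is
spelled out in the code; the values are made up):

#include <stdint.h>
#include <stdio.h>

/* Assumed model: insert a zero allocation tag into bits [59:56]. */
static uint64_t with_tag0(uint64_t x)
{
    return x & ~0x0f00000000000000ull;
}

int main(void)
{
    uint64_t xt   = 0x0b00000000005678ull; /* old destination value   */
    uint64_t addr = 0x0a00000000001234ull; /* [Xn + offset]           */
    /* Buggy behaviour: Xt was derived from the address, not from Xt. */
    printf("buggy: %#llx\n", (unsigned long long)with_tag0(addr));
    /* Fixed behaviour: the tag field of the old Xt is replaced by 0. */
    printf("fixed: %#llx\n", (unsigned long long)with_tag0(xt));
    return 0;
}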
In disas_ldst_reg_imm9() we missed one place where a call to
a gen_mte_check* function should now be passed the memop we
have created rather than just being passed the size. Fix this.

Fixes: 0a9091424d ("target/arm: Pass memop to gen_mte_check1*")
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
 target/arm/tcg/translate-a64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
 
     clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                        writeback || rn != 31,
-                                       size, is_unpriv, memidx);
+                                       memop, is_unpriv, memidx);
 
     if (is_vector) {
         if (is_store) {
-- 
2.34.1
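The point of passing the memop rather than the size is that a MemOp
carries more than the raw access width. A small self-contained C sketch
(the flag layout here is invented for illustration and is not QEMU's
actual MemOp encoding) shows what gets lost when only the size is
forwarded:

#include <stdio.h>

enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
       MO_SIGN = 1 << 2, MO_ALIGN = 1 << 3 };  /* simplified flags */

static void check(int memop)
{
    printf("size=%d sign=%d align=%d\n",
           memop & 3, !!(memop & MO_SIGN), !!(memop & MO_ALIGN));
}

int main(void)
{
    int memop = MO_16 | MO_SIGN | MO_ALIGN;
    check(memop & 3);  /* like the bug: sign/alignment info stripped */
    check(memop);      /* like the fix: the full memop is forwarded  */
    return 0;
}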
In the recent refactoring we missed a few places which should be
calling finalize_memop_asimd() for ASIMD loads and stores but
instead are just calling finalize_memop(); fix these.

For the disas_ldst_single_struct() and disas_ldst_multiple_struct()
cases, this is not a behaviour change because there the size
is never MO_128 and the two finalize functions do the same thing.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/tcg/translate-a64.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
         if (!fp_access_check(s)) {
             return;
         }
+        memop = finalize_memop_asimd(s, size);
     } else {
         if (size == 3 && opc == 2) {
             /* PRFM - prefetch */
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
         is_store = (opc == 0);
         is_signed = !is_store && extract32(opc, 1, 1);
         is_extended = (size < 3) && extract32(opc, 0, 1);
+        memop = finalize_memop(s, size + is_signed * MO_SIGN);
     }
 
     if (rn == 31) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
 
     tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
 
-    memop = finalize_memop(s, size + is_signed * MO_SIGN);
     clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, memop);
 
     if (is_vector) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
         if (!fp_access_check(s)) {
             return;
         }
+        memop = finalize_memop_asimd(s, size);
     } else {
         if (size == 3 && opc == 2) {
             /* PRFM - prefetch */
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
         is_store = (opc == 0);
         is_signed = !is_store && extract32(opc, 1, 1);
         is_extended = (size < 3) && extract32(opc, 0, 1);
+        memop = finalize_memop(s, size + is_signed * MO_SIGN);
     }
 
     if (rn == 31) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
     offset = imm12 << size;
     tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
 
-    memop = finalize_memop(s, size + is_signed * MO_SIGN);
     clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, memop);
 
     if (is_vector) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
      * promote consecutive little-endian elements below.
      */
     clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
-                                total, finalize_memop(s, size));
+                                total, finalize_memop_asimd(s, size));
 
     /*
      * Consecutive little-endian elements from a single register
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     total = selem << scale;
     tcg_rn = cpu_reg_sp(s, rn);
 
-    mop = finalize_memop(s, scale);
+    mop = finalize_memop_asimd(s, scale);
 
     clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
                                 total, mop);
-- 
2.34.1
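A toy C sketch of the "no behaviour change for the struct cases" claim
above (these are not QEMU's real helpers; the enum and flag values are
assumptions made up for this note, on the premise that the two finalize
functions only diverge for MO_128, where ASIMD wants the stricter
16-byte alignment check):

#include <stdio.h>

enum { MO_8, MO_16, MO_32, MO_64, MO_128 };
enum { MO_ALIGN = 0x10, MO_ALIGN_16 = 0x20 };  /* invented values */

static int finalize_memop(int size) { return size | MO_ALIGN; }

static int finalize_memop_asimd(int size)
{
    return size == MO_128 ? (MO_128 | MO_ALIGN_16) : finalize_memop(size);
}

int main(void)
{
    /* For every size below MO_128 the two results agree, which is why
     * the struct-load/store hunks above are not a behaviour change. */
    for (int s = MO_8; s <= MO_128; s++) {
        printf("size %d: plain=%#x asimd=%#x\n",
               s, finalize_memop(s), finalize_memop_asimd(s));
    }
    return 0;
}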
Convert the various instructions in the hint instruction space
to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-3-peter.maydell@linaro.org
---
 target/arm/tcg/a64.decode      |  31 ++++
 target/arm/tcg/translate-a64.c | 277 ++++++++++++++++++---------------
 2 files changed, 185 insertions(+), 123 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ ERETA           1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB
 # the processor is in halting debug state (which we don't implement).
 # The pattern is listed here as documentation.
 # DRPS            1101011 0101 11111 000000 11111 00000
+
+# Hint instruction group
+{
+  [
+    YIELD       1101 0101 0000 0011 0010 0000 001 11111
+    WFE         1101 0101 0000 0011 0010 0000 010 11111
+    WFI         1101 0101 0000 0011 0010 0000 011 11111
+    # We implement WFE to never block, so our SEV/SEVL are NOPs
+    # SEV       1101 0101 0000 0011 0010 0000 100 11111
+    # SEVL      1101 0101 0000 0011 0010 0000 101 11111
+    # Our DGL is a NOP because we don't merge memory accesses anyway.
+    # DGL       1101 0101 0000 0011 0010 0000 110 11111
+    XPACLRI     1101 0101 0000 0011 0010 0000 111 11111
+    PACIA1716   1101 0101 0000 0011 0010 0001 000 11111
+    PACIB1716   1101 0101 0000 0011 0010 0001 010 11111
+    AUTIA1716   1101 0101 0000 0011 0010 0001 100 11111
+    AUTIB1716   1101 0101 0000 0011 0010 0001 110 11111
+    ESB         1101 0101 0000 0011 0010 0010 000 11111
+    PACIAZ      1101 0101 0000 0011 0010 0011 000 11111
+    PACIASP     1101 0101 0000 0011 0010 0011 001 11111
+    PACIBZ      1101 0101 0000 0011 0010 0011 010 11111
+    PACIBSP     1101 0101 0000 0011 0010 0011 011 11111
+    AUTIAZ      1101 0101 0000 0011 0010 0011 100 11111
+    AUTIASP     1101 0101 0000 0011 0010 0011 101 11111
+    AUTIBZ      1101 0101 0000 0011 0010 0011 110 11111
+    AUTIBSP     1101 0101 0000 0011 0010 0011 111 11111
+  ]
+  # The canonical NOP has CRm == op2 == 0, but all of the space
+  # that isn't specifically allocated to an instruction must NOP
+  NOP           1101 0101 0000 0011 0010 ---- --- 11111
+}
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_ERETA(DisasContext *s, arg_reta *a)
     return true;
 }
 
-/* HINT instruction group, including various allocated HINTs */
-static void handle_hint(DisasContext *s, uint32_t insn,
-                        unsigned int op1, unsigned int op2, unsigned int crm)
+static bool trans_NOP(DisasContext *s, arg_NOP *a)
 {
-    unsigned int selector = crm << 3 | op2;
+    return true;
+}
 
-    if (op1 != 3) {
-        unallocated_encoding(s);
-        return;
+static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
+{
+    /*
+     * When running in MTTCG we don't generate jumps to the yield and
+     * WFE helpers as it won't affect the scheduling of other vCPUs.
+     * If we wanted to more completely model WFE/SEV so we don't busy
+     * spin unnecessarily we would need to do something more involved.
+     */
+    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+        s->base.is_jmp = DISAS_YIELD;
     }
+    return true;
+}
 
-    switch (selector) {
-    case 0b00000: /* NOP */
-        break;
-    case 0b00011: /* WFI */
-        s->base.is_jmp = DISAS_WFI;
-        break;
-    case 0b00001: /* YIELD */
-        /* When running in MTTCG we don't generate jumps to the yield and
-         * WFE helpers as it won't affect the scheduling of other vCPUs.
-         * If we wanted to more completely model WFE/SEV so we don't busy
-         * spin unnecessarily we would need to do something more involved.
+static bool trans_WFI(DisasContext *s, arg_WFI *a)
+{
+    s->base.is_jmp = DISAS_WFI;
+    return true;
+}
+
+static bool trans_WFE(DisasContext *s, arg_WFI *a)
+{
+    /*
+     * When running in MTTCG we don't generate jumps to the yield and
+     * WFE helpers as it won't affect the scheduling of other vCPUs.
+     * If we wanted to more completely model WFE/SEV so we don't busy
+     * spin unnecessarily we would need to do something more involved.
+     */
+    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+        s->base.is_jmp = DISAS_WFE;
+    }
+    return true;
+}
+
+static bool trans_XPACLRI(DisasContext *s, arg_XPACLRI *a)
+{
+    if (s->pauth_active) {
+        gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
+    }
+    return true;
+}
+
+static bool trans_PACIA1716(DisasContext *s, arg_PACIA1716 *a)
+{
+    if (s->pauth_active) {
+        gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+    }
+    return true;
+}
+
+static bool trans_PACIB1716(DisasContext *s, arg_PACIB1716 *a)
+{
+    if (s->pauth_active) {
+        gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+    }
+    return true;
+}
+
+static bool trans_AUTIA1716(DisasContext *s, arg_AUTIA1716 *a)
+{
+    if (s->pauth_active) {
+        gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+    }
+    return true;
+}
+
+static bool trans_AUTIB1716(DisasContext *s, arg_AUTIB1716 *a)
+{
+    if (s->pauth_active) {
+        gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+    }
+    return true;
+}
+
+static bool trans_ESB(DisasContext *s, arg_ESB *a)
+{
+    /* Without RAS, we must implement this as NOP. */
+    if (dc_isar_feature(aa64_ras, s)) {
+        /*
+         * QEMU does not have a source of physical SErrors,
+         * so we are only concerned with virtual SErrors.
+         * The pseudocode in the ARM for this case is
+         *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
+         *      AArch64.vESBOperation();
+         * Most of the condition can be evaluated at translation time.
+         * Test for EL2 present, and defer test for SEL2 to runtime.
          */
-        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
-            s->base.is_jmp = DISAS_YIELD;
+        if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
+            gen_helper_vesb(cpu_env);
         }
-        break;
-    case 0b00010: /* WFE */
-        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
-            s->base.is_jmp = DISAS_WFE;
-        }
-        break;
-    case 0b00100: /* SEV */
-    case 0b00101: /* SEVL */
-    case 0b00110: /* DGH */
-        /* we treat all as NOP at least for now */
-        break;
-    case 0b00111: /* XPACLRI */
-        if (s->pauth_active) {
-            gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
-        }
-        break;
-    case 0b01000: /* PACIA1716 */
-        if (s->pauth_active) {
-            gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
-        }
-        break;
-    case 0b01010: /* PACIB1716 */
-        if (s->pauth_active) {
-            gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
-        }
-        break;
-    case 0b01100: /* AUTIA1716 */
-        if (s->pauth_active) {
-            gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
-        }
-        break;
-    case 0b01110: /* AUTIB1716 */
-        if (s->pauth_active) {
-            gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
-        }
-        break;
-    case 0b10000: /* ESB */
-        /* Without RAS, we must implement this as NOP. */
-        if (dc_isar_feature(aa64_ras, s)) {
-            /*
-             * QEMU does not have a source of physical SErrors,
-             * so we are only concerned with virtual SErrors.
-             * The pseudocode in the ARM for this case is
-             *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
-             *      AArch64.vESBOperation();
-             * Most of the condition can be evaluated at translation time.
-             * Test for EL2 present, and defer test for SEL2 to runtime.
-             */
-            if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
-                gen_helper_vesb(cpu_env);
-            }
-        }
-        break;
-    case 0b11000: /* PACIAZ */
-        if (s->pauth_active) {
-            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
-                             tcg_constant_i64(0));
-        }
-        break;
-    case 0b11001: /* PACIASP */
-        if (s->pauth_active) {
-            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
-        }
-        break;
-    case 0b11010: /* PACIBZ */
-        if (s->pauth_active) {
-            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
-                             tcg_constant_i64(0));
-        }
-        break;
-    case 0b11011: /* PACIBSP */
-        if (s->pauth_active) {
-            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
-        }
-        break;
-    case 0b11100: /* AUTIAZ */
-        if (s->pauth_active) {
-            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
-                             tcg_constant_i64(0));
-        }
-        break;
-    case 0b11101: /* AUTIASP */
-        if (s->pauth_active) {
-            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
-        }
-        break;
-    case 0b11110: /* AUTIBZ */
-        if (s->pauth_active) {
-            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
-                             tcg_constant_i64(0));
-        }
-        break;
-    case 0b11111: /* AUTIBSP */
-        if (s->pauth_active) {
-            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
-        }
-        break;
-    default:
-        /* default specified as NOP equivalent */
-        break;
     }
+    return true;
+}
+
+static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a)
+{
+    if (s->pauth_active) {
+        gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
+    }
+    return true;
+}
+
+static bool trans_PACIASP(DisasContext *s, arg_PACIASP *a)
+{
+    if (s->pauth_active) {
+        gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+    }
+    return true;
+}
+
+static bool trans_PACIBZ(DisasContext *s, arg_PACIBZ *a)
+{
+    if (s->pauth_active) {
+        gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
+    }
+    return true;
+}
+
+static bool trans_PACIBSP(DisasContext *s, arg_PACIBSP *a)
+{
+    if (s->pauth_active) {
+        gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+    }
+    return true;
+}
+
+static bool trans_AUTIAZ(DisasContext *s, arg_AUTIAZ *a)
+{
+    if (s->pauth_active) {
+        gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
+    }
+    return true;
+}
+
+static bool trans_AUTIASP(DisasContext *s, arg_AUTIASP *a)
+{
+    if (s->pauth_active) {
+        gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+    }
+    return true;
+}
+
+static bool trans_AUTIBZ(DisasContext *s, arg_AUTIBZ *a)
+{
+    if (s->pauth_active) {
+        gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
+    }
+    return true;
+}
+
+static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
+{
+    if (s->pauth_active) {
+        gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+    }
+    return true;
 }
 
 static void gen_clrex(DisasContext *s, uint32_t insn)
@@ -XXX,XX +XXX,XX @@ static void disas_system(DisasContext *s, uint32_t insn)
         return;
     }
     switch (crn) {
-    case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
-        handle_hint(s, insn, op1, op2, crm);
-        break;
     case 3: /* CLREX, DSB, DMB, ISB */
         handle_sync(s, insn, op1, op2, crm);
         break;
-- 
2.34.1
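The decodetree group above relies on "anything in the hint space that
isn't an allocated pattern decodes as NOP". A toy flat decoder in C
(hypothetical, hand-written rather than decodetree output; only a few
hints are shown) makes the fallback behaviour concrete:

#include <stdint.h>
#include <stdio.h>

/* Match the fixed bits 1101 0101 0000 0011 0010 .... ... 11111 first,
 * then dispatch on the CRm:op2 selector; unallocated hints must NOP. */
static const char *decode_hint(uint32_t insn)
{
    if ((insn & 0xfffff01fu) != 0xd503201fu) {
        return "not in the hint space";
    }
    switch ((insn >> 5) & 0x7f) {   /* CRm:op2 selector */
    case 0b0000001: return "YIELD";
    case 0b0000010: return "WFE";
    case 0b0000011: return "WFI";
    default:        return "NOP";
    }
}

int main(void)
{
    printf("%s\n", decode_hint(0xd503207fu)); /* WFI encoding */
    printf("%s\n", decode_hint(0xd50320dfu)); /* allocated hint treated as NOP */
    return 0;
}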
Convert the insns in the "Barriers" instruction class to
decodetree: CLREX, DSB, DMB, ISB and SB.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-4-peter.maydell@linaro.org
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
 target/arm/tcg/a64.decode      |  7 +++
 target/arm/tcg/translate-a64.c | 92 ++++++++++++++--------------------
 2 files changed, 46 insertions(+), 53 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ ERETA           1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB
   # that isn't specifically allocated to an instruction must NOP
   NOP           1101 0101 0000 0011 0010 ---- --- 11111
 }
+
+# Barriers
+
+CLREX           1101 0101 0000 0011 0011 ---- 010 11111
+DSB_DMB         1101 0101 0000 0011 0011 domain:2 types:2 10- 11111
+ISB             1101 0101 0000 0011 0011 ---- 110 11111
+SB              1101 0101 0000 0011 0011 0000 111 11111
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
     return true;
 }
 
-static void gen_clrex(DisasContext *s, uint32_t insn)
+static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
 {
     tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+    return true;
 }
 
-/* CLREX, DSB, DMB, ISB */
-static void handle_sync(DisasContext *s, uint32_t insn,
-                        unsigned int op1, unsigned int op2, unsigned int crm)
+static bool trans_DSB_DMB(DisasContext *s, arg_DSB_DMB *a)
 {
+    /* We handle DSB and DMB the same way */
     TCGBar bar;
 
-    if (op1 != 3) {
-        unallocated_encoding(s);
-        return;
+    switch (a->types) {
+    case 1: /* MBReqTypes_Reads */
+        bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
+        break;
+    case 2: /* MBReqTypes_Writes */
+        bar = TCG_BAR_SC | TCG_MO_ST_ST;
+        break;
+    default: /* MBReqTypes_All */
+        bar = TCG_BAR_SC | TCG_MO_ALL;
+        break;
     }
+    tcg_gen_mb(bar);
+    return true;
+}
 
-    switch (op2) {
-    case 2: /* CLREX */
-        gen_clrex(s, insn);
-        return;
-    case 4: /* DSB */
-    case 5: /* DMB */
-        switch (crm & 3) {
-        case 1: /* MBReqTypes_Reads */
-            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
-            break;
-        case 2: /* MBReqTypes_Writes */
-            bar = TCG_BAR_SC | TCG_MO_ST_ST;
-            break;
-        default: /* MBReqTypes_All */
-            bar = TCG_BAR_SC | TCG_MO_ALL;
-            break;
-        }
-        tcg_gen_mb(bar);
-        return;
-    case 6: /* ISB */
-        /* We need to break the TB after this insn to execute
-         * a self-modified code correctly and also to take
-         * any pending interrupts immediately.
-         */
-        reset_btype(s);
-        gen_goto_tb(s, 0, 4);
-        return;
+static bool trans_ISB(DisasContext *s, arg_ISB *a)
+{
+    /*
+     * We need to break the TB after this insn to execute
+     * self-modifying code correctly and also to take
+     * any pending interrupts immediately.
+     */
+    reset_btype(s);
+    gen_goto_tb(s, 0, 4);
+    return true;
+}
 
-    case 7: /* SB */
-        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
-            goto do_unallocated;
-        }
-        /*
-         * TODO: There is no speculation barrier opcode for TCG;
-         * MB and end the TB instead.
-         */
-        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
-        gen_goto_tb(s, 0, 4);
-        return;
-
-    default:
-    do_unallocated:
-        unallocated_encoding(s);
-        return;
+static bool trans_SB(DisasContext *s, arg_SB *a)
+{
+    if (!dc_isar_feature(aa64_sb, s)) {
+        return false;
     }
+    /*
+     * TODO: There is no speculation barrier opcode for TCG;
+     * MB and end the TB instead.
+     */
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+    gen_goto_tb(s, 0, 4);
+    return true;
 }
 
 static void gen_xaflag(void)
@@ -XXX,XX +XXX,XX @@ static void disas_system(DisasContext *s, uint32_t insn)
         return;
     }
     switch (crn) {
-    case 3: /* CLREX, DSB, DMB, ISB */
-        handle_sync(s, insn, op1, op2, crm);
-        break;
     case 4: /* MSR (immediate) */
         handle_msr_i(s, insn, op1, op2, crm);
         break;
-- 
2.34.1
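For readers less familiar with the DMB 'types' field handled in
trans_DSB_DMB() above, here is a loose C11 analogy (an assumption for
illustration only: TCG's TCG_MO_* ordering constraints are not C11
memory orders, and dmb_model() is invented for this note):

#include <stdatomic.h>
#include <stdio.h>

static void dmb_model(unsigned types)
{
    switch (types & 3) {
    case 1: /* MBReqTypes_Reads: order loads against later loads/stores */
        atomic_thread_fence(memory_order_acquire);
        break;
    case 2: /* MBReqTypes_Writes: order stores against later stores */
        atomic_thread_fence(memory_order_release);
        break;
    default: /* MBReqTypes_All: full barrier */
        atomic_thread_fence(memory_order_seq_cst);
        break;
    }
}

int main(void)
{
    dmb_model(1);   /* roughly DMB ..LD */
    dmb_model(2);   /* roughly DMB ..ST */
    dmb_model(3);   /* roughly DMB ..   */
    puts("fences issued");
    return 0;
}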
Convert the CFINV, XAFLAG and AXFLAG insns to decodetree.
The old decoder handles these in handle_msr_i(), but
the architecture defines them as separate instructions
from MSR (immediate).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-5-peter.maydell@linaro.org
---
 target/arm/tcg/a64.decode      |  6 ++++
 target/arm/tcg/translate-a64.c | 53 +++++++++++++++++-----------------
 2 files changed, 32 insertions(+), 27 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ CLREX           1101 0101 0000 0011 0011 ---- 010 11111
 DSB_DMB         1101 0101 0000 0011 0011 domain:2 types:2 10- 11111
 ISB             1101 0101 0000 0011 0011 ---- 110 11111
 SB              1101 0101 0000 0011 0011 0000 111 11111
+
+# PSTATE
+
+CFINV           1101 0101 0000 0 000 0100 0000 000 11111
+XAFLAG          1101 0101 0000 0 000 0100 0000 001 11111
+AXFLAG          1101 0101 0000 0 000 0100 0000 010 11111
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_SB(DisasContext *s, arg_SB *a)
     return true;
 }
 
-static void gen_xaflag(void)
+static bool trans_CFINV(DisasContext *s, arg_CFINV *a)
 {
-    TCGv_i32 z = tcg_temp_new_i32();
+    if (!dc_isar_feature(aa64_condm_4, s)) {
+        return false;
+    }
+    tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
+    return true;
+}
+
+static bool trans_XAFLAG(DisasContext *s, arg_XAFLAG *a)
+{
+    TCGv_i32 z;
+
+    if (!dc_isar_feature(aa64_condm_5, s)) {
+        return false;
+    }
+
+    z = tcg_temp_new_i32();
 
     tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);
 
@@ -XXX,XX +XXX,XX @@ static void gen_xaflag(void)
 
     /* C | Z */
     tcg_gen_or_i32(cpu_CF, cpu_CF, z);
+
+    return true;
 }
 
-static void gen_axflag(void)
+static bool trans_AXFLAG(DisasContext *s, arg_AXFLAG *a)
 {
+    if (!dc_isar_feature(aa64_condm_5, s)) {
+        return false;
+    }
+
     tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
     tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */
 
@@ -XXX,XX +XXX,XX @@ static void gen_axflag(void)
 
     tcg_gen_movi_i32(cpu_NF, 0);
     tcg_gen_movi_i32(cpu_VF, 0);
+
+    return true;
 }
 
 /* MSR (immediate) - move immediate to processor state field */
@@ -XXX,XX +XXX,XX @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
     s->base.is_jmp = DISAS_TOO_MANY;
 
     switch (op) {
-    case 0x00: /* CFINV */
-        if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
-            goto do_unallocated;
-        }
-        tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
-        s->base.is_jmp = DISAS_NEXT;
-        break;
-
-    case 0x01: /* XAFlag */
-        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
-            goto do_unallocated;
-        }
-        gen_xaflag();
-        s->base.is_jmp = DISAS_NEXT;
-        break;
-
-    case 0x02: /* AXFlag */
-        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
-            goto do_unallocated;
-        }
-        gen_axflag();
-        s->base.is_jmp = DISAS_NEXT;
-        break;
-
     case 0x03: /* UAO */
         if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
             goto do_unallocated;
-- 
2.34.1
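The AXFLAG flag manipulation implemented by trans_AXFLAG() above can be
checked with a small standalone C model (assumed semantics taken from
the Arm ARM pseudocode, C := C & !V, Z := Z | V, N := V := 0; the struct
and function here are invented for this note, not QEMU code):

#include <stdio.h>

struct nzcv { int n, z, c, v; };

static struct nzcv axflag(struct nzcv f)
{
    f.c &= !f.v;  /* C & !V */
    f.z |= f.v;   /* Z | V  */
    f.n = 0;
    f.v = 0;
    return f;
}

int main(void)
{
    /* FCMP of unordered operands sets NZCV = 0011 ... */
    struct nzcv unordered = { .n = 0, .z = 0, .c = 1, .v = 1 };
    struct nzcv r = axflag(unordered);
    /* ... which AXFLAG folds to NZCV = 0100, clearing C and V. */
    printf("N=%d Z=%d C=%d V=%d\n", r.n, r.z, r.c, r.v);
    return 0;
}

CFINV, by contrast, is just the single flag inversion visible in the
patch: C ^= 1.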
Convert the MSR (immediate) insn to decodetree.  Our implementation
has basically no commonality between the different destinations,
so we decode the destination register in a64.decode.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-6-peter.maydell@linaro.org
---
 target/arm/tcg/a64.decode      |  13 ++
 target/arm/tcg/translate-a64.c | 251 ++++++++++++++++-----------------
 2 files changed, 136 insertions(+), 128 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ SB              1101 0101 0000 0011 0011 0000 111 11111
 CFINV           1101 0101 0000 0 000 0100 0000 000 11111
 XAFLAG          1101 0101 0000 0 000 0100 0000 001 11111
 AXFLAG          1101 0101 0000 0 000 0100 0000 010 11111
+
+# These are architecturally all "MSR (immediate)"; we decode the destination
+# register too because there is no commonality in our implementation.
+@msr_i          .... .... .... . ... .... imm:4 ... .....
+MSR_i_UAO       1101 0101 0000 0 000 0100 .... 011 11111 @msr_i
+MSR_i_PAN       1101 0101 0000 0 000 0100 .... 100 11111 @msr_i
+MSR_i_SPSEL     1101 0101 0000 0 000 0100 .... 101 11111 @msr_i
+MSR_i_SBSS      1101 0101 0000 0 011 0100 .... 001 11111 @msr_i
+MSR_i_DIT       1101 0101 0000 0 011 0100 .... 010 11111 @msr_i
+MSR_i_TCO       1101 0101 0000 0 011 0100 .... 100 11111 @msr_i
+MSR_i_DAIFSET   1101 0101 0000 0 011 0100 .... 110 11111 @msr_i
+MSR_i_DAIFCLEAR 1101 0101 0000 0 011 0100 .... 111 11111 @msr_i
+MSR_i_SVCR      1101 0101 0000 0 011 0100 0 mask:2 imm:1 011 11111
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_AXFLAG(DisasContext *s, arg_AXFLAG *a)
     return true;
 }
 
-/* MSR (immediate) - move immediate to processor state field */
-static void handle_msr_i(DisasContext *s, uint32_t insn,
-                        unsigned int op1, unsigned int op2, unsigned int crm)
+static bool trans_MSR_i_UAO(DisasContext *s, arg_i *a)
 {
-    int op = op1 << 3 | op2;
-
-    /* End the TB by default, chaining is ok. */
-    s->base.is_jmp = DISAS_TOO_MANY;
-
-    switch (op) {
-    case 0x03: /* UAO */
-        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
-            goto do_unallocated;
-        }
-        if (crm & 1) {
-            set_pstate_bits(PSTATE_UAO);
-        } else {
-            clear_pstate_bits(PSTATE_UAO);
-        }
-        gen_rebuild_hflags(s);
-        break;
-
-    case 0x04: /* PAN */
-        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
-            goto do_unallocated;
-        }
-        if (crm & 1) {
-            set_pstate_bits(PSTATE_PAN);
-        } else {
-            clear_pstate_bits(PSTATE_PAN);
-        }
-        gen_rebuild_hflags(s);
-        break;
-
-    case 0x05: /* SPSel */
-        if (s->current_el == 0) {
-            goto do_unallocated;
-        }
-        gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(crm & PSTATE_SP));
-        break;
-
-    case 0x19: /* SSBS */
-        if (!dc_isar_feature(aa64_ssbs, s)) {
-            goto do_unallocated;
-        }
-        if (crm & 1) {
-            set_pstate_bits(PSTATE_SSBS);
-        } else {
-            clear_pstate_bits(PSTATE_SSBS);
-        }
-        /* Don't need to rebuild hflags since SSBS is a nop */
-        break;
-
-    case 0x1a: /* DIT */
-        if (!dc_isar_feature(aa64_dit, s)) {
-            goto do_unallocated;
-        }
-        if (crm & 1) {
-            set_pstate_bits(PSTATE_DIT);
-        } else {
-            clear_pstate_bits(PSTATE_DIT);
-        }
-        /* There's no need to rebuild hflags because DIT is a nop */
-        break;
-
-    case 0x1e: /* DAIFSet */
-        gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(crm));
-        break;
-
-    case 0x1f: /* DAIFClear */
-        gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(crm));
-        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
-        s->base.is_jmp = DISAS_UPDATE_EXIT;
-        break;
-
-    case 0x1c: /* TCO */
-        if (dc_isar_feature(aa64_mte, s)) {
-            /* Full MTE is enabled -- set the TCO bit as directed. */
-            if (crm & 1) {
-                set_pstate_bits(PSTATE_TCO);
-            } else {
-                clear_pstate_bits(PSTATE_TCO);
-            }
-            gen_rebuild_hflags(s);
-            /* Many factors, including TCO, go into MTE_ACTIVE. */
-            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
-        } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
-            /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
-            s->base.is_jmp = DISAS_NEXT;
-        } else {
-            goto do_unallocated;
-        }
-        break;
-
-    case 0x1b: /* SVCR* */
-        if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) {
-            goto do_unallocated;
-        }
-        if (sme_access_check(s)) {
-            int old = s->pstate_sm | (s->pstate_za << 1);
-            int new = (crm & 1) * 3;
-            int msk = (crm >> 1) & 3;
-
-            if ((old ^ new) & msk) {
-                /* At least one bit changes. */
-                gen_helper_set_svcr(cpu_env, tcg_constant_i32(new),
-                                    tcg_constant_i32(msk));
-            } else {
-                s->base.is_jmp = DISAS_NEXT;
-            }
-        }
-        break;
-
-    default:
-    do_unallocated:
-        unallocated_encoding(s);
-        return;
+    if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
+        return false;
     }
+    if (a->imm & 1) {
+        set_pstate_bits(PSTATE_UAO);
+    } else {
+        clear_pstate_bits(PSTATE_UAO);
+    }
+    gen_rebuild_hflags(s);
+    s->base.is_jmp = DISAS_TOO_MANY;
+    return true;
+}
+
+static bool trans_MSR_i_PAN(DisasContext *s, arg_i *a)
+{
+    if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
+        return false;
+    }
+    if (a->imm & 1) {
+        set_pstate_bits(PSTATE_PAN);
+    } else {
+        clear_pstate_bits(PSTATE_PAN);
+    }
+    gen_rebuild_hflags(s);
+    s->base.is_jmp = DISAS_TOO_MANY;
+    return true;
+}
+
+static bool trans_MSR_i_SPSEL(DisasContext *s, arg_i *a)
+{
+    if (s->current_el == 0) {
+        return false;
+    }
+    gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(a->imm & PSTATE_SP));
+    s->base.is_jmp = DISAS_TOO_MANY;
+    return true;
+}
+
+static bool trans_MSR_i_SBSS(DisasContext *s, arg_i *a)
+{
+    if (!dc_isar_feature(aa64_ssbs, s)) {
+        return false;
+    }
+    if (a->imm & 1) {
+        set_pstate_bits(PSTATE_SSBS);
+    } else {
+        clear_pstate_bits(PSTATE_SSBS);
+    }
+    /* Don't need to rebuild hflags since SSBS is a nop */
+    s->base.is_jmp = DISAS_TOO_MANY;
+    return true;
+}
+
+static bool trans_MSR_i_DIT(DisasContext *s, arg_i *a)
+{
+    if (!dc_isar_feature(aa64_dit, s)) {
+        return false;
+    }
+    if (a->imm & 1) {
+        set_pstate_bits(PSTATE_DIT);
+    } else {
+        clear_pstate_bits(PSTATE_DIT);
+    }
+    /* There's no need to rebuild hflags because DIT is a nop */
+    s->base.is_jmp = DISAS_TOO_MANY;
+    return true;
+}
+
+static bool trans_MSR_i_TCO(DisasContext *s, arg_i *a)
+{
+    if (dc_isar_feature(aa64_mte, s)) {
+        /* Full MTE is enabled -- set the TCO bit as directed. */
232
+ if (a->imm & 1) {
233
+ set_pstate_bits(PSTATE_TCO);
234
+ } else {
235
+ clear_pstate_bits(PSTATE_TCO);
236
+ }
237
+ gen_rebuild_hflags(s);
238
+ /* Many factors, including TCO, go into MTE_ACTIVE. */
239
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
240
+ return true;
241
+ } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
242
+ /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
243
+ return true;
244
+ } else {
245
+ /* Insn not present */
246
+ return false;
247
+ }
248
+}
249
+
250
+static bool trans_MSR_i_DAIFSET(DisasContext *s, arg_i *a)
251
+{
252
+ gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(a->imm));
253
+ s->base.is_jmp = DISAS_TOO_MANY;
254
+ return true;
255
+}
256
+
257
+static bool trans_MSR_i_DAIFCLEAR(DisasContext *s, arg_i *a)
258
+{
259
+ gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(a->imm));
260
+ /* Exit the cpu loop to re-evaluate pending IRQs. */
261
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
262
+ return true;
263
+}
264
+
265
+static bool trans_MSR_i_SVCR(DisasContext *s, arg_MSR_i_SVCR *a)
266
+{
267
+ if (!dc_isar_feature(aa64_sme, s) || a->mask == 0) {
268
+ return false;
269
+ }
270
+ if (sme_access_check(s)) {
271
+ int old = s->pstate_sm | (s->pstate_za << 1);
272
+ int new = a->imm * 3;
273
+
274
+ if ((old ^ new) & a->mask) {
275
+ /* At least one bit changes. */
276
+ gen_helper_set_svcr(cpu_env, tcg_constant_i32(new),
277
+ tcg_constant_i32(a->mask));
278
+ s->base.is_jmp = DISAS_TOO_MANY;
279
+ }
280
+ }
281
+ return true;
282
}
283
284
static void gen_get_nzcv(TCGv_i64 tcg_rt)
285
@@ -XXX,XX +XXX,XX @@ static void disas_system(DisasContext *s, uint32_t insn)
286
rt = extract32(insn, 0, 5);
287
288
if (op0 == 0) {
289
- if (l || rt != 31) {
290
- unallocated_encoding(s);
291
- return;
292
- }
293
- switch (crn) {
294
- case 4: /* MSR (immediate) */
295
- handle_msr_i(s, insn, op1, op2, crm);
296
- break;
297
- default:
298
- unallocated_encoding(s);
299
- break;
300
- }
301
+ unallocated_encoding(s);
302
return;
303
}
304
handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
305
--
306
2.34.1
diff view generated by jsdifflib
1
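As background for readers less familiar with decodetree: each pattern line
above expands into an argument struct and a call to the matching trans_*
function. A minimal hand-written sketch of what the generated decoder does
for one @msr_i pattern (names and structure here are illustrative, not
QEMU's actual generated code):

    /* MSR_i_DIT 1101 0101 0000 0 011 0100 .... 010 11111 @msr_i
     * Fixed opcode bits: mask 0xfffff0ff, value 0xd503405f;
     * the four '.' bits are imm:4 at [11:8] (the CRm field).
     */
    static bool decode_msr_i_dit_sketch(DisasContext *s, uint32_t insn)
    {
        arg_i a;

        if ((insn & 0xfffff0ff) != 0xd503405f) {
            return false;               /* fixed opcode bits don't match */
        }
        a.imm = extract32(insn, 8, 4);  /* the '....' field becomes a->imm */
        return trans_MSR_i_DIT(s, &a);
    }

A trans_* function returning false then sends the insn to the UNDEF path,
which is why the feature checks above simply "return false".
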
Convert MSR (reg), MRS, SYS, SYSL to decodetree. For QEMU these are
all essentially the same instruction (system register access).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-7-peter.maydell@linaro.org
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
target/arm/tcg/a64.decode | 8 ++++++++
target/arm/tcg/translate-a64.c | 32 +++++---------------------------
2 files changed, 13 insertions(+), 27 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ MSR_i_TCO 1101 0101 0000 0 011 0100 .... 100 11111 @msr_i
MSR_i_DAIFSET 1101 0101 0000 0 011 0100 .... 110 11111 @msr_i
MSR_i_DAIFCLEAR 1101 0101 0000 0 011 0100 .... 111 11111 @msr_i
MSR_i_SVCR 1101 0101 0000 0 011 0100 0 mask:2 imm:1 011 11111
+
+# MRS, MSR (register), SYS, SYSL. These are all essentially the
+# same instruction as far as QEMU is concerned.
+# NB: op0 is bits [20:19], but op0=0b00 is other insns, so we have
+# to hand-decode it.
+SYS 1101 0101 00 l:1 01 op1:3 crn:4 crm:4 op2:3 rt:5 op0=1
+SYS 1101 0101 00 l:1 10 op1:3 crn:4 crm:4 op2:3 rt:5 op0=2
+SYS 1101 0101 00 l:1 11 op1:3 crn:4 crm:4 op2:3 rt:5 op0=3
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_sysreg_undef(DisasContext *s, bool isread,
* These are all essentially the same insn in 'read' and 'write'
* versions, with varying op0 fields.
*/
-static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
+static void handle_sys(DisasContext *s, bool isread,
unsigned int op0, unsigned int op1, unsigned int op2,
unsigned int crn, unsigned int crm, unsigned int rt)
{
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
}
}

-/* System
- * 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0
- * +---------------------+---+-----+-----+-------+-------+-----+------+
- * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt |
- * +---------------------+---+-----+-----+-------+-------+-----+------+
- */
-static void disas_system(DisasContext *s, uint32_t insn)
+static bool trans_SYS(DisasContext *s, arg_SYS *a)
{
- unsigned int l, op0, op1, crn, crm, op2, rt;
- l = extract32(insn, 21, 1);
- op0 = extract32(insn, 19, 2);
- op1 = extract32(insn, 16, 3);
- crn = extract32(insn, 12, 4);
- crm = extract32(insn, 8, 4);
- op2 = extract32(insn, 5, 3);
- rt = extract32(insn, 0, 5);
-
- if (op0 == 0) {
- unallocated_encoding(s);
- return;
- }
- handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
+ handle_sys(s, a->l, a->op0, a->op1, a->op2, a->crn, a->crm, a->rt);
+ return true;
}

/* Exception generation
@@ -XXX,XX +XXX,XX @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
switch (extract32(insn, 25, 7)) {
case 0x6a: /* Exception generation / System */
if (insn & (1 << 24)) {
- if (extract32(insn, 22, 2) == 0) {
- disas_system(s, insn);
- } else {
- unallocated_encoding(s);
- }
+ unallocated_encoding(s);
} else {
disas_exc(s, insn);
}
--
2.34.1

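To see what the op0=1/2/3 constant fields buy us: the generated decoder
matches the fixed bits, fills in arg_SYS, and supplies op0 as a constant,
so trans_SYS() never sees the op0=0b00 space at all. A hand-written
equivalent (a sketch only; the real decoder is generated from a64.decode):

    static bool decode_sys_sketch(DisasContext *s, uint32_t insn)
    {
        arg_SYS a;

        if ((insn & 0xffc00000) != 0xd5000000) {
            return false;               /* not the 0b1101010100 group */
        }
        a.op0 = extract32(insn, 19, 2);
        if (a.op0 == 0) {
            return false;               /* MSR (imm), hints, barriers, ... */
        }
        a.l = extract32(insn, 21, 1);
        a.op1 = extract32(insn, 16, 3);
        a.crn = extract32(insn, 12, 4);
        a.crm = extract32(insn, 8, 4);
        a.op2 = extract32(insn, 5, 3);
        a.rt = extract32(insn, 0, 5);
        return trans_SYS(s, &a);
    }
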
Convert the exception generation instructions SVC, HVC, SMC, BRK and
HLT to decodetree.

The old decoder decoded the halting-debug insns DCPS1, DCPS2 and
DCPS3 just in order to then make them UNDEF; as with DRPS, we don't
bother to decode them, but document the patterns in a64.decode.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-8-peter.maydell@linaro.org
---
target/arm/tcg/a64.decode | 15 +++
target/arm/tcg/translate-a64.c | 173 ++++++++++++---------------------
2 files changed, 79 insertions(+), 109 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ MSR_i_SVCR 1101 0101 0000 0 011 0100 0 mask:2 imm:1 011 11111
SYS 1101 0101 00 l:1 01 op1:3 crn:4 crm:4 op2:3 rt:5 op0=1
SYS 1101 0101 00 l:1 10 op1:3 crn:4 crm:4 op2:3 rt:5 op0=2
SYS 1101 0101 00 l:1 11 op1:3 crn:4 crm:4 op2:3 rt:5 op0=3
+
+# Exception generation
+
+@i16 .... .... ... imm:16 ... .. &i
+SVC 1101 0100 000 ................ 000 01 @i16
+HVC 1101 0100 000 ................ 000 10 @i16
+SMC 1101 0100 000 ................ 000 11 @i16
+BRK 1101 0100 001 ................ 000 00 @i16
+HLT 1101 0100 010 ................ 000 00 @i16
+# These insns always UNDEF unless in halting debug state, which
+# we don't implement. So we don't need to decode them. The patterns
+# are listed here as documentation.
+# DCPS1 1101 0100 101 ................ 000 01 @i16
+# DCPS2 1101 0100 101 ................ 000 10 @i16
+# DCPS3 1101 0100 101 ................ 000 11 @i16
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_SYS(DisasContext *s, arg_SYS *a)
return true;
}

-/* Exception generation
- *
- * 31 24 23 21 20 5 4 2 1 0
- * +-----------------+-----+------------------------+-----+----+
- * | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL |
- * +-----------------------+------------------------+----------+
- */
-static void disas_exc(DisasContext *s, uint32_t insn)
+static bool trans_SVC(DisasContext *s, arg_i *a)
{
- int opc = extract32(insn, 21, 3);
- int op2_ll = extract32(insn, 0, 5);
- int imm16 = extract32(insn, 5, 16);
- uint32_t syndrome;
-
- switch (opc) {
- case 0:
- /* For SVC, HVC and SMC we advance the single-step state
- * machine before taking the exception. This is architecturally
- * mandated, to ensure that single-stepping a system call
- * instruction works properly.
- */
- switch (op2_ll) {
- case 1: /* SVC */
- syndrome = syn_aa64_svc(imm16);
- if (s->fgt_svc) {
- gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
- break;
- }
- gen_ss_advance(s);
- gen_exception_insn(s, 4, EXCP_SWI, syndrome);
- break;
- case 2: /* HVC */
- if (s->current_el == 0) {
- unallocated_encoding(s);
- break;
- }
- /* The pre HVC helper handles cases when HVC gets trapped
- * as an undefined insn by runtime configuration.
- */
- gen_a64_update_pc(s, 0);
- gen_helper_pre_hvc(cpu_env);
- gen_ss_advance(s);
- gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(imm16), 2);
- break;
- case 3: /* SMC */
- if (s->current_el == 0) {
- unallocated_encoding(s);
- break;
- }
- gen_a64_update_pc(s, 0);
- gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
- gen_ss_advance(s);
- gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(imm16), 3);
- break;
- default:
- unallocated_encoding(s);
- break;
- }
- break;
- case 1:
- if (op2_ll != 0) {
- unallocated_encoding(s);
- break;
- }
- /* BRK */
- gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
- break;
- case 2:
- if (op2_ll != 0) {
- unallocated_encoding(s);
- break;
- }
- /* HLT. This has two purposes.
- * Architecturally, it is an external halting debug instruction.
- * Since QEMU doesn't implement external debug, we treat this as
- * it is required for halting debug disabled: it will UNDEF.
- * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
- */
- if (semihosting_enabled(s->current_el == 0) && imm16 == 0xf000) {
- gen_exception_internal_insn(s, EXCP_SEMIHOST);
- } else {
- unallocated_encoding(s);
- }
- break;
- case 5:
- if (op2_ll < 1 || op2_ll > 3) {
- unallocated_encoding(s);
- break;
- }
- /* DCPS1, DCPS2, DCPS3 */
- unallocated_encoding(s);
- break;
- default:
- unallocated_encoding(s);
- break;
+ /*
+ * For SVC, HVC and SMC we advance the single-step state
+ * machine before taking the exception. This is architecturally
+ * mandated, to ensure that single-stepping a system call
+ * instruction works properly.
+ */
+ uint32_t syndrome = syn_aa64_svc(a->imm);
+ if (s->fgt_svc) {
+ gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
+ return true;
}
+ gen_ss_advance(s);
+ gen_exception_insn(s, 4, EXCP_SWI, syndrome);
+ return true;
}

-/* Branches, exception generating and system instructions */
-static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
+static bool trans_HVC(DisasContext *s, arg_i *a)
{
- switch (extract32(insn, 25, 7)) {
- case 0x6a: /* Exception generation / System */
- if (insn & (1 << 24)) {
- unallocated_encoding(s);
- } else {
- disas_exc(s, insn);
- }
- break;
- default:
+ if (s->current_el == 0) {
unallocated_encoding(s);
- break;
+ return true;
}
+ /*
+ * The pre HVC helper handles cases when HVC gets trapped
+ * as an undefined insn by runtime configuration.
+ */
+ gen_a64_update_pc(s, 0);
+ gen_helper_pre_hvc(cpu_env);
+ /* Architecture requires ss advance before we do the actual work */
+ gen_ss_advance(s);
+ gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), 2);
+ return true;
+}
+
+static bool trans_SMC(DisasContext *s, arg_i *a)
+{
+ if (s->current_el == 0) {
+ unallocated_encoding(s);
+ return true;
+ }
+ gen_a64_update_pc(s, 0);
+ gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(a->imm)));
+ /* Architecture requires ss advance before we do the actual work */
+ gen_ss_advance(s);
+ gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(a->imm), 3);
+ return true;
+}
+
+static bool trans_BRK(DisasContext *s, arg_i *a)
+{
+ gen_exception_bkpt_insn(s, syn_aa64_bkpt(a->imm));
+ return true;
+}
+
+static bool trans_HLT(DisasContext *s, arg_i *a)
+{
+ /*
+ * HLT. This has two purposes.
+ * Architecturally, it is an external halting debug instruction.
+ * Since QEMU doesn't implement external debug, we treat this as
+ * it is required for halting debug disabled: it will UNDEF.
+ * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
+ */
+ if (semihosting_enabled(s->current_el == 0) && a->imm == 0xf000) {
+ gen_exception_internal_insn(s, EXCP_SEMIHOST);
+ } else {
+ unallocated_encoding(s);
+ }
+ return true;
}

/*
@@ -XXX,XX +XXX,XX @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
static void disas_a64_legacy(DisasContext *s, uint32_t insn)
{
switch (extract32(insn, 25, 4)) {
- case 0xa: case 0xb: /* Branch, exception generation and system insns */
- disas_b_exc_sys(s, insn);
- break;
case 0x4:
case 0x6:
case 0xc:
--
2.34.1

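To make the @i16 format concrete: the sixteen '.' bits are imm:16 at bits
[20:5], and everything else is fixed opcode. A hand-rolled match for SVC,
roughly equivalent to what the generated decoder does (sketch only, not the
generated code):

    static bool decode_svc_sketch(DisasContext *s, uint32_t insn)
    {
        arg_i a;

        /* SVC 1101 0100 000 ................ 000 01 @i16 */
        if ((insn & 0xffe0001f) != 0xd4000001) {
            return false;       /* fixed bits are [31:21] and [4:0] */
        }
        a.imm = extract32(insn, 5, 16);
        return trans_SVC(s, &a);
    }

(0xd4000001 is the familiar encoding of SVC #0.)
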
Convert the instructions in the load/store exclusive (STXR,
STLXR, LDXR, LDAXR) and load/store ordered (STLR, STLLR,
LDAR, LDLAR) to decodetree.

Note that for STLR, STLLR, LDAR, LDLAR this fixes an under-decoding
in the legacy decoder where we were not checking that the RES1 bits
in the Rs and Rt2 fields were set.

The new function ldst_iss_sf() is equivalent to the existing
disas_ldst_compute_iss_sf(), but it takes the pre-decoded 'ext' field
rather than taking an undecoded two-bit opc field and extracting
'ext' from it. Once all the loads and stores have been converted
to decodetree disas_ldst_compute_iss_sf() will be unused and
can be deleted.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-9-peter.maydell@linaro.org
---
target/arm/tcg/a64.decode | 11 +++
target/arm/tcg/translate-a64.c | 154 ++++++++++++++++++++-------------
2 files changed, 103 insertions(+), 62 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ HLT 1101 0100 010 ................ 000 00 @i16
# DCPS1 1101 0100 101 ................ 000 01 @i16
# DCPS2 1101 0100 101 ................ 000 10 @i16
# DCPS3 1101 0100 101 ................ 000 11 @i16
+
+# Loads and stores
+
+&stxr rn rt rt2 rs sz lasr
+&stlr rn rt sz lasr
+@stxr sz:2 ...... ... rs:5 lasr:1 rt2:5 rn:5 rt:5 &stxr
+@stlr sz:2 ...... ... ..... lasr:1 ..... rn:5 rt:5 &stlr
+STXR .. 001000 000 ..... . ..... ..... ..... @stxr # inc STLXR
+LDXR .. 001000 010 ..... . ..... ..... ..... @stxr # inc LDAXR
+STLR .. 001000 100 11111 . 11111 ..... ..... @stlr # inc STLLR
+LDAR .. 001000 110 11111 . 11111 ..... ..... @stlr # inc LDLAR
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
return regsize == 64;
}

+static bool ldst_iss_sf(int size, bool sign, bool ext)
+{
+
+ if (sign) {
+ /*
+ * Signed loads are 64 bit results if we are not going to
+ * do a zero-extend from 32 to 64 after the load.
+ * (For a store, sign and ext are always false.)
+ */
+ return !ext;
+ } else {
+ /* Unsigned loads/stores work at the specified size */
+ return size == MO_64;
+ }
+}
+
+static bool trans_STXR(DisasContext *s, arg_stxr *a)
+{
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ if (a->lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ }
+ gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, false);
+ return true;
+}
+
+static bool trans_LDXR(DisasContext *s, arg_stxr *a)
+{
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, false);
+ if (a->lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ }
+ return true;
+}
+
+static bool trans_STLR(DisasContext *s, arg_stlr *a)
+{
+ TCGv_i64 clean_addr;
+ MemOp memop;
+ bool iss_sf = ldst_iss_sf(a->sz, false, false);
+
+ /*
+ * StoreLORelease is the same as Store-Release for QEMU, but
+ * needs the feature-test.
+ */
+ if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
+ return false;
+ }
+ /* Generate ISS for non-exclusive accesses including LASR. */
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ memop = check_ordered_align(s, a->rn, 0, true, a->sz);
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
+ true, a->rn != 31, memop);
+ do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, memop, true, a->rt,
+ iss_sf, a->lasr);
+ return true;
+}
+
+static bool trans_LDAR(DisasContext *s, arg_stlr *a)
+{
+ TCGv_i64 clean_addr;
+ MemOp memop;
+ bool iss_sf = ldst_iss_sf(a->sz, false, false);
+
+ /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
+ if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
+ return false;
+ }
+ /* Generate ISS for non-exclusive accesses including LASR. */
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ memop = check_ordered_align(s, a->rn, 0, false, a->sz);
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
+ false, a->rn != 31, memop);
+ do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, memop, false, true,
+ a->rt, iss_sf, a->lasr);
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ return true;
+}
+
/* Load/store exclusive
*
* 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
int is_lasr = extract32(insn, 15, 1);
int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
int size = extract32(insn, 30, 2);
- TCGv_i64 clean_addr;
- MemOp memop;

switch (o2_L_o1_o0) {
- case 0x0: /* STXR */
- case 0x1: /* STLXR */
- if (rn == 31) {
- gen_check_sp_alignment(s);
- }
- if (is_lasr) {
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- }
- gen_store_exclusive(s, rs, rt, rt2, rn, size, false);
- return;
-
- case 0x4: /* LDXR */
- case 0x5: /* LDAXR */
- if (rn == 31) {
- gen_check_sp_alignment(s);
- }
- gen_load_exclusive(s, rt, rt2, rn, size, false);
- if (is_lasr) {
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
- }
- return;
-
- case 0x8: /* STLLR */
- if (!dc_isar_feature(aa64_lor, s)) {
- break;
- }
- /* StoreLORelease is the same as Store-Release for QEMU. */
- /* fall through */
- case 0x9: /* STLR */
- /* Generate ISS for non-exclusive accesses including LASR. */
- if (rn == 31) {
- gen_check_sp_alignment(s);
- }
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- memop = check_ordered_align(s, rn, 0, true, size);
- clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
- true, rn != 31, memop);
- do_gpr_st(s, cpu_reg(s, rt), clean_addr, memop, true, rt,
- disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
- return;
-
- case 0xc: /* LDLAR */
- if (!dc_isar_feature(aa64_lor, s)) {
- break;
- }
- /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
- /* fall through */
- case 0xd: /* LDAR */
- /* Generate ISS for non-exclusive accesses including LASR. */
- if (rn == 31) {
- gen_check_sp_alignment(s);
- }
- memop = check_ordered_align(s, rn, 0, false, size);
- clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
- false, rn != 31, memop);
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, memop, false, true,
- rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
- return;
-
case 0x2: case 0x3: /* CASP / STXP */
if (size & 2) { /* STXP / STLXP */
if (rn == 31) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
return;
}
break;
+ default:
+ /* Handled in decodetree */
+ break;
}
unallocated_encoding(s);
}
--
2.34.1

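The behaviour of the new ldst_iss_sf() is easiest to see from a few
concrete cases. A self-checking sketch (illustrative; ldst_iss_sf() and
the MO_* values come from the patch above, the assert wrapper is mine):

    #include <assert.h>

    static void ldst_iss_sf_examples(void)
    {
        assert(ldst_iss_sf(MO_8,  false, false) == false); /* LDRB  w0 */
        assert(ldst_iss_sf(MO_64, false, false) == true);  /* LDR   x0 */
        assert(ldst_iss_sf(MO_16, true,  false) == true);  /* LDRSH x0 */
        assert(ldst_iss_sf(MO_16, true,  true)  == false); /* LDRSH w0 */
    }

That is, a signed load reports a 64-bit result unless it will be
zero-extended from 32 bits afterwards, and an unsigned access reports
64-bit only when the access itself is 64-bit.
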
Convert the load/store exclusive pair (LDXP, STXP, LDAXP, STLXP),
compare-and-swap pair (CASP, CASPA, CASPAL, CASPL), and
compare-and-swap (CAS, CASA, CASAL, CASL) instructions to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-10-peter.maydell@linaro.org
---
target/arm/tcg/a64.decode | 11 +++
target/arm/tcg/translate-a64.c | 121 ++++++++++++---------------------
2 files changed, 53 insertions(+), 79 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ HLT 1101 0100 010 ................ 000 00 @i16
&stlr rn rt sz lasr
@stxr sz:2 ...... ... rs:5 lasr:1 rt2:5 rn:5 rt:5 &stxr
@stlr sz:2 ...... ... ..... lasr:1 ..... rn:5 rt:5 &stlr
+%imm1_30_p2 30:1 !function=plus_2
+@stxp .. ...... ... rs:5 lasr:1 rt2:5 rn:5 rt:5 &stxr sz=%imm1_30_p2
STXR .. 001000 000 ..... . ..... ..... ..... @stxr # inc STLXR
LDXR .. 001000 010 ..... . ..... ..... ..... @stxr # inc LDAXR
STLR .. 001000 100 11111 . 11111 ..... ..... @stlr # inc STLLR
LDAR .. 001000 110 11111 . 11111 ..... ..... @stlr # inc LDLAR
+
+STXP 1 . 001000 001 ..... . ..... ..... ..... @stxp # inc STLXP
+LDXP 1 . 001000 011 ..... . ..... ..... ..... @stxp # inc LDAXP
+
+# CASP, CASPA, CASPAL, CASPL (we don't decode the bits that determine
+# acquire/release semantics because QEMU's cmpxchg always has those)
+CASP 0 . 001000 0 - 1 rs:5 - 11111 rn:5 rt:5 sz=%imm1_30_p2
+# CAS, CASA, CASAL, CASL
+CAS sz:2 001000 1 - 1 rs:5 - 11111 rn:5 rt:5
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LDAR(DisasContext *s, arg_stlr *a)
return true;
}

-/* Load/store exclusive
- *
- * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
- * +-----+-------------+----+---+----+------+----+-------+------+------+
- * | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt |
- * +-----+-------------+----+---+----+------+----+-------+------+------+
- *
- * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
- * L: 0 -> store, 1 -> load
- * o2: 0 -> exclusive, 1 -> not
- * o1: 0 -> single register, 1 -> register pair
- * o0: 1 -> load-acquire/store-release, 0 -> not
- */
-static void disas_ldst_excl(DisasContext *s, uint32_t insn)
+static bool trans_STXP(DisasContext *s, arg_stxr *a)
{
- int rt = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int rt2 = extract32(insn, 10, 5);
- int rs = extract32(insn, 16, 5);
- int is_lasr = extract32(insn, 15, 1);
- int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
- int size = extract32(insn, 30, 2);
-
- switch (o2_L_o1_o0) {
- case 0x2: case 0x3: /* CASP / STXP */
- if (size & 2) { /* STXP / STLXP */
- if (rn == 31) {
- gen_check_sp_alignment(s);
- }
- if (is_lasr) {
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- }
- gen_store_exclusive(s, rs, rt, rt2, rn, size, true);
- return;
- }
- if (rt2 == 31
- && ((rt | rs) & 1) == 0
- && dc_isar_feature(aa64_atomics, s)) {
- /* CASP / CASPL */
- gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
- return;
- }
- break;
-
- case 0x6: case 0x7: /* CASPA / LDXP */
- if (size & 2) { /* LDXP / LDAXP */
- if (rn == 31) {
- gen_check_sp_alignment(s);
- }
- gen_load_exclusive(s, rt, rt2, rn, size, true);
- if (is_lasr) {
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
- }
- return;
- }
- if (rt2 == 31
- && ((rt | rs) & 1) == 0
- && dc_isar_feature(aa64_atomics, s)) {
- /* CASPA / CASPAL */
- gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
- return;
- }
- break;
-
- case 0xa: /* CAS */
- case 0xb: /* CASL */
- case 0xe: /* CASA */
- case 0xf: /* CASAL */
- if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
- gen_compare_and_swap(s, rs, rt, rn, size);
- return;
- }
- break;
- default:
- /* Handled in decodetree */
- break;
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
}
- unallocated_encoding(s);
+ if (a->lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ }
+ gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, true);
+ return true;
+}
+
+static bool trans_LDXP(DisasContext *s, arg_stxr *a)
+{
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, true);
+ if (a->lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ }
+ return true;
+}
+
+static bool trans_CASP(DisasContext *s, arg_CASP *a)
+{
+ if (!dc_isar_feature(aa64_atomics, s)) {
+ return false;
+ }
+ if (((a->rt | a->rs) & 1) != 0) {
+ return false;
+ }
+
+ gen_compare_and_swap_pair(s, a->rs, a->rt, a->rn, a->sz);
+ return true;
+}
+
+static bool trans_CAS(DisasContext *s, arg_CAS *a)
+{
+ if (!dc_isar_feature(aa64_atomics, s)) {
+ return false;
+ }
+ gen_compare_and_swap(s, a->rs, a->rt, a->rn, a->sz);
+ return true;
}

/*
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn)
static void disas_ldst(DisasContext *s, uint32_t insn)
{
switch (extract32(insn, 24, 6)) {
- case 0x08: /* Load/store exclusive */
- disas_ldst_excl(s, insn);
- break;
case 0x18: case 0x1c: /* Load register (literal) */
disas_ld_lit(s, insn);
break;
--
2.34.1

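The %imm1_30_p2 field above shows decodetree's !function modifier: the
single bit at [30] is extracted and passed through a helper before being
stored into the 'sz' field. That helper is a trivial translate-time
function of roughly this shape (a sketch; it lives with the other decode
field helpers rather than in this hunk):

    static int plus_2(DisasContext *s, int x)
    {
        /* bit30 == 0 -> sz = 2 (32-bit register pair),
         * bit30 == 1 -> sz = 3 (64-bit register pair)
         */
        return x + 2;
    }

This is why STXP/LDXP and CASP can share the trans functions' MemOp-style
'sz' field with the non-pair forms instead of re-deriving it from opc bits.
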
New patch
Convert the "Load register (literal)" instruction class to
decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-11-peter.maydell@linaro.org
---
 target/arm/tcg/a64.decode      | 13 ++++++
 target/arm/tcg/translate-a64.c | 76 ++++++++++------------------------
 2 files changed, 35 insertions(+), 54 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ LDXP 1 . 001000 011 ..... . ..... ..... ..... @stxp # inc LDAXP
 CASP 0 . 001000 0 - 1 rs:5 - 11111 rn:5 rt:5 sz=%imm1_30_p2
 # CAS, CASA, CASAL, CASL
 CAS sz:2 001000 1 - 1 rs:5 - 11111 rn:5 rt:5
+
+&ldlit rt imm sz sign
+@ldlit .. ... . .. ................... rt:5 &ldlit imm=%imm19
+
+LD_lit 00 011 0 00 ................... ..... @ldlit sz=2 sign=0
+LD_lit 01 011 0 00 ................... ..... @ldlit sz=3 sign=0
+LD_lit 10 011 0 00 ................... ..... @ldlit sz=2 sign=1
+LD_lit_v 00 011 1 00 ................... ..... @ldlit sz=2 sign=0
+LD_lit_v 01 011 1 00 ................... ..... @ldlit sz=3 sign=0
+LD_lit_v 10 011 1 00 ................... ..... @ldlit sz=4 sign=0
+
+# PRFM
+NOP 11 011 0 00 ------------------- -----
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_CAS(DisasContext *s, arg_CAS *a)
     return true;
 }

-/*
- * Load register (literal)
- *
- *  31 30 29   27  26 25 24 23                5 4     0
- * +-----+-------+---+-----+-------------------+-------+
- * | opc | 0 1 1 | V | 0 0 |       imm19       |  Rt   |
- * +-----+-------+---+-----+-------------------+-------+
- *
- * V: 1 -> vector (simd/fp)
- * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
- *                   10-> 32 bit signed, 11 -> prefetch
- * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
- */
-static void disas_ld_lit(DisasContext *s, uint32_t insn)
+static bool trans_LD_lit(DisasContext *s, arg_ldlit *a)
 {
-    int rt = extract32(insn, 0, 5);
-    int64_t imm = sextract32(insn, 5, 19) << 2;
-    bool is_vector = extract32(insn, 26, 1);
-    int opc = extract32(insn, 30, 2);
-    bool is_signed = false;
-    int size = 2;
-    TCGv_i64 tcg_rt, clean_addr;
+    bool iss_sf = ldst_iss_sf(a->sz, a->sign, false);
+    TCGv_i64 tcg_rt = cpu_reg(s, a->rt);
+    TCGv_i64 clean_addr = tcg_temp_new_i64();
+    MemOp memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
+
+    gen_pc_plus_diff(s, clean_addr, a->imm);
+    do_gpr_ld(s, tcg_rt, clean_addr, memop,
+              false, true, a->rt, iss_sf, false);
+    return true;
+}
+
+static bool trans_LD_lit_v(DisasContext *s, arg_ldlit *a)
+{
+    /* Load register (literal), vector version */
+    TCGv_i64 clean_addr;
     MemOp memop;

-    if (is_vector) {
-        if (opc == 3) {
-            unallocated_encoding(s);
-            return;
-        }
-        size = 2 + opc;
-        if (!fp_access_check(s)) {
-            return;
-        }
-        memop = finalize_memop_asimd(s, size);
-    } else {
-        if (opc == 3) {
-            /* PRFM (literal) : prefetch */
-            return;
-        }
-        size = 2 + extract32(opc, 0, 1);
-        is_signed = extract32(opc, 1, 1);
-        memop = finalize_memop(s, size + is_signed * MO_SIGN);
+    if (!fp_access_check(s)) {
+        return true;
     }
-
-    tcg_rt = cpu_reg(s, rt);
-
+    memop = finalize_memop_asimd(s, a->sz);
     clean_addr = tcg_temp_new_i64();
-    gen_pc_plus_diff(s, clean_addr, imm);
-
-    if (is_vector) {
-        do_fp_ld(s, rt, clean_addr, memop);
-    } else {
-        /* Only unsigned 32bit loads target 32bit registers. */
-        bool iss_sf = opc != 0;
-        do_gpr_ld(s, tcg_rt, clean_addr, memop, false, true, rt, iss_sf, false);
-    }
+    gen_pc_plus_diff(s, clean_addr, a->imm);
+    do_fp_ld(s, a->rt, clean_addr, memop);
+    return true;
 }

 /*
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn)
 static void disas_ldst(DisasContext *s, uint32_t insn)
 {
     switch (extract32(insn, 24, 6)) {
-    case 0x18: case 0x1c: /* Load register (literal) */
-        disas_ld_lit(s, insn);
-        break;
     case 0x28: case 0x29:
     case 0x2c: case 0x2d: /* Load/store pair (all forms) */
         disas_ldst_pair(s, insn);
--
2.34.1
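
For reference, the address calculation that moves from the hand-decoded
sextract32() in the deleted disas_ld_lit() into the decodetree %imm19 field
can be modelled stand-alone (illustrative only, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* Signed 19-bit word offset from insn bits [23:5], scaled to bytes,
 * matching the old "sextract32(insn, 5, 19) << 2". */
static int64_t ld_lit_offset(uint32_t insn)
{
    int32_t imm19 = (int32_t)(insn << 8) >> 13;  /* sign-extend bits [23:5] */
    return (int64_t)imm19 << 2;
}

int main(void)
{
    /* 0x18000040 is LDR w0, <pc + 8>: imm19 == 2 */
    printf("%lld\n", (long long)ld_lit_offset(0x18000040u));
    return 0;
}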
Implement the MVE shifts by immediate, which perform shifts
on a single general-purpose register.

These patterns overlap with the long-shift-by-immediates,
so we have to rearrange the grouping a little here.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-18-peter.maydell@linaro.org
---
 target/arm/helper-mve.h |  3 ++
 target/arm/translate.h  |  1 +
 target/arm/t32.decode   | 31 ++++++++++++++-----
 target/arm/mve_helper.c | 10 ++++++
 target/arm/translate.c  | 68 +++++++++++++++++++++++++++++++++++++++--
 5 files changed, 104 insertions(+), 9 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
+
+DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
 typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
 typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
+typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);

 /**
  * arm_tbflags_from_tb:
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -XXX,XX +XXX,XX @@
 &mve_shl_ri rdalo rdahi shim
 &mve_shl_rr rdalo rdahi rm
+&mve_sh_ri rda shim

 # rdahi: bits [3:1] from insn, bit 0 is 1
 # rdalo: bits [3:1] from insn, bit 0 is 0
@@ -XXX,XX +XXX,XX @@
              &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
 @mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
              &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
+@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
+            &mve_sh_ri shim=%imm5_12_6

  TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
 # the rest fall through (where ORR_rrri and MOV_rxri will end up
 # handling them as r13 and r15 accesses with the same semantics as A32).
 [
-  LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
-  LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
-  ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
+  {
+    UQSHL_ri 1110101 0010 1 .... 0 ... 1111 .. 00 1111 @mve_sh_ri
+    LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
+    UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
+  }

-  UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
-  URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
-  SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
-  SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+  {
+    URSHR_ri 1110101 0010 1 .... 0 ... 1111 .. 01 1111 @mve_sh_ri
+    LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
+    URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
+  }
+
+  {
+    SRSHR_ri 1110101 0010 1 .... 0 ... 1111 .. 10 1111 @mve_sh_ri
+    ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
+    SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
+  }
+
+  {
+    SQSHL_ri 1110101 0010 1 .... 0 ... 1111 .. 11 1111 @mve_sh_ri
+    SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+  }

   LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
   ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
 {
     return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
 }
+
+uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
+
+uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)

 static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t;

+    /* Handle shift by the input size for the benefit of trans_SRSHR_ri */
+    if (sh == 32) {
+        tcg_gen_movi_i32(d, 0);
+        return;
+    }
+    t = tcg_temp_new_i32();
     tcg_gen_extract_i32(t, a, sh - 1, 1);
     tcg_gen_sari_i32(d, a, sh);
     tcg_gen_add_i32(d, d, t);
@@ -XXX,XX +XXX,XX @@ static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)

 static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t;

+    /* Handle shift by the input size for the benefit of trans_URSHR_ri */
+    if (sh == 32) {
+        tcg_gen_extract_i32(d, a, sh - 1, 1);
+        return;
+    }
+    t = tcg_temp_new_i32();
     tcg_gen_extract_i32(t, a, sh - 1, 1);
     tcg_gen_shri_i32(d, a, sh);
     tcg_gen_add_i32(d, d, t);
@@ -XXX,XX +XXX,XX @@ static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
     return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
 }

+static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
+{
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rda == 13 || a->rda == 15) {
+        /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    if (a->shim == 0) {
+        a->shim = 32;
+    }
+    fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
+
+    return true;
+}
+
+static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_urshr32_i32);
+}
+
+static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_srshr32_i32);
+}
+
+static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+    gen_helper_mve_sqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_mve_sqshl);
+}
+
+static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+    gen_helper_mve_uqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_mve_uqshl);
+}
+
 /*
  * Multiply and multiply accumulate
  */
--
2.20.1

Convert the load/store register pair insns (LDP, STP,
LDNP, STNP, LDPSW, STGP) to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20230602155223.2040685-12-peter.maydell@linaro.org
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/tcg/a64.decode      |  61 +++++
 target/arm/tcg/translate-a64.c | 422 ++++++++++++++++-----------------
 2 files changed, 268 insertions(+), 215 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ LD_lit_v 10 011 1 00 ................... ..... @ldlit sz=4 sign=0

 # PRFM
 NOP 11 011 0 00 ------------------- -----
+
+&ldstpair rt2 rt rn imm sz sign w p
+@ldstpair .. ... . ... . imm:s7 rt2:5 rn:5 rt:5 &ldstpair
+
+# STNP, LDNP: Signed offset, non-temporal hint. We don't emulate caches
+# so we ignore hints about data access patterns, and handle these like
+# plain signed offset.
+STP 00 101 0 000 0 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=0
+LDP 00 101 0 000 1 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=0
+STP 10 101 0 000 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=0
+LDP 10 101 0 000 1 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=0
+STP_v 00 101 1 000 0 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=0
+LDP_v 00 101 1 000 1 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=0
+STP_v 01 101 1 000 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=0
+LDP_v 01 101 1 000 1 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=0
+STP_v 10 101 1 000 0 ....... ..... ..... ..... @ldstpair sz=4 sign=0 p=0 w=0
+LDP_v 10 101 1 000 1 ....... ..... ..... ..... @ldstpair sz=4 sign=0 p=0 w=0
+
+# STP and LDP: post-indexed
+STP 00 101 0 001 0 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=1 w=1
+LDP 00 101 0 001 1 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=1 w=1
+LDP 01 101 0 001 1 ....... ..... ..... ..... @ldstpair sz=2 sign=1 p=1 w=1
+STP 10 101 0 001 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=1 w=1
+LDP 10 101 0 001 1 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=1 w=1
+STP_v 00 101 1 001 0 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=1 w=1
+LDP_v 00 101 1 001 1 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=1 w=1
+STP_v 01 101 1 001 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=1 w=1
+LDP_v 01 101 1 001 1 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=1 w=1
+STP_v 10 101 1 001 0 ....... ..... ..... ..... @ldstpair sz=4 sign=0 p=1 w=1
+LDP_v 10 101 1 001 1 ....... ..... ..... ..... @ldstpair sz=4 sign=0 p=1 w=1
+
+# STP and LDP: offset
+STP 00 101 0 010 0 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=0
+LDP 00 101 0 010 1 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=0
+LDP 01 101 0 010 1 ....... ..... ..... ..... @ldstpair sz=2 sign=1 p=0 w=0
+STP 10 101 0 010 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=0
+LDP 10 101 0 010 1 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=0
+STP_v 00 101 1 010 0 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=0
+LDP_v 00 101 1 010 1 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=0
+STP_v 01 101 1 010 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=0
+LDP_v 01 101 1 010 1 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=0
+STP_v 10 101 1 010 0 ....... ..... ..... ..... @ldstpair sz=4 sign=0 p=0 w=0
+LDP_v 10 101 1 010 1 ....... ..... ..... ..... @ldstpair sz=4 sign=0 p=0 w=0
+
+# STP and LDP: pre-indexed
+STP 00 101 0 011 0 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=1
+LDP 00 101 0 011 1 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=1
+LDP 01 101 0 011 1 ....... ..... ..... ..... @ldstpair sz=2 sign=1 p=0 w=1
+STP 10 101 0 011 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=1
+LDP 10 101 0 011 1 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=1
+STP_v 00 101 1 011 0 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=1
+LDP_v 00 101 1 011 1 ....... ..... ..... ..... @ldstpair sz=2 sign=0 p=0 w=1
+STP_v 01 101 1 011 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=1
+LDP_v 01 101 1 011 1 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=1
+STP_v 10 101 1 011 0 ....... ..... ..... ..... @ldstpair sz=4 sign=0 p=0 w=1
+LDP_v 10 101 1 011 1 ....... ..... ..... ..... @ldstpair sz=4 sign=0 p=0 w=1
+
+# STGP: store tag and pair
+STGP 01 101 0 001 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=1 w=1
+STGP 01 101 0 010 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=0
+STGP 01 101 0 011 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=1
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LD_lit_v(DisasContext *s, arg_ldlit *a)
     return true;
 }

-/*
- * LDNP (Load Pair - non-temporal hint)
- * LDP (Load Pair - non vector)
- * LDPSW (Load Pair Signed Word - non vector)
- * STNP (Store Pair - non-temporal hint)
- * STP (Store Pair - non vector)
- * LDNP (Load Pair of SIMD&FP - non-temporal hint)
- * LDP (Load Pair of SIMD&FP)
- * STNP (Store Pair of SIMD&FP - non-temporal hint)
- * STP (Store Pair of SIMD&FP)
- *
- *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
- * +-----+-------+---+---+-------+---+-----------------------------+
- * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
- * +-----+-------+---+---+-------+---+-------+-------+------+------+
- *
- * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
- *      LDPSW/STGP               01
- *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
- *   V: 0 -> GPR, 1 -> Vector
- * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
- *      10 -> signed offset, 11 -> pre-index
- *   L: 0 -> Store 1 -> Load
- *
- * Rt, Rt2 = GPR or SIMD registers to be stored
- * Rn = general purpose register containing address
- * imm7 = signed offset (multiple of 4 or 8 depending on size)
- */
-static void disas_ldst_pair(DisasContext *s, uint32_t insn)
+static void op_addr_ldstpair_pre(DisasContext *s, arg_ldstpair *a,
+                                 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
+                                 uint64_t offset, bool is_store, MemOp mop)
 {
-    int rt = extract32(insn, 0, 5);
-    int rn = extract32(insn, 5, 5);
-    int rt2 = extract32(insn, 10, 5);
-    uint64_t offset = sextract64(insn, 15, 7);
-    int index = extract32(insn, 23, 2);
-    bool is_vector = extract32(insn, 26, 1);
-    bool is_load = extract32(insn, 22, 1);
-    int opc = extract32(insn, 30, 2);
-    bool is_signed = false;
-    bool postindex = false;
-    bool wback = false;
-    bool set_tag = false;
-    TCGv_i64 clean_addr, dirty_addr;
-    MemOp mop;
-    int size;
-
-    if (opc == 3) {
-        unallocated_encoding(s);
-        return;
-    }
-
-    if (is_vector) {
-        size = 2 + opc;
-    } else if (opc == 1 && !is_load) {
-        /* STGP */
-        if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
-            unallocated_encoding(s);
-            return;
-        }
-        size = 3;
-        set_tag = true;
-    } else {
-        size = 2 + extract32(opc, 1, 1);
-        is_signed = extract32(opc, 0, 1);
-        if (!is_load && is_signed) {
-            unallocated_encoding(s);
-            return;
-        }
-    }
-
-    switch (index) {
-    case 1: /* post-index */
-        postindex = true;
-        wback = true;
-        break;
-    case 0:
-        /* signed offset with "non-temporal" hint. Since we don't emulate
-         * caches we don't care about hints to the cache system about
-         * data access patterns, and handle this identically to plain
-         * signed offset.
-         */
-        if (is_signed) {
-            /* There is no non-temporal-hint version of LDPSW */
-            unallocated_encoding(s);
-            return;
-        }
-        postindex = false;
-        break;
-    case 2: /* signed offset, rn not updated */
-        postindex = false;
-        break;
-    case 3: /* pre-index */
-        postindex = false;
-        wback = true;
-        break;
-    }
-
-    if (is_vector && !fp_access_check(s)) {
-        return;
-    }
-
-    offset <<= (set_tag ? LOG2_TAG_GRANULE : size);
-
-    if (rn == 31) {
+    if (a->rn == 31) {
         gen_check_sp_alignment(s);
     }

-    dirty_addr = read_cpu_reg_sp(s, rn, 1);
-    if (!postindex) {
+    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
+    if (!a->p) {
+        tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
+    }
+
+    *clean_addr = gen_mte_checkN(s, *dirty_addr, is_store,
+                                 (a->w || a->rn != 31), 2 << a->sz, mop);
+}
+
+static void op_addr_ldstpair_post(DisasContext *s, arg_ldstpair *a,
+                                  TCGv_i64 dirty_addr, uint64_t offset)
+{
+    if (a->w) {
+        if (a->p) {
+            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
+        }
+        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
+    }
+}
+
+static bool trans_STP(DisasContext *s, arg_ldstpair *a)
+{
+    uint64_t offset = a->imm << a->sz;
+    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
+    MemOp mop = finalize_memop(s, a->sz);
+
+    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
+    tcg_rt = cpu_reg(s, a->rt);
+    tcg_rt2 = cpu_reg(s, a->rt2);
+    /*
+     * We built mop above for the single logical access -- rebuild it
+     * now for the paired operation.
+     *
+     * With LSE2, non-sign-extending pairs are treated atomically if
+     * aligned, and if unaligned one of the pair will be completely
+     * within a 16-byte block and that element will be atomic.
+     * Otherwise each element is separately atomic.
+     * In all cases, issue one operation with the correct atomicity.
+     */
+    mop = a->sz + 1;
+    if (s->align_mem) {
+        mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
+    }
+    mop = finalize_memop_pair(s, mop);
+    if (a->sz == 2) {
+        TCGv_i64 tmp = tcg_temp_new_i64();
+
+        if (s->be_data == MO_LE) {
+            tcg_gen_concat32_i64(tmp, tcg_rt, tcg_rt2);
+        } else {
+            tcg_gen_concat32_i64(tmp, tcg_rt2, tcg_rt);
+        }
+        tcg_gen_qemu_st_i64(tmp, clean_addr, get_mem_index(s), mop);
+    } else {
+        TCGv_i128 tmp = tcg_temp_new_i128();
+
+        if (s->be_data == MO_LE) {
+            tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
+        } else {
+            tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
+        }
+        tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
+    }
+    op_addr_ldstpair_post(s, a, dirty_addr, offset);
+    return true;
+}
+
+static bool trans_LDP(DisasContext *s, arg_ldstpair *a)
+{
+    uint64_t offset = a->imm << a->sz;
+    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
+    MemOp mop = finalize_memop(s, a->sz);
+
+    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
+    tcg_rt = cpu_reg(s, a->rt);
+    tcg_rt2 = cpu_reg(s, a->rt2);
+
+    /*
+     * We built mop above for the single logical access -- rebuild it
+     * now for the paired operation.
+     *
+     * With LSE2, non-sign-extending pairs are treated atomically if
+     * aligned, and if unaligned one of the pair will be completely
+     * within a 16-byte block and that element will be atomic.
+     * Otherwise each element is separately atomic.
+     * In all cases, issue one operation with the correct atomicity.
+     *
+     * This treats sign-extending loads like zero-extending loads,
+     * since that reuses the most code below.
+     */
+    mop = a->sz + 1;
+    if (s->align_mem) {
+        mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
+    }
+    mop = finalize_memop_pair(s, mop);
+    if (a->sz == 2) {
+        int o2 = s->be_data == MO_LE ? 32 : 0;
+        int o1 = o2 ^ 32;
+
+        tcg_gen_qemu_ld_i64(tcg_rt, clean_addr, get_mem_index(s), mop);
+        if (a->sign) {
+            tcg_gen_sextract_i64(tcg_rt2, tcg_rt, o2, 32);
+            tcg_gen_sextract_i64(tcg_rt, tcg_rt, o1, 32);
+        } else {
+            tcg_gen_extract_i64(tcg_rt2, tcg_rt, o2, 32);
+            tcg_gen_extract_i64(tcg_rt, tcg_rt, o1, 32);
+        }
+    } else {
+        TCGv_i128 tmp = tcg_temp_new_i128();
+
+        tcg_gen_qemu_ld_i128(tmp, clean_addr, get_mem_index(s), mop);
+        if (s->be_data == MO_LE) {
+            tcg_gen_extr_i128_i64(tcg_rt, tcg_rt2, tmp);
+        } else {
+            tcg_gen_extr_i128_i64(tcg_rt2, tcg_rt, tmp);
+        }
+    }
+    op_addr_ldstpair_post(s, a, dirty_addr, offset);
+    return true;
+}
+
+static bool trans_STP_v(DisasContext *s, arg_ldstpair *a)
+{
+    uint64_t offset = a->imm << a->sz;
+    TCGv_i64 clean_addr, dirty_addr;
+    MemOp mop;
+
+    if (!fp_access_check(s)) {
+        return true;
+    }
+
+    /* LSE2 does not merge FP pairs; leave these as separate operations. */
+    mop = finalize_memop_asimd(s, a->sz);
+    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
+    do_fp_st(s, a->rt, clean_addr, mop);
+    tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
+    do_fp_st(s, a->rt2, clean_addr, mop);
+    op_addr_ldstpair_post(s, a, dirty_addr, offset);
+    return true;
+}
+
+static bool trans_LDP_v(DisasContext *s, arg_ldstpair *a)
+{
+    uint64_t offset = a->imm << a->sz;
+    TCGv_i64 clean_addr, dirty_addr;
+    MemOp mop;
+
+    if (!fp_access_check(s)) {
+        return true;
+    }
+
+    /* LSE2 does not merge FP pairs; leave these as separate operations. */
+    mop = finalize_memop_asimd(s, a->sz);
+    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
+    do_fp_ld(s, a->rt, clean_addr, mop);
+    tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
+    do_fp_ld(s, a->rt2, clean_addr, mop);
+    op_addr_ldstpair_post(s, a, dirty_addr, offset);
+    return true;
+}
+
+static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
+{
+    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
+    uint64_t offset = a->imm << LOG2_TAG_GRANULE;
+    MemOp mop;
+    TCGv_i128 tmp;
+
+    if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
+        return false;
+    }
+
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+
+    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
+    if (!a->p) {
         tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
     }

-    if (set_tag) {
-        if (!s->ata) {
-            /*
-             * TODO: We could rely on the stores below, at least for
-             * system mode, if we arrange to add MO_ALIGN_16.
-             */
-            gen_helper_stg_stub(cpu_env, dirty_addr);
-        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
-            gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
-        } else {
-            gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
-        }
-    }
-
-    if (is_vector) {
-        mop = finalize_memop_asimd(s, size);
-    } else {
-        mop = finalize_memop(s, size);
-    }
-    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
-                                (wback || rn != 31) && !set_tag,
-                                2 << size, mop);
-
-    if (is_vector) {
-        /* LSE2 does not merge FP pairs; leave these as separate operations. */
-        if (is_load) {
-            do_fp_ld(s, rt, clean_addr, mop);
-        } else {
-            do_fp_st(s, rt, clean_addr, mop);
-        }
-        tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
-        if (is_load) {
-            do_fp_ld(s, rt2, clean_addr, mop);
-        } else {
-            do_fp_st(s, rt2, clean_addr, mop);
-        }
-    } else {
-        TCGv_i64 tcg_rt = cpu_reg(s, rt);
-        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
-
+    if (!s->ata) {
         /*
-         * We built mop above for the single logical access -- rebuild it
-         * now for the paired operation.
-         *
-         * With LSE2, non-sign-extending pairs are treated atomically if
-         * aligned, and if unaligned one of the pair will be completely
-         * within a 16-byte block and that element will be atomic.
-         * Otherwise each element is separately atomic.
-         * In all cases, issue one operation with the correct atomicity.
-         *
-         * This treats sign-extending loads like zero-extending loads,
-         * since that reuses the most code below.
+         * TODO: We could rely on the stores below, at least for
+         * system mode, if we arrange to add MO_ALIGN_16.
          */
-        mop = size + 1;
-        if (s->align_mem) {
-            mop |= (size == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
-        }
-        mop = finalize_memop_pair(s, mop);
-
-        if (is_load) {
-            if (size == 2) {
-                int o2 = s->be_data == MO_LE ? 32 : 0;
-                int o1 = o2 ^ 32;
-
-                tcg_gen_qemu_ld_i64(tcg_rt, clean_addr, get_mem_index(s), mop);
-                if (is_signed) {
-                    tcg_gen_sextract_i64(tcg_rt2, tcg_rt, o2, 32);
-                    tcg_gen_sextract_i64(tcg_rt, tcg_rt, o1, 32);
-                } else {
-                    tcg_gen_extract_i64(tcg_rt2, tcg_rt, o2, 32);
-                    tcg_gen_extract_i64(tcg_rt, tcg_rt, o1, 32);
-                }
-            } else {
-                TCGv_i128 tmp = tcg_temp_new_i128();
-
-                tcg_gen_qemu_ld_i128(tmp, clean_addr, get_mem_index(s), mop);
-                if (s->be_data == MO_LE) {
-                    tcg_gen_extr_i128_i64(tcg_rt, tcg_rt2, tmp);
-                } else {
-                    tcg_gen_extr_i128_i64(tcg_rt2, tcg_rt, tmp);
-                }
-            }
-        } else {
-            if (size == 2) {
-                TCGv_i64 tmp = tcg_temp_new_i64();
-
-                if (s->be_data == MO_LE) {
-                    tcg_gen_concat32_i64(tmp, tcg_rt, tcg_rt2);
-                } else {
-                    tcg_gen_concat32_i64(tmp, tcg_rt2, tcg_rt);
-                }
-                tcg_gen_qemu_st_i64(tmp, clean_addr, get_mem_index(s), mop);
-            } else {
-                TCGv_i128 tmp = tcg_temp_new_i128();
-
-                if (s->be_data == MO_LE) {
-                    tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
-                } else {
-                    tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
-                }
-                tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
-            }
-        }
+        gen_helper_stg_stub(cpu_env, dirty_addr);
+    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+        gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
+    } else {
+        gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
     }

-    if (wback) {
-        if (postindex) {
-            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
-        }
-        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
+    mop = finalize_memop(s, a->sz);
+    clean_addr = gen_mte_checkN(s, dirty_addr, true, false, 2 << a->sz, mop);
+
+    tcg_rt = cpu_reg(s, a->rt);
+    tcg_rt2 = cpu_reg(s, a->rt2);
+
+    assert(a->sz == 3);
+
+    tmp = tcg_temp_new_i128();
+    if (s->be_data == MO_LE) {
+        tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
+    } else {
+        tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
     }
+    tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
+
+    op_addr_ldstpair_post(s, a, dirty_addr, offset);
+    return true;
 }

 /*
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn)
 static void disas_ldst(DisasContext *s, uint32_t insn)
 {
     switch (extract32(insn, 24, 6)) {
-    case 0x28: case 0x29:
-    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
-        disas_ldst_pair(s, insn);
-        break;
     case 0x38: case 0x39:
     case 0x3c: case 0x3d: /* Load/store register (all forms) */
         disas_ldst_reg(s, insn);
--
2.34.1
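
A note on the endianness handling in trans_STP()/trans_LDP() above: for the
32-bit pair, the two registers are combined into one 64-bit access ordered so
that Rt always lands at the lower address. A host-side model of the store
direction (illustrative only, not QEMU code):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Mirror of the tcg_gen_concat32_i64() operand order in trans_STP():
 * the half stored at the lower address is the low half on a
 * little-endian target and the high half on a big-endian one, so
 * rt goes in the low half for LE and the high half for BE. */
static uint64_t stp_pair32(uint32_t rt, uint32_t rt2, bool big_endian)
{
    return big_endian ? ((uint64_t)rt << 32) | rt2
                      : ((uint64_t)rt2 << 32) | rt;
}

int main(void)
{
    printf("LE: %016llx\n",
           (unsigned long long)stp_pair32(0x11111111, 0x22222222, false));
    printf("BE: %016llx\n",
           (unsigned long long)stp_pair32(0x11111111, 0x22222222, true));
    return 0;
}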
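And on the MVE shifts-by-immediate patch above: the rounding right shift that
gen_urshr32_i32() builds out of TCG ops, including the sh == 32 case that
trans_URSHR_ri relies on (an all-zeroes immediate encodes a shift by 32), has
this scalar reference model (illustrative only, not QEMU code):

#include <stdint.h>
#include <stdio.h>

static uint32_t urshr32(uint32_t x, unsigned sh)
{
    if (sh == 32) {
        return (x >> 31) & 1;   /* only the rounding bit can survive */
    }
    return (x >> sh) + ((x >> (sh - 1)) & 1);
}

int main(void)
{
    printf("%u\n", urshr32(7, 1));             /* 4: round-to-nearest-up */
    printf("%u\n", urshr32(0x80000000u, 32));  /* 1 */
    return 0;
}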
The A64 AdvSIMD modified-immediate grouping uses almost the same
constant encoding that A32 Neon does; reuse asimd_imm_const() (to
which we add the AArch64-specific case for cmode 15 op 1) instead of
reimplementing it all.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-5-peter.maydell@linaro.org
---
 target/arm/translate.h     |  3 +-
 target/arm/translate-a64.c | 86 ++++----------------------------------
 target/arm/translate.c     | 17 +++++++-
 3 files changed, 24 insertions(+), 82 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
  * VMVN and VBIC (when cmode < 14 && op == 1).
  *
  * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
- * callers must catch this.
+ * callers must catch this; we return the 64-bit constant value defined
+ * for AArch64.
  *
  * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
  * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
 {
     int rd = extract32(insn, 0, 5);
     int cmode = extract32(insn, 12, 4);
-    int cmode_3_1 = extract32(cmode, 1, 3);
-    int cmode_0 = extract32(cmode, 0, 1);
     int o2 = extract32(insn, 11, 1);
     uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
     bool is_neg = extract32(insn, 29, 1);
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
         return;
     }

-    /* See AdvSIMDExpandImm() in ARM ARM */
-    switch (cmode_3_1) {
-    case 0: /* Replicate(Zeros(24):imm8, 2) */
-    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
-    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
-    case 3: /* Replicate(imm8:Zeros(24), 2) */
-    {
-        int shift = cmode_3_1 * 8;
-        imm = bitfield_replicate(abcdefgh << shift, 32);
-        break;
-    }
-    case 4: /* Replicate(Zeros(8):imm8, 4) */
-    case 5: /* Replicate(imm8:Zeros(8), 4) */
-    {
-        int shift = (cmode_3_1 & 0x1) * 8;
-        imm = bitfield_replicate(abcdefgh << shift, 16);
-        break;
-    }
-    case 6:
-        if (cmode_0) {
-            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
-            imm = (abcdefgh << 16) | 0xffff;
-        } else {
-            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
-            imm = (abcdefgh << 8) | 0xff;
-        }
-        imm = bitfield_replicate(imm, 32);
-        break;
-    case 7:
-        if (!cmode_0 && !is_neg) {
-            imm = bitfield_replicate(abcdefgh, 8);
-        } else if (!cmode_0 && is_neg) {
-            int i;
-            imm = 0;
-            for (i = 0; i < 8; i++) {
-                if ((abcdefgh) & (1 << i)) {
-                    imm |= 0xffULL << (i * 8);
-                }
-            }
-        } else if (cmode_0) {
-            if (is_neg) {
-                imm = (abcdefgh & 0x3f) << 48;
-                if (abcdefgh & 0x80) {
-                    imm |= 0x8000000000000000ULL;
-                }
-                if (abcdefgh & 0x40) {
-                    imm |= 0x3fc0000000000000ULL;
-                } else {
-                    imm |= 0x4000000000000000ULL;
-                }
-            } else {
-                if (o2) {
-                    /* FMOV (vector, immediate) - half-precision */
-                    imm = vfp_expand_imm(MO_16, abcdefgh);
-                    /* now duplicate across the lanes */
-                    imm = bitfield_replicate(imm, 16);
-                } else {
-                    imm = (abcdefgh & 0x3f) << 19;
-                    if (abcdefgh & 0x80) {
-                        imm |= 0x80000000;
-                    }
-                    if (abcdefgh & 0x40) {
-                        imm |= 0x3e000000;
-                    } else {
-                        imm |= 0x40000000;
-                    }
-                    imm |= (imm << 32);
-                }
-            }
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    if (cmode_3_1 != 7 && is_neg) {
-        imm = ~imm;
+    if (cmode == 15 && o2 && !is_neg) {
+        /* FMOV (vector, immediate) - half-precision */
+        imm = vfp_expand_imm(MO_16, abcdefgh);
+        /* now duplicate across the lanes */
+        imm = bitfield_replicate(imm, 16);
+    } else {
+        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
     }

     if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
     case 14:
         if (op) {
             /*
-             * This is the only case where the top and bottom 32 bits
-             * of the encoded constant differ.
+             * This and cmode == 15 op == 1 are the only cases where
+             * the top and bottom 32 bits of the encoded constant differ.
              */
             uint64_t imm64 = 0;
             int n;
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
         imm |= (imm << 8) | (imm << 16) | (imm << 24);
         break;
     case 15:
+        if (op) {
+            /* Reserved encoding for AArch32; valid for AArch64 */
+            uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
+            if (imm & 0x80) {
+                imm64 |= 0x8000000000000000ULL;
+            }
+            if (imm & 0x40) {
+                imm64 |= 0x3fc0000000000000ULL;
+            } else {
+                imm64 |= 0x4000000000000000ULL;
+            }
+            return imm64;
+        }
         imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
             | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
         break;
--
2.20.1

Convert the load and store instructions which use a 9-bit
immediate offset to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-13-peter.maydell@linaro.org
---
 target/arm/tcg/a64.decode      |  69 +++++++++++
 target/arm/tcg/translate-a64.c | 206 ++++++++++++++-------------------
 2 files changed, 153 insertions(+), 122 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ LDP_v 10 101 1 011 1 ....... ..... ..... ..... @ldstpair sz=4 sign=0 p=0 w=1
 STGP 01 101 0 001 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=1 w=1
 STGP 01 101 0 010 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=0
 STGP 01 101 0 011 0 ....... ..... ..... ..... @ldstpair sz=3 sign=0 p=0 w=1
+
+# Load/store register (unscaled immediate)
+&ldst_imm rt rn imm sz sign w p unpriv ext
+@ldst_imm .. ... . .. .. . imm:s9 .. rn:5 rt:5 &ldst_imm unpriv=0 p=0 w=0
+@ldst_imm_pre .. ... . .. .. . imm:s9 .. rn:5 rt:5 &ldst_imm unpriv=0 p=0 w=1
+@ldst_imm_post .. ... . .. .. . imm:s9 .. rn:5 rt:5 &ldst_imm unpriv=0 p=1 w=1
+@ldst_imm_user .. ... . .. .. . imm:s9 .. rn:5 rt:5 &ldst_imm unpriv=1 p=0 w=0
+
+STR_i sz:2 111 0 00 00 0 ......... 00 ..... ..... @ldst_imm sign=0 ext=0
+LDR_i 00 111 0 00 01 0 ......... 00 ..... ..... @ldst_imm sign=0 ext=1 sz=0
+LDR_i 01 111 0 00 01 0 ......... 00 ..... ..... @ldst_imm sign=0 ext=1 sz=1
+LDR_i 10 111 0 00 01 0 ......... 00 ..... ..... @ldst_imm sign=0 ext=1 sz=2
+LDR_i 11 111 0 00 01 0 ......... 00 ..... ..... @ldst_imm sign=0 ext=0 sz=3
+LDR_i 00 111 0 00 10 0 ......... 00 ..... ..... @ldst_imm sign=1 ext=0 sz=0
+LDR_i 01 111 0 00 10 0 ......... 00 ..... ..... @ldst_imm sign=1 ext=0 sz=1
+LDR_i 10 111 0 00 10 0 ......... 00 ..... ..... @ldst_imm sign=1 ext=0 sz=2
+LDR_i 00 111 0 00 11 0 ......... 00 ..... ..... @ldst_imm sign=1 ext=1 sz=0
+LDR_i 01 111 0 00 11 0 ......... 00 ..... ..... @ldst_imm sign=1 ext=1 sz=1
+
+STR_i sz:2 111 0 00 00 0 ......... 01 ..... ..... @ldst_imm_post sign=0 ext=0
+LDR_i 00 111 0 00 01 0 ......... 01 ..... ..... @ldst_imm_post sign=0 ext=1 sz=0
+LDR_i 01 111 0 00 01 0 ......... 01 ..... ..... @ldst_imm_post sign=0 ext=1 sz=1
+LDR_i 10 111 0 00 01 0 ......... 01 ..... ..... @ldst_imm_post sign=0 ext=1 sz=2
+LDR_i 11 111 0 00 01 0 ......... 01 ..... ..... @ldst_imm_post sign=0 ext=0 sz=3
+LDR_i 00 111 0 00 10 0 ......... 01 ..... ..... @ldst_imm_post sign=1 ext=0 sz=0
+LDR_i 01 111 0 00 10 0 ......... 01 ..... ..... @ldst_imm_post sign=1 ext=0 sz=1
+LDR_i 10 111 0 00 10 0 ......... 01 ..... ..... @ldst_imm_post sign=1 ext=0 sz=2
+LDR_i 00 111 0 00 11 0 ......... 01 ..... ..... @ldst_imm_post sign=1 ext=1 sz=0
+LDR_i 01 111 0 00 11 0 ......... 01 ..... ..... @ldst_imm_post sign=1 ext=1 sz=1
+
+STR_i sz:2 111 0 00 00 0 ......... 10 ..... ..... @ldst_imm_user sign=0 ext=0
+LDR_i 00 111 0 00 01 0 ......... 10 ..... ..... @ldst_imm_user sign=0 ext=1 sz=0
+LDR_i 01 111 0 00 01 0 ......... 10 ..... ..... @ldst_imm_user sign=0 ext=1 sz=1
+LDR_i 10 111 0 00 01 0 ......... 10 ..... ..... @ldst_imm_user sign=0 ext=1 sz=2
+LDR_i 11 111 0 00 01 0 ......... 10 ..... ..... @ldst_imm_user sign=0 ext=0 sz=3
+LDR_i 00 111 0 00 10 0 ......... 10 ..... ..... @ldst_imm_user sign=1 ext=0 sz=0
+LDR_i 01 111 0 00 10 0 ......... 10 ..... ..... @ldst_imm_user sign=1 ext=0 sz=1
+LDR_i 10 111 0 00 10 0 ......... 10 ..... ..... @ldst_imm_user sign=1 ext=0 sz=2
+LDR_i 00 111 0 00 11 0 ......... 10 ..... ..... @ldst_imm_user sign=1 ext=1 sz=0
+LDR_i 01 111 0 00 11 0 ......... 10 ..... ..... @ldst_imm_user sign=1 ext=1 sz=1
+
+STR_i sz:2 111 0 00 00 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=0
+LDR_i 00 111 0 00 01 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=1 sz=0
+LDR_i 01 111 0 00 01 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=1 sz=1
+LDR_i 10 111 0 00 01 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=1 sz=2
+LDR_i 11 111 0 00 01 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=0 sz=3
+LDR_i 00 111 0 00 10 0 ......... 11 ..... ..... @ldst_imm_pre sign=1 ext=0 sz=0
+LDR_i 01 111 0 00 10 0 ......... 11 ..... ..... @ldst_imm_pre sign=1 ext=0 sz=1
+LDR_i 10 111 0 00 10 0 ......... 11 ..... ..... @ldst_imm_pre sign=1 ext=0 sz=2
+LDR_i 00 111 0 00 11 0 ......... 11 ..... ..... @ldst_imm_pre sign=1 ext=1 sz=0
+LDR_i 01 111 0 00 11 0 ......... 11 ..... ..... @ldst_imm_pre sign=1 ext=1 sz=1
+
+# PRFM : prefetch memory: a no-op for QEMU
+NOP 11 111 0 00 10 0 --------- 00 ----- -----
+
+STR_v_i sz:2 111 1 00 00 0 ......... 00 ..... ..... @ldst_imm sign=0 ext=0
+STR_v_i 00 111 1 00 10 0 ......... 00 ..... ..... @ldst_imm sign=0 ext=0 sz=4
+LDR_v_i sz:2 111 1 00 01 0 ......... 00 ..... ..... @ldst_imm sign=0 ext=0
+LDR_v_i 00 111 1 00 11 0 ......... 00 ..... ..... @ldst_imm sign=0 ext=0 sz=4
+
+STR_v_i sz:2 111 1 00 00 0 ......... 01 ..... ..... @ldst_imm_post sign=0 ext=0
+STR_v_i 00 111 1 00 10 0 ......... 01 ..... ..... @ldst_imm_post sign=0 ext=0 sz=4
+LDR_v_i sz:2 111 1 00 01 0 ......... 01 ..... ..... @ldst_imm_post sign=0 ext=0
+LDR_v_i 00 111 1 00 11 0 ......... 01 ..... ..... @ldst_imm_post sign=0 ext=0 sz=4
+
+STR_v_i sz:2 111 1 00 00 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=0
+STR_v_i 00 111 1 00 10 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=0 sz=4
+LDR_v_i sz:2 111 1 00 01 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=0
+LDR_v_i 00 111 1 00 11 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=0 sz=4
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
     return true;
 }

-/*
- * Load/store (immediate post-indexed)
- * Load/store (immediate pre-indexed)
- * Load/store (unscaled immediate)
- *
- * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
- * +----+-------+---+-----+-----+---+--------+-----+------+------+
- * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
- * +----+-------+---+-----+-----+---+--------+-----+------+------+
- *
- * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
-         10 -> unprivileged
- * V = 0 -> non-vector
- * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
- * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
- */
-static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
-                                int opc,
-                                int size,
-                                int rt,
-                                bool is_vector)
+static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a,
+                                 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
+                                 uint64_t offset, bool is_store, MemOp mop)
 {
-    int rn = extract32(insn, 5, 5);
-    int imm9 = sextract32(insn, 12, 9);
-    int idx = extract32(insn, 10, 2);
-    bool is_signed = false;
-    bool is_store = false;
-    bool is_extended = false;
-    bool is_unpriv = (idx == 2);
-    bool iss_valid;
-    bool post_index;
-    bool writeback;
     int memidx;
-    MemOp memop;
-    TCGv_i64 clean_addr, dirty_addr;

-    if (is_vector) {
-        size |= (opc & 2) << 1;
-        if (size > 4 || is_unpriv) {
-            unallocated_encoding(s);
-            return;
-        }
-        is_store = ((opc & 1) == 0);
-        if (!fp_access_check(s)) {
-            return;
-        }
-        memop = finalize_memop_asimd(s, size);
-    } else {
-        if (size == 3 && opc == 2) {
-            /* PRFM - prefetch */
-            if (idx != 0) {
-                unallocated_encoding(s);
-                return;
-            }
-            return;
-        }
-        if (opc == 3 && size > 1) {
-            unallocated_encoding(s);
-            return;
-        }
-        is_store = (opc == 0);
-        is_signed = !is_store && extract32(opc, 1, 1);
-        is_extended = (size < 3) && extract32(opc, 0, 1);
-        memop = finalize_memop(s, size + is_signed * MO_SIGN);
-    }
-
-    switch (idx) {
-    case 0:
-    case 2:
-        post_index = false;
-        writeback = false;
-        break;
-    case 1:
-        post_index = true;
-        writeback = true;
-        break;
-    case 3:
-        post_index = false;
-        writeback = true;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    iss_valid = !is_vector && !writeback;
-
-    if (rn == 31) {
+    if (a->rn == 31) {
         gen_check_sp_alignment(s);
     }

-    dirty_addr = read_cpu_reg_sp(s, rn, 1);
-    if (!post_index) {
-        tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
+    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
+    if (!a->p) {
+        tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
     }
+    memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
+    *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store,
+                                        a->w || a->rn != 31,
+                                        mop, a->unpriv, memidx);
+}

-    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
-
-    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
-                                       writeback || rn != 31,
-                                       memop, is_unpriv, memidx);
-
-    if (is_vector) {
-        if (is_store) {
-            do_fp_st(s, rt, clean_addr, memop);
-        } else {
-            do_fp_ld(s, rt, clean_addr, memop);
-        }
-    } else {
-        TCGv_i64 tcg_rt = cpu_reg(s, rt);
-        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
-
-        if (is_store) {
-            do_gpr_st_memidx(s, tcg_rt, clean_addr, memop, memidx,
-                             iss_valid, rt, iss_sf, false);
-        } else {
-            do_gpr_ld_memidx(s, tcg_rt, clean_addr, memop,
-                             is_extended, memidx,
-                             iss_valid, rt, iss_sf, false);
+static void op_addr_ldst_imm_post(DisasContext *s, arg_ldst_imm *a,
+                                  TCGv_i64 dirty_addr, uint64_t offset)
+{
+    if (a->w) {
+        if (a->p) {
+            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
         }
+        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
     }
+}

-    if (writeback) {
-        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
-        if (post_index) {
-            tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
-        }
-        tcg_gen_mov_i64(tcg_rn, dirty_addr);
+static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a)
+{
+    bool iss_sf, iss_valid = !a->w;
+    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
+    int memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
+    MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
+
+    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
+
+    tcg_rt = cpu_reg(s, a->rt);
+    iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
+
+    do_gpr_st_memidx(s, tcg_rt, clean_addr, mop, memidx,
+                     iss_valid, a->rt, iss_sf, false);
+    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
+    return true;
+}
+
+static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a)
+{
+    bool iss_sf, iss_valid = !a->w;
+    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
+    int memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
+    MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
+
+    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
+
+    tcg_rt = cpu_reg(s, a->rt);
+    iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
+
+    do_gpr_ld_memidx(s, tcg_rt, clean_addr, mop,
+                     a->ext, memidx, iss_valid, a->rt, iss_sf, false);
+    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
+    return true;
+}
+
+static bool trans_STR_v_i(DisasContext *s, arg_ldst_imm *a)
+{
+    TCGv_i64 clean_addr, dirty_addr;
+    MemOp mop;
+
+    if (!fp_access_check(s)) {
+        return true;
     }
+    mop = finalize_memop_asimd(s, a->sz);
+    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
+    do_fp_st(s, a->rt, clean_addr, mop);
+    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
+    return true;
+}
+
+static bool trans_LDR_v_i(DisasContext *s, arg_ldst_imm *a)
+{
+    TCGv_i64 clean_addr, dirty_addr;
+    MemOp mop;
+
+    if (!fp_access_check(s)) {
+        return true;
+    }
+    mop = finalize_memop_asimd(s, a->sz);
+    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
+    do_fp_ld(s, a->rt, clean_addr, mop);
+    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
+    return true;
 }

 /*
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg(DisasContext *s, uint32_t insn)
     switch (extract32(insn, 24, 2)) {
     case 0:
         if (extract32(insn, 21, 1) == 0) {
-            /* Load/store register (unscaled immediate)
-             * Load/store immediate pre/post-indexed
-             * Load/store register unprivileged
-             */
-            disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
-            return;
+            break;
         }
         switch (extract32(insn, 10, 2)) {
         case 0:
--
2.34.1
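
The p and w flags that the @ldst_imm formats above attach to each pattern
encode the three addressing forms. A toy model of the address arithmetic
(illustrative only, not QEMU code):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* p: post-index (offset applied after the access); w: writeback. */
static uint64_t ldst_imm_addr(uint64_t *rn, int64_t imm, bool p, bool w)
{
    uint64_t addr = p ? *rn : *rn + imm;   /* pre/offset forms add first */
    if (w) {
        *rn = p ? *rn + imm : addr;        /* write the updated base back */
    }
    return addr;
}

int main(void)
{
    uint64_t rn = 0x1000;
    /* post-indexed with writeback: access 0x1000, then rn becomes 0xff8 */
    printf("addr %llx\n", (unsigned long long)ldst_imm_addr(&rn, -8, true, true));
    printf("rn   %llx\n", (unsigned long long)rn);
    return 0;
}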
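Looking back at the asimd_imm_const() change in the AdvSIMD modified-immediate
patch above, the new cmode == 15, op == 1 arm expands the 8-bit abcdefgh field
into a double-precision bit pattern. The same arithmetic as a stand-alone
function (illustrative only, not QEMU code):

#include <stdint.h>
#include <stdio.h>

static uint64_t expand_fp8_to_f64(uint32_t imm8)
{
    uint64_t imm64 = (uint64_t)(imm8 & 0x3f) << 48;   /* cdefgh -> fraction */
    if (imm8 & 0x80) {
        imm64 |= 0x8000000000000000ULL;               /* a -> sign bit */
    }
    if (imm8 & 0x40) {
        imm64 |= 0x3fc0000000000000ULL;               /* exponent for b == 1 */
    } else {
        imm64 |= 0x4000000000000000ULL;               /* exponent for b == 0 */
    }
    return imm64;
}

int main(void)
{
    /* imm8 == 0x70 yields 0x3ff0000000000000, i.e. 1.0 */
    printf("%016llx\n", (unsigned long long)expand_fp8_to_f64(0x70));
    return 0;
}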
Implement the MVE shift-right-and-narrow insn VSHRN and VRSHRN.

do_urshr() is borrowed from sve_helper.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-12-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 10 ++++++++++
 target/arm/mve.decode      | 11 +++++++++++
 target/arm/mve_helper.c    | 40 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 15 ++++++++++++++
 4 files changed, 76 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
 VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
 VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
 VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
+
+# Narrowing shifts (which only support b and h sizes)
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
+
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)

 DO_VSHLL_ALL(vshllb, false)
 DO_VSHLL_ALL(vshllt, true)
+
+/*
+ * Narrowing right shifts, taking a double sized input, shifting it
+ * and putting the result in either the top or bottom half of the output.
+ * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
+ */
+#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN)       \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,     \
+                                void *vm, uint32_t shift)       \
+    {                                                           \
+        LTYPE *m = vm;                                          \
+        TYPE *d = vd;                                           \
+        uint16_t mask = mve_element_mask(env);                  \
+        unsigned le;                                            \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            TYPE r = FN(m[H##LESIZE(le)], shift);               \
+            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask);     \
+        }                                                       \
+        mve_advance_vpt(env);                                   \
+    }
+
+#define DO_VSHRN_ALL(OP, FN)                                    \
+    DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN)        \
+    DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN)       \
+    DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN)         \
+    DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)
+
+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
+{
+    if (likely(sh < 64)) {
+        return (x >> sh) + ((x >> (sh - 1)) & 1);
+    } else if (sh == 64) {
+        return x >> 63;
+    } else {
+        return 0;
+    }
+}
+
+DO_VSHRN_ALL(vshrn, DO_SHR)
+DO_VSHRN_ALL(vrshrn, do_urshr)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_VSHLL(VSHLL_BS, vshllbs)
 DO_VSHLL(VSHLL_BU, vshllbu)
 DO_VSHLL(VSHLL_TS, vshllts)
 DO_VSHLL(VSHLL_TU, vshlltu)
+
+#define DO_2SHIFT_N(INSN, FN)                                   \
+    static bool trans_##INSN(DisasContext *s, arg_2shift *a)    \
+    {                                                           \
+        static MVEGenTwoOpShiftFn * const fns[] = {             \
+            gen_helper_mve_##FN##b,                             \
+            gen_helper_mve_##FN##h,                             \
+        };                                                      \
+        return do_2shift(s, a, fns[a->size], false);            \
+    }
+
+DO_2SHIFT_N(VSHRNB, vshrnb)
+DO_2SHIFT_N(VSHRNT, vshrnt)
+DO_2SHIFT_N(VRSHRNB, vrshrnb)
+DO_2SHIFT_N(VRSHRNT, vrshrnt)
--
2.20.1

Convert the LDR and STR instructions which use a 12-bit
immediate offset to decodetree. We can reuse the existing LDR and STR
trans functions for these.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-14-peter.maydell@linaro.org
---
 target/arm/tcg/a64.decode      |  25 ++++++
 target/arm/tcg/translate-a64.c | 104 +++++----------------------
 2 files changed, 41 insertions(+), 88 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ STR_v_i sz:2 111 1 00 00 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=0
 STR_v_i 00 111 1 00 10 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=0 sz=4
 LDR_v_i sz:2 111 1 00 01 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=0
 LDR_v_i 00 111 1 00 11 0 ......... 11 ..... ..... @ldst_imm_pre sign=0 ext=0 sz=4
+
+# Load/store with an unsigned 12 bit immediate, which is scaled by the
+# element size. The function gets the sz:imm and returns the scaled immediate.
+%uimm_scaled 10:12 sz:3 !function=uimm_scaled
+
+@ldst_uimm .. ... . .. .. ............ rn:5 rt:5 &ldst_imm unpriv=0 p=0 w=0 imm=%uimm_scaled
+
+STR_i sz:2 111 0 01 00 ............ ..... ..... @ldst_uimm sign=0 ext=0
+LDR_i 00 111 0 01 01 ............ ..... ..... @ldst_uimm sign=0 ext=1 sz=0
+LDR_i 01 111 0 01 01 ............ ..... ..... @ldst_uimm sign=0 ext=1 sz=1
+LDR_i 10 111 0 01 01 ............ ..... ..... @ldst_uimm sign=0 ext=1 sz=2
+LDR_i 11 111 0 01 01 ............ ..... ..... @ldst_uimm sign=0 ext=0 sz=3
+LDR_i 00 111 0 01 10 ............ ..... ..... @ldst_uimm sign=1 ext=0 sz=0
+LDR_i 01 111 0 01 10 ............ ..... ..... @ldst_uimm sign=1 ext=0 sz=1
+LDR_i 10 111 0 01 10 ............ ..... ..... @ldst_uimm sign=1 ext=0 sz=2
+LDR_i 00 111 0 01 11 ............ ..... ..... @ldst_uimm sign=1 ext=1 sz=0
+LDR_i 01 111 0 01 11 ............ ..... ..... @ldst_uimm sign=1 ext=1 sz=1
+
+# PRFM
+NOP 11 111 0 01 10 ------------ ----- -----
+
+STR_v_i sz:2 111 1 01 00 ............ ..... ..... @ldst_uimm sign=0 ext=0
+STR_v_i 00 111 1 01 10 ............ ..... ..... @ldst_uimm sign=0 ext=0 sz=4
+LDR_v_i sz:2 111 1 01 01 ............ ..... ..... @ldst_uimm sign=0 ext=0
+LDR_v_i 00 111 1 01 11 ............ ..... ..... @ldst_uimm sign=0 ext=0 sz=4
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ enum a64_shift_type {
     A64_SHIFT_TYPE_ROR = 3
 };

+/*
+ * Helpers for extracting complex instruction fields
+ */
+
+/*
+ * For load/store with an unsigned 12 bit immediate scaled by the element
+ * size. The input has the immediate field in bits [14:3] and the element
+ * size in [2:0].
+ */
+static int uimm_scaled(DisasContext *s, int x)
+{
+    unsigned imm = x >> 3;
+    unsigned scale = extract32(x, 0, 3);
+    return imm << scale;
+}
+
 /*
  * Include the generated decoders.
  */
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
     }
 }

-/*
- * Load/store (unsigned immediate)
- *
- * 31 30 29   27  26 25 24 23 22 21        10 9     5
- * +----+-------+---+-----+-----+------------+-------+------+
- * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
- * +----+-------+---+-----+-----+------------+-------+------+
- *
- * For non-vector:
- *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
- *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
- * For vector:
- *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
- *   opc<0>: 0 -> store, 1 -> load
- * Rn: base address register (inc SP)
- * Rt: target register
- */
-static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
-                                        int opc,
-                                        int size,
-                                        int rt,
-                                        bool is_vector)
-{
-    int rn = extract32(insn, 5, 5);
-    unsigned int imm12 = extract32(insn, 10, 12);
-    unsigned int offset;
-    TCGv_i64 clean_addr, dirty_addr;
-    bool is_store;
-    bool is_signed = false;
-    bool is_extended = false;
-    MemOp memop;
-
-    if (is_vector) {
-        size |= (opc & 2) << 1;
-        if (size > 4) {
-            unallocated_encoding(s);
-            return;
-        }
-        is_store = !extract32(opc, 0, 1);
-        if (!fp_access_check(s)) {
-            return;
-        }
-        memop = finalize_memop_asimd(s, size);
-    } else {
-        if (size == 3 && opc == 2) {
-            /* PRFM - prefetch */
-            return;
-        }
-        if (opc == 3 && size > 1) {
-            unallocated_encoding(s);
-            return;
-        }
-        is_store = (opc == 0);
-        is_signed = !is_store && extract32(opc, 1, 1);
-        is_extended = (size < 3) && extract32(opc, 0, 1);
-        memop = finalize_memop(s, size + is_signed * MO_SIGN);
-    }
-
-    if (rn == 31) {
-        gen_check_sp_alignment(s);
-    }
-    dirty_addr = read_cpu_reg_sp(s, rn, 1);
-    offset = imm12 << size;
-    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
-
-    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, memop);
-
-    if (is_vector) {
145
- if (is_store) {
146
- do_fp_st(s, rt, clean_addr, memop);
147
- } else {
148
- do_fp_ld(s, rt, clean_addr, memop);
149
- }
150
- } else {
151
- TCGv_i64 tcg_rt = cpu_reg(s, rt);
152
- bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
153
- if (is_store) {
154
- do_gpr_st(s, tcg_rt, clean_addr, memop, true, rt, iss_sf, false);
155
- } else {
156
- do_gpr_ld(s, tcg_rt, clean_addr, memop,
157
- is_extended, true, rt, iss_sf, false);
158
- }
159
- }
160
-}
161
-
162
/* Atomic memory operations
163
*
164
* 31 30 27 26 24 22 21 16 15 12 10 5 0
165
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg(DisasContext *s, uint32_t insn)
166
return;
167
}
168
break;
169
- case 1:
170
- disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
171
- return;
172
}
173
unallocated_encoding(s);
174
}
--
2.20.1

--
2.34.1
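(Editorial note, not part of either patch above: the uimm_scaled() helper
added by the preceding patch unpacks the combined { imm12, size } field of
the @ldst_uimm format into a byte offset. A minimal standalone sketch of the
same transform, using plain C shifts in place of extract32(); the function
name uimm_scaled_demo is invented for illustration:)

static int uimm_scaled_demo(int x)
{
    unsigned imm = x >> 3;   /* bits [14:3]: the unsigned 12-bit immediate */
    unsigned scale = x & 7;  /* bits [2:0]: log2 of the element size */
    return imm << scale;     /* offset is the immediate scaled by the size */
}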
Implement the MVE VADDLV insn; this is similar to VADDV, except
that it accumulates 32-bit elements into a 64-bit accumulator
stored in a pair of general-purpose registers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-15-peter.maydell@linaro.org

Convert the LDR and STR instructions which take a register
plus register offset to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-15-peter.maydell@linaro.org
---
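(Editorial note, not part of either patch: a minimal sketch of how the
64-bit VADDLV accumulator maps onto the RdaHi:RdaLo register pair. Plain C
stands in for the TCG concat/extract ops used in the real trans function;
the helper names pack_rda/unpack_rda are invented for illustration.)

#include <stdint.h>

/* RdaHi holds the high 32 bits of the accumulator, RdaLo the low 32 bits. */
static uint64_t pack_rda(uint32_t rdalo, uint32_t rdahi)
{
    return ((uint64_t)rdahi << 32) | rdalo;
}

static void unpack_rda(uint64_t rda, uint32_t *rdalo, uint32_t *rdahi)
{
    *rdalo = (uint32_t)rda;
    *rdahi = (uint32_t)(rda >> 32);
}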
target/arm/helper-mve.h | 3 ++
target/arm/mve.decode | 6 +++-
target/arm/mve_helper.c | 19 ++++++++++++
target/arm/translate-mve.c | 63 ++++++++++++++++++++++++++++++++++++++
4 files changed, 90 insertions(+), 1 deletion(-)

target/arm/tcg/a64.decode | 22 +++++
target/arm/tcg/translate-a64.c | 173 +++++++++++++++------------------
2 files changed, 103 insertions(+), 92 deletions(-)
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
12
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
16
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-mve.h
14
--- a/target/arm/tcg/a64.decode
18
+++ b/target/arm/helper-mve.h
15
+++ b/target/arm/tcg/a64.decode
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
16
@@ -XXX,XX +XXX,XX @@ STR_v_i sz:2 111 1 01 00 ............ ..... ..... @ldst_uimm sign=0 ext=
20
DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
17
STR_v_i 00 111 1 01 10 ............ ..... ..... @ldst_uimm sign=0 ext=0 sz=4
21
DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
18
LDR_v_i sz:2 111 1 01 01 ............ ..... ..... @ldst_uimm sign=0 ext=0
22
19
LDR_v_i 00 111 1 01 11 ............ ..... ..... @ldst_uimm sign=0 ext=0 sz=4
23
+DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64)
20
+
24
+DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64)
21
+# Load/store with register offset
25
+
22
+&ldst rm rn rt sign ext sz opt s
26
DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
23
+@ldst .. ... . .. .. . rm:5 opt:3 s:1 .. rn:5 rt:5 &ldst
27
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
24
+STR sz:2 111 0 00 00 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=0
28
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
25
+LDR 00 111 0 00 01 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=1 sz=0
29
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
26
+LDR 01 111 0 00 01 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=1 sz=1
27
+LDR 10 111 0 00 01 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=1 sz=2
28
+LDR 11 111 0 00 01 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=0 sz=3
29
+LDR 00 111 0 00 10 1 ..... ... . 10 ..... ..... @ldst sign=1 ext=0 sz=0
30
+LDR 01 111 0 00 10 1 ..... ... . 10 ..... ..... @ldst sign=1 ext=0 sz=1
31
+LDR 10 111 0 00 10 1 ..... ... . 10 ..... ..... @ldst sign=1 ext=0 sz=2
32
+LDR 00 111 0 00 11 1 ..... ... . 10 ..... ..... @ldst sign=1 ext=1 sz=0
33
+LDR 01 111 0 00 11 1 ..... ... . 10 ..... ..... @ldst sign=1 ext=1 sz=1
34
+
35
+# PRFM
36
+NOP 11 111 0 00 10 1 ----- -1- - 10 ----- -----
37
+
38
+STR_v sz:2 111 1 00 00 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=0
39
+STR_v 00 111 1 00 10 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=0 sz=4
40
+LDR_v sz:2 111 1 00 01 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=0
41
+LDR_v 00 111 1 00 11 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=0 sz=4
42
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
30
index XXXXXXX..XXXXXXX 100644
43
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/mve.decode
44
--- a/target/arm/tcg/translate-a64.c
32
+++ b/target/arm/mve.decode
45
+++ b/target/arm/tcg/translate-a64.c
33
@@ -XXX,XX +XXX,XX @@ VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
46
@@ -XXX,XX +XXX,XX @@ static bool trans_LDR_v_i(DisasContext *s, arg_ldst_imm *a)
34
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
35
36
# Vector add across vector
37
-VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
38
+{
39
+ VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
40
+ VADDLV 111 u:1 1110 1 ... 1001 ... 0 1111 00 a:1 0 qm:3 0 \
41
+ rdahi=%rdahi rdalo=%rdalo
42
+}
43
44
# Predicate operations
45
%mask_22_13 22:1 13:3
46
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/mve_helper.c
49
+++ b/target/arm/mve_helper.c
50
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvub, 1, uint8_t)
51
DO_VADDV(vaddvuh, 2, uint16_t)
52
DO_VADDV(vaddvuw, 4, uint32_t)
53
54
+#define DO_VADDLV(OP, TYPE, LTYPE) \
55
+ uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
56
+ uint64_t ra) \
57
+ { \
58
+ uint16_t mask = mve_element_mask(env); \
59
+ unsigned e; \
60
+ TYPE *m = vm; \
61
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
62
+ if (mask & 1) { \
63
+ ra += (LTYPE)m[H4(e)]; \
64
+ } \
65
+ } \
66
+ mve_advance_vpt(env); \
67
+ return ra; \
68
+ } \
69
+
70
+DO_VADDLV(vaddlv_s, int32_t, int64_t)
71
+DO_VADDLV(vaddlv_u, uint32_t, uint64_t)
72
+
73
/* Shifts by immediate */
74
#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
75
void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
76
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/target/arm/translate-mve.c
79
+++ b/target/arm/translate-mve.c
80
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
81
return true;
47
return true;
82
}
48
}
83
49
84
+static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a)
50
-/*
85
+{
51
- * Load/store (register offset)
86
+ /*
52
- *
87
+ * Vector Add Long Across Vector: accumulate the 32-bit
53
- * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
88
+ * elements of the vector into a 64-bit result stored in
54
- * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
89
+ * a pair of general-purpose registers.
55
- * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
90
+ * No need to check Qm's bank: it is only 3 bits in decode.
56
- * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
91
+ */
57
- *
92
+ TCGv_ptr qm;
58
- * For non-vector:
93
+ TCGv_i64 rda;
59
- * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
94
+ TCGv_i32 rdalo, rdahi;
60
- * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
95
+
61
- * For vector:
96
+ if (!dc_isar_feature(aa32_mve, s)) {
62
- * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
97
+ return false;
63
- * opc<0>: 0 -> store, 1 -> load
98
+ }
64
- * V: 1 -> vector/simd
99
+ /*
65
- * opt: extend encoding (see DecodeRegExtend)
100
+ * rdahi == 13 is UNPREDICTABLE; rdahi == 15 is a related
66
- * S: if S=1 then scale (essentially index by sizeof(size))
101
+ * encoding; rdalo always has bit 0 clear so cannot be 13 or 15.
67
- * Rt: register to transfer into/out of
102
+ */
68
- * Rn: address register or SP for base
103
+ if (a->rdahi == 13 || a->rdahi == 15) {
69
- * Rm: offset register or ZR for offset
104
+ return false;
70
- */
105
+ }
71
-static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
106
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
72
- int opc,
73
- int size,
74
- int rt,
75
- bool is_vector)
76
+static void op_addr_ldst_pre(DisasContext *s, arg_ldst *a,
77
+ TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
78
+ bool is_store, MemOp memop)
79
{
80
- int rn = extract32(insn, 5, 5);
81
- int shift = extract32(insn, 12, 1);
82
- int rm = extract32(insn, 16, 5);
83
- int opt = extract32(insn, 13, 3);
84
- bool is_signed = false;
85
- bool is_store = false;
86
- bool is_extended = false;
87
- TCGv_i64 tcg_rm, clean_addr, dirty_addr;
88
- MemOp memop;
89
+ TCGv_i64 tcg_rm;
90
91
- if (extract32(opt, 1, 1) == 0) {
92
- unallocated_encoding(s);
93
- return;
94
- }
95
-
96
- if (is_vector) {
97
- size |= (opc & 2) << 1;
98
- if (size > 4) {
99
- unallocated_encoding(s);
100
- return;
101
- }
102
- is_store = !extract32(opc, 0, 1);
103
- if (!fp_access_check(s)) {
104
- return;
105
- }
106
- memop = finalize_memop_asimd(s, size);
107
- } else {
108
- if (size == 3 && opc == 2) {
109
- /* PRFM - prefetch */
110
- return;
111
- }
112
- if (opc == 3 && size > 1) {
113
- unallocated_encoding(s);
114
- return;
115
- }
116
- is_store = (opc == 0);
117
- is_signed = !is_store && extract32(opc, 1, 1);
118
- is_extended = (size < 3) && extract32(opc, 0, 1);
119
- memop = finalize_memop(s, size + is_signed * MO_SIGN);
120
- }
121
-
122
- if (rn == 31) {
123
+ if (a->rn == 31) {
124
gen_check_sp_alignment(s);
125
}
126
- dirty_addr = read_cpu_reg_sp(s, rn, 1);
127
+ *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
128
129
- tcg_rm = read_cpu_reg(s, rm, 1);
130
- ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
131
+ tcg_rm = read_cpu_reg(s, a->rm, 1);
132
+ ext_and_shift_reg(tcg_rm, tcg_rm, a->opt, a->s ? a->sz : 0);
133
134
- tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
135
+ tcg_gen_add_i64(*dirty_addr, *dirty_addr, tcg_rm);
136
+ *clean_addr = gen_mte_check1(s, *dirty_addr, is_store, true, memop);
137
+}
138
139
- clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, memop);
140
+static bool trans_LDR(DisasContext *s, arg_ldst *a)
141
+{
142
+ TCGv_i64 clean_addr, dirty_addr, tcg_rt;
143
+ bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
144
+ MemOp memop;
145
146
- if (is_vector) {
147
- if (is_store) {
148
- do_fp_st(s, rt, clean_addr, memop);
149
- } else {
150
- do_fp_ld(s, rt, clean_addr, memop);
151
- }
152
- } else {
153
- TCGv_i64 tcg_rt = cpu_reg(s, rt);
154
- bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
155
-
156
- if (is_store) {
157
- do_gpr_st(s, tcg_rt, clean_addr, memop,
158
- true, rt, iss_sf, false);
159
- } else {
160
- do_gpr_ld(s, tcg_rt, clean_addr, memop,
161
- is_extended, true, rt, iss_sf, false);
162
- }
163
+ if (extract32(a->opt, 1, 1) == 0) {
164
+ return false;
165
}
166
+
167
+ memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
168
+ op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
169
+ tcg_rt = cpu_reg(s, a->rt);
170
+ do_gpr_ld(s, tcg_rt, clean_addr, memop,
171
+ a->ext, true, a->rt, iss_sf, false);
172
+ return true;
173
+}
174
+
175
+static bool trans_STR(DisasContext *s, arg_ldst *a)
176
+{
177
+ TCGv_i64 clean_addr, dirty_addr, tcg_rt;
178
+ bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
179
+ MemOp memop;
180
+
181
+ if (extract32(a->opt, 1, 1) == 0) {
182
+ return false;
183
+ }
184
+
185
+ memop = finalize_memop(s, a->sz);
186
+ op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
187
+ tcg_rt = cpu_reg(s, a->rt);
188
+ do_gpr_st(s, tcg_rt, clean_addr, memop, true, a->rt, iss_sf, false);
189
+ return true;
190
+}
191
+
192
+static bool trans_LDR_v(DisasContext *s, arg_ldst *a)
193
+{
194
+ TCGv_i64 clean_addr, dirty_addr;
195
+ MemOp memop;
196
+
197
+ if (extract32(a->opt, 1, 1) == 0) {
198
+ return false;
199
+ }
200
+
201
+ if (!fp_access_check(s)) {
107
+ return true;
202
+ return true;
108
+ }
203
+ }
109
+
204
+
110
+ /*
205
+ memop = finalize_memop_asimd(s, a->sz);
111
+ * This insn is subject to beat-wise execution. Partial execution
206
+ op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
112
+ * of an A=0 (no-accumulate) insn which does not execute the first
207
+ do_fp_ld(s, a->rt, clean_addr, memop);
113
+ * beat must start with the current value of RdaHi:RdaLo, not zero.
208
+ return true;
114
+ */
209
+}
115
+ if (a->a || mve_skip_first_beat(s)) {
210
+
116
+ /* Accumulate input from RdaHi:RdaLo */
211
+static bool trans_STR_v(DisasContext *s, arg_ldst *a)
117
+ rda = tcg_temp_new_i64();
212
+{
118
+ rdalo = load_reg(s, a->rdalo);
213
+ TCGv_i64 clean_addr, dirty_addr;
119
+ rdahi = load_reg(s, a->rdahi);
214
+ MemOp memop;
120
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
215
+
121
+ tcg_temp_free_i32(rdalo);
216
+ if (extract32(a->opt, 1, 1) == 0) {
122
+ tcg_temp_free_i32(rdahi);
217
+ return false;
123
+ } else {
218
+ }
124
+ /* Accumulate starting at zero */
219
+
125
+ rda = tcg_const_i64(0);
220
+ if (!fp_access_check(s)) {
126
+ }
221
+ return true;
127
+
222
+ }
128
+ qm = mve_qreg_ptr(a->qm);
223
+
129
+ if (a->u) {
224
+ memop = finalize_memop_asimd(s, a->sz);
130
+ gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda);
225
+ op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
131
+ } else {
226
+ do_fp_st(s, a->rt, clean_addr, memop);
132
+ gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda);
227
+ return true;
133
+ }
228
}
134
+ tcg_temp_free_ptr(qm);
229
135
+
230
/* Atomic memory operations
136
+ rdalo = tcg_temp_new_i32();
231
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
137
+ rdahi = tcg_temp_new_i32();
232
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
138
+ tcg_gen_extrl_i64_i32(rdalo, rda);
139
+ tcg_gen_extrh_i64_i32(rdahi, rda);
140
+ store_reg(s, a->rdalo, rdalo);
141
+ store_reg(s, a->rdahi, rdahi);
142
+ tcg_temp_free_i64(rda);
143
+ mve_update_eci(s);
144
+ return true;
145
+}
146
+
147
static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
148
{
233
{
149
TCGv_ptr qd;
234
int rt = extract32(insn, 0, 5);
235
- int opc = extract32(insn, 22, 2);
236
bool is_vector = extract32(insn, 26, 1);
237
int size = extract32(insn, 30, 2);
238
239
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg(DisasContext *s, uint32_t insn)
240
disas_ldst_atomic(s, insn, size, rt, is_vector);
241
return;
242
case 2:
243
- disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
244
- return;
245
+ break;
246
default:
247
disas_ldst_pac(s, insn, size, rt, is_vector);
248
return;
150
--
249
--
151
2.20.1
250
2.34.1
152
153
The initial implementation of the MVE VRMLALDAVH and VRMLSLDAVH
insns had some bugs:
* the 32x32 multiply of elements was being done as 32x32->32,
  not 32x32->64
* we were incorrectly maintaining the accumulator in its full
  72-bit form across all 4 beats of the insn; in the pseudocode
  it is squashed back into the 64 bits of the RdaHi:RdaLo
  registers after each beat

In particular, fixing the second of these allows us to recast
the implementation to avoid 128-bit arithmetic entirely.

Since the element size here is always 4, we can also drop the
parameterization of ESIZE to make the code a little more readable.

Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-3-peter.maydell@linaro.org

Convert the insns in the atomic memory operations group to
decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-16-peter.maydell@linaro.org
---
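(Editorial note, not part of either patch: the "squash to 64 bits per beat"
fix can be seen in isolation. Each 32x32->64 product has its low 8 bits
rounded away before being added to the 64-bit accumulator, which is exactly
what lets the helper drop the Int128 arithmetic. Illustrative C only; the
function name rmlaldavh_step is invented here.)

#include <stdint.h>

static int64_t rmlaldavh_step(int64_t acc, int32_t n, int32_t m)
{
    int64_t mul = (int64_t)n * m;          /* full 32x32->64 multiply */
    mul = (mul >> 8) + ((mul >> 7) & 1);   /* round off the low 8 bits */
    return acc + mul;                      /* accumulator stays 64-bit */
}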
target/arm/mve_helper.c | 38 +++++++++++++++++++++-----------------
1 file changed, 21 insertions(+), 17 deletions(-)

target/arm/tcg/a64.decode | 15 ++++
target/arm/tcg/translate-a64.c | 153 ++++++++++++---------------------
2 files changed, 70 insertions(+), 98 deletions(-)
23
11
24
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
12
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
25
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/mve_helper.c
14
--- a/target/arm/tcg/a64.decode
27
+++ b/target/arm/mve_helper.c
15
+++ b/target/arm/tcg/a64.decode
28
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ STR_v sz:2 111 1 00 00 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=0
29
*/
17
STR_v 00 111 1 00 10 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=0 sz=4
30
18
LDR_v sz:2 111 1 00 01 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=0
31
#include "qemu/osdep.h"
19
LDR_v 00 111 1 00 11 1 ..... ... . 10 ..... ..... @ldst sign=0 ext=0 sz=4
32
-#include "qemu/int128.h"
20
+
33
#include "cpu.h"
21
+# Atomic memory operations
34
#include "internals.h"
22
+&atomic rs rn rt a r sz
35
#include "vec_internal.h"
23
+@atomic sz:2 ... . .. a:1 r:1 . rs:5 . ... .. rn:5 rt:5 &atomic
36
@@ -XXX,XX +XXX,XX @@ DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
24
+LDADD .. 111 0 00 . . 1 ..... 0000 00 ..... ..... @atomic
37
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
25
+LDCLR .. 111 0 00 . . 1 ..... 0001 00 ..... ..... @atomic
26
+LDEOR .. 111 0 00 . . 1 ..... 0010 00 ..... ..... @atomic
27
+LDSET .. 111 0 00 . . 1 ..... 0011 00 ..... ..... @atomic
28
+LDSMAX .. 111 0 00 . . 1 ..... 0100 00 ..... ..... @atomic
29
+LDSMIN .. 111 0 00 . . 1 ..... 0101 00 ..... ..... @atomic
30
+LDUMAX .. 111 0 00 . . 1 ..... 0110 00 ..... ..... @atomic
31
+LDUMIN .. 111 0 00 . . 1 ..... 0111 00 ..... ..... @atomic
32
+SWP .. 111 0 00 . . 1 ..... 1000 00 ..... ..... @atomic
33
+
34
+LDAPR sz:2 111 0 00 1 0 1 11111 1100 00 rn:5 rt:5
35
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/tcg/translate-a64.c
38
+++ b/target/arm/tcg/translate-a64.c
39
@@ -XXX,XX +XXX,XX @@ static bool trans_STR_v(DisasContext *s, arg_ldst *a)
40
return true;
41
}
42
43
-/* Atomic memory operations
44
- *
45
- * 31 30 27 26 24 22 21 16 15 12 10 5 0
46
- * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
47
- * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn | Rt |
48
- * +------+-------+---+-----+-----+--------+----+-----+-----+----+-----+
49
- *
50
- * Rt: the result register
51
- * Rn: base address or SP
52
- * Rs: the source register for the operation
53
- * V: vector flag (always 0 as of v8.3)
54
- * A: acquire flag
55
- * R: release flag
56
- */
57
-static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
58
- int size, int rt, bool is_vector)
59
+
60
+static bool do_atomic_ld(DisasContext *s, arg_atomic *a, AtomicThreeOpFn *fn,
61
+ int sign, bool invert)
62
{
63
- int rs = extract32(insn, 16, 5);
64
- int rn = extract32(insn, 5, 5);
65
- int o3_opc = extract32(insn, 12, 4);
66
- bool r = extract32(insn, 22, 1);
67
- bool a = extract32(insn, 23, 1);
68
- TCGv_i64 tcg_rs, tcg_rt, clean_addr;
69
- AtomicThreeOpFn *fn = NULL;
70
- MemOp mop = size;
71
+ MemOp mop = a->sz | sign;
72
+ TCGv_i64 clean_addr, tcg_rs, tcg_rt;
73
74
- if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
75
- unallocated_encoding(s);
76
- return;
77
- }
78
- switch (o3_opc) {
79
- case 000: /* LDADD */
80
- fn = tcg_gen_atomic_fetch_add_i64;
81
- break;
82
- case 001: /* LDCLR */
83
- fn = tcg_gen_atomic_fetch_and_i64;
84
- break;
85
- case 002: /* LDEOR */
86
- fn = tcg_gen_atomic_fetch_xor_i64;
87
- break;
88
- case 003: /* LDSET */
89
- fn = tcg_gen_atomic_fetch_or_i64;
90
- break;
91
- case 004: /* LDSMAX */
92
- fn = tcg_gen_atomic_fetch_smax_i64;
93
- mop |= MO_SIGN;
94
- break;
95
- case 005: /* LDSMIN */
96
- fn = tcg_gen_atomic_fetch_smin_i64;
97
- mop |= MO_SIGN;
98
- break;
99
- case 006: /* LDUMAX */
100
- fn = tcg_gen_atomic_fetch_umax_i64;
101
- break;
102
- case 007: /* LDUMIN */
103
- fn = tcg_gen_atomic_fetch_umin_i64;
104
- break;
105
- case 010: /* SWP */
106
- fn = tcg_gen_atomic_xchg_i64;
107
- break;
108
- case 014: /* LDAPR, LDAPRH, LDAPRB */
109
- if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
110
- rs != 31 || a != 1 || r != 0) {
111
- unallocated_encoding(s);
112
- return;
113
- }
114
- break;
115
- default:
116
- unallocated_encoding(s);
117
- return;
118
- }
119
-
120
- if (rn == 31) {
121
+ if (a->rn == 31) {
122
gen_check_sp_alignment(s);
123
}
124
-
125
- mop = check_atomic_align(s, rn, mop);
126
- clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, mop);
127
-
128
- if (o3_opc == 014) {
129
- /*
130
- * LDAPR* are a special case because they are a simple load, not a
131
- * fetch-and-do-something op.
132
- * The architectural consistency requirements here are weaker than
133
- * full load-acquire (we only need "load-acquire processor consistent"),
134
- * but we choose to implement them as full LDAQ.
135
- */
136
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop, false,
137
- true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
138
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
139
- return;
140
- }
141
-
142
- tcg_rs = read_cpu_reg(s, rs, true);
143
- tcg_rt = cpu_reg(s, rt);
144
-
145
- if (o3_opc == 1) { /* LDCLR */
146
+ mop = check_atomic_align(s, a->rn, mop);
147
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
148
+ a->rn != 31, mop);
149
+ tcg_rs = read_cpu_reg(s, a->rs, true);
150
+ tcg_rt = cpu_reg(s, a->rt);
151
+ if (invert) {
152
tcg_gen_not_i64(tcg_rs, tcg_rs);
153
}
154
-
155
- /* The tcg atomic primitives are all full barriers. Therefore we
156
+ /*
157
+ * The tcg atomic primitives are all full barriers. Therefore we
158
* can ignore the Acquire and Release bits of this instruction.
159
*/
160
fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);
161
162
if (mop & MO_SIGN) {
163
- switch (size) {
164
+ switch (a->sz) {
165
case MO_8:
166
tcg_gen_ext8u_i64(tcg_rt, tcg_rt);
167
break;
168
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
169
g_assert_not_reached();
170
}
171
}
172
+ return true;
173
+}
174
+
175
+TRANS_FEAT(LDADD, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_add_i64, 0, false)
176
+TRANS_FEAT(LDCLR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_and_i64, 0, true)
177
+TRANS_FEAT(LDEOR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_xor_i64, 0, false)
178
+TRANS_FEAT(LDSET, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_or_i64, 0, false)
179
+TRANS_FEAT(LDSMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smax_i64, MO_SIGN, false)
180
+TRANS_FEAT(LDSMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smin_i64, MO_SIGN, false)
181
+TRANS_FEAT(LDUMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umax_i64, 0, false)
182
+TRANS_FEAT(LDUMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umin_i64, 0, false)
183
+TRANS_FEAT(SWP, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_xchg_i64, 0, false)
184
+
185
+static bool trans_LDAPR(DisasContext *s, arg_LDAPR *a)
186
+{
187
+ bool iss_sf = ldst_iss_sf(a->sz, false, false);
188
+ TCGv_i64 clean_addr;
189
+ MemOp mop;
190
+
191
+ if (!dc_isar_feature(aa64_atomics, s) ||
192
+ !dc_isar_feature(aa64_rcpc_8_3, s)) {
193
+ return false;
194
+ }
195
+ if (a->rn == 31) {
196
+ gen_check_sp_alignment(s);
197
+ }
198
+ mop = check_atomic_align(s, a->rn, a->sz);
199
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
200
+ a->rn != 31, mop);
201
+ /*
202
+ * LDAPR* are a special case because they are a simple load, not a
203
+ * fetch-and-do-something op.
204
+ * The architectural consistency requirements here are weaker than
205
+ * full load-acquire (we only need "load-acquire processor consistent"),
206
+ * but we choose to implement them as full LDAQ.
207
+ */
208
+ do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, false,
209
+ true, a->rt, iss_sf, true);
210
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
211
+ return true;
212
}
38
213
39
/*
214
/*
40
- * Rounding multiply add long dual accumulate high: we must keep
215
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg(DisasContext *s, uint32_t insn)
41
- * a 72-bit internal accumulator value and return the top 64 bits.
216
}
42
+ * Rounding multiply add long dual accumulate high. In the pseudocode
217
switch (extract32(insn, 10, 2)) {
43
+ * this is implemented with a 72-bit internal accumulator value of which
218
case 0:
44
+ * the top 64 bits are returned. We optimize this to avoid having to
219
- disas_ldst_atomic(s, insn, size, rt, is_vector);
45
+ * use 128-bit arithmetic -- we can do this because the 74-bit accumulator
220
- return;
46
+ * is squashed back into 64-bits after each beat.
221
case 2:
47
*/
222
break;
48
-#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128) \
223
default:
49
+#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \
50
uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
51
void *vm, uint64_t a) \
52
{ \
53
uint16_t mask = mve_element_mask(env); \
54
unsigned e; \
55
TYPE *n = vn, *m = vm; \
56
- Int128 acc = int128_lshift(TO128(a), 8); \
57
- for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
58
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
59
if (mask & 1) { \
60
+ LTYPE mul; \
61
if (e & 1) { \
62
- acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] * \
63
- m[H##ESIZE(e)])); \
64
+ mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \
65
+ if (SUB) { \
66
+ mul = -mul; \
67
+ } \
68
} else { \
69
- acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \
70
- m[H##ESIZE(e)])); \
71
+ mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \
72
} \
73
- acc = int128_add(acc, int128_make64(1 << 7)); \
74
+ mul = (mul >> 8) + ((mul >> 7) & 1); \
75
+ a += mul; \
76
} \
77
} \
78
mve_advance_vpt(env); \
79
- return int128_getlo(int128_rshift(acc, 8)); \
80
+ return a; \
81
}
82
83
-DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64)
84
-DO_LDAVH(vrmlaldavhxsw, 4, int32_t, true, int128_add, int128_add, int128_makes64)
85
+DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
86
+DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)
87
88
-DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64)
89
+DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)
90
91
-DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
92
-DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
93
+DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
94
+DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)
95
96
/* Vector add across vector */
97
#define DO_VADDV(OP, ESIZE, TYPE) \
98
--
224
--
99
2.20.1
225
2.34.1
100
101
The function asimd_imm_const() in translate-neon.c is an
implementation of the pseudocode AdvSIMDExpandImm(), which we will
also want for MVE. Move the implementation to translate.c, with a
prototype in translate.h.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-4-peter.maydell@linaro.org

Convert the instructions in the load/store register (pointer
authentication) group to decodetree: LDRAA, LDRAB.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-17-peter.maydell@linaro.org
---
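(Editorial note, not part of either patch: the %ldra_imm field used by the
LDRA pattern below is the 10-bit signed immediate, scaled by 8 because the
access size is always 64 bits. A standalone sketch of the same assembly
from the raw instruction word, for comparison with the old open-coded
extract/sextract sequence; ldra_offset is an invented name and the sketch
assumes the usual arithmetic right shift on signed values:)

#include <stdint.h>

static int64_t ldra_offset(uint32_t insn)
{
    /* bit 22 is the sign bit, bits [20:12] are the low nine bits */
    uint32_t imm10 = (((insn >> 22) & 1) << 9) | ((insn >> 12) & 0x1ff);
    /* scale by 8 (size == 3) and sign-extend from bit 12 */
    return ((int32_t)(imm10 << 22)) >> 19;
}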
target/arm/translate.h | 16 ++++++++++
target/arm/translate-neon.c | 63 -------------------------------------
target/arm/translate.c | 57 +++++++++++++++++++++++++++++++++
3 files changed, 73 insertions(+), 63 deletions(-)

target/arm/tcg/a64.decode | 7 +++
target/arm/tcg/translate-a64.c | 83 +++++++---------------------
2 files changed, 23 insertions(+), 67 deletions(-)
14
12
15
diff --git a/target/arm/translate.h b/target/arm/translate.h
13
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
16
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate.h
15
--- a/target/arm/tcg/a64.decode
18
+++ b/target/arm/translate.h
16
+++ b/target/arm/tcg/a64.decode
19
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
17
@@ -XXX,XX +XXX,XX @@ LDUMIN .. 111 0 00 . . 1 ..... 0111 00 ..... ..... @atomic
20
return opc | s->be_data;
18
SWP .. 111 0 00 . . 1 ..... 1000 00 ..... ..... @atomic
19
20
LDAPR sz:2 111 0 00 1 0 1 11111 1100 00 rn:5 rt:5
21
+
22
+# Load/store register (pointer authentication)
23
+
24
+# LDRA immediate is 10 bits signed and scaled, but the bits aren't all contiguous
25
+%ldra_imm 22:s1 12:9 !function=times_2
26
+
27
+LDRA 11 111 0 00 m:1 . 1 ......... w:1 1 rn:5 rt:5 imm=%ldra_imm
28
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/tcg/translate-a64.c
31
+++ b/target/arm/tcg/translate-a64.c
32
@@ -XXX,XX +XXX,XX @@ static bool trans_LDAPR(DisasContext *s, arg_LDAPR *a)
33
return true;
21
}
34
}
22
35
23
+/**
36
-/*
24
+ * asimd_imm_const: Expand an encoded SIMD constant value
37
- * PAC memory operations
25
+ *
38
- *
26
+ * Expand a SIMD constant value. This is essentially the pseudocode
39
- * 31 30 27 26 24 22 21 12 11 10 5 0
27
+ * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
40
- * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
28
+ * VMVN and VBIC (when cmode < 14 && op == 1).
41
- * | size | 1 1 1 | V | 0 0 | M S | 1 | imm9 | W | 1 | Rn | Rt |
29
+ *
42
- * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
30
+ * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
43
- *
31
+ * callers must catch this.
44
- * Rt: the result register
32
+ *
45
- * Rn: base address or SP
33
+ * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
46
- * V: vector flag (always 0 as of v8.3)
34
+ * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
47
- * M: clear for key DA, set for key DB
35
+ * we produce an immediate constant value of 0 in these cases.
48
- * W: pre-indexing flag
36
+ */
49
- * S: sign for imm9.
37
+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
50
- */
38
+
51
-static void disas_ldst_pac(DisasContext *s, uint32_t insn,
39
#endif /* TARGET_ARM_TRANSLATE_H */
52
- int size, int rt, bool is_vector)
40
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
53
+static bool trans_LDRA(DisasContext *s, arg_LDRA *a)
41
index XXXXXXX..XXXXXXX 100644
54
{
42
--- a/target/arm/translate-neon.c
55
- int rn = extract32(insn, 5, 5);
43
+++ b/target/arm/translate-neon.c
56
- bool is_wback = extract32(insn, 11, 1);
44
@@ -XXX,XX +XXX,XX @@ DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh)
57
- bool use_key_a = !extract32(insn, 23, 1);
45
DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_hs)
58
- int offset;
46
DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_hu)
59
TCGv_i64 clean_addr, dirty_addr, tcg_rt;
47
60
MemOp memop;
48
-static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
61
62
- if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
63
- unallocated_encoding(s);
64
- return;
65
+ /* Load with pointer authentication */
66
+ if (!dc_isar_feature(aa64_pauth, s)) {
67
+ return false;
68
}
69
70
- if (rn == 31) {
71
+ if (a->rn == 31) {
72
gen_check_sp_alignment(s);
73
}
74
- dirty_addr = read_cpu_reg_sp(s, rn, 1);
75
+ dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
76
77
if (s->pauth_active) {
78
- if (use_key_a) {
79
+ if (!a->m) {
80
gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
81
tcg_constant_i64(0));
82
} else {
83
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
84
}
85
}
86
87
- /* Form the 10-bit signed, scaled offset. */
88
- offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
89
- offset = sextract32(offset << size, 0, 10 + size);
90
- tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
91
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
92
93
- memop = finalize_memop(s, size);
94
+ memop = finalize_memop(s, MO_64);
95
96
/* Note that "clean" and "dirty" here refer to TBI not PAC. */
97
clean_addr = gen_mte_check1(s, dirty_addr, false,
98
- is_wback || rn != 31, memop);
99
+ a->w || a->rn != 31, memop);
100
101
- tcg_rt = cpu_reg(s, rt);
102
+ tcg_rt = cpu_reg(s, a->rt);
103
do_gpr_ld(s, tcg_rt, clean_addr, memop,
104
- /* extend */ false, /* iss_valid */ !is_wback,
105
- /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
106
+ /* extend */ false, /* iss_valid */ !a->w,
107
+ /* iss_srt */ a->rt, /* iss_sf */ true, /* iss_ar */ false);
108
109
- if (is_wback) {
110
- tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
111
+ if (a->w) {
112
+ tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
113
}
114
+ return true;
115
}
116
117
/*
118
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
119
}
120
}
121
122
-/* Load/store register (all forms) */
123
-static void disas_ldst_reg(DisasContext *s, uint32_t insn)
49
-{
124
-{
50
- /*
125
- int rt = extract32(insn, 0, 5);
51
- * Expand the encoded constant.
126
- bool is_vector = extract32(insn, 26, 1);
52
- * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
127
- int size = extract32(insn, 30, 2);
53
- * We choose to not special-case this and will behave as if a
54
- * valid constant encoding of 0 had been given.
55
- * cmode = 15 op = 1 must UNDEF; we assume decode has handled that.
56
- */
57
- switch (cmode) {
58
- case 0: case 1:
59
- /* no-op */
60
- break;
61
- case 2: case 3:
62
- imm <<= 8;
63
- break;
64
- case 4: case 5:
65
- imm <<= 16;
66
- break;
67
- case 6: case 7:
68
- imm <<= 24;
69
- break;
70
- case 8: case 9:
71
- imm |= imm << 16;
72
- break;
73
- case 10: case 11:
74
- imm = (imm << 8) | (imm << 24);
75
- break;
76
- case 12:
77
- imm = (imm << 8) | 0xff;
78
- break;
79
- case 13:
80
- imm = (imm << 16) | 0xffff;
81
- break;
82
- case 14:
83
- if (op) {
84
- /*
85
- * This is the only case where the top and bottom 32 bits
86
- * of the encoded constant differ.
87
- */
88
- uint64_t imm64 = 0;
89
- int n;
90
-
128
-
91
- for (n = 0; n < 8; n++) {
129
- switch (extract32(insn, 24, 2)) {
92
- if (imm & (1 << n)) {
130
- case 0:
93
- imm64 |= (0xffULL << (n * 8));
131
- if (extract32(insn, 21, 1) == 0) {
94
- }
132
- break;
95
- }
96
- return imm64;
97
- }
133
- }
98
- imm |= (imm << 8) | (imm << 16) | (imm << 24);
134
- switch (extract32(insn, 10, 2)) {
99
- break;
135
- case 0:
100
- case 15:
136
- case 2:
101
- imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
137
- break;
102
- | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
138
- default:
139
- disas_ldst_pac(s, insn, size, rt, is_vector);
140
- return;
141
- }
103
- break;
142
- break;
104
- }
143
- }
105
- if (op) {
144
- unallocated_encoding(s);
106
- imm = ~imm;
107
- }
108
- return dup_const(MO_32, imm);
109
-}
145
-}
110
-
146
-
111
static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
147
/* AdvSIMD load/store multiple structures
112
GVecGen2iFn *fn)
148
*
149
* 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0
150
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn)
151
static void disas_ldst(DisasContext *s, uint32_t insn)
113
{
152
{
114
diff --git a/target/arm/translate.c b/target/arm/translate.c
153
switch (extract32(insn, 24, 6)) {
115
index XXXXXXX..XXXXXXX 100644
154
- case 0x38: case 0x39:
116
--- a/target/arm/translate.c
155
- case 0x3c: case 0x3d: /* Load/store register (all forms) */
117
+++ b/target/arm/translate.c
156
- disas_ldst_reg(s, insn);
118
@@ -XXX,XX +XXX,XX @@ void arm_translate_init(void)
157
- break;
119
a64_translate_init();
158
case 0x0c: /* AdvSIMD load/store multiple structures */
120
}
159
disas_ldst_multiple_struct(s, insn);
121
160
break;
122
+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
123
+{
124
+ /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */
125
+ switch (cmode) {
126
+ case 0: case 1:
127
+ /* no-op */
128
+ break;
129
+ case 2: case 3:
130
+ imm <<= 8;
131
+ break;
132
+ case 4: case 5:
133
+ imm <<= 16;
134
+ break;
135
+ case 6: case 7:
136
+ imm <<= 24;
137
+ break;
138
+ case 8: case 9:
139
+ imm |= imm << 16;
140
+ break;
141
+ case 10: case 11:
142
+ imm = (imm << 8) | (imm << 24);
143
+ break;
144
+ case 12:
145
+ imm = (imm << 8) | 0xff;
146
+ break;
147
+ case 13:
148
+ imm = (imm << 16) | 0xffff;
149
+ break;
150
+ case 14:
151
+ if (op) {
152
+ /*
153
+ * This is the only case where the top and bottom 32 bits
154
+ * of the encoded constant differ.
155
+ */
156
+ uint64_t imm64 = 0;
157
+ int n;
158
+
159
+ for (n = 0; n < 8; n++) {
160
+ if (imm & (1 << n)) {
161
+ imm64 |= (0xffULL << (n * 8));
162
+ }
163
+ }
164
+ return imm64;
165
+ }
166
+ imm |= (imm << 8) | (imm << 16) | (imm << 24);
167
+ break;
168
+ case 15:
169
+ imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
170
+ | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
171
+ break;
172
+ }
173
+ if (op) {
174
+ imm = ~imm;
175
+ }
176
+ return dup_const(MO_32, imm);
177
+}
178
+
179
/* Generate a label used for skipping this instruction */
180
void arm_gen_condlabel(DisasContext *s)
181
{
182
--
161
--
183
2.20.1
162
2.34.1
184
163
185
164
Implement the MVE shift-vector-left-by-immediate insns VSHL, VQSHL
and VQSHLU.

The size-and-immediate encoding here is the same as Neon, and we
handle it the same way neon-dp.decode does.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-8-peter.maydell@linaro.org

Convert the instructions in the LDAPR/STLR (unscaled immediate)
group to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-18-peter.maydell@linaro.org
---
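(Editorial note, not part of either patch: the @2_shl_b/h/w formats added
in mve.decode below use the Neon trick where the position of the leading 1
in the combined size+shift field selects the element size. A standalone
sketch of that decode; decode_shl_imm is an invented name, illustrative
only:)

#include <stdint.h>

static void decode_shl_imm(uint32_t imm6, int *size, int *shift)
{
    if (imm6 & 0x20) {          /* 1xxxxx: 32-bit elements, 5-bit shift */
        *size = 2;
        *shift = imm6 & 0x1f;
    } else if (imm6 & 0x10) {   /* 01xxxx: 16-bit elements, 4-bit shift */
        *size = 1;
        *shift = imm6 & 0xf;
    } else {                    /* 001xxx: 8-bit elements, 3-bit shift */
        *size = 0;
        *shift = imm6 & 0x7;
    }
}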
target/arm/helper-mve.h | 16 +++++++++++
target/arm/mve.decode | 23 +++++++++++++++
target/arm/mve_helper.c | 57 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-mve.c | 51 ++++++++++++++++++++++++++++++++++
4 files changed, 147 insertions(+)

target/arm/tcg/a64.decode | 10 +++
target/arm/tcg/translate-a64.c | 132 ++++++++++++---------------------
2 files changed, 56 insertions(+), 86 deletions(-)
16
11
17
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
12
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
18
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper-mve.h
14
--- a/target/arm/tcg/a64.decode
20
+++ b/target/arm/helper-mve.h
15
+++ b/target/arm/tcg/a64.decode
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
16
@@ -XXX,XX +XXX,XX @@ LDAPR sz:2 111 0 00 1 0 1 11111 1100 00 rn:5 rt:5
22
DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
17
%ldra_imm 22:s1 12:9 !function=times_2
23
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
18
24
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
19
LDRA 11 111 0 00 m:1 . 1 ......... w:1 1 rn:5 rt:5 imm=%ldra_imm
25
+
20
+
26
+DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
21
+&ldapr_stlr_i rn rt imm sz sign ext
27
+DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
22
+@ldapr_stlr_i .. ...... .. . imm:9 .. rn:5 rt:5 &ldapr_stlr_i
28
+DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
+STLR_i sz:2 011001 00 0 ......... 00 ..... ..... @ldapr_stlr_i sign=0 ext=0
29
+
24
+LDAPR_i sz:2 011001 01 0 ......... 00 ..... ..... @ldapr_stlr_i sign=0 ext=0
30
+DEF_HELPER_FLAGS_4(mve_vqshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
+LDAPR_i 00 011001 10 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=0 sz=0
31
+DEF_HELPER_FLAGS_4(mve_vqshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
26
+LDAPR_i 01 011001 10 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=0 sz=1
32
+DEF_HELPER_FLAGS_4(mve_vqshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
27
+LDAPR_i 10 011001 10 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=0 sz=2
33
+
28
+LDAPR_i 00 011001 11 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=1 sz=0
34
+DEF_HELPER_FLAGS_4(mve_vqshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
29
+LDAPR_i 01 011001 11 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=1 sz=1
35
+DEF_HELPER_FLAGS_4(mve_vqshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
30
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
36
+DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
37
+
38
+DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
39
+DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
40
+DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
41
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
42
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/mve.decode
32
--- a/target/arm/tcg/translate-a64.c
44
+++ b/target/arm/mve.decode
33
+++ b/target/arm/tcg/translate-a64.c
45
@@ -XXX,XX +XXX,XX @@
34
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
46
&2op qd qm qn size
35
}
47
&2scalar qd qn rm size
48
&1imm qd imm cmode op
49
+&2shift qd qm shift size
50
51
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
52
# Note that both Rn and Qd are 3 bits only (no D bit)
53
@@ -XXX,XX +XXX,XX @@
54
@2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
55
@2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
56
57
+@2_shl_b .... .... .. 001 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
58
+@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
59
+@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2
60
+
61
# Vector loads and stores
62
63
# Widening loads and narrowing stores:
64
@@ -XXX,XX +XXX,XX @@ VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
65
# So we have a single decode line and check the cmode/op in the
66
# trans function.
67
Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
68
+
69
+# Shifts by immediate
70
+
71
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
72
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
73
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
74
+
75
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
76
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
77
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
78
+
79
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
80
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
81
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
82
+
83
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
84
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
85
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w
86
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/target/arm/mve_helper.c
89
+++ b/target/arm/mve_helper.c
90
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
91
WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
92
#define DO_UQRSHL_OP(N, M, satp) \
93
WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
94
+#define DO_SUQSHL_OP(N, M, satp) \
95
+ WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)
96
97
DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
98
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
99
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvsw, 4, uint32_t)
100
DO_VADDV(vaddvub, 1, uint8_t)
101
DO_VADDV(vaddvuh, 2, uint16_t)
102
DO_VADDV(vaddvuw, 4, uint32_t)
103
+
104
+/* Shifts by immediate */
105
+#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
106
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
107
+ void *vm, uint32_t shift) \
108
+ { \
109
+ TYPE *d = vd, *m = vm; \
110
+ uint16_t mask = mve_element_mask(env); \
111
+ unsigned e; \
112
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
113
+ mergemask(&d[H##ESIZE(e)], \
114
+ FN(m[H##ESIZE(e)], shift), mask); \
115
+ } \
116
+ mve_advance_vpt(env); \
117
+ }
118
+
119
+#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \
120
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
121
+ void *vm, uint32_t shift) \
122
+ { \
123
+ TYPE *d = vd, *m = vm; \
124
+ uint16_t mask = mve_element_mask(env); \
125
+ unsigned e; \
126
+ bool qc = false; \
127
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
128
+ bool sat = false; \
129
+ mergemask(&d[H##ESIZE(e)], \
130
+ FN(m[H##ESIZE(e)], shift, &sat), mask); \
131
+ qc |= sat & mask & 1; \
132
+ } \
133
+ if (qc) { \
134
+ env->vfp.qc[0] = qc; \
135
+ } \
136
+ mve_advance_vpt(env); \
137
+ }
138
+
139
+/* provide unsigned 2-op shift helpers for all sizes */
140
+#define DO_2SHIFT_U(OP, FN) \
141
+ DO_2SHIFT(OP##b, 1, uint8_t, FN) \
142
+ DO_2SHIFT(OP##h, 2, uint16_t, FN) \
143
+ DO_2SHIFT(OP##w, 4, uint32_t, FN)
144
+
145
+#define DO_2SHIFT_SAT_U(OP, FN) \
146
+ DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
147
+ DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \
148
+ DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
149
+#define DO_2SHIFT_SAT_S(OP, FN) \
150
+ DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \
151
+ DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \
152
+ DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)
153
+
154
+DO_2SHIFT_U(vshli_u, DO_VSHLU)
155
+DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
156
+DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
157
+DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
158
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
159
index XXXXXXX..XXXXXXX 100644
160
--- a/target/arm/translate-mve.c
161
+++ b/target/arm/translate-mve.c
162
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
163
typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
164
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
165
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
166
+typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
167
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
168
typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
169
typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
170
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
171
}
172
return do_1imm(s, a, fn);
173
}
36
}
174
+
37
175
+static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
38
-/* Update the Sixty-Four bit (SF) registersize. This logic is derived
176
+ bool negateshift)
39
+/*
177
+{
40
+ * Compute the ISS.SF bit for syndrome information if an exception
178
+ TCGv_ptr qd, qm;
41
+ * is taken on a load or store. This indicates whether the instruction
179
+ int shift = a->shift;
42
+ * is accessing a 32-bit or 64-bit register. This logic is derived
180
+
43
* from the ARMv8 specs for LDR (Shared decode for all encodings).
181
+ if (!dc_isar_feature(aa32_mve, s) ||
44
*/
182
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
45
-static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
183
+ !fn) {
46
-{
47
- int opc0 = extract32(opc, 0, 1);
48
- int regsize;
49
-
50
- if (is_signed) {
51
- regsize = opc0 ? 32 : 64;
52
- } else {
53
- regsize = size == 3 ? 64 : 32;
54
- }
55
- return regsize == 64;
56
-}
57
-
58
static bool ldst_iss_sf(int size, bool sign, bool ext)
59
{
60
61
@@ -XXX,XX +XXX,XX @@ static bool trans_LDRA(DisasContext *s, arg_LDRA *a)
62
return true;
63
}
64
65
-/*
66
- * LDAPR/STLR (unscaled immediate)
67
- *
68
- * 31 30 24 22 21 12 10 5 0
69
- * +------+-------------+-----+---+--------+-----+----+-----+
70
- * | size | 0 1 1 0 0 1 | opc | 0 | imm9 | 0 0 | Rn | Rt |
71
- * +------+-------------+-----+---+--------+-----+----+-----+
72
- *
73
- * Rt: source or destination register
74
- * Rn: base register
75
- * imm9: unscaled immediate offset
76
- * opc: 00: STLUR*, 01/10/11: various LDAPUR*
77
- * size: size of load/store
78
- */
79
-static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
80
+static bool trans_LDAPR_i(DisasContext *s, arg_ldapr_stlr_i *a)
81
{
82
- int rt = extract32(insn, 0, 5);
83
- int rn = extract32(insn, 5, 5);
84
- int offset = sextract32(insn, 12, 9);
85
- int opc = extract32(insn, 22, 2);
86
- int size = extract32(insn, 30, 2);
87
TCGv_i64 clean_addr, dirty_addr;
88
- bool is_store = false;
89
- bool extend = false;
90
- bool iss_sf;
91
- MemOp mop = size;
92
+ MemOp mop = a->sz | (a->sign ? MO_SIGN : 0);
93
+ bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
94
95
if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
96
- unallocated_encoding(s);
97
- return;
184
+ return false;
98
+ return false;
185
+ }
99
}
186
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
100
187
+ return true;
101
- switch (opc) {
188
+ }
102
- case 0: /* STLURB */
189
+
103
- is_store = true;
104
- break;
105
- case 1: /* LDAPUR* */
106
- break;
107
- case 2: /* LDAPURS* 64-bit variant */
108
- if (size == 3) {
109
- unallocated_encoding(s);
110
- return;
111
- }
112
- mop |= MO_SIGN;
113
+    /*
+     * When we handle a right shift insn using a left-shift helper
+     * which permits a negative shift count to indicate a right-shift,
+     * we must negate the shift count.
+     */
+    if (negateshift) {
+        shift = -shift;
+    }
+
+    qd = mve_qreg_ptr(a->qd);
+    qm = mve_qreg_ptr(a->qm);
+    fn(cpu_env, qd, qm, tcg_constant_i32(shift));
+    tcg_temp_free_ptr(qd);
+    tcg_temp_free_ptr(qm);
+    mve_update_eci(s);
+    return true;
+}
+
+#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \
+    static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+    { \
+        static MVEGenTwoOpShiftFn * const fns[] = { \
+            gen_helper_mve_##FN##b, \
+            gen_helper_mve_##FN##h, \
+            gen_helper_mve_##FN##w, \
+            NULL, \
+        }; \
+        return do_2shift(s, a, fns[a->size], NEGATESHIFT); \
+    }
+
+DO_2SHIFT(VSHLI, vshli_u, false)
+DO_2SHIFT(VQSHLI_S, vqshli_s, false)
+DO_2SHIFT(VQSHLI_U, vqshli_u, false)
+DO_2SHIFT(VQSHLUI, vqshlui_s, false)
--
2.20.1

-        break;
-    case 3: /* LDAPURS* 32-bit variant */
-        if (size > 1) {
-            unallocated_encoding(s);
-            return;
-        }
-        mop |= MO_SIGN;
-        extend = true; /* zero-extend 32->64 after signed load */
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);
-
-    if (rn == 31) {
+    if (a->rn == 31) {
         gen_check_sp_alignment(s);
     }
 
-    mop = check_ordered_align(s, rn, offset, is_store, mop);
-
-    dirty_addr = read_cpu_reg_sp(s, rn, 1);
-    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
+    mop = check_ordered_align(s, a->rn, a->imm, false, mop);
+    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
+    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
     clean_addr = clean_data_tbi(s, dirty_addr);
 
-    if (is_store) {
-        /* Store-Release semantics */
-        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-        do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
-    } else {
-        /*
-         * Load-AcquirePC semantics; we implement as the slightly more
-         * restrictive Load-Acquire.
-         */
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
-                  extend, true, rt, iss_sf, true);
-        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+    /*
+     * Load-AcquirePC semantics; we implement as the slightly more
+     * restrictive Load-Acquire.
+     */
+    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, a->ext, true,
+              a->rt, iss_sf, true);
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+    return true;
+}
+
+static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a)
+{
+    TCGv_i64 clean_addr, dirty_addr;
+    MemOp mop = a->sz;
+    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
+
+    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
+        return false;
+    }
+
+    /* TODO: ARMv8.4-LSE SCTLR.nAA */
+
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+
+    mop = check_ordered_align(s, a->rn, a->imm, true, mop);
+    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
+    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
+    clean_addr = clean_data_tbi(s, dirty_addr);
+
+    /* Store-Release semantics */
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, mop, true, a->rt, iss_sf, true);
+    return true;
 }
 
 /* AdvSIMD load/store multiple structures
@@ -XXX,XX +XXX,XX @@ static void disas_ldst(DisasContext *s, uint32_t insn)
     case 0x19:
         if (extract32(insn, 21, 1) != 0) {
             disas_ldst_tag(s, insn);
-        } else if (extract32(insn, 10, 2) == 0) {
-            disas_ldst_ldapr_stlr(s, insn);
         } else {
             unallocated_encoding(s);
         }
--
2.34.1

Implement the MVE logical-immediate insns (VMOV, VMVN,
VORR and VBIC). These have essentially the same encoding
as their Neon equivalents, and we implement the decode
in the same way.
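
(For illustration: what the new helpers compute for one 64-bit lane,
as a plain C sketch rather than the helper macros this patch adds;
'imm' is the expanded modified-immediate that asimd_imm_const()
produces:

    uint64_t orri_lane(uint64_t lane, uint64_t imm) { return lane | imm; } /* VORR */
    uint64_t andi_lane(uint64_t lane, uint64_t imm) { return lane & imm; } /* VBIC */

VBIC reuses the AND helper because asimd_imm_const() hands us the
inverted immediate for op=1, so 'lane & imm' is 'lane & ~original_imm'.
The real helpers additionally merge each byte of the result under the
VPT mask via mergemask().)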

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-7-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  4 +++
 target/arm/mve.decode      | 17 +++++++++++++
 target/arm/mve_helper.c    | 24 ++++++++++++++++++
 target/arm/translate-mve.c | 50 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 95 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 # VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
 %size_28 28:1 !function=plus_1
 
+# 1imm format immediate
+%imm_28_16_0 28:1 16:3 0:4
+
 &vldr_vstr rn qd imm p a w size l u
 &1op qd qm size
 &2op qd qm qn size
 &2scalar qd qn rm size
+&1imm qd imm cmode op
 
 @vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
 # Note that both Rn and Qd are 3 bits only (no D bit)
@@ -XXX,XX +XXX,XX @@
 @2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
 @2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
      size=%size_28
+@1imm .... .... .... .... .... cmode:4 .. op:1 . .... &1imm qd=%qd imm=%imm_28_16_0
 
 # The _rev suffix indicates that Vn and Vm are reversed. This is
 # the case for shifts. In the Arm ARM these insns are documented
@@ -XXX,XX +XXX,XX @@ VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rd
 # Predicate operations
 %mask_22_13 22:1 13:3
 VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
+
+# Logical immediate operations (1 reg and modified-immediate)
+
+# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
+# not in a way we can conveniently represent in decodetree without
+# a lot of repetition:
+# VORR: op=0, (cmode & 1) && cmode < 12
+# VBIC: op=1, (cmode & 1) && cmode < 12
+# VMOV: everything else
+# So we have a single decode line and check the cmode/op in the
+# trans function.
+Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vnegw, 4, int32_t, DO_NEG)
 DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
 DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
 
+/*
+ * 1 operand immediates: Vda is destination and possibly also one source.
+ * All these insns work at 64-bit widths.
+ */
+#define DO_1OP_IMM(OP, FN) \
+    void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \
+    { \
+        uint64_t *da = vda; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
+            mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_MOVI(N, I) (I)
+#define DO_ANDI(N, I) ((N) & (I))
+#define DO_ORRI(N, I) ((N) | (I))
+
+DO_1OP_IMM(vmovi, DO_MOVI)
+DO_1OP_IMM(vandi, DO_ANDI)
+DO_1OP_IMM(vorri, DO_ORRI)
+
 #define DO_2OP(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                 void *vd, void *vn, void *vm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
 typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
 
 /* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
 static inline long mve_qreg_offset(unsigned reg)
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
     mve_update_eci(s);
     return true;
 }
+
+static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
+{
+    TCGv_ptr qd;
+    uint64_t imm;
+
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !mve_check_qreg_bank(s, a->qd) ||
+        !fn) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    imm = asimd_imm_const(a->imm, a->cmode, a->op);
+
+    qd = mve_qreg_ptr(a->qd);
+    fn(cpu_env, qd, tcg_constant_i64(imm));
+    tcg_temp_free_ptr(qd);
+    mve_update_eci(s);
+    return true;
+}
+
+static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
+{
+    /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
+    MVEGenOneOpImmFn *fn;
+
+    if ((a->cmode & 1) && a->cmode < 12) {
+        if (a->op) {
+            /*
+             * For op=1, the immediate will be inverted by asimd_imm_const(),
+             * so the VBIC becomes a logical AND operation.
+             */
+            fn = gen_helper_mve_vandi;
+        } else {
+            fn = gen_helper_mve_vorri;
+        }
+    } else {
+        /* There is one unallocated cmode/op combination in this space */
+        if (a->cmode == 15 && a->op == 1) {
+            return false;
+        }
+        /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */
+        fn = gen_helper_mve_vmovi;
+    }
+    return do_1imm(s, a, fn);
+}
--
2.20.1

Convert the instructions in the ASIMD load/store multiple structures
instruction classes to decodetree.
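
(Reference sketch, not part of the patch: the opcode field in bits
[15:12] selects the repeat count and structure-element count exactly
as the deleted switch statement did; in C table form:

    /* opcode[15:12] -> { rpt, selem }; any other value is unallocated */
    static const struct { int rpt, selem; } ldst_mult_opc[16] = {
        [0x0] = { 1, 4 }, [0x2] = { 4, 1 }, [0x4] = { 1, 3 }, [0x6] = { 3, 1 },
        [0x7] = { 1, 1 }, [0x8] = { 1, 2 }, [0xa] = { 2, 1 },
    };

The decodetree patterns below encode the same table, one line per
opcode value.)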

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-19-peter.maydell@linaro.org
---
 target/arm/tcg/a64.decode      |  20 +++
 target/arm/tcg/translate-a64.c | 222 ++++++++++++++++-----------------
 2 files changed, 131 insertions(+), 111 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ LDAPR_i 01 011001 10 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=0 sz=1
 LDAPR_i 10 011001 10 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=0 sz=2
 LDAPR_i 00 011001 11 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=1 sz=0
 LDAPR_i 01 011001 11 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=1 sz=1
+
+# Load/store multiple structures
+# The 4-bit opcode in [15:12] encodes repeat count and structure elements
+&ldst_mult rm rn rt sz q p rpt selem
+@ldst_mult . q:1 ...... p:1 . . rm:5 .... sz:2 rn:5 rt:5 &ldst_mult
+ST_mult 0 . 001100 . 0 0 ..... 0000 .. ..... ..... @ldst_mult rpt=1 selem=4
+ST_mult 0 . 001100 . 0 0 ..... 0010 .. ..... ..... @ldst_mult rpt=4 selem=1
+ST_mult 0 . 001100 . 0 0 ..... 0100 .. ..... ..... @ldst_mult rpt=1 selem=3
+ST_mult 0 . 001100 . 0 0 ..... 0110 .. ..... ..... @ldst_mult rpt=3 selem=1
+ST_mult 0 . 001100 . 0 0 ..... 0111 .. ..... ..... @ldst_mult rpt=1 selem=1
+ST_mult 0 . 001100 . 0 0 ..... 1000 .. ..... ..... @ldst_mult rpt=1 selem=2
+ST_mult 0 . 001100 . 0 0 ..... 1010 .. ..... ..... @ldst_mult rpt=2 selem=1
+
+LD_mult 0 . 001100 . 1 0 ..... 0000 .. ..... ..... @ldst_mult rpt=1 selem=4
+LD_mult 0 . 001100 . 1 0 ..... 0010 .. ..... ..... @ldst_mult rpt=4 selem=1
+LD_mult 0 . 001100 . 1 0 ..... 0100 .. ..... ..... @ldst_mult rpt=1 selem=3
+LD_mult 0 . 001100 . 1 0 ..... 0110 .. ..... ..... @ldst_mult rpt=3 selem=1
+LD_mult 0 . 001100 . 1 0 ..... 0111 .. ..... ..... @ldst_mult rpt=1 selem=1
+LD_mult 0 . 001100 . 1 0 ..... 1000 .. ..... ..... @ldst_mult rpt=1 selem=2
+LD_mult 0 . 001100 . 1 0 ..... 1010 .. ..... ..... @ldst_mult rpt=2 selem=1
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a)
     return true;
 }
 
-/* AdvSIMD load/store multiple structures
- *
- *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
- * +---+---+---------------+---+-------------+--------+------+------+------+
- * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
- * +---+---+---------------+---+-------------+--------+------+------+------+
- *
- * AdvSIMD load/store multiple structures (post-indexed)
- *
- *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
- * +---+---+---------------+---+---+---------+--------+------+------+------+
- * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
- * +---+---+---------------+---+---+---------+--------+------+------+------+
- *
- * Rt: first (or only) SIMD&FP register to be transferred
- * Rn: base address or SP
- * Rm (post-index only): post-index register (when !31) or size dependent #imm
- */
-static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
+static bool trans_LD_mult(DisasContext *s, arg_ldst_mult *a)
 {
-    int rt = extract32(insn, 0, 5);
-    int rn = extract32(insn, 5, 5);
-    int rm = extract32(insn, 16, 5);
-    int size = extract32(insn, 10, 2);
-    int opcode = extract32(insn, 12, 4);
-    bool is_store = !extract32(insn, 22, 1);
-    bool is_postidx = extract32(insn, 23, 1);
-    bool is_q = extract32(insn, 30, 1);
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
     MemOp endian, align, mop;
 
     int total;    /* total bytes */
     int elements; /* elements per vector */
-    int rpt;    /* num iterations */
-    int selem;  /* structure elements */
     int r;
+    int size = a->sz;
 
-    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
-        unallocated_encoding(s);
-        return;
+    if (!a->p && a->rm != 0) {
+        /* For non-postindexed accesses the Rm field must be 0 */
+        return false;
     }
-
-    if (!is_postidx && rm != 0) {
-        unallocated_encoding(s);
-        return;
+    if (size == 3 && !a->q && a->selem != 1) {
+        return false;
     }
-
-    /* From the shared decode logic */
-    switch (opcode) {
-    case 0x0:
-        rpt = 1;
-        selem = 4;
-        break;
-    case 0x2:
-        rpt = 4;
-        selem = 1;
-        break;
-    case 0x4:
-        rpt = 1;
-        selem = 3;
-        break;
-    case 0x6:
-        rpt = 3;
-        selem = 1;
-        break;
-    case 0x7:
-        rpt = 1;
-        selem = 1;
-        break;
-    case 0x8:
-        rpt = 1;
-        selem = 2;
-        break;
-    case 0xa:
-        rpt = 2;
-        selem = 1;
-        break;
-    default:
-        unallocated_encoding(s);
-        return;
-    }
-
-    if (size == 3 && !is_q && selem != 1) {
-        /* reserved */
-        unallocated_encoding(s);
-        return;
-    }
-
     if (!fp_access_check(s)) {
-        return;
+        return true;
     }
 
-    if (rn == 31) {
+    if (a->rn == 31) {
         gen_check_sp_alignment(s);
     }
 
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
         endian = MO_LE;
     }
 
-    total = rpt * selem * (is_q ? 16 : 8);
-    tcg_rn = cpu_reg_sp(s, rn);
+    total = a->rpt * a->selem * (a->q ? 16 : 8);
+    tcg_rn = cpu_reg_sp(s, a->rn);
 
     /*
      * Issue the MTE check vs the logical repeat count, before we
      * promote consecutive little-endian elements below.
      */
-    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
-                                total, finalize_memop_asimd(s, size));
+    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31, total,
+                                finalize_memop_asimd(s, size));
 
     /*
      * Consecutive little-endian elements from a single register
      * can be promoted to a larger little-endian operation.
      */
     align = MO_ALIGN;
-    if (selem == 1 && endian == MO_LE) {
+    if (a->selem == 1 && endian == MO_LE) {
         align = pow2_align(size);
         size = 3;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     }
     mop = endian | size | align;
 
-    elements = (is_q ? 16 : 8) >> size;
+    elements = (a->q ? 16 : 8) >> size;
     tcg_ebytes = tcg_constant_i64(1 << size);
-    for (r = 0; r < rpt; r++) {
+    for (r = 0; r < a->rpt; r++) {
         int e;
         for (e = 0; e < elements; e++) {
             int xs;
-            for (xs = 0; xs < selem; xs++) {
-                int tt = (rt + r + xs) % 32;
-                if (is_store) {
-                    do_vec_st(s, tt, e, clean_addr, mop);
-                } else {
-                    do_vec_ld(s, tt, e, clean_addr, mop);
-                }
+            for (xs = 0; xs < a->selem; xs++) {
+                int tt = (a->rt + r + xs) % 32;
+                do_vec_ld(s, tt, e, clean_addr, mop);
                 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
             }
         }
     }
 
-    if (!is_store) {
-        /* For non-quad operations, setting a slice of the low
-         * 64 bits of the register clears the high 64 bits (in
-         * the ARM ARM pseudocode this is implicit in the fact
-         * that 'rval' is a 64 bit wide variable).
-         * For quad operations, we might still need to zero the
-         * high bits of SVE.
-         */
-        for (r = 0; r < rpt * selem; r++) {
-            int tt = (rt + r) % 32;
-            clear_vec_high(s, is_q, tt);
+    /*
+     * For non-quad operations, setting a slice of the low 64 bits of
+     * the register clears the high 64 bits (in the ARM ARM pseudocode
+     * this is implicit in the fact that 'rval' is a 64 bit wide
+     * variable). For quad operations, we might still need to zero
+     * the high bits of SVE.
+     */
+    for (r = 0; r < a->rpt * a->selem; r++) {
+        int tt = (a->rt + r) % 32;
+        clear_vec_high(s, a->q, tt);
+    }
+
+    if (a->p) {
+        if (a->rm == 31) {
+            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
+        } else {
+            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
+        }
+    }
+    return true;
+}
+
+static bool trans_ST_mult(DisasContext *s, arg_ldst_mult *a)
+{
+    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
+    MemOp endian, align, mop;
+
+    int total;    /* total bytes */
+    int elements; /* elements per vector */
+    int r;
+    int size = a->sz;
+
+    if (!a->p && a->rm != 0) {
+        /* For non-postindexed accesses the Rm field must be 0 */
+        return false;
+    }
+    if (size == 3 && !a->q && a->selem != 1) {
+        return false;
+    }
+    if (!fp_access_check(s)) {
+        return true;
+    }
+
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+
+    /* For our purposes, bytes are always little-endian. */
+    endian = s->be_data;
+    if (size == 0) {
+        endian = MO_LE;
+    }
+
+    total = a->rpt * a->selem * (a->q ? 16 : 8);
+    tcg_rn = cpu_reg_sp(s, a->rn);
+
+    /*
+     * Issue the MTE check vs the logical repeat count, before we
+     * promote consecutive little-endian elements below.
+     */
+    clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31, total,
+                                finalize_memop_asimd(s, size));
+
+    /*
+     * Consecutive little-endian elements from a single register
+     * can be promoted to a larger little-endian operation.
+     */
+    align = MO_ALIGN;
+    if (a->selem == 1 && endian == MO_LE) {
+        align = pow2_align(size);
+        size = 3;
+    }
+    if (!s->align_mem) {
+        align = 0;
+    }
+    mop = endian | size | align;
+
+    elements = (a->q ? 16 : 8) >> size;
+    tcg_ebytes = tcg_constant_i64(1 << size);
+    for (r = 0; r < a->rpt; r++) {
+        int e;
+        for (e = 0; e < elements; e++) {
+            int xs;
+            for (xs = 0; xs < a->selem; xs++) {
+                int tt = (a->rt + r + xs) % 32;
+                do_vec_st(s, tt, e, clean_addr, mop);
+                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
+            }
         }
     }
 
-    if (is_postidx) {
-        if (rm == 31) {
+    if (a->p) {
+        if (a->rm == 31) {
             tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
         } else {
-            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
+            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
         }
     }
+    return true;
 }
 
 /* AdvSIMD load/store single structure
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn)
 static void disas_ldst(DisasContext *s, uint32_t insn)
 {
     switch (extract32(insn, 24, 6)) {
-    case 0x0c: /* AdvSIMD load/store multiple structures */
-        disas_ldst_multiple_struct(s, insn);
-        break;
     case 0x0d: /* AdvSIMD load/store single structure */
         disas_ldst_single_struct(s, insn);
         break;
--
2.34.1

Implement the MVE saturating shift-right-and-narrow insns
VQSHRN, VQSHRUN, VQRSHRN and VQRSHRUN.

do_srshr() is borrowed from sve_helper.c.
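
(Two arithmetic notes, sketched in plain C for illustration -- this
mirrors what the helpers below do, it is not new code. The rounding
right shift adds back the most significant discarded bit, and the
narrowing step clamps the result into the destination range while
recording saturation:

    int64_t srshr64(int64_t x, unsigned sh)   /* e.g. srshr64(7, 2) == 2 */
    {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    }

    int32_t sat_range(int64_t v, int64_t min, int64_t max, bool *satp)
    {
        if (v > max) { *satp = true; return max; }
        if (v < min) { *satp = true; return min; }
        return v;
    }

A lane that saturates only sets QC if its VPT mask bit is set, which
is why the helper checks 'sat && (mask & ...)' per element.)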

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-13-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  30 ++++++++++
 target/arm/mve.decode      |  28 ++++++++++
 target/arm/mve_helper.c    | 104 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  12 +++++
 4 files changed, 174 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
 VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
 VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
 VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
+
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
+
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
+
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
+
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static inline uint64_t do_urshr(uint64_t x, unsigned sh)
     }
 }
 
+static inline int64_t do_srshr(int64_t x, unsigned sh)
+{
+    if (likely(sh < 64)) {
+        return (x >> sh) + ((x >> (sh - 1)) & 1);
+    } else {
+        /* Rounding the sign bit always produces 0. */
+        return 0;
+    }
+}
+
 DO_VSHRN_ALL(vshrn, DO_SHR)
 DO_VSHRN_ALL(vrshrn, do_urshr)
+
+static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
+                                 bool *satp)
+{
+    if (val > max) {
+        *satp = true;
+        return max;
+    } else if (val < min) {
+        *satp = true;
+        return min;
+    } else {
+        return val;
+    }
+}
+
+/* Saturating narrowing right shifts */
+#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        LTYPE *m = vm; \
+        TYPE *d = vd; \
+        uint16_t mask = mve_element_mask(env); \
+        bool qc = false; \
+        unsigned le; \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            bool sat = false; \
+            TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
+            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
+            qc |= sat && (mask & 1 << (TOP * ESIZE)); \
+        } \
+        if (qc) { \
+            env->vfp.qc[0] = qc; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)
+
+#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)
+
+#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)
+
+#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
+
+#define DO_SHRN_SB(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
+#define DO_SHRN_UB(N, M, SATP) \
+    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
+#define DO_SHRUN_B(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)
+
+#define DO_SHRN_SH(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
+#define DO_SHRN_UH(N, M, SATP) \
+    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
+#define DO_SHRUN_H(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)
+
+#define DO_RSHRN_SB(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
+#define DO_RSHRN_UB(N, M, SATP) \
+    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
+#define DO_RSHRUN_B(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)
+
+#define DO_RSHRN_SH(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
+#define DO_RSHRN_UH(N, M, SATP) \
+    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
+#define DO_RSHRUN_H(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)
+
+DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
+DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
+DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
+DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
+DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
+DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)
+
+DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
+DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
+DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
+DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
+DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
+DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VSHRNB, vshrnb)
 DO_2SHIFT_N(VSHRNT, vshrnt)
 DO_2SHIFT_N(VRSHRNB, vrshrnb)
 DO_2SHIFT_N(VRSHRNT, vrshrnt)
+DO_2SHIFT_N(VQSHRNB_S, vqshrnb_s)
+DO_2SHIFT_N(VQSHRNT_S, vqshrnt_s)
+DO_2SHIFT_N(VQSHRNB_U, vqshrnb_u)
+DO_2SHIFT_N(VQSHRNT_U, vqshrnt_u)
+DO_2SHIFT_N(VQSHRUNB, vqshrunb)
+DO_2SHIFT_N(VQSHRUNT, vqshrunt)
+DO_2SHIFT_N(VQRSHRNB_S, vqrshrnb_s)
+DO_2SHIFT_N(VQRSHRNT_S, vqrshrnt_s)
+DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
+DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
+DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
+DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
--
2.20.1

Convert the ASIMD load/store single structure insns to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20230602155223.2040685-20-peter.maydell@linaro.org
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/tcg/a64.decode      |  34 +++++
 target/arm/tcg/translate-a64.c | 219 +++++++++++++++------------------
 2 files changed, 136 insertions(+), 117 deletions(-)

diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -XXX,XX +XXX,XX @@ LD_mult 0 . 001100 . 1 0 ..... 0110 .. ..... ..... @ldst_mult rpt=3 selem=1
 LD_mult 0 . 001100 . 1 0 ..... 0111 .. ..... ..... @ldst_mult rpt=1 selem=1
 LD_mult 0 . 001100 . 1 0 ..... 1000 .. ..... ..... @ldst_mult rpt=1 selem=2
 LD_mult 0 . 001100 . 1 0 ..... 1010 .. ..... ..... @ldst_mult rpt=2 selem=1
+
+# Load/store single structure
+&ldst_single rm rn rt p selem index scale
+
+%ldst_single_selem 13:1 21:1 !function=plus_1
+
+%ldst_single_index_b 30:1 10:3
+%ldst_single_index_h 30:1 11:2
+%ldst_single_index_s 30:1 12:1
+
+@ldst_single_b .. ...... p:1 .. rm:5 ...... rn:5 rt:5 \
+               &ldst_single scale=0 selem=%ldst_single_selem \
+               index=%ldst_single_index_b
+@ldst_single_h .. ...... p:1 .. rm:5 ...... rn:5 rt:5 \
+               &ldst_single scale=1 selem=%ldst_single_selem \
+               index=%ldst_single_index_h
+@ldst_single_s .. ...... p:1 .. rm:5 ...... rn:5 rt:5 \
+               &ldst_single scale=2 selem=%ldst_single_selem \
+               index=%ldst_single_index_s
+@ldst_single_d . index:1 ...... p:1 .. rm:5 ...... rn:5 rt:5 \
+               &ldst_single scale=3 selem=%ldst_single_selem
+
+ST_single 0 . 001101 . 0 . ..... 00 . ... ..... ..... @ldst_single_b
+ST_single 0 . 001101 . 0 . ..... 01 . ..0 ..... ..... @ldst_single_h
+ST_single 0 . 001101 . 0 . ..... 10 . .00 ..... ..... @ldst_single_s
+ST_single 0 . 001101 . 0 . ..... 10 . 001 ..... ..... @ldst_single_d
+
+LD_single 0 . 001101 . 1 . ..... 00 . ... ..... ..... @ldst_single_b
+LD_single 0 . 001101 . 1 . ..... 01 . ..0 ..... ..... @ldst_single_h
+LD_single 0 . 001101 . 1 . ..... 10 . .00 ..... ..... @ldst_single_s
+LD_single 0 . 001101 . 1 . ..... 10 . 001 ..... ..... @ldst_single_d
+
+# Replicating load case
+LD_single_repl 0 q:1 001101 p:1 1 . rm:5 11 . 0 scale:2 rn:5 rt:5 selem=%ldst_single_selem
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_ST_mult(DisasContext *s, arg_ldst_mult *a)
     return true;
 }
 
-/* AdvSIMD load/store single structure
- *
- *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
- * +---+---+---------------+-----+-----------+-----+---+------+------+------+
- * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
- * +---+---+---------------+-----+-----------+-----+---+------+------+------+
- *
- * AdvSIMD load/store single structure (post-indexed)
- *
- *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
- * +---+---+---------------+-----+-----------+-----+---+------+------+------+
- * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
- * +---+---+---------------+-----+-----------+-----+---+------+------+------+
- *
- * Rt: first (or only) SIMD&FP register to be transferred
- * Rn: base address or SP
- * Rm (post-index only): post-index register (when !31) or size dependent #imm
- * index = encoded in Q:S:size dependent on size
- *
- * lane_size = encoded in R, opc
- * transfer width = encoded in opc, S, size
- */
-static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
+static bool trans_ST_single(DisasContext *s, arg_ldst_single *a)
 {
-    int rt = extract32(insn, 0, 5);
-    int rn = extract32(insn, 5, 5);
-    int rm = extract32(insn, 16, 5);
-    int size = extract32(insn, 10, 2);
-    int S = extract32(insn, 12, 1);
-    int opc = extract32(insn, 13, 3);
-    int R = extract32(insn, 21, 1);
-    int is_load = extract32(insn, 22, 1);
-    int is_postidx = extract32(insn, 23, 1);
-    int is_q = extract32(insn, 30, 1);
-
-    int scale = extract32(opc, 1, 2);
-    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
-    bool replicate = false;
-    int index = is_q << 3 | S << 2 | size;
-    int xs, total;
+    int xs, total, rt;
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
     MemOp mop;
 
-    if (extract32(insn, 31, 1)) {
-        unallocated_encoding(s);
-        return;
+    if (!a->p && a->rm != 0) {
+        return false;
     }
-    if (!is_postidx && rm != 0) {
-        unallocated_encoding(s);
-        return;
-    }
-
-    switch (scale) {
-    case 3:
-        if (!is_load || S) {
-            unallocated_encoding(s);
-            return;
-        }
-        scale = size;
-        replicate = true;
-        break;
-    case 0:
-        break;
-    case 1:
-        if (extract32(size, 0, 1)) {
-            unallocated_encoding(s);
-            return;
-        }
-        index >>= 1;
-        break;
-    case 2:
-        if (extract32(size, 1, 1)) {
-            unallocated_encoding(s);
-            return;
-        }
-        if (!extract32(size, 0, 1)) {
-            index >>= 2;
-        } else {
-            if (S) {
-                unallocated_encoding(s);
-                return;
-            }
-            index >>= 3;
-            scale = 3;
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
     if (!fp_access_check(s)) {
-        return;
+        return true;
     }
 
-    if (rn == 31) {
+    if (a->rn == 31) {
         gen_check_sp_alignment(s);
     }
 
-    total = selem << scale;
-    tcg_rn = cpu_reg_sp(s, rn);
+    total = a->selem << a->scale;
+    tcg_rn = cpu_reg_sp(s, a->rn);
 
-    mop = finalize_memop_asimd(s, scale);
-
-    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
+    mop = finalize_memop_asimd(s, a->scale);
+    clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31,
                                 total, mop);
 
-    tcg_ebytes = tcg_constant_i64(1 << scale);
-    for (xs = 0; xs < selem; xs++) {
-        if (replicate) {
-            /* Load and replicate to all elements */
-            TCGv_i64 tcg_tmp = tcg_temp_new_i64();
-
-            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
-            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
-                                 (is_q + 1) * 8, vec_full_reg_size(s),
-                                 tcg_tmp);
-        } else {
-            /* Load/store one element per register */
-            if (is_load) {
-                do_vec_ld(s, rt, index, clean_addr, mop);
-            } else {
-                do_vec_st(s, rt, index, clean_addr, mop);
-            }
-        }
+    tcg_ebytes = tcg_constant_i64(1 << a->scale);
+    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
+        do_vec_st(s, rt, a->index, clean_addr, mop);
         tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
-        rt = (rt + 1) % 32;
     }
 
-    if (is_postidx) {
-        if (rm == 31) {
+    if (a->p) {
+        if (a->rm == 31) {
             tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
         } else {
-            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
+            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
         }
     }
+    return true;
+}
+
+static bool trans_LD_single(DisasContext *s, arg_ldst_single *a)
+{
+    int xs, total, rt;
+    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
+    MemOp mop;
+
+    if (!a->p && a->rm != 0) {
+        return false;
+    }
+    if (!fp_access_check(s)) {
+        return true;
+    }
+
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+
+    total = a->selem << a->scale;
+    tcg_rn = cpu_reg_sp(s, a->rn);
+
+    mop = finalize_memop_asimd(s, a->scale);
+    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
+                                total, mop);
+
+    tcg_ebytes = tcg_constant_i64(1 << a->scale);
+    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
+        do_vec_ld(s, rt, a->index, clean_addr, mop);
+        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
+    }
+
+    if (a->p) {
+        if (a->rm == 31) {
+            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
+        } else {
+            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
+        }
+    }
+    return true;
+}
+
+static bool trans_LD_single_repl(DisasContext *s, arg_LD_single_repl *a)
+{
+    int xs, total, rt;
+    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
+    MemOp mop;
+
+    if (!a->p && a->rm != 0) {
+        return false;
+    }
+    if (!fp_access_check(s)) {
+        return true;
+    }
+
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+
+    total = a->selem << a->scale;
+    tcg_rn = cpu_reg_sp(s, a->rn);
+
+    mop = finalize_memop_asimd(s, a->scale);
+    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
+                                total, mop);
+
+    tcg_ebytes = tcg_constant_i64(1 << a->scale);
+    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
+        /* Load and replicate to all elements */
+        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+        tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
+        tcg_gen_gvec_dup_i64(a->scale, vec_full_reg_offset(s, rt),
+                             (a->q + 1) * 8, vec_full_reg_size(s), tcg_tmp);
+        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
+    }
+
+    if (a->p) {
+        if (a->rm == 31) {
+            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
+        } else {
+            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
+        }
+    }
+    return true;
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn)
 static void disas_ldst(DisasContext *s, uint32_t insn)
 {
     switch (extract32(insn, 24, 6)) {
-    case 0x0d: /* AdvSIMD load/store single structure */
-        disas_ldst_single_struct(s, insn);
-        break;
     case 0x19:
         if (extract32(insn, 21, 1) != 0) {
             disas_ldst_tag(s, insn);
--
2.34.1

The MVE extension to v8.1M includes some new shift instructions which
sit entirely within the non-coprocessor part of the encoding space
and which operate only on general-purpose registers. They take up
the space which was previously UNPREDICTABLE MOVS and ORRS encodings
with Rm == 13 or 15.

Implement the long shifts by immediate, which perform shifts on a
pair of general-purpose registers treated as a 64-bit quantity, with
an immediate shift count between 1 and 32.

Awkwardly, because the MOVS and ORRS trans functions do not UNDEF for
the Rm==13,15 case, we need to explicitly emit code to UNDEF for the
cases where v8.1M now requires that. (Trying to change MOVS and ORRS
is too difficult, because the functions that generate the code are
shared between a dozen different kinds of arithmetic or logical
instruction for all A32, T16 and T32 encodings, and for some insns
and some encodings Rm==13,15 are valid.)

We make the helper functions we need for UQSHLL and SQSHLL take
a 32-bit value which the helper casts to int8_t because we'll need
these helpers also for the shift-by-register insns, where the shift
count might be < 0 or > 32.
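
(Illustration only -- a plain C model of the register-pair semantics,
not the TCG code added below. RdaHi:RdaLo form one 64-bit value which
is shifted by an immediate count of 1..32, an encoded count of 0
meaning 32:

    uint64_t rda = ((uint64_t)rdahi << 32) | rdalo;
    rda <<= shim;                  /* LSLL; LSRL/ASRL shift right */
    rdalo = (uint32_t)rda;
    rdahi = (uint32_t)(rda >> 32);

UQSHLL/SQSHLL are the saturating variants, setting QF when bits would
be lost; their helpers take the count as an int8_t precisely so the
later shift-by-register insns can pass negative counts, meaning
"shift right".)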

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-16-peter.maydell@linaro.org
---
 target/arm/helper-mve.h |  3 ++
 target/arm/translate.h  |  1 +
 target/arm/t32.decode   | 28 +++++++++++++
 target/arm/mve_helper.c | 10 +++++
 target/arm/translate.c  | 90 +++++++++++++++++++++++++++++++++++++++++
 5 files changed, 132 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
 typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
+typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
 
 /**
  * arm_tbflags_from_tb:
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -XXX,XX +XXX,XX @@
 &mcr          !extern cp opc1 crn crm opc2 rt
 &mcrr         !extern cp opc1 crm rt rt2
 
+&mve_shl_ri      rdalo rdahi shim
+
+# rdahi: bits [3:1] from insn, bit 0 is 1
+# rdalo: bits [3:1] from insn, bit 0 is 0
+%rdahi_9 9:3 !function=times_2_plus_1
+%rdalo_17 17:3 !function=times_2
+
 # Data-processing (register)
 
 %imm5_12_6 12:3 6:2
@@ -XXX,XX +XXX,XX @@
 @S_xrr_shi ....... .... . rn:4 .... .... .. shty:2 rm:4 \
            &s_rrr_shi shim=%imm5_12_6 s=1 rd=0
 
+@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
+            &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
+
 {
   TST_xrri   1110101 0000 1 .... 0 ... 1111 .... ....     @S_xrr_shi
   AND_rrri   1110101 0000 . .... 0 ... .... .... ....     @s_rrr_shi
 }
 BIC_rrri     1110101 0001 . .... 0 ... .... .... ....     @s_rrr_shi
 {
+  # The v8.1M MVE shift insns overlap in encoding with MOVS/ORRS
+  # and are distinguished by having Rm==13 or 15. Those are UNPREDICTABLE
+  # cases for MOVS/ORRS. We decode the MVE cases first, ensuring that
+  # they explicitly call unallocated_encoding() for cases that must UNDEF
+  # (eg "using a new shift insn on a v8.1M CPU without MVE"), and letting
+  # the rest fall through (where ORR_rrri and MOV_rxri will end up
+  # handling them as r13 and r15 accesses with the same semantics as A32).
+  [
+   LSLL_ri    1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111  @mve_shl_ri
+   LSRL_ri    1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111  @mve_shl_ri
+   ASRL_ri    1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111  @mve_shl_ri
+
+   UQSHLL_ri  1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111  @mve_shl_ri
+   URSHRL_ri  1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111  @mve_shl_ri
+   SRSHRL_ri  1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111  @mve_shl_ri
+   SQSHLL_ri  1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111  @mve_shl_ri
+  ]
+
   MOV_rxri   1110101 0010 . 1111 0 ... .... .... ....     @s_rxr_shi
   ORR_rrri   1110101 0010 . .... 0 ... .... .... ....     @s_rrr_shi
 }
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
     mve_advance_vpt(env);
     return rdm;
 }
+
+uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
+}
+
+uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
     return true;
 }
 
+/*
+ * v8.1M MVE wide-shifts
+ */
+static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
+                          WideShiftImmFn *fn)
+{
+    TCGv_i64 rda;
+    TCGv_i32 rdalo, rdahi;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (a->rdahi == 15) {
+        /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rdahi == 13) {
+        /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    if (a->shim == 0) {
+        a->shim = 32;
+    }
+
+    rda = tcg_temp_new_i64();
+    rdalo = load_reg(s, a->rdalo);
+    rdahi = load_reg(s, a->rdahi);
+    tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+
+    fn(rda, rda, a->shim);
+
+    tcg_gen_extrl_i64_i32(rdalo, rda);
+    tcg_gen_extrh_i64_i32(rdahi, rda);
+    store_reg(s, a->rdalo, rdalo);
+    store_reg(s, a->rdahi, rdahi);
+    tcg_temp_free_i64(rda);
+
+    return true;
+}
+
+static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
+}
+
+static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
+}
+
+static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
+}
+
+static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
+{
+    gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_mve_sqshll);
+}
+
+static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
+{
+    gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_mve_uqshll);
+}
+
+static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_srshr64_i64);
+}
+
+static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_urshr64_i64);
+}
+
 /*
  * Multiply and multiply accumulate
  */
--
2.20.1

Convert the instructions in the load/store memory tags instruction
group to decodetree.
--
462
--
230
2.20.1
463
2.34.1
231
232
diff view generated by jsdifflib
1
From: Maxim Uvarov <maxim.uvarov@linaro.org>
1
In commit 2c5fa0778c3b430 we fixed an endianness bug in the Allwinner
2
A10 PIC model; however in the process we introduced a regression.
3
This is because the old code was robust against the incoming 'level'
4
argument being something other than 0 or 1, whereas the new code was
5
not.
2
6
3
qemu has 2 type of functions: shutdown and reboot. Shutdown
7
In particular, the allwinner-sdhost code treats its IRQ line
4
function has to be used for machine shutdown. Otherwise we cause
8
as 0-vs-non-0 rather than 0-vs-1, so when the SD controller
5
a reset with a bogus "cause" value, when we intended a shutdown.
9
set its IRQ line for any reason other than transmit the
10
interrupt controller would ignore it. The observed effect
11
was a guest timeout when rebooting the guest kernel.
6
12
7
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
13
Handle level values other than 0 or 1, to restore the old
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
behaviour.
9
Message-id: 20210625111842.3790-3-maxim.uvarov@linaro.org
15
10
[PMM: tweaked commit message]
16
Fixes: 2c5fa0778c3b430 ("hw/intc/allwinner-a10-pic: Don't use set_bit()/clear_bit()")
17
Cc: qemu-stable@nongnu.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
20
Tested-by: Guenter Roeck <linux@roeck-us.net>
21
Message-id: 20230606104609.3692557-2-peter.maydell@linaro.org
12
---
22
---
13
hw/gpio/gpio_pwr.c | 2 +-
23
hw/intc/allwinner-a10-pic.c | 2 +-
14
1 file changed, 1 insertion(+), 1 deletion(-)
24
1 file changed, 1 insertion(+), 1 deletion(-)
15
25
16
diff --git a/hw/gpio/gpio_pwr.c b/hw/gpio/gpio_pwr.c
26
diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c
17
index XXXXXXX..XXXXXXX 100644
27
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/gpio/gpio_pwr.c
28
--- a/hw/intc/allwinner-a10-pic.c
19
+++ b/hw/gpio/gpio_pwr.c
29
+++ b/hw/intc/allwinner-a10-pic.c
20
@@ -XXX,XX +XXX,XX @@ static void gpio_pwr_reset(void *opaque, int n, int level)
30
@@ -XXX,XX +XXX,XX @@ static void aw_a10_pic_set_irq(void *opaque, int irq, int level)
21
static void gpio_pwr_shutdown(void *opaque, int n, int level)
31
AwA10PICState *s = opaque;
22
{
32
uint32_t *pending_reg = &s->irq_pending[irq / 32];
23
if (level) {
33
24
- qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
34
- *pending_reg = deposit32(*pending_reg, irq % 32, 1, level);
25
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
35
+ *pending_reg = deposit32(*pending_reg, irq % 32, 1, !!level);
26
}
36
aw_a10_pic_update(s);
27
}
37
}
28
38
29
--
39
--
30
2.20.1
40
2.34.1
31
41
32
42
diff view generated by jsdifflib
1
Use dup_const() instead of bitfield_replicate() in
1
QEMU allows qemu_irq lines to transfer arbitrary integers. However
2
disas_simd_mod_imm().
2
the convention is that for a simple IRQ line the values transferred
3
are always 0 and 1. The A10 SD controller device instead assumes a
4
0-vs-non-0 convention, which happens to work with the interrupt
5
controller it is wired up to.
3
6
4
(We can't replace the other use of bitfield_replicate() in this file,
7
Coerce the value to boolean to follow our usual convention.
5
in logic_imm_decode_wmask(), because that location needs to handle 2
6
and 4 bit elements, which dup_const() cannot.)
7
8
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Message-id: 20210628135835.6690-6-peter.maydell@linaro.org
11
Tested-by: Guenter Roeck <linux@roeck-us.net>
12
Message-id: 20230606104609.3692557-3-peter.maydell@linaro.org
11
---
13
---
12
target/arm/translate-a64.c | 2 +-
14
hw/sd/allwinner-sdhost.c | 2 +-
13
1 file changed, 1 insertion(+), 1 deletion(-)
15
1 file changed, 1 insertion(+), 1 deletion(-)
14
16
15
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
17
diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-a64.c
19
--- a/hw/sd/allwinner-sdhost.c
18
+++ b/target/arm/translate-a64.c
20
+++ b/hw/sd/allwinner-sdhost.c
19
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
21
@@ -XXX,XX +XXX,XX @@ static void allwinner_sdhost_update_irq(AwSdHostState *s)
20
/* FMOV (vector, immediate) - half-precision */
21
imm = vfp_expand_imm(MO_16, abcdefgh);
22
/* now duplicate across the lanes */
23
- imm = bitfield_replicate(imm, 16);
24
+ imm = dup_const(MO_16, imm);
25
} else {
26
imm = asimd_imm_const(abcdefgh, cmode, is_neg);
27
}
22
}
23
24
trace_allwinner_sdhost_update_irq(irq);
25
- qemu_set_irq(s->irq, irq);
26
+ qemu_set_irq(s->irq, !!irq);
27
}
28
29
static void allwinner_sdhost_update_transfer_cnt(AwSdHostState *s,
28
--
30
--
29
2.20.1
31
2.34.1
30
32
31
33
diff view generated by jsdifflib
New patch
1
The nrf51_timer has a free-running counter which we implement using
2
the pattern of using two fields (update_counter_ns, counter) to track
3
the last point at which we calculated the counter value, and the
4
counter value at that time. Then we can find the current counter
5
value by converting the difference in wall-clock time between then
6
and now to a tick count that we need to add to the counter value.
1
7
8
Unfortunately the nrf51_timer's implementation of this has a bug
9
which means it loses time every time update_counter() is called.
10
After updating s->counter it always sets s->update_counter_ns to
11
'now', even though the actual point when s->counter hit the new value
12
will be some point in the past (half a tick, say). In the worst case
13
(guest code in a tight loop reading the counter, icount mode) the
14
counter is continually queried less than a tick after it was last
15
read, so s->counter never advances but s->update_counter_ns does, and
16
the guest never makes forward progress.
17
18
The fix for this is to only advance update_counter_ns to the
19
timestamp of the last tick, not all the way to 'now'. (This is the
20
pattern used in hw/misc/mps2-fpgaio.c's counter.)
21
22
Cc: qemu-stable@nongnu.org
23
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
24
Reviewed-by: Joel Stanley <joel@jms.id.au>
25
Message-id: 20230606134917.3782215-1-peter.maydell@linaro.org
26
---
27
hw/timer/nrf51_timer.c | 7 ++++++-
28
1 file changed, 6 insertions(+), 1 deletion(-)
29
30
diff --git a/hw/timer/nrf51_timer.c b/hw/timer/nrf51_timer.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/hw/timer/nrf51_timer.c
33
+++ b/hw/timer/nrf51_timer.c
34
@@ -XXX,XX +XXX,XX @@ static uint32_t update_counter(NRF51TimerState *s, int64_t now)
35
uint32_t ticks = ns_to_ticks(s, now - s->update_counter_ns);
36
37
s->counter = (s->counter + ticks) % BIT(bitwidths[s->bitmode]);
38
- s->update_counter_ns = now;
39
+ /*
40
+ * Only advance the sync time to the timestamp of the last tick,
41
+ * not all the way to 'now', so we don't lose time if we do
42
+ * multiple resyncs in a single tick.
43
+ */
44
+ s->update_counter_ns += ticks_to_ns(s, ticks);
45
return ticks;
46
}
47
48
--
49
2.34.1
diff view generated by jsdifflib
New patch
1
From: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
1
2
3
Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
4
Reviewed-by: Thomas Huth <thuth@redhat.com>
5
Message-id: 20230607092112.655098-1-marcin.juszkiewicz@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
hw/arm/Kconfig | 1 +
9
1 file changed, 1 insertion(+)
10
11
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
12
index XXXXXXX..XXXXXXX 100644
13
--- a/hw/arm/Kconfig
14
+++ b/hw/arm/Kconfig
15
@@ -XXX,XX +XXX,XX @@ config SBSA_REF
16
select PL061 # GPIO
17
select USB_EHCI_SYSBUS
18
select WDT_SBSA
19
+ select BOCHS_DISPLAY
20
21
config SABRELITE
22
bool
23
--
24
2.34.1
diff view generated by jsdifflib
1
From: Patrick Venture <venture@google.com>
1
From: Martin Kaiser <martin@kaiser.cx>
2
2
3
Adds a line-item reference to the supported quanta-q71l-bmc aspeed
3
The Linux kernel added a flood check for RX data recently in commit
4
entry.
4
496a4471b7c3 ("serial: imx: work-around for hardware RX flood"). This
5
check uses the wake bit in the UART status register 2. The wake bit
6
indicates that the receiver detected a start bit on the RX line. If the
7
kernel sees a number of RX interrupts without the wake bit being set, it
8
treats this as spurious data and resets the UART port. imx_serial does
9
never set the wake bit and triggers the kernel's flood check.
5
10
6
Signed-off-by: Patrick Venture <venture@google.com>
11
This patch adds support for the wake bit. wake is set when we receive a
7
Reviewed-by: Cédric Le Goater <clg@kaod.org>
12
new character (it's not set for break events). It seems that wake is
8
Message-id: 20210615192848.1065297-2-venture@google.com
13
cleared by the kernel driver, the hardware does not have to clear it
14
automatically after data was read.
15
16
The wake bit can be configured as an interrupt source. Support this
17
mechanism as well.
18
19
Co-developed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
20
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
21
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
22
Signed-off-by: Martin Kaiser <martin@kaiser.cx>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
23
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
24
---
11
docs/system/arm/aspeed.rst | 1 +
25
include/hw/char/imx_serial.h | 1 +
12
1 file changed, 1 insertion(+)
26
hw/char/imx_serial.c | 5 ++++-
27
2 files changed, 5 insertions(+), 1 deletion(-)
13
28
14
diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst
29
diff --git a/include/hw/char/imx_serial.h b/include/hw/char/imx_serial.h
15
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
16
--- a/docs/system/arm/aspeed.rst
31
--- a/include/hw/char/imx_serial.h
17
+++ b/docs/system/arm/aspeed.rst
32
+++ b/include/hw/char/imx_serial.h
18
@@ -XXX,XX +XXX,XX @@ etc.
33
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(IMXSerialState, IMX_SERIAL)
19
AST2400 SoC based machines :
34
20
35
#define UCR4_DREN BIT(0) /* Receive Data Ready interrupt enable */
21
- ``palmetto-bmc`` OpenPOWER Palmetto POWER8 BMC
36
#define UCR4_TCEN BIT(3) /* TX complete interrupt enable */
22
+- ``quanta-q71l-bmc`` OpenBMC Quanta BMC
37
+#define UCR4_WKEN BIT(7) /* WAKE interrupt enable */
23
38
24
AST2500 SoC based machines :
39
#define UTS1_TXEMPTY (1<<6)
40
#define UTS1_RXEMPTY (1<<5)
41
diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/hw/char/imx_serial.c
44
+++ b/hw/char/imx_serial.c
45
@@ -XXX,XX +XXX,XX @@ static void imx_update(IMXSerialState *s)
46
* TCEN and TXDC are both bit 3
47
* RDR and DREN are both bit 0
48
*/
49
- mask |= s->ucr4 & (UCR4_TCEN | UCR4_DREN);
50
+ mask |= s->ucr4 & (UCR4_WKEN | UCR4_TCEN | UCR4_DREN);
51
52
usr2 = s->usr2 & mask;
53
54
@@ -XXX,XX +XXX,XX @@ static void imx_put_data(void *opaque, uint32_t value)
55
56
static void imx_receive(void *opaque, const uint8_t *buf, int size)
57
{
58
+ IMXSerialState *s = (IMXSerialState *)opaque;
59
+
60
+ s->usr2 |= USR2_WAKE;
61
imx_put_data(opaque, *buf);
62
}
25
63
26
--
64
--
27
2.20.1
65
2.34.1
28
66
29
67
diff view generated by jsdifflib
New patch
1
From: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
1
2
3
We plan to add more hardware information into DeviceTree to limit amount
4
of hardcoded values in firmware.
5
6
Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
7
Message-id: 20230531171834.236569-1-marcin.juszkiewicz@linaro.org
8
[PMM: fix format nits, add text about platform version fields from
9
a comment in the C source file]
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
docs/system/arm/sbsa.rst | 38 +++++++++++++++++++++++++++++++-------
14
1 file changed, 31 insertions(+), 7 deletions(-)
15
16
diff --git a/docs/system/arm/sbsa.rst b/docs/system/arm/sbsa.rst
17
index XXXXXXX..XXXXXXX 100644
18
--- a/docs/system/arm/sbsa.rst
19
+++ b/docs/system/arm/sbsa.rst
20
@@ -XXX,XX +XXX,XX @@ any real hardware the ``sbsa-ref`` board intends to look like real
21
hardware. The `Server Base System Architecture
22
<https://developer.arm.com/documentation/den0029/latest>`_ defines a
23
minimum base line of hardware support and importantly how the firmware
24
-reports that to any operating system. It is a static system that
25
-reports a very minimal DT to the firmware for non-discoverable
26
-information about components affected by the qemu command line (i.e.
27
-cpus and memory). As a result it must have a firmware specifically
28
-built to expect a certain hardware layout (as you would in a real
29
-machine).
30
+reports that to any operating system.
31
32
It is intended to be a machine for developing firmware and testing
33
standards compliance with operating systems.
34
@@ -XXX,XX +XXX,XX @@ standards compliance with operating systems.
35
Supported devices
36
"""""""""""""""""
37
38
-The sbsa-ref board supports:
39
+The ``sbsa-ref`` board supports:
40
41
- A configurable number of AArch64 CPUs
42
- GIC version 3
43
@@ -XXX,XX +XXX,XX @@ The sbsa-ref board supports:
44
- Bochs display adapter on PCIe bus
45
- A generic SBSA watchdog device
46
47
+
48
+Board to firmware interface
49
+"""""""""""""""""""""""""""
50
+
51
+``sbsa-ref`` is a static system that reports a very minimal devicetree to the
52
+firmware for non-discoverable information about system components. This
53
+includes both internal hardware and parts affected by the qemu command line
54
+(i.e. CPUs and memory). As a result it must have a firmware specifically built
55
+to expect a certain hardware layout (as you would in a real machine).
56
+
57
+DeviceTree information
58
+''''''''''''''''''''''
59
+
60
+The devicetree provided by the board model to the firmware is not intended
61
+to be a complete compliant DT. It currently reports:
62
+
63
+ - CPUs
64
+ - memory
65
+ - platform version
66
+ - GIC addresses
67
+
68
+The platform version is only for informing platform firmware about
69
+what kind of ``sbsa-ref`` board it is running on. It is neither
70
+a QEMU versioned machine type nor a reflection of the level of the
71
+SBSA/SystemReady SR support provided.
72
+
73
+The ``machine-version-major`` value is updated when changes breaking
74
+fw compatibility are introduced. The ``machine-version-minor`` value
75
+is updated when features are added that don't break fw compatibility.
76
--
77
2.34.1
diff view generated by jsdifflib
1
From: Nolan Leake <nolan@sigbus.net>
1
From: Sergey Kambalin <sergey.kambalin@auriga.com>
2
2
3
This is just enough to make reboot and poweroff work. Works for
3
Signed-off-by: Sergey Kambalin <sergey.kambalin@auriga.com>
4
linux, u-boot, and the arm trusted firmware. Not tested, but should
4
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
work for plan9, and bare-metal/hobby OSes, since they seem to generally
5
Acked-by: Richard Henderson <richard.henderson@linaro.org>
6
do what linux does for reset.
6
Message-id: 20230612223456.33824-2-philmd@linaro.org
7
7
Message-Id: <20230531155258.8361-1-sergey.kambalin@auriga.com>
8
The watchdog timer functionality is not yet implemented.
8
[PMD: Split from bigger patch: 1/4]
9
9
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/64
11
Signed-off-by: Nolan Leake <nolan@sigbus.net>
12
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
13
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
14
Message-id: 20210625210209.1870217-1-nolan@sigbus.net
15
[PMM: tweaked commit title; fixed region size to 0x200;
16
moved header file to include/]
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
11
---
19
include/hw/arm/bcm2835_peripherals.h | 3 +-
12
include/hw/misc/raspberrypi-fw-defs.h | 163 ++++++++++++++++++++++++++
20
include/hw/misc/bcm2835_powermgt.h | 29 +++++
13
1 file changed, 163 insertions(+)
21
hw/arm/bcm2835_peripherals.c | 13 ++-
14
create mode 100644 include/hw/misc/raspberrypi-fw-defs.h
22
hw/misc/bcm2835_powermgt.c | 160 +++++++++++++++++++++++++++
23
hw/misc/meson.build | 1 +
24
5 files changed, 204 insertions(+), 2 deletions(-)
25
create mode 100644 include/hw/misc/bcm2835_powermgt.h
26
create mode 100644 hw/misc/bcm2835_powermgt.c
27
15
28
diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
16
diff --git a/include/hw/misc/raspberrypi-fw-defs.h b/include/hw/misc/raspberrypi-fw-defs.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/include/hw/arm/bcm2835_peripherals.h
31
+++ b/include/hw/arm/bcm2835_peripherals.h
32
@@ -XXX,XX +XXX,XX @@
33
#include "hw/misc/bcm2835_mphi.h"
34
#include "hw/misc/bcm2835_thermal.h"
35
#include "hw/misc/bcm2835_cprman.h"
36
+#include "hw/misc/bcm2835_powermgt.h"
37
#include "hw/sd/sdhci.h"
38
#include "hw/sd/bcm2835_sdhost.h"
39
#include "hw/gpio/bcm2835_gpio.h"
40
@@ -XXX,XX +XXX,XX @@ struct BCM2835PeripheralState {
41
BCM2835MphiState mphi;
42
UnimplementedDeviceState txp;
43
UnimplementedDeviceState armtmr;
44
- UnimplementedDeviceState powermgt;
45
+ BCM2835PowerMgtState powermgt;
46
BCM2835CprmanState cprman;
47
PL011State uart0;
48
BCM2835AuxState aux;
49
diff --git a/include/hw/misc/bcm2835_powermgt.h b/include/hw/misc/bcm2835_powermgt.h
50
new file mode 100644
17
new file mode 100644
51
index XXXXXXX..XXXXXXX
18
index XXXXXXX..XXXXXXX
52
--- /dev/null
19
--- /dev/null
53
+++ b/include/hw/misc/bcm2835_powermgt.h
20
+++ b/include/hw/misc/raspberrypi-fw-defs.h
54
@@ -XXX,XX +XXX,XX @@
21
@@ -XXX,XX +XXX,XX @@
55
+/*
22
+/*
56
+ * BCM2835 Power Management emulation
23
+ * Raspberry Pi firmware definitions
57
+ *
24
+ *
58
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
25
+ * Copyright (C) 2022 Auriga LLC, based on Linux kernel
59
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
26
+ * `include/soc/bcm2835/raspberrypi-firmware.h` (Copyright © 2015 Broadcom)
60
+ *
27
+ *
61
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
28
+ * SPDX-License-Identifier: GPL-2.0-or-later
62
+ * See the COPYING file in the top-level directory.
63
+ */
29
+ */
64
+
30
+
65
+#ifndef BCM2835_POWERMGT_H
31
+#ifndef INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_
66
+#define BCM2835_POWERMGT_H
32
+#define INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_
67
+
33
+
68
+#include "hw/sysbus.h"
34
+#include "qemu/osdep.h"
69
+#include "qom/object.h"
70
+
35
+
71
+#define TYPE_BCM2835_POWERMGT "bcm2835-powermgt"
36
+enum rpi_firmware_property_tag {
72
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PowerMgtState, BCM2835_POWERMGT)
37
+ RPI_FWREQ_PROPERTY_END = 0,
38
+ RPI_FWREQ_GET_FIRMWARE_REVISION = 0x00000001,
39
+ RPI_FWREQ_GET_FIRMWARE_VARIANT = 0x00000002,
40
+ RPI_FWREQ_GET_FIRMWARE_HASH = 0x00000003,
73
+
41
+
74
+struct BCM2835PowerMgtState {
42
+ RPI_FWREQ_SET_CURSOR_INFO = 0x00008010,
75
+ SysBusDevice busdev;
43
+ RPI_FWREQ_SET_CURSOR_STATE = 0x00008011,
76
+ MemoryRegion iomem;
77
+
44
+
78
+ uint32_t rstc;
45
+ RPI_FWREQ_GET_BOARD_MODEL = 0x00010001,
79
+ uint32_t rsts;
46
+ RPI_FWREQ_GET_BOARD_REVISION = 0x00010002,
80
+ uint32_t wdog;
47
+ RPI_FWREQ_GET_BOARD_MAC_ADDRESS = 0x00010003,
48
+ RPI_FWREQ_GET_BOARD_SERIAL = 0x00010004,
49
+ RPI_FWREQ_GET_ARM_MEMORY = 0x00010005,
50
+ RPI_FWREQ_GET_VC_MEMORY = 0x00010006,
51
+ RPI_FWREQ_GET_CLOCKS = 0x00010007,
52
+ RPI_FWREQ_GET_POWER_STATE = 0x00020001,
53
+ RPI_FWREQ_GET_TIMING = 0x00020002,
54
+ RPI_FWREQ_SET_POWER_STATE = 0x00028001,
55
+ RPI_FWREQ_GET_CLOCK_STATE = 0x00030001,
56
+ RPI_FWREQ_GET_CLOCK_RATE = 0x00030002,
57
+ RPI_FWREQ_GET_VOLTAGE = 0x00030003,
58
+ RPI_FWREQ_GET_MAX_CLOCK_RATE = 0x00030004,
59
+ RPI_FWREQ_GET_MAX_VOLTAGE = 0x00030005,
60
+ RPI_FWREQ_GET_TEMPERATURE = 0x00030006,
61
+ RPI_FWREQ_GET_MIN_CLOCK_RATE = 0x00030007,
62
+ RPI_FWREQ_GET_MIN_VOLTAGE = 0x00030008,
63
+ RPI_FWREQ_GET_TURBO = 0x00030009,
64
+ RPI_FWREQ_GET_MAX_TEMPERATURE = 0x0003000a,
65
+ RPI_FWREQ_GET_STC = 0x0003000b,
66
+ RPI_FWREQ_ALLOCATE_MEMORY = 0x0003000c,
67
+ RPI_FWREQ_LOCK_MEMORY = 0x0003000d,
68
+ RPI_FWREQ_UNLOCK_MEMORY = 0x0003000e,
69
+ RPI_FWREQ_RELEASE_MEMORY = 0x0003000f,
70
+ RPI_FWREQ_EXECUTE_CODE = 0x00030010,
71
+ RPI_FWREQ_EXECUTE_QPU = 0x00030011,
72
+ RPI_FWREQ_SET_ENABLE_QPU = 0x00030012,
73
+ RPI_FWREQ_GET_DISPMANX_RESOURCE_MEM_HANDLE = 0x00030014,
74
+ RPI_FWREQ_GET_EDID_BLOCK = 0x00030020,
75
+ RPI_FWREQ_GET_CUSTOMER_OTP = 0x00030021,
76
+ RPI_FWREQ_GET_EDID_BLOCK_DISPLAY = 0x00030023,
77
+ RPI_FWREQ_GET_DOMAIN_STATE = 0x00030030,
78
+ RPI_FWREQ_GET_THROTTLED = 0x00030046,
79
+ RPI_FWREQ_GET_CLOCK_MEASURED = 0x00030047,
80
+ RPI_FWREQ_NOTIFY_REBOOT = 0x00030048,
81
+ RPI_FWREQ_SET_CLOCK_STATE = 0x00038001,
82
+ RPI_FWREQ_SET_CLOCK_RATE = 0x00038002,
83
+ RPI_FWREQ_SET_VOLTAGE = 0x00038003,
84
+ RPI_FWREQ_SET_MAX_CLOCK_RATE = 0x00038004,
85
+ RPI_FWREQ_SET_MIN_CLOCK_RATE = 0x00038007,
86
+ RPI_FWREQ_SET_TURBO = 0x00038009,
87
+ RPI_FWREQ_SET_CUSTOMER_OTP = 0x00038021,
88
+ RPI_FWREQ_SET_DOMAIN_STATE = 0x00038030,
89
+ RPI_FWREQ_GET_GPIO_STATE = 0x00030041,
90
+ RPI_FWREQ_SET_GPIO_STATE = 0x00038041,
91
+ RPI_FWREQ_SET_SDHOST_CLOCK = 0x00038042,
92
+ RPI_FWREQ_GET_GPIO_CONFIG = 0x00030043,
93
+ RPI_FWREQ_SET_GPIO_CONFIG = 0x00038043,
94
+ RPI_FWREQ_GET_PERIPH_REG = 0x00030045,
95
+ RPI_FWREQ_SET_PERIPH_REG = 0x00038045,
96
+ RPI_FWREQ_GET_POE_HAT_VAL = 0x00030049,
97
+ RPI_FWREQ_SET_POE_HAT_VAL = 0x00038049,
98
+ RPI_FWREQ_SET_POE_HAT_VAL_OLD = 0x00030050,
99
+ RPI_FWREQ_NOTIFY_XHCI_RESET = 0x00030058,
100
+ RPI_FWREQ_GET_REBOOT_FLAGS = 0x00030064,
101
+ RPI_FWREQ_SET_REBOOT_FLAGS = 0x00038064,
102
+ RPI_FWREQ_NOTIFY_DISPLAY_DONE = 0x00030066,
103
+
104
+ /* Dispmanx TAGS */
105
+ RPI_FWREQ_FRAMEBUFFER_ALLOCATE = 0x00040001,
106
+ RPI_FWREQ_FRAMEBUFFER_BLANK = 0x00040002,
107
+ RPI_FWREQ_FRAMEBUFFER_GET_PHYSICAL_WIDTH_HEIGHT = 0x00040003,
108
+ RPI_FWREQ_FRAMEBUFFER_GET_VIRTUAL_WIDTH_HEIGHT = 0x00040004,
109
+ RPI_FWREQ_FRAMEBUFFER_GET_DEPTH = 0x00040005,
110
+ RPI_FWREQ_FRAMEBUFFER_GET_PIXEL_ORDER = 0x00040006,
111
+ RPI_FWREQ_FRAMEBUFFER_GET_ALPHA_MODE = 0x00040007,
112
+ RPI_FWREQ_FRAMEBUFFER_GET_PITCH = 0x00040008,
113
+ RPI_FWREQ_FRAMEBUFFER_GET_VIRTUAL_OFFSET = 0x00040009,
114
+ RPI_FWREQ_FRAMEBUFFER_GET_OVERSCAN = 0x0004000a,
115
+ RPI_FWREQ_FRAMEBUFFER_GET_PALETTE = 0x0004000b,
116
+ RPI_FWREQ_FRAMEBUFFER_GET_LAYER = 0x0004000c,
117
+ RPI_FWREQ_FRAMEBUFFER_GET_TRANSFORM = 0x0004000d,
118
+ RPI_FWREQ_FRAMEBUFFER_GET_VSYNC = 0x0004000e,
119
+ RPI_FWREQ_FRAMEBUFFER_GET_TOUCHBUF = 0x0004000f,
120
+ RPI_FWREQ_FRAMEBUFFER_GET_GPIOVIRTBUF = 0x00040010,
121
+ RPI_FWREQ_FRAMEBUFFER_RELEASE = 0x00048001,
122
+ RPI_FWREQ_FRAMEBUFFER_GET_DISPLAY_ID = 0x00040016,
123
+ RPI_FWREQ_FRAMEBUFFER_SET_DISPLAY_NUM = 0x00048013,
124
+ RPI_FWREQ_FRAMEBUFFER_GET_NUM_DISPLAYS = 0x00040013,
125
+ RPI_FWREQ_FRAMEBUFFER_GET_DISPLAY_SETTINGS = 0x00040014,
126
+ RPI_FWREQ_FRAMEBUFFER_TEST_PHYSICAL_WIDTH_HEIGHT = 0x00044003,
127
+ RPI_FWREQ_FRAMEBUFFER_TEST_VIRTUAL_WIDTH_HEIGHT = 0x00044004,
128
+ RPI_FWREQ_FRAMEBUFFER_TEST_DEPTH = 0x00044005,
129
+ RPI_FWREQ_FRAMEBUFFER_TEST_PIXEL_ORDER = 0x00044006,
130
+ RPI_FWREQ_FRAMEBUFFER_TEST_ALPHA_MODE = 0x00044007,
131
+ RPI_FWREQ_FRAMEBUFFER_TEST_VIRTUAL_OFFSET = 0x00044009,
132
+ RPI_FWREQ_FRAMEBUFFER_TEST_OVERSCAN = 0x0004400a,
133
+ RPI_FWREQ_FRAMEBUFFER_TEST_PALETTE = 0x0004400b,
134
+ RPI_FWREQ_FRAMEBUFFER_TEST_LAYER = 0x0004400c,
135
+ RPI_FWREQ_FRAMEBUFFER_TEST_TRANSFORM = 0x0004400d,
136
+ RPI_FWREQ_FRAMEBUFFER_TEST_VSYNC = 0x0004400e,
137
+ RPI_FWREQ_FRAMEBUFFER_SET_PHYSICAL_WIDTH_HEIGHT = 0x00048003,
138
+ RPI_FWREQ_FRAMEBUFFER_SET_VIRTUAL_WIDTH_HEIGHT = 0x00048004,
139
+ RPI_FWREQ_FRAMEBUFFER_SET_DEPTH = 0x00048005,
140
+ RPI_FWREQ_FRAMEBUFFER_SET_PIXEL_ORDER = 0x00048006,
141
+ RPI_FWREQ_FRAMEBUFFER_SET_ALPHA_MODE = 0x00048007,
142
+ RPI_FWREQ_FRAMEBUFFER_SET_PITCH = 0x00048008,
143
+ RPI_FWREQ_FRAMEBUFFER_SET_VIRTUAL_OFFSET = 0x00048009,
144
+ RPI_FWREQ_FRAMEBUFFER_SET_OVERSCAN = 0x0004800a,
145
+ RPI_FWREQ_FRAMEBUFFER_SET_PALETTE = 0x0004800b,
146
+
147
+ RPI_FWREQ_FRAMEBUFFER_SET_TOUCHBUF = 0x0004801f,
148
+ RPI_FWREQ_FRAMEBUFFER_SET_GPIOVIRTBUF = 0x00048020,
149
+ RPI_FWREQ_FRAMEBUFFER_SET_VSYNC = 0x0004800e,
150
+ RPI_FWREQ_FRAMEBUFFER_SET_LAYER = 0x0004800c,
151
+ RPI_FWREQ_FRAMEBUFFER_SET_TRANSFORM = 0x0004800d,
152
+ RPI_FWREQ_FRAMEBUFFER_SET_BACKLIGHT = 0x0004800f,
153
+
154
+ RPI_FWREQ_VCHIQ_INIT = 0x00048010,
155
+
156
+ RPI_FWREQ_SET_PLANE = 0x00048015,
157
+ RPI_FWREQ_GET_DISPLAY_TIMING = 0x00040017,
158
+ RPI_FWREQ_SET_TIMING = 0x00048017,
159
+ RPI_FWREQ_GET_DISPLAY_CFG = 0x00040018,
160
+ RPI_FWREQ_SET_DISPLAY_POWER = 0x00048019,
161
+ RPI_FWREQ_GET_COMMAND_LINE = 0x00050001,
162
+ RPI_FWREQ_GET_DMA_CHANNELS = 0x00060001,
81
+};
163
+};
82
+
164
+
83
+#endif
165
+enum rpi_firmware_clk_id {
84
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
166
+ RPI_FIRMWARE_EMMC_CLK_ID = 1,
85
index XXXXXXX..XXXXXXX 100644
167
+ RPI_FIRMWARE_UART_CLK_ID,
86
--- a/hw/arm/bcm2835_peripherals.c
168
+ RPI_FIRMWARE_ARM_CLK_ID,
87
+++ b/hw/arm/bcm2835_peripherals.c
169
+ RPI_FIRMWARE_CORE_CLK_ID,
88
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_init(Object *obj)
170
+ RPI_FIRMWARE_V3D_CLK_ID,
89
171
+ RPI_FIRMWARE_H264_CLK_ID,
90
object_property_add_const_link(OBJECT(&s->dwc2), "dma-mr",
172
+ RPI_FIRMWARE_ISP_CLK_ID,
91
OBJECT(&s->gpu_bus_mr));
173
+ RPI_FIRMWARE_SDRAM_CLK_ID,
92
+
174
+ RPI_FIRMWARE_PIXEL_CLK_ID,
93
+ /* Power Management */
175
+ RPI_FIRMWARE_PWM_CLK_ID,
94
+ object_initialize_child(obj, "powermgt", &s->powermgt,
176
+ RPI_FIRMWARE_HEVC_CLK_ID,
95
+ TYPE_BCM2835_POWERMGT);
177
+ RPI_FIRMWARE_EMMC2_CLK_ID,
96
}
178
+ RPI_FIRMWARE_M2MC_CLK_ID,
97
179
+ RPI_FIRMWARE_PIXEL_BVB_CLK_ID,
98
static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
180
+ RPI_FIRMWARE_VEC_CLK_ID,
99
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
181
+ RPI_FIRMWARE_NUM_CLK_ID,
100
qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
101
INTERRUPT_USB));
102
103
+ /* Power Management */
104
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->powermgt), errp)) {
105
+ return;
106
+ }
107
+
108
+ memory_region_add_subregion(&s->peri_mr, PM_OFFSET,
109
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->powermgt), 0));
110
+
111
create_unimp(s, &s->txp, "bcm2835-txp", TXP_OFFSET, 0x1000);
112
create_unimp(s, &s->armtmr, "bcm2835-sp804", ARMCTRL_TIMER0_1_OFFSET, 0x40);
113
- create_unimp(s, &s->powermgt, "bcm2835-powermgt", PM_OFFSET, 0x114);
114
create_unimp(s, &s->i2s, "bcm2835-i2s", I2S_OFFSET, 0x100);
115
create_unimp(s, &s->smi, "bcm2835-smi", SMI_OFFSET, 0x100);
116
create_unimp(s, &s->spi[0], "bcm2835-spi0", SPI0_OFFSET, 0x20);
117
diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c
118
new file mode 100644
119
index XXXXXXX..XXXXXXX
120
--- /dev/null
121
+++ b/hw/misc/bcm2835_powermgt.c
122
@@ -XXX,XX +XXX,XX @@
123
+/*
124
+ * BCM2835 Power Management emulation
125
+ *
126
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
127
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
128
+ *
129
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
130
+ * See the COPYING file in the top-level directory.
131
+ */
132
+
133
+#include "qemu/osdep.h"
134
+#include "qemu/log.h"
135
+#include "qemu/module.h"
136
+#include "hw/misc/bcm2835_powermgt.h"
137
+#include "migration/vmstate.h"
138
+#include "sysemu/runstate.h"
139
+
140
+#define PASSWORD 0x5a000000
141
+#define PASSWORD_MASK 0xff000000
142
+
143
+#define R_RSTC 0x1c
144
+#define V_RSTC_RESET 0x20
145
+#define R_RSTS 0x20
146
+#define V_RSTS_POWEROFF 0x555 /* Linux uses partition 63 to indicate halt. */
147
+#define R_WDOG 0x24
148
+
149
+static uint64_t bcm2835_powermgt_read(void *opaque, hwaddr offset,
150
+ unsigned size)
151
+{
152
+ BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
153
+ uint32_t res = 0;
154
+
155
+ switch (offset) {
156
+ case R_RSTC:
157
+ res = s->rstc;
158
+ break;
159
+ case R_RSTS:
160
+ res = s->rsts;
161
+ break;
162
+ case R_WDOG:
163
+ res = s->wdog;
164
+ break;
165
+
166
+ default:
167
+ qemu_log_mask(LOG_UNIMP,
168
+ "bcm2835_powermgt_read: Unknown offset 0x%08"HWADDR_PRIx
169
+ "\n", offset);
170
+ res = 0;
171
+ break;
172
+ }
173
+
174
+ return res;
175
+}
176
+
177
+static void bcm2835_powermgt_write(void *opaque, hwaddr offset,
178
+ uint64_t value, unsigned size)
179
+{
180
+ BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
181
+
182
+ if ((value & PASSWORD_MASK) != PASSWORD) {
183
+ qemu_log_mask(LOG_GUEST_ERROR,
184
+ "bcm2835_powermgt_write: Bad password 0x%"PRIx64
185
+ " at offset 0x%08"HWADDR_PRIx"\n",
186
+ value, offset);
187
+ return;
188
+ }
189
+
190
+ value = value & ~PASSWORD_MASK;
191
+
192
+ switch (offset) {
193
+ case R_RSTC:
194
+ s->rstc = value;
195
+ if (value & V_RSTC_RESET) {
196
+ if ((s->rsts & 0xfff) == V_RSTS_POWEROFF) {
197
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
198
+ } else {
199
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
200
+ }
201
+ }
202
+ break;
203
+ case R_RSTS:
204
+ qemu_log_mask(LOG_UNIMP,
205
+ "bcm2835_powermgt_write: RSTS\n");
206
+ s->rsts = value;
207
+ break;
208
+ case R_WDOG:
209
+ qemu_log_mask(LOG_UNIMP,
210
+ "bcm2835_powermgt_write: WDOG\n");
211
+ s->wdog = value;
212
+ break;
213
+
214
+ default:
215
+ qemu_log_mask(LOG_UNIMP,
216
+ "bcm2835_powermgt_write: Unknown offset 0x%08"HWADDR_PRIx
217
+ "\n", offset);
218
+ break;
219
+ }
220
+}
221
+
222
+static const MemoryRegionOps bcm2835_powermgt_ops = {
223
+ .read = bcm2835_powermgt_read,
224
+ .write = bcm2835_powermgt_write,
225
+ .endianness = DEVICE_NATIVE_ENDIAN,
226
+ .impl.min_access_size = 4,
227
+ .impl.max_access_size = 4,
228
+};
182
+};
229
+
183
+
230
+static const VMStateDescription vmstate_bcm2835_powermgt = {
184
+#endif /* INCLUDE_HW_MISC_RASPBERRYPI_FW_DEFS_H_ */
231
+ .name = TYPE_BCM2835_POWERMGT,
232
+ .version_id = 1,
233
+ .minimum_version_id = 1,
234
+ .fields = (VMStateField[]) {
235
+ VMSTATE_UINT32(rstc, BCM2835PowerMgtState),
236
+ VMSTATE_UINT32(rsts, BCM2835PowerMgtState),
237
+ VMSTATE_UINT32(wdog, BCM2835PowerMgtState),
238
+ VMSTATE_END_OF_LIST()
239
+ }
240
+};
241
+
242
+static void bcm2835_powermgt_init(Object *obj)
243
+{
244
+ BCM2835PowerMgtState *s = BCM2835_POWERMGT(obj);
245
+
246
+ memory_region_init_io(&s->iomem, obj, &bcm2835_powermgt_ops, s,
247
+ TYPE_BCM2835_POWERMGT, 0x200);
248
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
249
+}
250
+
251
+static void bcm2835_powermgt_reset(DeviceState *dev)
252
+{
253
+ BCM2835PowerMgtState *s = BCM2835_POWERMGT(dev);
254
+
255
+ /* https://elinux.org/BCM2835_registers#PM */
256
+ s->rstc = 0x00000102;
257
+ s->rsts = 0x00001000;
258
+ s->wdog = 0x00000000;
259
+}
260
+
261
+static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data)
262
+{
263
+ DeviceClass *dc = DEVICE_CLASS(klass);
264
+
265
+ dc->reset = bcm2835_powermgt_reset;
266
+ dc->vmsd = &vmstate_bcm2835_powermgt;
267
+}
268
+
269
+static TypeInfo bcm2835_powermgt_info = {
270
+ .name = TYPE_BCM2835_POWERMGT,
271
+ .parent = TYPE_SYS_BUS_DEVICE,
272
+ .instance_size = sizeof(BCM2835PowerMgtState),
273
+ .class_init = bcm2835_powermgt_class_init,
274
+ .instance_init = bcm2835_powermgt_init,
275
+};
276
+
277
+static void bcm2835_powermgt_register_types(void)
278
+{
279
+ type_register_static(&bcm2835_powermgt_info);
280
+}
281
+
282
+type_init(bcm2835_powermgt_register_types)
283
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
284
index XXXXXXX..XXXXXXX 100644
285
--- a/hw/misc/meson.build
286
+++ b/hw/misc/meson.build
287
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files(
288
'bcm2835_rng.c',
289
'bcm2835_thermal.c',
290
'bcm2835_cprman.c',
291
+ 'bcm2835_powermgt.c',
292
))
293
softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c'))
294
softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c', 'zynq-xadc.c'))
295
--
185
--
296
2.20.1
186
2.34.1
297
187
298
188
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Sergey Kambalin <sergey.kambalin@auriga.com>
2
2
3
Add a test booting and quickly shutdown a raspi2 machine,
3
Replace magic property values by a proper definition,
4
to test the power management model:
4
removing redundant comments.
5
5
6
(1/1) tests/acceptance/boot_linux_console.py:BootLinuxConsole.test_arm_raspi2_initrd:
6
Signed-off-by: Sergey Kambalin <sergey.kambalin@auriga.com>
7
console: [ 0.000000] Booting Linux on physical CPU 0xf00
7
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
console: [ 0.000000] Linux version 4.14.98-v7+ (dom@dom-XPS-13-9370) (gcc version 4.9.3 (crosstool-NG crosstool-ng-1.22.0-88-g8460611)) #1200 SMP Tue Feb 12 20:27:48 GMT 2019
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
console: [ 0.000000] CPU: ARMv7 Processor [410fc075] revision 5 (ARMv7), cr=10c5387d
9
Message-id: 20230612223456.33824-3-philmd@linaro.org
10
console: [ 0.000000] CPU: div instructions available: patching division code
10
Message-Id: <20230531155258.8361-1-sergey.kambalin@auriga.com>
11
console: [ 0.000000] CPU: PIPT / VIPT nonaliasing data cache, VIPT aliasing instruction cache
11
[PMD: Split from bigger patch: 2/4]
12
console: [ 0.000000] OF: fdt: Machine model: Raspberry Pi 2 Model B
12
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
13
...
14
console: Boot successful.
15
console: cat /proc/cpuinfo
16
console: / # cat /proc/cpuinfo
17
...
18
console: processor : 3
19
console: model name : ARMv7 Processor rev 5 (v7l)
20
console: BogoMIPS : 125.00
21
console: Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm
22
console: CPU implementer : 0x41
23
console: CPU architecture: 7
24
console: CPU variant : 0x0
25
console: CPU part : 0xc07
26
console: CPU revision : 5
27
console: Hardware : BCM2835
28
console: Revision : 0000
29
console: Serial : 0000000000000000
30
console: cat /proc/iomem
31
console: / # cat /proc/iomem
32
console: 00000000-3bffffff : System RAM
33
console: 00008000-00afffff : Kernel code
34
console: 00c00000-00d468ef : Kernel data
35
console: 3f006000-3f006fff : dwc_otg
36
console: 3f007000-3f007eff : /soc/dma@7e007000
37
console: 3f00b880-3f00b8bf : /soc/mailbox@7e00b880
38
console: 3f100000-3f100027 : /soc/watchdog@7e100000
39
console: 3f101000-3f102fff : /soc/cprman@7e101000
40
console: 3f200000-3f2000b3 : /soc/gpio@7e200000
41
PASS (24.59 s)
42
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0
43
JOB TIME : 25.02 s
44
45
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
46
Reviewed-by: Wainer dos Santos Moschetta <wainersm@redhat.com>
47
Message-id: 20210531113837.1689775-1-f4bug@amsat.org
48
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
49
---
14
---
50
tests/acceptance/boot_linux_console.py | 43 ++++++++++++++++++++++++++
15
hw/misc/bcm2835_property.c | 101 +++++++++++++++++++------------------
51
1 file changed, 43 insertions(+)
16
1 file changed, 51 insertions(+), 50 deletions(-)
52
17
53
diff --git a/tests/acceptance/boot_linux_console.py b/tests/acceptance/boot_linux_console.py
18
diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c
54
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
55
--- a/tests/acceptance/boot_linux_console.py
20
--- a/hw/misc/bcm2835_property.c
56
+++ b/tests/acceptance/boot_linux_console.py
21
+++ b/hw/misc/bcm2835_property.c
57
@@ -XXX,XX +XXX,XX @@
22
@@ -XXX,XX +XXX,XX @@
58
from avocado import skip
23
#include "migration/vmstate.h"
59
from avocado import skipUnless
24
#include "hw/irq.h"
60
from avocado_qemu import Test
25
#include "hw/misc/bcm2835_mbox_defs.h"
61
+from avocado_qemu import exec_command
26
+#include "hw/misc/raspberrypi-fw-defs.h"
62
from avocado_qemu import exec_command_and_wait_for_pattern
27
#include "sysemu/dma.h"
63
from avocado_qemu import interrupt_interactive_console_until_pattern
28
#include "qemu/log.h"
64
from avocado_qemu import wait_for_console_pattern
29
#include "qemu/module.h"
65
@@ -XXX,XX +XXX,XX @@ def test_arm_raspi2_uart0(self):
30
@@ -XXX,XX +XXX,XX @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
66
"""
31
/* @(value + 8) : Request/response indicator */
67
self.do_test_arm_raspi2(0)
32
resplen = 0;
68
33
switch (tag) {
69
+ def test_arm_raspi2_initrd(self):
34
- case 0x00000000: /* End tag */
70
+ """
35
+ case RPI_FWREQ_PROPERTY_END:
71
+ :avocado: tags=arch:arm
36
break;
72
+ :avocado: tags=machine:raspi2
37
- case 0x00000001: /* Get firmware revision */
73
+ """
38
+ case RPI_FWREQ_GET_FIRMWARE_REVISION:
74
+ deb_url = ('http://archive.raspberrypi.org/debian/'
39
stl_le_phys(&s->dma_as, value + 12, 346337);
75
+ 'pool/main/r/raspberrypi-firmware/'
40
resplen = 4;
76
+ 'raspberrypi-kernel_1.20190215-1_armhf.deb')
41
break;
77
+ deb_hash = 'cd284220b32128c5084037553db3c482426f3972'
42
- case 0x00010001: /* Get board model */
78
+ deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
43
+ case RPI_FWREQ_GET_BOARD_MODEL:
79
+ kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img')
44
qemu_log_mask(LOG_UNIMP,
80
+ dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb')
45
"bcm2835_property: 0x%08x get board model NYI\n",
81
+
46
tag);
82
+ initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
47
resplen = 4;
83
+ '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
48
break;
84
+ 'arm/rootfs-armv7a.cpio.gz')
49
- case 0x00010002: /* Get board revision */
85
+ initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c'
50
+ case RPI_FWREQ_GET_BOARD_REVISION:
86
+ initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
51
stl_le_phys(&s->dma_as, value + 12, s->board_rev);
87
+ initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
52
resplen = 4;
88
+ archive.gzip_uncompress(initrd_path_gz, initrd_path)
53
break;
89
+
54
- case 0x00010003: /* Get board MAC address */
90
+ self.vm.set_console()
55
+ case RPI_FWREQ_GET_BOARD_MAC_ADDRESS:
91
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
56
resplen = sizeof(s->macaddr.a);
92
+ 'earlycon=pl011,0x3f201000 console=ttyAMA0 '
57
dma_memory_write(&s->dma_as, value + 12, s->macaddr.a, resplen,
93
+ 'panic=-1 noreboot ' +
58
MEMTXATTRS_UNSPECIFIED);
94
+ 'dwc_otg.fiq_fsm_enable=0')
59
break;
95
+ self.vm.add_args('-kernel', kernel_path,
60
- case 0x00010004: /* Get board serial */
96
+ '-dtb', dtb_path,
61
+ case RPI_FWREQ_GET_BOARD_SERIAL:
97
+ '-initrd', initrd_path,
62
qemu_log_mask(LOG_UNIMP,
98
+ '-append', kernel_command_line,
63
"bcm2835_property: 0x%08x get board serial NYI\n",
99
+ '-no-reboot')
64
tag);
100
+ self.vm.launch()
65
resplen = 8;
101
+ self.wait_for_console_pattern('Boot successful.')
66
break;
102
+
67
- case 0x00010005: /* Get ARM memory */
103
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
68
+ case RPI_FWREQ_GET_ARM_MEMORY:
104
+ 'BCM2835')
69
/* base */
105
+ exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
70
stl_le_phys(&s->dma_as, value + 12, 0);
106
+ '/soc/cprman@7e101000')
71
/* size */
107
+ exec_command(self, 'halt')
72
stl_le_phys(&s->dma_as, value + 16, s->fbdev->vcram_base);
108
+ # Wait for VM to shut down gracefully
73
resplen = 8;
109
+ self.vm.wait()
74
break;
110
+
75
- case 0x00010006: /* Get VC memory */
111
def test_arm_exynos4210_initrd(self):
76
+ case RPI_FWREQ_GET_VC_MEMORY:
112
"""
77
/* base */
113
:avocado: tags=arch:arm
78
stl_le_phys(&s->dma_as, value + 12, s->fbdev->vcram_base);
79
/* size */
80
stl_le_phys(&s->dma_as, value + 16, s->fbdev->vcram_size);
81
resplen = 8;
82
break;
83
- case 0x00028001: /* Set power state */
84
+ case RPI_FWREQ_SET_POWER_STATE:
85
/* Assume that whatever device they asked for exists,
86
* and we'll just claim we set it to the desired state
87
*/
88
@@ -XXX,XX +XXX,XX @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
89
90
/* Clocks */
91
92
- case 0x00030001: /* Get clock state */
93
+ case RPI_FWREQ_GET_CLOCK_STATE:
94
stl_le_phys(&s->dma_as, value + 16, 0x1);
95
resplen = 8;
96
break;
97
98
- case 0x00038001: /* Set clock state */
99
+ case RPI_FWREQ_SET_CLOCK_STATE:
100
qemu_log_mask(LOG_UNIMP,
101
"bcm2835_property: 0x%08x set clock state NYI\n",
102
tag);
103
resplen = 8;
104
break;
105
106
- case 0x00030002: /* Get clock rate */
107
- case 0x00030004: /* Get max clock rate */
108
- case 0x00030007: /* Get min clock rate */
109
+ case RPI_FWREQ_GET_CLOCK_RATE:
110
+ case RPI_FWREQ_GET_MAX_CLOCK_RATE:
111
+ case RPI_FWREQ_GET_MIN_CLOCK_RATE:
112
switch (ldl_le_phys(&s->dma_as, value + 12)) {
113
- case 1: /* EMMC */
114
+ case RPI_FIRMWARE_EMMC_CLK_ID:
115
stl_le_phys(&s->dma_as, value + 16, 50000000);
116
break;
117
- case 2: /* UART */
118
+ case RPI_FIRMWARE_UART_CLK_ID:
119
stl_le_phys(&s->dma_as, value + 16, 3000000);
120
break;
121
default:
122
@@ -XXX,XX +XXX,XX @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
123
resplen = 8;
124
break;
125
126
- case 0x00038002: /* Set clock rate */
127
- case 0x00038004: /* Set max clock rate */
128
- case 0x00038007: /* Set min clock rate */
129
+ case RPI_FWREQ_SET_CLOCK_RATE:
130
+ case RPI_FWREQ_SET_MAX_CLOCK_RATE:
131
+ case RPI_FWREQ_SET_MIN_CLOCK_RATE:
132
qemu_log_mask(LOG_UNIMP,
133
"bcm2835_property: 0x%08x set clock rate NYI\n",
134
tag);
135
@@ -XXX,XX +XXX,XX @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
136
137
/* Temperature */
138
139
- case 0x00030006: /* Get temperature */
140
+ case RPI_FWREQ_GET_TEMPERATURE:
141
stl_le_phys(&s->dma_as, value + 16, 25000);
142
resplen = 8;
143
break;
144
145
- case 0x0003000A: /* Get max temperature */
146
+ case RPI_FWREQ_GET_MAX_TEMPERATURE:
147
stl_le_phys(&s->dma_as, value + 16, 99000);
148
resplen = 8;
149
break;
150
151
/* Frame buffer */
152
153
- case 0x00040001: /* Allocate buffer */
154
+ case RPI_FWREQ_FRAMEBUFFER_ALLOCATE:
155
stl_le_phys(&s->dma_as, value + 12, fbconfig.base);
156
stl_le_phys(&s->dma_as, value + 16,
157
bcm2835_fb_get_size(&fbconfig));
158
resplen = 8;
159
break;
160
- case 0x00048001: /* Release buffer */
161
+ case RPI_FWREQ_FRAMEBUFFER_RELEASE:
162
resplen = 0;
163
break;
164
- case 0x00040002: /* Blank screen */
165
+ case RPI_FWREQ_FRAMEBUFFER_BLANK:
166
resplen = 4;
167
break;
168
- case 0x00044003: /* Test physical display width/height */
169
- case 0x00044004: /* Test virtual display width/height */
170
+ case RPI_FWREQ_FRAMEBUFFER_TEST_PHYSICAL_WIDTH_HEIGHT:
171
+ case RPI_FWREQ_FRAMEBUFFER_TEST_VIRTUAL_WIDTH_HEIGHT:
172
resplen = 8;
173
break;
174
- case 0x00048003: /* Set physical display width/height */
175
+ case RPI_FWREQ_FRAMEBUFFER_SET_PHYSICAL_WIDTH_HEIGHT:
176
fbconfig.xres = ldl_le_phys(&s->dma_as, value + 12);
177
fbconfig.yres = ldl_le_phys(&s->dma_as, value + 16);
178
bcm2835_fb_validate_config(&fbconfig);
179
fbconfig_updated = true;
180
/* fall through */
181
- case 0x00040003: /* Get physical display width/height */
182
+ case RPI_FWREQ_FRAMEBUFFER_GET_PHYSICAL_WIDTH_HEIGHT:
183
stl_le_phys(&s->dma_as, value + 12, fbconfig.xres);
184
stl_le_phys(&s->dma_as, value + 16, fbconfig.yres);
185
resplen = 8;
186
break;
187
- case 0x00048004: /* Set virtual display width/height */
188
+ case RPI_FWREQ_FRAMEBUFFER_SET_VIRTUAL_WIDTH_HEIGHT:
189
fbconfig.xres_virtual = ldl_le_phys(&s->dma_as, value + 12);
190
fbconfig.yres_virtual = ldl_le_phys(&s->dma_as, value + 16);
191
bcm2835_fb_validate_config(&fbconfig);
192
fbconfig_updated = true;
193
/* fall through */
194
- case 0x00040004: /* Get virtual display width/height */
195
+ case RPI_FWREQ_FRAMEBUFFER_GET_VIRTUAL_WIDTH_HEIGHT:
196
stl_le_phys(&s->dma_as, value + 12, fbconfig.xres_virtual);
197
stl_le_phys(&s->dma_as, value + 16, fbconfig.yres_virtual);
198
resplen = 8;
199
break;
200
- case 0x00044005: /* Test depth */
201
+ case RPI_FWREQ_FRAMEBUFFER_TEST_DEPTH:
202
resplen = 4;
203
break;
204
- case 0x00048005: /* Set depth */
205
+ case RPI_FWREQ_FRAMEBUFFER_SET_DEPTH:
206
fbconfig.bpp = ldl_le_phys(&s->dma_as, value + 12);
207
bcm2835_fb_validate_config(&fbconfig);
208
fbconfig_updated = true;
209
/* fall through */
210
- case 0x00040005: /* Get depth */
211
+ case RPI_FWREQ_FRAMEBUFFER_GET_DEPTH:
212
stl_le_phys(&s->dma_as, value + 12, fbconfig.bpp);
213
resplen = 4;
214
break;
215
- case 0x00044006: /* Test pixel order */
216
+ case RPI_FWREQ_FRAMEBUFFER_TEST_PIXEL_ORDER:
217
resplen = 4;
218
break;
219
- case 0x00048006: /* Set pixel order */
220
+ case RPI_FWREQ_FRAMEBUFFER_SET_PIXEL_ORDER:
221
fbconfig.pixo = ldl_le_phys(&s->dma_as, value + 12);
222
bcm2835_fb_validate_config(&fbconfig);
223
fbconfig_updated = true;
224
/* fall through */
225
- case 0x00040006: /* Get pixel order */
226
+ case RPI_FWREQ_FRAMEBUFFER_GET_PIXEL_ORDER:
227
stl_le_phys(&s->dma_as, value + 12, fbconfig.pixo);
228
resplen = 4;
229
break;
230
- case 0x00044007: /* Test pixel alpha */
231
+ case RPI_FWREQ_FRAMEBUFFER_TEST_ALPHA_MODE:
232
resplen = 4;
233
break;
234
- case 0x00048007: /* Set alpha */
235
+ case RPI_FWREQ_FRAMEBUFFER_SET_ALPHA_MODE:
236
fbconfig.alpha = ldl_le_phys(&s->dma_as, value + 12);
237
bcm2835_fb_validate_config(&fbconfig);
238
fbconfig_updated = true;
239
/* fall through */
240
- case 0x00040007: /* Get alpha */
241
+ case RPI_FWREQ_FRAMEBUFFER_GET_ALPHA_MODE:
242
stl_le_phys(&s->dma_as, value + 12, fbconfig.alpha);
243
resplen = 4;
244
break;
245
- case 0x00040008: /* Get pitch */
246
+ case RPI_FWREQ_FRAMEBUFFER_GET_PITCH:
247
stl_le_phys(&s->dma_as, value + 12,
248
bcm2835_fb_get_pitch(&fbconfig));
249
resplen = 4;
250
break;
251
- case 0x00044009: /* Test virtual offset */
252
+ case RPI_FWREQ_FRAMEBUFFER_TEST_VIRTUAL_OFFSET:
253
resplen = 8;
254
break;
255
- case 0x00048009: /* Set virtual offset */
256
+ case RPI_FWREQ_FRAMEBUFFER_SET_VIRTUAL_OFFSET:
257
fbconfig.xoffset = ldl_le_phys(&s->dma_as, value + 12);
258
fbconfig.yoffset = ldl_le_phys(&s->dma_as, value + 16);
259
bcm2835_fb_validate_config(&fbconfig);
260
fbconfig_updated = true;
261
/* fall through */
262
- case 0x00040009: /* Get virtual offset */
263
+ case RPI_FWREQ_FRAMEBUFFER_GET_VIRTUAL_OFFSET:
264
stl_le_phys(&s->dma_as, value + 12, fbconfig.xoffset);
265
stl_le_phys(&s->dma_as, value + 16, fbconfig.yoffset);
266
resplen = 8;
267
break;
268
- case 0x0004000a: /* Get/Test/Set overscan */
269
- case 0x0004400a:
270
- case 0x0004800a:
271
+ case RPI_FWREQ_FRAMEBUFFER_GET_OVERSCAN:
272
+ case RPI_FWREQ_FRAMEBUFFER_TEST_OVERSCAN:
273
+ case RPI_FWREQ_FRAMEBUFFER_SET_OVERSCAN:
274
stl_le_phys(&s->dma_as, value + 12, 0);
275
stl_le_phys(&s->dma_as, value + 16, 0);
276
stl_le_phys(&s->dma_as, value + 20, 0);
277
stl_le_phys(&s->dma_as, value + 24, 0);
278
resplen = 16;
279
break;
280
- case 0x0004800b: /* Set palette */
281
+ case RPI_FWREQ_FRAMEBUFFER_SET_PALETTE:
282
offset = ldl_le_phys(&s->dma_as, value + 12);
283
length = ldl_le_phys(&s->dma_as, value + 16);
284
n = 0;
285
@@ -XXX,XX +XXX,XX @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
286
stl_le_phys(&s->dma_as, value + 12, 0);
287
resplen = 4;
288
break;
289
- case 0x00040013: /* Get number of displays */
290
+ case RPI_FWREQ_FRAMEBUFFER_GET_NUM_DISPLAYS:
291
stl_le_phys(&s->dma_as, value + 12, 1);
292
resplen = 4;
293
break;
294
295
- case 0x00060001: /* Get DMA channels */
296
+ case RPI_FWREQ_GET_DMA_CHANNELS:
297
/* channels 2-5 */
298
stl_le_phys(&s->dma_as, value + 12, 0x003C);
299
resplen = 4;
300
break;
301
302
- case 0x00050001: /* Get command line */
303
+ case RPI_FWREQ_GET_COMMAND_LINE:
304
/*
305
* We follow the firmware behaviour: no NUL terminator is
306
* written to the buffer, and if the buffer is too short
114
--
307
--
115
2.20.1
308
2.34.1
116
309
117
310
diff view generated by jsdifflib
1
From: Patrick Venture <venture@google.com>
1
From: Sergey Kambalin <sergey.kambalin@auriga.com>
2
2
3
Add line item reference to quanta-gbs-bmc machine.
3
Signed-off-by: Sergey Kambalin <sergey.kambalin@auriga.com>
4
4
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Patrick Venture <venture@google.com>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Cédric Le Goater <clg@kaod.org>
6
Message-id: 20230612223456.33824-4-philmd@linaro.org
7
Message-id: 20210615192848.1065297-3-venture@google.com
7
Message-Id: <20230531155258.8361-1-sergey.kambalin@auriga.com>
8
[PMM: fixed underline Sphinx warning]
8
[PMD: Split from bigger patch: 4/4]
9
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
---
11
docs/system/arm/nuvoton.rst | 5 +++--
12
include/hw/arm/raspi_platform.h | 5 +++++
12
1 file changed, 3 insertions(+), 2 deletions(-)
13
hw/misc/bcm2835_property.c | 8 +++++---
14
2 files changed, 10 insertions(+), 3 deletions(-)
13
15
14
diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst
16
diff --git a/include/hw/arm/raspi_platform.h b/include/hw/arm/raspi_platform.h
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/docs/system/arm/nuvoton.rst
18
--- a/include/hw/arm/raspi_platform.h
17
+++ b/docs/system/arm/nuvoton.rst
19
+++ b/include/hw/arm/raspi_platform.h
18
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@
19
-Nuvoton iBMC boards (``npcm750-evb``, ``quanta-gsj``)
21
#define INTERRUPT_ILLEGAL_TYPE0 6
20
-=====================================================
22
#define INTERRUPT_ILLEGAL_TYPE1 7
21
+Nuvoton iBMC boards (``*-bmc``, ``npcm750-evb``, ``quanta-gsj``)
23
22
+================================================================
24
+/* Clock rates */
23
25
+#define RPI_FIRMWARE_EMMC_CLK_RATE 50000000
24
The `Nuvoton iBMC`_ chips (NPCM7xx) are a family of ARM-based SoCs that are
26
+#define RPI_FIRMWARE_UART_CLK_RATE 3000000
25
designed to be used as Baseboard Management Controllers (BMCs) in various
27
+#define RPI_FIRMWARE_DEFAULT_CLK_RATE 700000000
26
@@ -XXX,XX +XXX,XX @@ segment. The following machines are based on this chip :
28
+
27
The NPCM730 SoC has two Cortex-A9 cores and is targeted for Data Center and
29
#endif
28
Hyperscale applications. The following machines are based on this chip :
30
diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c
29
31
index XXXXXXX..XXXXXXX 100644
30
+- ``quanta-gbs-bmc`` Quanta GBS server BMC
32
--- a/hw/misc/bcm2835_property.c
31
- ``quanta-gsj`` Quanta GSJ server BMC
33
+++ b/hw/misc/bcm2835_property.c
32
34
@@ -XXX,XX +XXX,XX @@
33
There are also two more SoCs, NPCM710 and NPCM705, which are single-core
35
#include "qemu/log.h"
36
#include "qemu/module.h"
37
#include "trace.h"
38
+#include "hw/arm/raspi_platform.h"
39
40
/* https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface */
41
42
@@ -XXX,XX +XXX,XX @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
43
case RPI_FWREQ_GET_MIN_CLOCK_RATE:
44
switch (ldl_le_phys(&s->dma_as, value + 12)) {
45
case RPI_FIRMWARE_EMMC_CLK_ID:
46
- stl_le_phys(&s->dma_as, value + 16, 50000000);
47
+ stl_le_phys(&s->dma_as, value + 16, RPI_FIRMWARE_EMMC_CLK_RATE);
48
break;
49
case RPI_FIRMWARE_UART_CLK_ID:
50
- stl_le_phys(&s->dma_as, value + 16, 3000000);
51
+ stl_le_phys(&s->dma_as, value + 16, RPI_FIRMWARE_UART_CLK_RATE);
52
break;
53
default:
54
- stl_le_phys(&s->dma_as, value + 16, 700000000);
55
+ stl_le_phys(&s->dma_as, value + 16,
56
+ RPI_FIRMWARE_DEFAULT_CLK_RATE);
57
break;
58
}
59
resplen = 8;
34
--
60
--
35
2.20.1
61
2.34.1
36
62
37
63
diff view generated by jsdifflib
From: Sergey Kambalin <sergey.kambalin@auriga.com>

Signed-off-by: Sergey Kambalin <sergey.kambalin@auriga.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20230612223456.33824-5-philmd@linaro.org
Message-Id: <20230531155258.8361-1-sergey.kambalin@auriga.com>
[PMD: Split from bigger patch: 3/4]
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
[PMM: added a comment about RPI_FIRMWARE_CORE_CLK_RATE
 really being SoC-specific]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/raspi_platform.h | 5 +++++
 hw/misc/bcm2835_property.c      | 3 +++
 2 files changed, 8 insertions(+)

diff --git a/include/hw/arm/raspi_platform.h b/include/hw/arm/raspi_platform.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/raspi_platform.h
+++ b/include/hw/arm/raspi_platform.h
@@ -XXX,XX +XXX,XX @@
 /* Clock rates */
 #define RPI_FIRMWARE_EMMC_CLK_RATE 50000000
 #define RPI_FIRMWARE_UART_CLK_RATE 3000000
+/*
+ * TODO: this is really SoC-specific; we might want to
+ * set it per-SoC if it turns out any guests care.
+ */
+#define RPI_FIRMWARE_CORE_CLK_RATE 350000000
 #define RPI_FIRMWARE_DEFAULT_CLK_RATE 700000000

 #endif
diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/bcm2835_property.c
+++ b/hw/misc/bcm2835_property.c
@@ -XXX,XX +XXX,XX @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
         case RPI_FIRMWARE_UART_CLK_ID:
             stl_le_phys(&s->dma_as, value + 16, RPI_FIRMWARE_UART_CLK_RATE);
             break;
+        case RPI_FIRMWARE_CORE_CLK_ID:
+            stl_le_phys(&s->dma_as, value + 16, RPI_FIRMWARE_CORE_CLK_RATE);
+            break;
         default:
             stl_le_phys(&s->dma_as, value + 16,
                         RPI_FIRMWARE_DEFAULT_CLK_RATE);
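On the TODO above: if a guest ever turns out to care, one plausible shape
for a per-SoC core clock rate would be a qdev property on the device, with
the #define kept as the default. This is only a sketch, not part of this
series; the property name and the core_clk_rate_hz field are invented:

#include "hw/qdev-properties.h"

/*
 * Hypothetical sketch of the TODO: let each SoC model override the
 * core clock rate instead of baking in one constant.
 */
static Property bcm2835_property_properties[] = {
    DEFINE_PROP_UINT32("core-clk-rate-hz", BCM2835PropertyState,
                       core_clk_rate_hz, RPI_FIRMWARE_CORE_CLK_RATE),
    DEFINE_PROP_END_OF_LIST(),
};

This would be wired up via device_class_set_props(), and the new
RPI_FIRMWARE_CORE_CLK_ID case would then return s->core_clk_rate_hz
rather than the constant.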
-- 
2.34.1
