Hi; this pullreq contains mainly a chunk of RTH's refactoring
of the Arm pagetable walk code, plus a series from me fixing
configure checkpatch warnings, and some old patches to various
files all over the tree getting rid of dynamic stack allocation.

thanks
-- PMM

The following changes since commit 6338c30111d596d955e6bc933a82184a0b910c43:

  Merge tag 'm68k-for-7.2-pull-request' of https://github.com/vivier/qemu-m68k into staging (2022-09-21 13:12:36 -0400)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220922

for you to fetch changes up to b3b5472db0ab7a53499441c1fe1dedec05b1e285:

  configure: Avoid use of 'local' as it is non-POSIX (2022-09-22 16:38:29 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/net/can: fix Xilinx ZynqMP CAN RX FIFO logic
 * Fix alignment for Neon VLD4.32
 * Refactoring of page-table-walk code
 * hw/acpi: Add ospm_status hook implementation for acpi-ged
 * hw/net/lan9118: Signal TSFL_INT flag when TX FIFO reaches specified level
 * chardev/baum: avoid variable-length arrays
 * io/channel-websock: avoid variable-length arrays
 * hw/net/e1000e_core: Use definition to avoid dynamic stack allocation
 * hw/ppc/pnv: Avoid dynamic stack allocation
 * hw/intc/xics: Avoid dynamic stack allocation
 * hw/i386/multiboot: Avoid dynamic stack allocation
 * hw/usb/hcd-ohci: Use definition to avoid dynamic stack allocation
 * ui/curses: Avoid dynamic stack allocation
 * tests/unit/test-vmstate: Avoid dynamic stack allocation
 * configure: fix various shellcheck-spotted issues and nits

----------------------------------------------------------------
Anton Kochkov (1):
      hw/net/can: fix Xilinx ZynqMP CAN RX FIFO logic

Clément Chigot (1):
      target/arm: Fix alignment for VLD4.32

Keqian Zhu (1):
      hw/acpi: Add ospm_status hook implementation for acpi-ged

Lucas Dietrich (1):
      hw/net/lan9118: Signal TSFL_INT flag when TX FIFO reaches specified level

Peter Maydell (7):
      configure: Remove unused python_version variable
      configure: Remove unused meson_args variable
      configure: Add missing quoting for some easy cases
      configure: Add './' on front of glob of */config-devices.mak.d
      configure: Remove use of backtick `...` syntax
      configure: Check mkdir result directly, not via $?
      configure: Avoid use of 'local' as it is non-POSIX

Philippe Mathieu-Daudé (11):
      chardev/baum: Replace magic values by X_MAX / Y_MAX definitions
      chardev/baum: Use definitions to avoid dynamic stack allocation
      chardev/baum: Avoid dynamic stack allocation
      io/channel-websock: Replace strlen(const_str) by sizeof(const_str) - 1
      hw/net/e1000e_core: Use definition to avoid dynamic stack allocation
      hw/ppc/pnv: Avoid dynamic stack allocation
      hw/intc/xics: Avoid dynamic stack allocation
      hw/i386/multiboot: Avoid dynamic stack allocation
      hw/usb/hcd-ohci: Use definition to avoid dynamic stack allocation
      ui/curses: Avoid dynamic stack allocation
      tests/unit/test-vmstate: Avoid dynamic stack allocation

Richard Henderson (17):
      target/arm: Create GetPhysAddrResult
      target/arm: Use GetPhysAddrResult in get_phys_addr_lpae
      target/arm: Use GetPhysAddrResult in get_phys_addr_v6
      target/arm: Use GetPhysAddrResult in get_phys_addr_v5
      target/arm: Use GetPhysAddrResult in get_phys_addr_pmsav5
      target/arm: Use GetPhysAddrResult in get_phys_addr_pmsav7
      target/arm: Use GetPhysAddrResult in get_phys_addr_pmsav8
      target/arm: Use GetPhysAddrResult in pmsav8_mpu_lookup
      target/arm: Remove is_subpage argument to pmsav8_mpu_lookup
      target/arm: Add is_secure parameter to v8m_security_lookup
      target/arm: Add secure parameter to pmsav8_mpu_lookup
      target/arm: Add is_secure parameter to get_phys_addr_v5
      target/arm: Add is_secure parameter to get_phys_addr_v6
      target/arm: Add secure parameter to get_phys_addr_pmsav8
      target/arm: Add is_secure parameter to pmsav7_use_background_region
      target/arm: Add secure parameter to get_phys_addr_pmsav7
      target/arm: Add is_secure parameter to get_phys_addr_pmsav5

 configure | 82 +++++-----
 target/arm/internals.h | 26 +--
 chardev/baum.c | 22 ++-
 hw/acpi/generic_event_device.c | 8 +
 hw/i386/multiboot.c | 5 +-
 hw/intc/xics.c | 2 +-
 hw/net/can/xlnx-zynqmp-can.c | 32 ++--
 hw/net/e1000e_core.c | 7 +-
 hw/net/lan9118.c | 8 +
 hw/ppc/pnv.c | 4 +-
 hw/ppc/spapr.c | 8 +-
 hw/ppc/spapr_pci_nvlink2.c | 2 +-
 hw/usb/hcd-ohci.c | 7 +-
 io/channel-websock.c | 2 +-
 target/arm/helper.c | 27 ++-
 target/arm/m_helper.c | 78 ++++-----
 target/arm/ptw.c | 364 +++++++++++++++++++----------------
 target/arm/tlb_helper.c | 22 +--
 target/arm/translate-neon.c | 6 +-
 tests/unit/test-vmstate.c | 7 +-
 ui/curses.c | 2 +-
 21 files changed, 347 insertions(+), 374 deletions(-)

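As background for the "avoid dynamic stack allocation" patches listed above: they all follow the same basic pattern of replacing a variable-length array, whose size depends on runtime input, with a fixed-size buffer bounded by an existing definition. The stand-alone C sketch below only illustrates that pattern; it is not taken from any patch in this series, and the names (MAX_PKT_LEN, checksum_vla, checksum_fixed) are hypothetical.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PKT_LEN 256   /* hypothetical bound, e.g. a device's FIFO size */

/* Before: a variable-length array -- stack usage depends on runtime input. */
static uint32_t checksum_vla(const uint8_t *data, size_t len)
{
    uint8_t buf[len];                 /* dynamic stack allocation */
    uint32_t sum = 0;

    memcpy(buf, data, len);
    for (size_t i = 0; i < len; i++) {
        sum += buf[i];
    }
    return sum;
}

/* After: a fixed-size buffer bounded by the definition -- stack usage is
 * known at compile time, and the bound is checked explicitly. */
static uint32_t checksum_fixed(const uint8_t *data, size_t len)
{
    uint8_t buf[MAX_PKT_LEN];
    uint32_t sum = 0;

    assert(len <= MAX_PKT_LEN);
    memcpy(buf, data, len);
    for (size_t i = 0; i < len; i++) {
        sum += buf[i];
    }
    return sum;
}

int main(void)
{
    const uint8_t pkt[] = { 1, 2, 3, 4 };

    printf("%u %u\n", (unsigned)checksum_vla(pkt, sizeof(pkt)),
           (unsigned)checksum_fixed(pkt, sizeof(pkt)));
    return 0;
}

The fixed-size version trades a few bytes of stack for a compile-time-known bound, which is why most of these patches need only a definition plus an assertion.
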
From: Anton Kochkov <anton.kochkov@proton.me>

For consistency, function "update_rx_fifo()" should use the RX FIFO
register field names, not the TX FIFO ones, even if they refer to the
same bit positions in the register.

Signed-off-by: Anton Kochkov <anton.kochkov@proton.me>
Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com>
Message-id: 20220817141754.2105981-1-anton.kochkov@proton.me
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1123
[PMM: tweaked commit message]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/net/can/xlnx-zynqmp-can.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/can/xlnx-zynqmp-can.c
+++ b/hw/net/can/xlnx-zynqmp-can.c
@@ -XXX,XX +XXX,XX @@ static void update_rx_fifo(XlnxZynqMPCANState *s, const qemu_can_frame *frame)
timestamp));

/* First 32 bit of the data. */
- fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA1_DB3_SHIFT,
- R_TXFIFO_DATA1_DB3_LENGTH,
+ fifo32_push(&s->rx_fifo, deposit32(0, R_RXFIFO_DATA1_DB3_SHIFT,
+ R_RXFIFO_DATA1_DB3_LENGTH,
frame->data[0]) |
- deposit32(0, R_TXFIFO_DATA1_DB2_SHIFT,
- R_TXFIFO_DATA1_DB2_LENGTH,
+ deposit32(0, R_RXFIFO_DATA1_DB2_SHIFT,
+ R_RXFIFO_DATA1_DB2_LENGTH,
frame->data[1]) |
- deposit32(0, R_TXFIFO_DATA1_DB1_SHIFT,
- R_TXFIFO_DATA1_DB1_LENGTH,
+ deposit32(0, R_RXFIFO_DATA1_DB1_SHIFT,
+ R_RXFIFO_DATA1_DB1_LENGTH,
frame->data[2]) |
- deposit32(0, R_TXFIFO_DATA1_DB0_SHIFT,
- R_TXFIFO_DATA1_DB0_LENGTH,
+ deposit32(0, R_RXFIFO_DATA1_DB0_SHIFT,
+ R_RXFIFO_DATA1_DB0_LENGTH,
frame->data[3]));
/* Last 32 bit of the data. */
- fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA2_DB7_SHIFT,
- R_TXFIFO_DATA2_DB7_LENGTH,
+ fifo32_push(&s->rx_fifo, deposit32(0, R_RXFIFO_DATA2_DB7_SHIFT,
+ R_RXFIFO_DATA2_DB7_LENGTH,
frame->data[4]) |
- deposit32(0, R_TXFIFO_DATA2_DB6_SHIFT,
- R_TXFIFO_DATA2_DB6_LENGTH,
+ deposit32(0, R_RXFIFO_DATA2_DB6_SHIFT,
+ R_RXFIFO_DATA2_DB6_LENGTH,
frame->data[5]) |
- deposit32(0, R_TXFIFO_DATA2_DB5_SHIFT,
- R_TXFIFO_DATA2_DB5_LENGTH,
+ deposit32(0, R_RXFIFO_DATA2_DB5_SHIFT,
+ R_RXFIFO_DATA2_DB5_LENGTH,
frame->data[6]) |
- deposit32(0, R_TXFIFO_DATA2_DB4_SHIFT,
- R_TXFIFO_DATA2_DB4_LENGTH,
+ deposit32(0, R_RXFIFO_DATA2_DB4_SHIFT,
+ R_RXFIFO_DATA2_DB4_LENGTH,
frame->data[7]));

ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1);
--
2.25.1

From: Clément Chigot <chigot@adacore.com>

When requested, the alignment for VLD4.32 is 8 and not 16.

See ARM documentation about VLD4 encoding:
  ebytes = 1 << UInt(size);
  if size == '10' then
      alignment = if a == '0' then 1 else 8;
  else
      alignment = if a == '0' then 1 else 4*ebytes;

Signed-off-by: Clément Chigot <chigot@adacore.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220914105058.2787404-1-chigot@adacore.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-neon.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/translate-neon.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
case 3:
return false;
case 4:
- align = pow2_align(size + 2);
+ if (size == 2) {
+ align = pow2_align(3);
+ } else {
+ align = pow2_align(size + 2);
+ }
break;
default:
g_assert_not_reached();
--
2.25.1

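As a cross-check of the alignment rule quoted in the commit message above, the stand-alone sketch below (not QEMU code; vld4_alignment is a hypothetical helper) evaluates the quoted pseudocode for the a == 1 case: size == '10' (VLD4.32) yields 8-byte alignment, i.e. pow2_align(3), not the 16 bytes that pow2_align(size + 2) produced.

#include <assert.h>
#include <stdio.h>

/* Hypothetical helper: alignment in bytes required by VLD4 for a given
 * 'size' field when the align bit 'a' is set, per the quoted pseudocode. */
static unsigned vld4_alignment(unsigned size, unsigned a)
{
    unsigned ebytes = 1u << size;

    if (a == 0) {
        return 1;                        /* no alignment requested */
    }
    return (size == 2) ? 8 : 4 * ebytes;
}

int main(void)
{
    assert(vld4_alignment(2, 1) == 8);   /* VLD4.32: 8 bytes, pow2_align(3) */
    assert(vld4_alignment(1, 1) == 8);   /* VLD4.16: 4 * ebytes = 8 */
    assert(vld4_alignment(0, 1) == 4);   /* VLD4.8:  4 * ebytes = 4 */
    printf("VLD4.32 alignment: %u bytes\n", vld4_alignment(2, 1));
    return 0;
}
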
From: Richard Henderson <richard.henderson@linaro.org>

Combine 5 output pointer arguments from get_phys_addr
into a single struct. Adjust all callers.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220822152741.1617527-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h | 13 ++++-
 target/arm/helper.c | 27 ++++-----
 target/arm/m_helper.c | 52 ++++++-----------
 target/arm/ptw.c | 120 +++++++++++++++++++++-------------------
 target/arm/tlb_helper.c | 22 +++-----
 5 files changed, 109 insertions(+), 125 deletions(-)

18
diff --git a/target/arm/internals.h b/target/arm/internals.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/internals.h
21
+++ b/target/arm/internals.h
22
@@ -XXX,XX +XXX,XX @@ typedef struct ARMCacheAttrs {
23
bool is_s2_format:1;
24
} ARMCacheAttrs;
25
26
+/* Fields that are valid upon success. */
27
+typedef struct GetPhysAddrResult {
28
+ hwaddr phys;
29
+ target_ulong page_size;
30
+ int prot;
31
+ MemTxAttrs attrs;
32
+ ARMCacheAttrs cacheattrs;
33
+} GetPhysAddrResult;
34
+
35
bool get_phys_addr(CPUARMState *env, target_ulong address,
36
MMUAccessType access_type, ARMMMUIdx mmu_idx,
37
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
38
- target_ulong *page_size,
39
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
40
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
41
__attribute__((nonnull));
42
43
void arm_log_exception(CPUState *cs);
44
diff --git a/target/arm/helper.c b/target/arm/helper.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/target/arm/helper.c
47
+++ b/target/arm/helper.c
48
@@ -XXX,XX +XXX,XX @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
49
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
50
MMUAccessType access_type, ARMMMUIdx mmu_idx)
51
{
52
- hwaddr phys_addr;
53
- target_ulong page_size;
54
- int prot;
55
bool ret;
56
uint64_t par64;
57
bool format64 = false;
58
- MemTxAttrs attrs = {};
59
ARMMMUFaultInfo fi = {};
60
- ARMCacheAttrs cacheattrs = {};
61
+ GetPhysAddrResult res = {};
62
63
- ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
64
- &prot, &page_size, &fi, &cacheattrs);
65
+ ret = get_phys_addr(env, value, access_type, mmu_idx, &res, &fi);
66
67
/*
68
* ATS operations only do S1 or S1+S2 translations, so we never
69
* have to deal with the ARMCacheAttrs format for S2 only.
70
*/
71
- assert(!cacheattrs.is_s2_format);
72
+ assert(!res.cacheattrs.is_s2_format);
73
74
if (ret) {
75
/*
76
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
77
/* Create a 64-bit PAR */
78
par64 = (1 << 11); /* LPAE bit always set */
79
if (!ret) {
80
- par64 |= phys_addr & ~0xfffULL;
81
- if (!attrs.secure) {
82
+ par64 |= res.phys & ~0xfffULL;
83
+ if (!res.attrs.secure) {
84
par64 |= (1 << 9); /* NS */
85
}
86
- par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
87
- par64 |= cacheattrs.shareability << 7; /* SH */
88
+ par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
89
+ par64 |= res.cacheattrs.shareability << 7; /* SH */
90
} else {
91
uint32_t fsr = arm_fi_to_lfsc(&fi);
92
93
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
94
*/
95
if (!ret) {
96
/* We do not set any attribute bits in the PAR */
97
- if (page_size == (1 << 24)
98
+ if (res.page_size == (1 << 24)
99
&& arm_feature(env, ARM_FEATURE_V7)) {
100
- par64 = (phys_addr & 0xff000000) | (1 << 1);
101
+ par64 = (res.phys & 0xff000000) | (1 << 1);
102
} else {
103
- par64 = phys_addr & 0xfffff000;
104
+ par64 = res.phys & 0xfffff000;
105
}
106
- if (!attrs.secure) {
107
+ if (!res.attrs.secure) {
108
par64 |= (1 << 9); /* NS */
109
}
110
} else {
111
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/target/arm/m_helper.c
114
+++ b/target/arm/m_helper.c
115
@@ -XXX,XX +XXX,XX @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
116
{
117
CPUState *cs = CPU(cpu);
118
CPUARMState *env = &cpu->env;
119
- MemTxAttrs attrs = {};
120
MemTxResult txres;
121
- target_ulong page_size;
122
- hwaddr physaddr;
123
- int prot;
124
+ GetPhysAddrResult res = {};
125
ARMMMUFaultInfo fi = {};
126
- ARMCacheAttrs cacheattrs = {};
127
bool secure = mmu_idx & ARM_MMU_IDX_M_S;
128
int exc;
129
bool exc_secure;
130
131
- if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
132
- &attrs, &prot, &page_size, &fi, &cacheattrs)) {
133
+ if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
134
/* MPU/SAU lookup failed */
135
if (fi.type == ARMFault_QEMU_SFault) {
136
if (mode == STACK_LAZYFP) {
137
@@ -XXX,XX +XXX,XX @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
138
}
139
goto pend_fault;
140
}
141
- address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
142
- attrs, &txres);
143
+ address_space_stl_le(arm_addressspace(cs, res.attrs), res.phys, value,
144
+ res.attrs, &txres);
145
if (txres != MEMTX_OK) {
146
/* BusFault trying to write the data */
147
if (mode == STACK_LAZYFP) {
148
@@ -XXX,XX +XXX,XX @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
149
{
150
CPUState *cs = CPU(cpu);
151
CPUARMState *env = &cpu->env;
152
- MemTxAttrs attrs = {};
153
MemTxResult txres;
154
- target_ulong page_size;
155
- hwaddr physaddr;
156
- int prot;
157
+ GetPhysAddrResult res = {};
158
ARMMMUFaultInfo fi = {};
159
- ARMCacheAttrs cacheattrs = {};
160
bool secure = mmu_idx & ARM_MMU_IDX_M_S;
161
int exc;
162
bool exc_secure;
163
uint32_t value;
164
165
- if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
166
- &attrs, &prot, &page_size, &fi, &cacheattrs)) {
167
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
168
/* MPU/SAU lookup failed */
169
if (fi.type == ARMFault_QEMU_SFault) {
170
qemu_log_mask(CPU_LOG_INT,
171
@@ -XXX,XX +XXX,XX @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
172
goto pend_fault;
173
}
174
175
- value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
176
- attrs, &txres);
177
+ value = address_space_ldl(arm_addressspace(cs, res.attrs), res.phys,
178
+ res.attrs, &txres);
179
if (txres != MEMTX_OK) {
180
/* BusFault trying to read the data */
181
qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
182
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
183
CPUState *cs = CPU(cpu);
184
CPUARMState *env = &cpu->env;
185
V8M_SAttributes sattrs = {};
186
- MemTxAttrs attrs = {};
187
+ GetPhysAddrResult res = {};
188
ARMMMUFaultInfo fi = {};
189
- ARMCacheAttrs cacheattrs = {};
190
MemTxResult txres;
191
- target_ulong page_size;
192
- hwaddr physaddr;
193
- int prot;
194
195
v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
196
if (!sattrs.nsc || sattrs.ns) {
197
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
198
"...really SecureFault with SFSR.INVEP\n");
199
return false;
200
}
201
- if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr,
202
- &attrs, &prot, &page_size, &fi, &cacheattrs)) {
203
+ if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
204
/* the MPU lookup failed */
205
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
206
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
207
qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
208
return false;
209
}
210
- *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
211
- attrs, &txres);
212
+ *insn = address_space_lduw_le(arm_addressspace(cs, res.attrs), res.phys,
213
+ res.attrs, &txres);
214
if (txres != MEMTX_OK) {
215
env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
216
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
217
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
218
*/
219
CPUState *cs = CPU(cpu);
220
CPUARMState *env = &cpu->env;
221
- MemTxAttrs attrs = {};
222
MemTxResult txres;
223
- target_ulong page_size;
224
- hwaddr physaddr;
225
- int prot;
226
+ GetPhysAddrResult res = {};
227
ARMMMUFaultInfo fi = {};
228
- ARMCacheAttrs cacheattrs = {};
229
uint32_t value;
230
231
- if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
232
- &attrs, &prot, &page_size, &fi, &cacheattrs)) {
233
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
234
/* MPU/SAU lookup failed */
235
if (fi.type == ARMFault_QEMU_SFault) {
236
qemu_log_mask(CPU_LOG_INT,
237
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
238
}
239
return false;
240
}
241
- value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
242
- attrs, &txres);
243
+ value = address_space_ldl(arm_addressspace(cs, res.attrs), res.phys,
244
+ res.attrs, &txres);
245
if (txres != MEMTX_OK) {
246
/* BusFault trying to read the data */
247
qemu_log_mask(CPU_LOG_INT,
248
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
249
index XXXXXXX..XXXXXXX 100644
250
--- a/target/arm/ptw.c
251
+++ b/target/arm/ptw.c
252
@@ -XXX,XX +XXX,XX @@ static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
253
* @address: virtual address to get physical address for
254
* @access_type: 0 for read, 1 for write, 2 for execute
255
* @mmu_idx: MMU index indicating required translation regime
256
- * @phys_ptr: set to the physical address corresponding to the virtual address
257
- * @attrs: set to the memory transaction attributes to use
258
- * @prot: set to the permissions for the page containing phys_ptr
259
- * @page_size: set to the size of the page containing phys_ptr
260
+ * @result: set on translation success.
261
* @fi: set to fault info if the translation fails
262
- * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
263
*/
264
bool get_phys_addr(CPUARMState *env, target_ulong address,
265
MMUAccessType access_type, ARMMMUIdx mmu_idx,
266
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
267
- target_ulong *page_size,
268
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
269
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
270
{
271
ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
272
273
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
274
*/
275
if (arm_feature(env, ARM_FEATURE_EL2)) {
276
hwaddr ipa;
277
- int s2_prot;
278
+ int s1_prot;
279
int ret;
280
bool ipa_secure;
281
- ARMCacheAttrs cacheattrs2 = {};
282
+ ARMCacheAttrs cacheattrs1;
283
ARMMMUIdx s2_mmu_idx;
284
bool is_el0;
285
286
- ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
287
- attrs, prot, page_size, fi, cacheattrs);
288
+ ret = get_phys_addr(env, address, access_type, s1_mmu_idx,
289
+ result, fi);
290
291
/* If S1 fails or S2 is disabled, return early. */
292
if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
293
- *phys_ptr = ipa;
294
return ret;
295
}
296
297
- ipa_secure = attrs->secure;
298
+ ipa = result->phys;
299
+ ipa_secure = result->attrs.secure;
300
if (arm_is_secure_below_el3(env)) {
301
if (ipa_secure) {
302
- attrs->secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
303
+ result->attrs.secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
304
} else {
305
- attrs->secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
306
+ result->attrs.secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
307
}
308
} else {
309
assert(!ipa_secure);
310
}
311
312
- s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
313
+ s2_mmu_idx = (result->attrs.secure
314
+ ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2);
315
is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
316
317
- /* S1 is done. Now do S2 translation. */
318
+ /*
319
+ * S1 is done, now do S2 translation.
320
+ * Save the stage1 results so that we may merge
321
+ * prot and cacheattrs later.
322
+ */
323
+ s1_prot = result->prot;
324
+ cacheattrs1 = result->cacheattrs;
325
+ memset(result, 0, sizeof(*result));
326
+
327
ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
328
- phys_ptr, attrs, &s2_prot,
329
- page_size, fi, &cacheattrs2);
330
+ &result->phys, &result->attrs,
331
+ &result->prot, &result->page_size,
332
+ fi, &result->cacheattrs);
333
fi->s2addr = ipa;
334
+
335
/* Combine the S1 and S2 perms. */
336
- *prot &= s2_prot;
337
+ result->prot &= s1_prot;
338
339
/* If S2 fails, return early. */
340
if (ret) {
341
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
342
* Outer Write-Back Read-Allocate Write-Allocate.
343
* Do not overwrite Tagged within attrs.
344
*/
345
- if (cacheattrs->attrs != 0xf0) {
346
- cacheattrs->attrs = 0xff;
347
+ if (cacheattrs1.attrs != 0xf0) {
348
+ cacheattrs1.attrs = 0xff;
349
}
350
- cacheattrs->shareability = 0;
351
+ cacheattrs1.shareability = 0;
352
}
353
- *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);
354
+ result->cacheattrs = combine_cacheattrs(env, cacheattrs1,
355
+ result->cacheattrs);
356
357
/* Check if IPA translates to secure or non-secure PA space. */
358
if (arm_is_secure_below_el3(env)) {
359
if (ipa_secure) {
360
- attrs->secure =
361
+ result->attrs.secure =
362
!(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW));
363
} else {
364
- attrs->secure =
365
+ result->attrs.secure =
366
!((env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))
367
|| (env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)));
368
}
369
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
370
* cannot upgrade an non-secure translation regime's attributes
371
* to secure.
372
*/
373
- attrs->secure = regime_is_secure(env, mmu_idx);
374
- attrs->user = regime_is_user(env, mmu_idx);
375
+ result->attrs.secure = regime_is_secure(env, mmu_idx);
376
+ result->attrs.user = regime_is_user(env, mmu_idx);
377
378
/*
379
* Fast Context Switch Extension. This doesn't exist at all in v8.
380
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
381
382
if (arm_feature(env, ARM_FEATURE_PMSA)) {
383
bool ret;
384
- *page_size = TARGET_PAGE_SIZE;
385
+ result->page_size = TARGET_PAGE_SIZE;
386
387
if (arm_feature(env, ARM_FEATURE_V8)) {
388
/* PMSAv8 */
389
ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
390
- phys_ptr, attrs, prot, page_size, fi);
391
+ &result->phys, &result->attrs,
392
+ &result->prot, &result->page_size, fi);
393
} else if (arm_feature(env, ARM_FEATURE_V7)) {
394
/* PMSAv7 */
395
ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
396
- phys_ptr, prot, page_size, fi);
397
+ &result->phys, &result->prot,
398
+ &result->page_size, fi);
399
} else {
400
/* Pre-v7 MPU */
401
ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
402
- phys_ptr, prot, fi);
403
+ &result->phys, &result->prot, fi);
404
}
405
qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
406
" mmu_idx %u -> %s (prot %c%c%c)\n",
407
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
408
(access_type == MMU_DATA_STORE ? "writing" : "execute"),
409
(uint32_t)address, mmu_idx,
410
ret ? "Miss" : "Hit",
411
- *prot & PAGE_READ ? 'r' : '-',
412
- *prot & PAGE_WRITE ? 'w' : '-',
413
- *prot & PAGE_EXEC ? 'x' : '-');
414
+ result->prot & PAGE_READ ? 'r' : '-',
415
+ result->prot & PAGE_WRITE ? 'w' : '-',
416
+ result->prot & PAGE_EXEC ? 'x' : '-');
417
418
return ret;
419
}
420
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
421
address = extract64(address, 0, 52);
422
}
423
}
424
- *phys_ptr = address;
425
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
426
- *page_size = TARGET_PAGE_SIZE;
427
+ result->phys = address;
428
+ result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
429
+ result->page_size = TARGET_PAGE_SIZE;
430
431
/* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
432
hcr = arm_hcr_el2_eff(env);
433
- cacheattrs->shareability = 0;
434
- cacheattrs->is_s2_format = false;
435
+ result->cacheattrs.shareability = 0;
436
+ result->cacheattrs.is_s2_format = false;
437
if (hcr & HCR_DC) {
438
if (hcr & HCR_DCT) {
439
memattr = 0xf0; /* Tagged, Normal, WB, RWA */
440
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
441
} else {
442
memattr = 0x44; /* Normal, NC, No */
443
}
444
- cacheattrs->shareability = 2; /* outer sharable */
445
+ result->cacheattrs.shareability = 2; /* outer sharable */
446
} else {
447
memattr = 0x00; /* Device, nGnRnE */
448
}
449
- cacheattrs->attrs = memattr;
450
+ result->cacheattrs.attrs = memattr;
451
return 0;
452
}
453
454
if (regime_using_lpae_format(env, mmu_idx)) {
455
return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
456
- phys_ptr, attrs, prot, page_size,
457
- fi, cacheattrs);
458
+ &result->phys, &result->attrs,
459
+ &result->prot, &result->page_size,
460
+ fi, &result->cacheattrs);
461
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
462
return get_phys_addr_v6(env, address, access_type, mmu_idx,
463
- phys_ptr, attrs, prot, page_size, fi);
464
+ &result->phys, &result->attrs,
465
+ &result->prot, &result->page_size, fi);
466
} else {
467
return get_phys_addr_v5(env, address, access_type, mmu_idx,
468
- phys_ptr, prot, page_size, fi);
469
+ &result->phys, &result->prot,
470
+ &result->page_size, fi);
471
}
472
}
473
474
@@ -XXX,XX +XXX,XX @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
475
{
476
ARMCPU *cpu = ARM_CPU(cs);
477
CPUARMState *env = &cpu->env;
478
- hwaddr phys_addr;
479
- target_ulong page_size;
480
- int prot;
481
- bool ret;
482
+ GetPhysAddrResult res = {};
483
ARMMMUFaultInfo fi = {};
484
ARMMMUIdx mmu_idx = arm_mmu_idx(env);
485
- ARMCacheAttrs cacheattrs = {};
486
+ bool ret;
487
488
- *attrs = (MemTxAttrs) {};
489
-
490
- ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
491
- attrs, &prot, &page_size, &fi, &cacheattrs);
492
+ ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi);
493
+ *attrs = res.attrs;
494
495
if (ret) {
496
return -1;
497
}
498
- return phys_addr;
499
+ return res.phys;
500
}
501
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
502
index XXXXXXX..XXXXXXX 100644
503
--- a/target/arm/tlb_helper.c
504
+++ b/target/arm/tlb_helper.c
505
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
506
{
507
ARMCPU *cpu = ARM_CPU(cs);
508
ARMMMUFaultInfo fi = {};
509
- hwaddr phys_addr;
510
- target_ulong page_size;
511
- int prot, ret;
512
- MemTxAttrs attrs = {};
513
- ARMCacheAttrs cacheattrs = {};
514
+ GetPhysAddrResult res = {};
515
+ int ret;
516
517
/*
518
* Walk the page table and (if the mapping exists) add the page
519
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
520
*/
521
ret = get_phys_addr(&cpu->env, address, access_type,
522
core_to_arm_mmu_idx(&cpu->env, mmu_idx),
523
- &phys_addr, &attrs, &prot, &page_size,
524
- &fi, &cacheattrs);
525
+ &res, &fi);
526
if (likely(!ret)) {
527
/*
528
* Map a single [sub]page. Regions smaller than our declared
529
* target page size are handled specially, so for those we
530
* pass in the exact addresses.
531
*/
532
- if (page_size >= TARGET_PAGE_SIZE) {
533
- phys_addr &= TARGET_PAGE_MASK;
534
+ if (res.page_size >= TARGET_PAGE_SIZE) {
535
+ res.phys &= TARGET_PAGE_MASK;
536
address &= TARGET_PAGE_MASK;
537
}
538
/* Notice and record tagged memory. */
539
- if (cpu_isar_feature(aa64_mte, cpu) && cacheattrs.attrs == 0xf0) {
540
- arm_tlb_mte_tagged(&attrs) = true;
541
+ if (cpu_isar_feature(aa64_mte, cpu) && res.cacheattrs.attrs == 0xf0) {
542
+ arm_tlb_mte_tagged(&res.attrs) = true;
543
}
544
545
- tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
546
- prot, mmu_idx, page_size);
547
+ tlb_set_page_with_attrs(cs, address, res.phys, res.attrs,
548
+ res.prot, mmu_idx, res.page_size);
549
return true;
550
} else if (probe) {
551
return false;
552
--
553
2.25.1
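The shape of the refactoring above, reduced to a stand-alone sketch (hypothetical Result type and lookup() function; the real GetPhysAddrResult fields are in the internals.h hunk): instead of threading several out-pointers through every caller, the function fills in one zero-initialized result struct, and callers read only the fields they need.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical analogue of GetPhysAddrResult: fields are valid on success. */
typedef struct Result {
    uint64_t phys;
    uint64_t page_size;
    int prot;
} Result;

/* Returns true on failure, false on success, mirroring the convention
 * visible in the hunks above; on success it fills in *res. */
static bool lookup(uint64_t addr, Result *res)
{
    res->phys = addr & ~0xfffULL;
    res->page_size = 0x1000;
    res->prot = 0x7;
    return false;
}

int main(void)
{
    Result res = {0};     /* callers zero-initialize, as in the patch */

    if (!lookup(0x1234, &res)) {
        printf("phys=0x%" PRIx64 " size=0x%" PRIx64 " prot=%d\n",
               res.phys, res.page_size, res.prot);
    }
    return 0;
}
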
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220822152741.1617527-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 69 ++++++++++++++++++------------------------------
 1 file changed, 26 insertions(+), 43 deletions(-)
11
11
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20210628135835.6690-17-peter.maydell@linaro.org
15
---
16
target/arm/helper-mve.h | 6 +++
17
target/arm/translate.h | 1 +
18
target/arm/t32.decode | 16 +++++--
19
target/arm/mve_helper.c | 93 +++++++++++++++++++++++++++++++++++++++++
20
target/arm/translate.c | 69 ++++++++++++++++++++++++++++++
21
5 files changed, 182 insertions(+), 3 deletions(-)
22
23
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
24
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/helper-mve.h
14
--- a/target/arm/ptw.c
26
+++ b/target/arm/helper-mve.h
15
+++ b/target/arm/ptw.c
27
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
29
DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
30
31
+DEF_HELPER_FLAGS_3(mve_sshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
32
+DEF_HELPER_FLAGS_3(mve_ushll, TCG_CALL_NO_RWG, i64, env, i64, i32)
33
DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
34
DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
35
+DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
36
+DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
37
+DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
38
+DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
39
diff --git a/target/arm/translate.h b/target/arm/translate.h
40
index XXXXXXX..XXXXXXX 100644
41
--- a/target/arm/translate.h
42
+++ b/target/arm/translate.h
43
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
44
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
45
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
46
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
47
+typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
48
49
/**
50
* arm_tbflags_from_tb:
51
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
52
index XXXXXXX..XXXXXXX 100644
53
--- a/target/arm/t32.decode
54
+++ b/target/arm/t32.decode
55
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@
56
&mcrr !extern cp opc1 crm rt rt2
17
57
18
static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
58
&mve_shl_ri rdalo rdahi shim
19
MMUAccessType access_type, ARMMMUIdx mmu_idx,
59
+&mve_shl_rr rdalo rdahi rm
20
- bool s1_is_el0, hwaddr *phys_ptr,
60
21
- MemTxAttrs *txattrs, int *prot,
61
# rdahi: bits [3:1] from insn, bit 0 is 1
22
- target_ulong *page_size_ptr,
62
# rdalo: bits [3:1] from insn, bit 0 is 0
23
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
63
@@ -XXX,XX +XXX,XX @@
24
+ bool s1_is_el0, GetPhysAddrResult *result,
64
25
+ ARMMMUFaultInfo *fi)
65
@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
26
__attribute__((nonnull));
66
&mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
27
67
+@mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
28
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
68
+ &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
29
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
69
70
{
30
{
71
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
31
if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
72
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
32
!regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
73
URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
33
- target_ulong s2size;
74
SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
34
- hwaddr s2pa;
75
SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
35
- int s2prot;
76
+
36
- int ret;
77
+ LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
37
ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
78
+ ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
38
: ARMMMUIdx_Stage2;
79
+ UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
39
- ARMCacheAttrs cacheattrs = {};
80
+ SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
40
- MemTxAttrs txattrs = {};
81
+ UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
41
+ GetPhysAddrResult s2 = {};
82
+ SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
42
+ int ret;
83
]
43
84
44
ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
85
MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
45
- &s2pa, &txattrs, &s2prot, &s2size, fi,
86
ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
46
- &cacheattrs);
87
+
47
+ &s2, fi);
88
+ # v8.1M CSEL and friends
48
if (ret) {
89
+ CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
49
assert(fi->type != ARMFault_None);
50
fi->s2addr = addr;
51
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
52
return ~0;
53
}
54
if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
55
- ptw_attrs_are_device(env, cacheattrs)) {
56
+ ptw_attrs_are_device(env, s2.cacheattrs)) {
57
/*
58
* PTW set and S1 walk touched S2 Device memory:
59
* generate Permission fault.
60
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
61
assert(!*is_secure);
62
}
63
64
- addr = s2pa;
65
+ addr = s2.phys;
66
}
67
return addr;
90
}
68
}
69
@@ -XXX,XX +XXX,XX @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
70
* table walk), must be true if this is stage 2 of a stage 1+2
71
* walk for an EL0 access. If @mmu_idx is anything else,
72
* @s1_is_el0 is ignored.
73
- * @phys_ptr: set to the physical address corresponding to the virtual address
74
- * @attrs: set to the memory transaction attributes to use
75
- * @prot: set to the permissions for the page containing phys_ptr
76
- * @page_size_ptr: set to the size of the page containing phys_ptr
77
+ * @result: set on translation success,
78
* @fi: set to fault info if the translation fails
79
- * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
80
*/
81
static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
82
MMUAccessType access_type, ARMMMUIdx mmu_idx,
83
- bool s1_is_el0, hwaddr *phys_ptr,
84
- MemTxAttrs *txattrs, int *prot,
85
- target_ulong *page_size_ptr,
86
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
87
+ bool s1_is_el0, GetPhysAddrResult *result,
88
+ ARMMMUFaultInfo *fi)
91
{
89
{
92
MVN_rxri 1110101 0011 . 1111 0 ... .... .... .... @s_rxr_shi
90
ARMCPU *cpu = env_archcpu(env);
93
@@ -XXX,XX +XXX,XX @@ SBC_rrri 1110101 1011 . .... 0 ... .... .... .... @s_rrr_shi
91
/* Read an LPAE long-descriptor translation table. */
94
}
92
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
95
RSB_rrri 1110101 1110 . .... 0 ... .... .... .... @s_rrr_shi
93
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
96
94
ns = mmu_idx == ARMMMUIdx_Stage2;
97
-# v8.1M CSEL and friends
95
xn = extract32(attrs, 11, 2);
98
-CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
96
- *prot = get_S2prot(env, ap, xn, s1_is_el0);
99
-
97
+ result->prot = get_S2prot(env, ap, xn, s1_is_el0);
100
# Data-processing (register-shifted register)
98
} else {
101
99
ns = extract32(attrs, 3, 1);
102
MOV_rxrr 1111 1010 0 shty:2 s:1 rm:4 1111 rd:4 0000 rs:4 \
100
xn = extract32(attrs, 12, 1);
103
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
101
pxn = extract32(attrs, 11, 1);
104
index XXXXXXX..XXXXXXX 100644
102
- *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
105
--- a/target/arm/mve_helper.c
103
+ result->prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
106
+++ b/target/arm/mve_helper.c
104
}
107
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
105
108
return rdm;
106
fault_type = ARMFault_Permission;
109
}
107
- if (!(*prot & (1 << access_type))) {
110
108
+ if (!(result->prot & (1 << access_type))) {
111
+uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
109
goto do_fault;
112
+{
110
}
113
+ return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
111
114
+}
112
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
115
+
113
* the CPU doesn't support TZ or this is a non-secure translation
116
+uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
114
* regime, because the attribute will already be non-secure.
117
+{
115
*/
118
+ return do_uqrshl_d(n, (int8_t)shift, false, NULL);
116
- txattrs->secure = false;
119
+}
117
+ result->attrs.secure = false;
120
+
118
}
121
uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
119
/* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
122
{
120
if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
123
return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
121
- arm_tlb_bti_gp(txattrs) = true;
124
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
122
+ arm_tlb_bti_gp(&result->attrs) = true;
125
{
123
}
126
return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
124
127
}
125
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
128
+
126
- cacheattrs->is_s2_format = true;
129
+uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
127
- cacheattrs->attrs = extract32(attrs, 0, 4);
130
+{
128
+ result->cacheattrs.is_s2_format = true;
131
+ return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
129
+ result->cacheattrs.attrs = extract32(attrs, 0, 4);
132
+}
130
} else {
133
+
131
/* Index into MAIR registers for cache attributes */
134
+uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
132
uint8_t attrindx = extract32(attrs, 0, 3);
135
+{
133
uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
136
+ return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
134
assert(attrindx <= 7);
137
+}
135
- cacheattrs->is_s2_format = false;
138
+
136
- cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
139
+/* Operate on 64-bit values, but saturate at 48 bits */
137
+ result->cacheattrs.is_s2_format = false;
140
+static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
138
+ result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
141
+ bool round, uint32_t *sat)
139
}
142
+{
140
143
+ if (shift <= -48) {
141
/*
144
+ /* Rounding the sign bit always produces 0. */
142
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
145
+ if (round) {
143
* that case comes from TCR_ELx, which we extracted earlier.
146
+ return 0;
144
*/
147
+ }
145
if (param.ds) {
148
+ return src >> 63;
146
- cacheattrs->shareability = param.sh;
149
+ } else if (shift < 0) {
147
+ result->cacheattrs.shareability = param.sh;
150
+ if (round) {
148
} else {
151
+ src >>= -shift - 1;
149
- cacheattrs->shareability = extract32(attrs, 6, 2);
152
+ return (src >> 1) + (src & 1);
150
+ result->cacheattrs.shareability = extract32(attrs, 6, 2);
153
+ }
151
}
154
+ return src >> -shift;
152
155
+ } else if (shift < 48) {
153
- *phys_ptr = descaddr;
156
+ int64_t val = src << shift;
154
- *page_size_ptr = page_size;
157
+ int64_t extval = sextract64(val, 0, 48);
155
+ result->phys = descaddr;
158
+ if (!sat || val == extval) {
156
+ result->page_size = page_size;
159
+ return extval;
157
return false;
160
+ }
158
161
+ } else if (!sat || src == 0) {
159
do_fault:
162
+ return 0;
160
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
163
+ }
161
cacheattrs1 = result->cacheattrs;
164
+
162
memset(result, 0, sizeof(*result));
165
+ *sat = 1;
163
166
+ return (1ULL << 47) - (src >= 0);
164
- ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
167
+}
165
- &result->phys, &result->attrs,
168
+
166
- &result->prot, &result->page_size,
169
+/* Operate on 64-bit values, but saturate at 48 bits */
167
- fi, &result->cacheattrs);
170
+static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
168
+ ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx,
171
+ bool round, uint32_t *sat)
169
+ is_el0, result, fi);
172
+{
170
fi->s2addr = ipa;
173
+ uint64_t val, extval;
171
174
+
172
/* Combine the S1 and S2 perms. */
175
+ if (shift <= -(48 + round)) {
173
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
176
+ return 0;
174
177
+ } else if (shift < 0) {
175
if (regime_using_lpae_format(env, mmu_idx)) {
178
+ if (round) {
176
return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
179
+ val = src >> (-shift - 1);
177
- &result->phys, &result->attrs,
180
+ val = (val >> 1) + (val & 1);
178
- &result->prot, &result->page_size,
181
+ } else {
179
- fi, &result->cacheattrs);
182
+ val = src >> -shift;
180
+ result, fi);
183
+ }
181
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
184
+ extval = extract64(val, 0, 48);
182
return get_phys_addr_v6(env, address, access_type, mmu_idx,
185
+ if (!sat || val == extval) {
183
&result->phys, &result->attrs,
186
+ return extval;
187
+ }
188
+ } else if (shift < 48) {
189
+ uint64_t val = src << shift;
190
+ uint64_t extval = extract64(val, 0, 48);
191
+ if (!sat || val == extval) {
192
+ return extval;
193
+ }
194
+ } else if (!sat || src == 0) {
195
+ return 0;
196
+ }
197
+
198
+ *sat = 1;
199
+ return MAKE_64BIT_MASK(0, 48);
200
+}
201
+
202
+uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
203
+{
204
+ return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
205
+}
206
+
207
+uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
208
+{
209
+ return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
210
+}
211
diff --git a/target/arm/translate.c b/target/arm/translate.c
212
index XXXXXXX..XXXXXXX 100644
213
--- a/target/arm/translate.c
214
+++ b/target/arm/translate.c
215
@@ -XXX,XX +XXX,XX @@ static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
216
return do_mve_shl_ri(s, a, gen_urshr64_i64);
217
}
218
219
+static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
220
+{
221
+ TCGv_i64 rda;
222
+ TCGv_i32 rdalo, rdahi;
223
+
224
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
225
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
226
+ return false;
227
+ }
228
+ if (a->rdahi == 15) {
229
+ /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
230
+ return false;
231
+ }
232
+ if (!dc_isar_feature(aa32_mve, s) ||
233
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
234
+ a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
235
+ a->rm == a->rdahi || a->rm == a->rdalo) {
236
+ /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
237
+ unallocated_encoding(s);
238
+ return true;
239
+ }
240
+
241
+ rda = tcg_temp_new_i64();
242
+ rdalo = load_reg(s, a->rdalo);
243
+ rdahi = load_reg(s, a->rdahi);
244
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
245
+
246
+ /* The helper takes care of the sign-extension of the low 8 bits of Rm */
247
+ fn(rda, cpu_env, rda, cpu_R[a->rm]);
248
+
249
+ tcg_gen_extrl_i64_i32(rdalo, rda);
250
+ tcg_gen_extrh_i64_i32(rdahi, rda);
251
+ store_reg(s, a->rdalo, rdalo);
252
+ store_reg(s, a->rdahi, rdahi);
253
+ tcg_temp_free_i64(rda);
254
+
255
+ return true;
256
+}
257
+
258
+static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
259
+{
260
+ return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
261
+}
262
+
263
+static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
264
+{
265
+ return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
266
+}
267
+
268
+static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
269
+{
270
+ return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
271
+}
272
+
273
+static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
274
+{
275
+ return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
276
+}
277
+
278
+static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
279
+{
280
+ return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
281
+}
282
+
283
+static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
284
+{
285
+ return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
286
+}
287
+
288
/*
289
* Multiply and multiply accumulate
290
*/
291
--
184
--
292
2.20.1
185
2.25.1
293
186
294
187
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220822152741.1617527-5-richard.henderson@linaro.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
---
9
target/arm/ptw.c | 30 ++++++++++++++----------------
10
1 file changed, 14 insertions(+), 16 deletions(-)
11
12
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/ptw.c
15
+++ b/target/arm/ptw.c
16
@@ -XXX,XX +XXX,XX @@ do_fault:
17
18
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
19
MMUAccessType access_type, ARMMMUIdx mmu_idx,
20
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
21
- target_ulong *page_size, ARMMMUFaultInfo *fi)
22
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
23
{
24
ARMCPU *cpu = env_archcpu(env);
25
int level = 1;
26
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
27
phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
28
phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
29
phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
30
- *page_size = 0x1000000;
31
+ result->page_size = 0x1000000;
32
} else {
33
/* Section. */
34
phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
35
- *page_size = 0x100000;
36
+ result->page_size = 0x100000;
37
}
38
ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
39
xn = desc & (1 << 4);
40
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
41
case 1: /* 64k page. */
42
phys_addr = (desc & 0xffff0000) | (address & 0xffff);
43
xn = desc & (1 << 15);
44
- *page_size = 0x10000;
45
+ result->page_size = 0x10000;
46
break;
47
case 2: case 3: /* 4k page. */
48
phys_addr = (desc & 0xfffff000) | (address & 0xfff);
49
xn = desc & 1;
50
- *page_size = 0x1000;
51
+ result->page_size = 0x1000;
52
break;
53
default:
54
/* Never happens, but compiler isn't smart enough to tell. */
55
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
56
}
57
}
58
if (domain_prot == 3) {
59
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
60
+ result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
61
} else {
62
if (pxn && !regime_is_user(env, mmu_idx)) {
63
xn = 1;
64
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
65
fi->type = ARMFault_AccessFlag;
66
goto do_fault;
67
}
68
- *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
69
+ result->prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
70
} else {
71
- *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
72
+ result->prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
73
}
74
- if (*prot && !xn) {
75
- *prot |= PAGE_EXEC;
76
+ if (result->prot && !xn) {
77
+ result->prot |= PAGE_EXEC;
78
}
79
- if (!(*prot & (1 << access_type))) {
80
+ if (!(result->prot & (1 << access_type))) {
81
/* Access permission fault. */
82
fi->type = ARMFault_Permission;
83
goto do_fault;
84
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
85
* the CPU doesn't support TZ or this is a non-secure translation
86
* regime, because the attribute will already be non-secure.
87
*/
88
- attrs->secure = false;
89
+ result->attrs.secure = false;
90
}
91
- *phys_ptr = phys_addr;
92
+ result->phys = phys_addr;
93
return false;
94
do_fault:
95
fi->domain = domain;
96
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
97
result, fi);
98
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
99
return get_phys_addr_v6(env, address, access_type, mmu_idx,
100
- &result->phys, &result->attrs,
101
- &result->prot, &result->page_size, fi);
102
+ result, fi);
103
} else {
104
return get_phys_addr_v5(env, address, access_type, mmu_idx,
105
&result->phys, &result->prot,
106
--
107
2.25.1
108
109
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220822152741.1617527-6-richard.henderson@linaro.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
---
9
target/arm/ptw.c | 25 +++++++++++--------------
10
1 file changed, 11 insertions(+), 14 deletions(-)
11
12
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/ptw.c
15
+++ b/target/arm/ptw.c
16
@@ -XXX,XX +XXX,XX @@ static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
17
18
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
19
MMUAccessType access_type, ARMMMUIdx mmu_idx,
20
- hwaddr *phys_ptr, int *prot,
21
- target_ulong *page_size,
22
- ARMMMUFaultInfo *fi)
23
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
24
{
25
int level = 1;
26
uint32_t table;
27
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
28
/* 1Mb section. */
29
phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
30
ap = (desc >> 10) & 3;
31
- *page_size = 1024 * 1024;
32
+ result->page_size = 1024 * 1024;
33
} else {
34
/* Lookup l2 entry. */
35
if (type == 1) {
36
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
37
case 1: /* 64k page. */
38
phys_addr = (desc & 0xffff0000) | (address & 0xffff);
39
ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
40
- *page_size = 0x10000;
41
+ result->page_size = 0x10000;
42
break;
43
case 2: /* 4k page. */
44
phys_addr = (desc & 0xfffff000) | (address & 0xfff);
45
ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
46
- *page_size = 0x1000;
47
+ result->page_size = 0x1000;
48
break;
49
case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
50
if (type == 1) {
51
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
52
if (arm_feature(env, ARM_FEATURE_XSCALE)
53
|| arm_feature(env, ARM_FEATURE_V6)) {
54
phys_addr = (desc & 0xfffff000) | (address & 0xfff);
55
- *page_size = 0x1000;
56
+ result->page_size = 0x1000;
57
} else {
58
/*
59
* UNPREDICTABLE in ARMv5; we choose to take a
60
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
61
}
62
} else {
63
phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
64
- *page_size = 0x400;
65
+ result->page_size = 0x400;
66
}
67
ap = (desc >> 4) & 3;
68
break;
69
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
70
g_assert_not_reached();
71
}
72
}
73
- *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
74
- *prot |= *prot ? PAGE_EXEC : 0;
75
- if (!(*prot & (1 << access_type))) {
76
+ result->prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
77
+ result->prot |= result->prot ? PAGE_EXEC : 0;
78
+ if (!(result->prot & (1 << access_type))) {
79
/* Access permission fault. */
80
fi->type = ARMFault_Permission;
81
goto do_fault;
82
}
83
- *phys_ptr = phys_addr;
84
+ result->phys = phys_addr;
85
return false;
86
do_fault:
87
fi->domain = domain;
88
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
89
result, fi);
90
} else {
91
return get_phys_addr_v5(env, address, access_type, mmu_idx,
92
- &result->phys, &result->prot,
93
- &result->page_size, fi);
94
+ result, fi);
95
}
96
}
97
98
--
99
2.25.1
100
101
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220822152741.1617527-7-richard.henderson@linaro.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
---
9
target/arm/ptw.c | 24 ++++++++++++------------
10
1 file changed, 12 insertions(+), 12 deletions(-)
11
12
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/ptw.c
15
+++ b/target/arm/ptw.c
16
@@ -XXX,XX +XXX,XX @@ do_fault:
17
18
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
19
MMUAccessType access_type, ARMMMUIdx mmu_idx,
20
- hwaddr *phys_ptr, int *prot,
21
+ GetPhysAddrResult *result,
22
ARMMMUFaultInfo *fi)
23
{
24
int n;
25
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
26
27
if (regime_translation_disabled(env, mmu_idx)) {
28
/* MPU disabled. */
29
- *phys_ptr = address;
30
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
31
+ result->phys = address;
32
+ result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
33
return false;
34
}
35
36
- *phys_ptr = address;
37
+ result->phys = address;
38
for (n = 7; n >= 0; n--) {
39
base = env->cp15.c6_region[n];
40
if ((base & 1) == 0) {
41
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
42
fi->level = 1;
43
return true;
44
}
45
- *prot = PAGE_READ | PAGE_WRITE;
46
+ result->prot = PAGE_READ | PAGE_WRITE;
47
break;
48
case 2:
49
- *prot = PAGE_READ;
50
+ result->prot = PAGE_READ;
51
if (!is_user) {
52
- *prot |= PAGE_WRITE;
53
+ result->prot |= PAGE_WRITE;
54
}
55
break;
56
case 3:
57
- *prot = PAGE_READ | PAGE_WRITE;
58
+ result->prot = PAGE_READ | PAGE_WRITE;
59
break;
60
case 5:
61
if (is_user) {
62
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
63
fi->level = 1;
64
return true;
65
}
66
- *prot = PAGE_READ;
67
+ result->prot = PAGE_READ;
68
break;
69
case 6:
70
- *prot = PAGE_READ;
71
+ result->prot = PAGE_READ;
72
break;
73
default:
74
/* Bad permission. */
75
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
76
fi->level = 1;
77
return true;
78
}
79
- *prot |= PAGE_EXEC;
80
+ result->prot |= PAGE_EXEC;
81
return false;
82
}
83
84
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
85
} else {
86
/* Pre-v7 MPU */
87
ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
88
- &result->phys, &result->prot, fi);
89
+ result, fi);
90
}
91
qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
92
" mmu_idx %u -> %s (prot %c%c%c)\n",
93
--
94
2.25.1
95
96
diff view generated by jsdifflib
1
Implement the MVE VADDLV insn; this is similar to VADDV, except
1
From: Richard Henderson <richard.henderson@linaro.org>
2
that it accumulates 32-bit elements into a 64-bit accumulator
3
stored in a pair of general-purpose registers.
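For illustration only, here is a standalone C sketch of what an accumulating VADDLVA.S32 computes over one full vector when every beat executes and every lane is predicated on; the function name and the plain-array view of the Q register are invented for the sketch, and the real beat-wise handling is in the helper/translate code below.

    #include <stdint.h>

    /* Hypothetical reference model, not the QEMU helper: add four signed
     * 32-bit lanes into the 64-bit accumulator held as RdaHi:RdaLo.
     */
    static uint64_t vaddlva_s32_ref(const int32_t qm[4],
                                    uint32_t rdahi, uint32_t rdalo)
    {
        int64_t acc = (int64_t)(((uint64_t)rdahi << 32) | rdalo);

        for (int e = 0; e < 4; e++) {
            acc += qm[e];        /* each lane is sign-extended to 64 bits */
        }
        return (uint64_t)acc;    /* high half -> RdaHi, low half -> RdaLo */
    }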
4
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220822152741.1617527-8-richard.henderson@linaro.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210628135835.6690-15-peter.maydell@linaro.org
8
---
8
---
9
target/arm/helper-mve.h | 3 ++
9
target/arm/ptw.c | 36 +++++++++++++++++-------------------
10
target/arm/mve.decode | 6 +++-
10
1 file changed, 17 insertions(+), 19 deletions(-)
11
target/arm/mve_helper.c | 19 ++++++++++++
12
target/arm/translate-mve.c | 63 ++++++++++++++++++++++++++++++++++++++
13
4 files changed, 90 insertions(+), 1 deletion(-)
14
11
15
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
12
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
16
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-mve.h
14
--- a/target/arm/ptw.c
18
+++ b/target/arm/helper-mve.h
15
+++ b/target/arm/ptw.c
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
16
@@ -XXX,XX +XXX,XX @@ static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
20
DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
17
21
DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
18
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
22
19
MMUAccessType access_type, ARMMMUIdx mmu_idx,
23
+DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64)
20
- hwaddr *phys_ptr, int *prot,
24
+DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64)
21
- target_ulong *page_size,
25
+
22
+ GetPhysAddrResult *result,
26
DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
23
ARMMMUFaultInfo *fi)
27
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
24
{
28
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
25
ARMCPU *cpu = env_archcpu(env);
29
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
26
int n;
30
index XXXXXXX..XXXXXXX 100644
27
bool is_user = regime_is_user(env, mmu_idx);
31
--- a/target/arm/mve.decode
28
32
+++ b/target/arm/mve.decode
29
- *phys_ptr = address;
33
@@ -XXX,XX +XXX,XX @@ VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
30
- *page_size = TARGET_PAGE_SIZE;
34
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
31
- *prot = 0;
35
32
+ result->phys = address;
36
# Vector add across vector
33
+ result->page_size = TARGET_PAGE_SIZE;
37
-VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
34
+ result->prot = 0;
38
+{
35
39
+ VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
36
if (regime_translation_disabled(env, mmu_idx) ||
40
+ VADDLV 111 u:1 1110 1 ... 1001 ... 0 1111 00 a:1 0 qm:3 0 \
37
m_is_ppb_region(env, address)) {
41
+ rdahi=%rdahi rdalo=%rdalo
38
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
42
+}
39
* which always does a direct read using address_space_ldl(), rather
43
40
* than going via this function, so we don't need to check that here.
44
# Predicate operations
41
*/
45
%mask_22_13 22:1 13:3
42
- get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
46
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
43
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot);
47
index XXXXXXX..XXXXXXX 100644
44
} else { /* MPU enabled */
48
--- a/target/arm/mve_helper.c
45
for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
49
+++ b/target/arm/mve_helper.c
46
/* region search */
50
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvub, 1, uint8_t)
47
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
51
DO_VADDV(vaddvuh, 2, uint16_t)
48
if (ranges_overlap(base, rmask,
52
DO_VADDV(vaddvuw, 4, uint32_t)
49
address & TARGET_PAGE_MASK,
53
50
TARGET_PAGE_SIZE)) {
54
+#define DO_VADDLV(OP, TYPE, LTYPE) \
51
- *page_size = 1;
55
+ uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
52
+ result->page_size = 1;
56
+ uint64_t ra) \
53
}
57
+ { \
54
continue;
58
+ uint16_t mask = mve_element_mask(env); \
55
}
59
+ unsigned e; \
56
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
60
+ TYPE *m = vm; \
57
continue;
61
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
58
}
62
+ if (mask & 1) { \
59
if (rsize < TARGET_PAGE_BITS) {
63
+ ra += (LTYPE)m[H4(e)]; \
60
- *page_size = 1 << rsize;
64
+ } \
61
+ result->page_size = 1 << rsize;
65
+ } \
62
}
66
+ mve_advance_vpt(env); \
63
break;
67
+ return ra; \
64
}
68
+ } \
65
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
69
+
66
fi->type = ARMFault_Background;
70
+DO_VADDLV(vaddlv_s, int32_t, int64_t)
67
return true;
71
+DO_VADDLV(vaddlv_u, uint32_t, uint64_t)
68
}
72
+
69
- get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
73
/* Shifts by immediate */
70
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot);
74
#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
71
} else { /* a MPU hit! */
75
void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
72
uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
76
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
73
uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
77
index XXXXXXX..XXXXXXX 100644
74
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
78
--- a/target/arm/translate-mve.c
75
case 5:
79
+++ b/target/arm/translate-mve.c
76
break; /* no access */
80
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
77
case 3:
81
return true;
78
- *prot |= PAGE_WRITE;
79
+ result->prot |= PAGE_WRITE;
80
/* fall through */
81
case 2:
82
case 6:
83
- *prot |= PAGE_READ | PAGE_EXEC;
84
+ result->prot |= PAGE_READ | PAGE_EXEC;
85
break;
86
case 7:
87
/* for v7M, same as 6; for R profile a reserved value */
88
if (arm_feature(env, ARM_FEATURE_M)) {
89
- *prot |= PAGE_READ | PAGE_EXEC;
90
+ result->prot |= PAGE_READ | PAGE_EXEC;
91
break;
92
}
93
/* fall through */
94
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
95
case 1:
96
case 2:
97
case 3:
98
- *prot |= PAGE_WRITE;
99
+ result->prot |= PAGE_WRITE;
100
/* fall through */
101
case 5:
102
case 6:
103
- *prot |= PAGE_READ | PAGE_EXEC;
104
+ result->prot |= PAGE_READ | PAGE_EXEC;
105
break;
106
case 7:
107
/* for v7M, same as 6; for R profile a reserved value */
108
if (arm_feature(env, ARM_FEATURE_M)) {
109
- *prot |= PAGE_READ | PAGE_EXEC;
110
+ result->prot |= PAGE_READ | PAGE_EXEC;
111
break;
112
}
113
/* fall through */
114
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
115
116
/* execute never */
117
if (xn) {
118
- *prot &= ~PAGE_EXEC;
119
+ result->prot &= ~PAGE_EXEC;
120
}
121
}
122
}
123
124
fi->type = ARMFault_Permission;
125
fi->level = 1;
126
- return !(*prot & (1 << access_type));
127
+ return !(result->prot & (1 << access_type));
82
}
128
}
83
129
84
+static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a)
130
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
85
+{
131
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
86
+ /*
132
} else if (arm_feature(env, ARM_FEATURE_V7)) {
87
+ * Vector Add Long Across Vector: accumulate the 32-bit
133
/* PMSAv7 */
88
+ * elements of the vector into a 64-bit result stored in
134
ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
89
+ * a pair of general-purpose registers.
135
- &result->phys, &result->prot,
90
+ * No need to check Qm's bank: it is only 3 bits in decode.
136
- &result->page_size, fi);
91
+ */
137
+ result, fi);
92
+ TCGv_ptr qm;
138
} else {
93
+ TCGv_i64 rda;
139
/* Pre-v7 MPU */
94
+ TCGv_i32 rdalo, rdahi;
140
ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
95
+
96
+ if (!dc_isar_feature(aa32_mve, s)) {
97
+ return false;
98
+ }
99
+ /*
100
+ * rdahi == 13 is UNPREDICTABLE; rdahi == 15 is a related
101
+ * encoding; rdalo always has bit 0 clear so cannot be 13 or 15.
102
+ */
103
+ if (a->rdahi == 13 || a->rdahi == 15) {
104
+ return false;
105
+ }
106
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
107
+ return true;
108
+ }
109
+
110
+ /*
111
+ * This insn is subject to beat-wise execution. Partial execution
112
+ * of an A=0 (no-accumulate) insn which does not execute the first
113
+ * beat must start with the current value of RdaHi:RdaLo, not zero.
114
+ */
115
+ if (a->a || mve_skip_first_beat(s)) {
116
+ /* Accumulate input from RdaHi:RdaLo */
117
+ rda = tcg_temp_new_i64();
118
+ rdalo = load_reg(s, a->rdalo);
119
+ rdahi = load_reg(s, a->rdahi);
120
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
121
+ tcg_temp_free_i32(rdalo);
122
+ tcg_temp_free_i32(rdahi);
123
+ } else {
124
+ /* Accumulate starting at zero */
125
+ rda = tcg_const_i64(0);
126
+ }
127
+
128
+ qm = mve_qreg_ptr(a->qm);
129
+ if (a->u) {
130
+ gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda);
131
+ } else {
132
+ gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda);
133
+ }
134
+ tcg_temp_free_ptr(qm);
135
+
136
+ rdalo = tcg_temp_new_i32();
137
+ rdahi = tcg_temp_new_i32();
138
+ tcg_gen_extrl_i64_i32(rdalo, rda);
139
+ tcg_gen_extrh_i64_i32(rdahi, rda);
140
+ store_reg(s, a->rdalo, rdalo);
141
+ store_reg(s, a->rdahi, rdahi);
142
+ tcg_temp_free_i64(rda);
143
+ mve_update_eci(s);
144
+ return true;
145
+}
146
+
147
static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
148
{
149
TCGv_ptr qd;
150
--
141
--
151
2.20.1
142
2.25.1
152
143
153
144
diff view generated by jsdifflib
1
From: Nolan Leake <nolan@sigbus.net>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This is just enough to make reboot and poweroff work. Works for
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Linux, U-Boot, and the Arm Trusted Firmware. Not tested, but should
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
work for Plan 9 and bare-metal/hobby OSes, since they seem to generally
5
Message-id: 20220822152741.1617527-9-richard.henderson@linaro.org
6
do what Linux does for reset.
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
8
The watchdog timer functionality is not yet implemented.
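For context, this is roughly how a guest drives the block to power off or reboot. The register offsets and magic values match the model added below; the 0x3f100000 base address is an assumption (the raspi2 mapping of the PM block) and the function names are invented for the sketch.

    #include <stdint.h>

    #define PM_BASE        0x3f100000u   /* assumed raspi2 mapping */
    #define PM_RSTC        (PM_BASE + 0x1c)
    #define PM_RSTS        (PM_BASE + 0x20)
    #define PM_PASSWORD    0x5a000000u
    #define PM_RSTC_RESET  0x00000020u
    #define PM_RSTS_HALT   0x00000555u   /* "partition 63" means power off */

    static inline void mmio_write32(uintptr_t addr, uint32_t val)
    {
        *(volatile uint32_t *)addr = val;
    }

    void machine_poweroff(void)
    {
        mmio_write32(PM_RSTS, PM_PASSWORD | PM_RSTS_HALT);
        mmio_write32(PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);  /* -> shutdown */
    }

    void machine_reboot(void)
    {
        mmio_write32(PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);  /* -> reset */
    }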
9
10
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/64
11
Signed-off-by: Nolan Leake <nolan@sigbus.net>
12
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
13
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
14
Message-id: 20210625210209.1870217-1-nolan@sigbus.net
15
[PMM: tweaked commit title; fixed region size to 0x200;
16
moved header file to include/]
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
8
---
19
include/hw/arm/bcm2835_peripherals.h | 3 +-
9
target/arm/ptw.c | 28 ++++++++++++++--------------
20
include/hw/misc/bcm2835_powermgt.h | 29 +++++
10
1 file changed, 14 insertions(+), 14 deletions(-)
21
hw/arm/bcm2835_peripherals.c | 13 ++-
22
hw/misc/bcm2835_powermgt.c | 160 +++++++++++++++++++++++++++
23
hw/misc/meson.build | 1 +
24
5 files changed, 204 insertions(+), 2 deletions(-)
25
create mode 100644 include/hw/misc/bcm2835_powermgt.h
26
create mode 100644 hw/misc/bcm2835_powermgt.c
27
11
28
diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
12
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
29
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
30
--- a/include/hw/arm/bcm2835_peripherals.h
14
--- a/target/arm/ptw.c
31
+++ b/include/hw/arm/bcm2835_peripherals.h
15
+++ b/target/arm/ptw.c
32
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ void v8m_security_lookup(CPUARMState *env, uint32_t address,
33
#include "hw/misc/bcm2835_mphi.h"
17
34
#include "hw/misc/bcm2835_thermal.h"
18
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
35
#include "hw/misc/bcm2835_cprman.h"
19
MMUAccessType access_type, ARMMMUIdx mmu_idx,
36
+#include "hw/misc/bcm2835_powermgt.h"
20
- hwaddr *phys_ptr, MemTxAttrs *txattrs,
37
#include "hw/sd/sdhci.h"
21
- int *prot, target_ulong *page_size,
38
#include "hw/sd/bcm2835_sdhost.h"
22
+ GetPhysAddrResult *result,
39
#include "hw/gpio/bcm2835_gpio.h"
23
ARMMMUFaultInfo *fi)
40
@@ -XXX,XX +XXX,XX @@ struct BCM2835PeripheralState {
24
{
41
BCM2835MphiState mphi;
25
uint32_t secure = regime_is_secure(env, mmu_idx);
42
UnimplementedDeviceState txp;
26
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
43
UnimplementedDeviceState armtmr;
27
} else {
44
- UnimplementedDeviceState powermgt;
28
fi->type = ARMFault_QEMU_SFault;
45
+ BCM2835PowerMgtState powermgt;
29
}
46
BCM2835CprmanState cprman;
30
- *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
47
PL011State uart0;
31
- *phys_ptr = address;
48
BCM2835AuxState aux;
32
- *prot = 0;
49
diff --git a/include/hw/misc/bcm2835_powermgt.h b/include/hw/misc/bcm2835_powermgt.h
33
+ result->page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
50
new file mode 100644
34
+ result->phys = address;
51
index XXXXXXX..XXXXXXX
35
+ result->prot = 0;
52
--- /dev/null
36
return true;
53
+++ b/include/hw/misc/bcm2835_powermgt.h
37
}
54
@@ -XXX,XX +XXX,XX @@
38
} else {
55
+/*
39
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
56
+ * BCM2835 Power Management emulation
40
* might downgrade a secure access to nonsecure.
57
+ *
41
*/
58
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
42
if (sattrs.ns) {
59
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
43
- txattrs->secure = false;
60
+ *
44
+ result->attrs.secure = false;
61
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
45
} else if (!secure) {
62
+ * See the COPYING file in the top-level directory.
46
/*
63
+ */
47
* NS access to S memory must fault.
64
+
48
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
65
+#ifndef BCM2835_POWERMGT_H
49
* for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
66
+#define BCM2835_POWERMGT_H
50
*/
67
+
51
fi->type = ARMFault_QEMU_SFault;
68
+#include "hw/sysbus.h"
52
- *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
69
+#include "qom/object.h"
53
- *phys_ptr = address;
70
+
54
- *prot = 0;
71
+#define TYPE_BCM2835_POWERMGT "bcm2835-powermgt"
55
+ result->page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
72
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PowerMgtState, BCM2835_POWERMGT)
56
+ result->phys = address;
73
+
57
+ result->prot = 0;
74
+struct BCM2835PowerMgtState {
58
return true;
75
+ SysBusDevice busdev;
59
}
76
+ MemoryRegion iomem;
60
}
77
+
61
}
78
+ uint32_t rstc;
62
79
+ uint32_t rsts;
63
- ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
80
+ uint32_t wdog;
64
- txattrs, prot, &mpu_is_subpage, fi, NULL);
81
+};
65
- *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
82
+
66
+ ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx,
83
+#endif
67
+ &result->phys, &result->attrs, &result->prot,
84
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
68
+ &mpu_is_subpage, fi, NULL);
85
index XXXXXXX..XXXXXXX 100644
69
+ result->page_size =
86
--- a/hw/arm/bcm2835_peripherals.c
70
+ sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
87
+++ b/hw/arm/bcm2835_peripherals.c
71
return ret;
88
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_init(Object *obj)
89
90
object_property_add_const_link(OBJECT(&s->dwc2), "dma-mr",
91
OBJECT(&s->gpu_bus_mr));
92
+
93
+ /* Power Management */
94
+ object_initialize_child(obj, "powermgt", &s->powermgt,
95
+ TYPE_BCM2835_POWERMGT);
96
}
72
}
97
73
98
static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
74
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
99
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
75
if (arm_feature(env, ARM_FEATURE_V8)) {
100
qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
76
/* PMSAv8 */
101
INTERRUPT_USB));
77
ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
102
78
- &result->phys, &result->attrs,
103
+ /* Power Management */
79
- &result->prot, &result->page_size, fi);
104
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->powermgt), errp)) {
80
+ result, fi);
105
+ return;
81
} else if (arm_feature(env, ARM_FEATURE_V7)) {
106
+ }
82
/* PMSAv7 */
107
+
83
ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
108
+ memory_region_add_subregion(&s->peri_mr, PM_OFFSET,
109
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->powermgt), 0));
110
+
111
create_unimp(s, &s->txp, "bcm2835-txp", TXP_OFFSET, 0x1000);
112
create_unimp(s, &s->armtmr, "bcm2835-sp804", ARMCTRL_TIMER0_1_OFFSET, 0x40);
113
- create_unimp(s, &s->powermgt, "bcm2835-powermgt", PM_OFFSET, 0x114);
114
create_unimp(s, &s->i2s, "bcm2835-i2s", I2S_OFFSET, 0x100);
115
create_unimp(s, &s->smi, "bcm2835-smi", SMI_OFFSET, 0x100);
116
create_unimp(s, &s->spi[0], "bcm2835-spi0", SPI0_OFFSET, 0x20);
117
diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c
118
new file mode 100644
119
index XXXXXXX..XXXXXXX
120
--- /dev/null
121
+++ b/hw/misc/bcm2835_powermgt.c
122
@@ -XXX,XX +XXX,XX @@
123
+/*
124
+ * BCM2835 Power Management emulation
125
+ *
126
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
127
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
128
+ *
129
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
130
+ * See the COPYING file in the top-level directory.
131
+ */
132
+
133
+#include "qemu/osdep.h"
134
+#include "qemu/log.h"
135
+#include "qemu/module.h"
136
+#include "hw/misc/bcm2835_powermgt.h"
137
+#include "migration/vmstate.h"
138
+#include "sysemu/runstate.h"
139
+
140
+#define PASSWORD 0x5a000000
141
+#define PASSWORD_MASK 0xff000000
142
+
143
+#define R_RSTC 0x1c
144
+#define V_RSTC_RESET 0x20
145
+#define R_RSTS 0x20
146
+#define V_RSTS_POWEROFF 0x555 /* Linux uses partition 63 to indicate halt. */
147
+#define R_WDOG 0x24
148
+
149
+static uint64_t bcm2835_powermgt_read(void *opaque, hwaddr offset,
150
+ unsigned size)
151
+{
152
+ BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
153
+ uint32_t res = 0;
154
+
155
+ switch (offset) {
156
+ case R_RSTC:
157
+ res = s->rstc;
158
+ break;
159
+ case R_RSTS:
160
+ res = s->rsts;
161
+ break;
162
+ case R_WDOG:
163
+ res = s->wdog;
164
+ break;
165
+
166
+ default:
167
+ qemu_log_mask(LOG_UNIMP,
168
+ "bcm2835_powermgt_read: Unknown offset 0x%08"HWADDR_PRIx
169
+ "\n", offset);
170
+ res = 0;
171
+ break;
172
+ }
173
+
174
+ return res;
175
+}
176
+
177
+static void bcm2835_powermgt_write(void *opaque, hwaddr offset,
178
+ uint64_t value, unsigned size)
179
+{
180
+ BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
181
+
182
+ if ((value & PASSWORD_MASK) != PASSWORD) {
183
+ qemu_log_mask(LOG_GUEST_ERROR,
184
+ "bcm2835_powermgt_write: Bad password 0x%"PRIx64
185
+ " at offset 0x%08"HWADDR_PRIx"\n",
186
+ value, offset);
187
+ return;
188
+ }
189
+
190
+ value = value & ~PASSWORD_MASK;
191
+
192
+ switch (offset) {
193
+ case R_RSTC:
194
+ s->rstc = value;
195
+ if (value & V_RSTC_RESET) {
196
+ if ((s->rsts & 0xfff) == V_RSTS_POWEROFF) {
197
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
198
+ } else {
199
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
200
+ }
201
+ }
202
+ break;
203
+ case R_RSTS:
204
+ qemu_log_mask(LOG_UNIMP,
205
+ "bcm2835_powermgt_write: RSTS\n");
206
+ s->rsts = value;
207
+ break;
208
+ case R_WDOG:
209
+ qemu_log_mask(LOG_UNIMP,
210
+ "bcm2835_powermgt_write: WDOG\n");
211
+ s->wdog = value;
212
+ break;
213
+
214
+ default:
215
+ qemu_log_mask(LOG_UNIMP,
216
+ "bcm2835_powermgt_write: Unknown offset 0x%08"HWADDR_PRIx
217
+ "\n", offset);
218
+ break;
219
+ }
220
+}
221
+
222
+static const MemoryRegionOps bcm2835_powermgt_ops = {
223
+ .read = bcm2835_powermgt_read,
224
+ .write = bcm2835_powermgt_write,
225
+ .endianness = DEVICE_NATIVE_ENDIAN,
226
+ .impl.min_access_size = 4,
227
+ .impl.max_access_size = 4,
228
+};
229
+
230
+static const VMStateDescription vmstate_bcm2835_powermgt = {
231
+ .name = TYPE_BCM2835_POWERMGT,
232
+ .version_id = 1,
233
+ .minimum_version_id = 1,
234
+ .fields = (VMStateField[]) {
235
+ VMSTATE_UINT32(rstc, BCM2835PowerMgtState),
236
+ VMSTATE_UINT32(rsts, BCM2835PowerMgtState),
237
+ VMSTATE_UINT32(wdog, BCM2835PowerMgtState),
238
+ VMSTATE_END_OF_LIST()
239
+ }
240
+};
241
+
242
+static void bcm2835_powermgt_init(Object *obj)
243
+{
244
+ BCM2835PowerMgtState *s = BCM2835_POWERMGT(obj);
245
+
246
+ memory_region_init_io(&s->iomem, obj, &bcm2835_powermgt_ops, s,
247
+ TYPE_BCM2835_POWERMGT, 0x200);
248
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
249
+}
250
+
251
+static void bcm2835_powermgt_reset(DeviceState *dev)
252
+{
253
+ BCM2835PowerMgtState *s = BCM2835_POWERMGT(dev);
254
+
255
+ /* https://elinux.org/BCM2835_registers#PM */
256
+ s->rstc = 0x00000102;
257
+ s->rsts = 0x00001000;
258
+ s->wdog = 0x00000000;
259
+}
260
+
261
+static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data)
262
+{
263
+ DeviceClass *dc = DEVICE_CLASS(klass);
264
+
265
+ dc->reset = bcm2835_powermgt_reset;
266
+ dc->vmsd = &vmstate_bcm2835_powermgt;
267
+}
268
+
269
+static TypeInfo bcm2835_powermgt_info = {
270
+ .name = TYPE_BCM2835_POWERMGT,
271
+ .parent = TYPE_SYS_BUS_DEVICE,
272
+ .instance_size = sizeof(BCM2835PowerMgtState),
273
+ .class_init = bcm2835_powermgt_class_init,
274
+ .instance_init = bcm2835_powermgt_init,
275
+};
276
+
277
+static void bcm2835_powermgt_register_types(void)
278
+{
279
+ type_register_static(&bcm2835_powermgt_info);
280
+}
281
+
282
+type_init(bcm2835_powermgt_register_types)
283
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
284
index XXXXXXX..XXXXXXX 100644
285
--- a/hw/misc/meson.build
286
+++ b/hw/misc/meson.build
287
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files(
288
'bcm2835_rng.c',
289
'bcm2835_thermal.c',
290
'bcm2835_cprman.c',
291
+ 'bcm2835_powermgt.c',
292
))
293
softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c'))
294
softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c', 'zynq-xadc.c'))
295
--
84
--
296
2.20.1
85
2.25.1
297
86
298
87
diff view generated by jsdifflib
1
In do_ldst(), the calculation of the offset needs to be based on the
1
From: Richard Henderson <richard.henderson@linaro.org>
2
size of the memory access, not the size of the elements in the
3
vector. This meant we were getting it wrong for the widening and
4
narrowing variants of the various VLDR and VSTR insns.
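A standalone illustration of the fix (not the decoder code itself): for a widening VLDRB.U16 the immediate must be scaled by the size of the byte memory accesses, so scaling it by the 16-bit element size would double the offset.

    #include <assert.h>
    #include <stdint.h>

    /* MemOp-style size encodings: MO_8 = 0, MO_16 = 1, MO_32 = 2. */
    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2 };

    int main(void)
    {
        uint32_t imm = 2;
        unsigned msize = MO_8;   /* VLDRB.U16: byte-sized memory accesses */
        unsigned esize = MO_16;  /* ...widened to 16-bit vector elements  */

        assert((imm << msize) == 2);   /* correct: scale by the access size */
        assert((imm << esize) == 4);   /* the old, incorrect scaling        */
        return 0;
    }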
5
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220822152741.1617527-10-richard.henderson@linaro.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210628135835.6690-2-peter.maydell@linaro.org
9
---
8
---
10
target/arm/translate-mve.c | 17 +++++++++--------
9
target/arm/internals.h | 11 +++++------
11
1 file changed, 9 insertions(+), 8 deletions(-)
10
target/arm/m_helper.c | 16 +++++++---------
11
target/arm/ptw.c | 20 +++++++++-----------
12
3 files changed, 21 insertions(+), 26 deletions(-)
12
13
13
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
14
diff --git a/target/arm/internals.h b/target/arm/internals.h
14
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-mve.c
16
--- a/target/arm/internals.h
16
+++ b/target/arm/translate-mve.c
17
+++ b/target/arm/internals.h
17
@@ -XXX,XX +XXX,XX @@ static bool mve_skip_first_beat(DisasContext *s)
18
@@ -XXX,XX +XXX,XX @@ void v8m_security_lookup(CPUARMState *env, uint32_t address,
19
MMUAccessType access_type, ARMMMUIdx mmu_idx,
20
V8M_SAttributes *sattrs);
21
22
-bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
23
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
24
- hwaddr *phys_ptr, MemTxAttrs *txattrs,
25
- int *prot, bool *is_subpage,
26
- ARMMMUFaultInfo *fi, uint32_t *mregion);
27
-
28
/* Cacheability and shareability attributes for a memory access */
29
typedef struct ARMCacheAttrs {
30
/*
31
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
32
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
33
__attribute__((nonnull));
34
35
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
36
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
37
+ GetPhysAddrResult *result, bool *is_subpage,
38
+ ARMMMUFaultInfo *fi, uint32_t *mregion);
39
+
40
void arm_log_exception(CPUState *cs);
41
42
#endif /* !CONFIG_USER_ONLY */
43
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/m_helper.c
46
+++ b/target/arm/m_helper.c
47
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
48
V8M_SAttributes sattrs = {};
49
uint32_t tt_resp;
50
bool r, rw, nsr, nsrw, mrvalid;
51
- int prot;
52
- ARMMMUFaultInfo fi = {};
53
- MemTxAttrs attrs = {};
54
- hwaddr phys_addr;
55
ARMMMUIdx mmu_idx;
56
uint32_t mregion;
57
bool targetpriv;
58
bool targetsec = env->v7m.secure;
59
- bool is_subpage;
60
61
/*
62
* Work out what the security state and privilege level we're
63
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
64
* inspecting the other MPU state.
65
*/
66
if (arm_current_el(env) != 0 || alt) {
67
+ GetPhysAddrResult res = {};
68
+ ARMMMUFaultInfo fi = {};
69
+ bool is_subpage;
70
+
71
/* We can ignore the return value as prot is always set */
72
pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
73
- &phys_addr, &attrs, &prot, &is_subpage,
74
- &fi, &mregion);
75
+ &res, &is_subpage, &fi, &mregion);
76
if (mregion == -1) {
77
mrvalid = false;
78
mregion = 0;
79
} else {
80
mrvalid = true;
81
}
82
- r = prot & PAGE_READ;
83
- rw = prot & PAGE_WRITE;
84
+ r = res.prot & PAGE_READ;
85
+ rw = res.prot & PAGE_WRITE;
86
} else {
87
r = false;
88
rw = false;
89
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/target/arm/ptw.c
92
+++ b/target/arm/ptw.c
93
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
94
95
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
96
MMUAccessType access_type, ARMMMUIdx mmu_idx,
97
- hwaddr *phys_ptr, MemTxAttrs *txattrs,
98
- int *prot, bool *is_subpage,
99
+ GetPhysAddrResult *result, bool *is_subpage,
100
ARMMMUFaultInfo *fi, uint32_t *mregion)
101
{
102
/*
103
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
104
uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
105
106
*is_subpage = false;
107
- *phys_ptr = address;
108
- *prot = 0;
109
+ result->phys = address;
110
+ result->prot = 0;
111
if (mregion) {
112
*mregion = -1;
18
}
113
}
114
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
115
116
if (matchregion == -1) {
117
/* hit using the background region */
118
- get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
119
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot);
120
} else {
121
uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
122
uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
123
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
124
xn = 1;
125
}
126
127
- *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
128
- if (*prot && !xn && !(pxn && !is_user)) {
129
- *prot |= PAGE_EXEC;
130
+ result->prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
131
+ if (result->prot && !xn && !(pxn && !is_user)) {
132
+ result->prot |= PAGE_EXEC;
133
}
134
/*
135
* We don't need to look the attribute up in the MAIR0/MAIR1
136
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
137
138
fi->type = ARMFault_Permission;
139
fi->level = 1;
140
- return !(*prot & (1 << access_type));
141
+ return !(result->prot & (1 << access_type));
19
}
142
}
20
143
21
-static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
144
static bool v8m_is_sau_exempt(CPUARMState *env,
22
+static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn,
145
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
23
+ unsigned msize)
24
{
25
TCGv_i32 addr;
26
uint32_t offset;
27
@@ -XXX,XX +XXX,XX @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
28
return true;
29
}
146
}
30
147
31
- offset = a->imm << a->size;
148
ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx,
32
+ offset = a->imm << msize;
149
- &result->phys, &result->attrs, &result->prot,
33
if (!a->a) {
150
- &mpu_is_subpage, fi, NULL);
34
offset = -offset;
151
+ result, &mpu_is_subpage, fi, NULL);
35
}
152
result->page_size =
36
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
153
sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
37
{ gen_helper_mve_vstrw, gen_helper_mve_vldrw },
154
return ret;
38
{ NULL, NULL }
39
};
40
- return do_ldst(s, a, ldstfns[a->size][a->l]);
41
+ return do_ldst(s, a, ldstfns[a->size][a->l], a->size);
42
}
43
44
-#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST) \
45
+#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST, MSIZE) \
46
static bool trans_##OP(DisasContext *s, arg_VLDR_VSTR *a) \
47
{ \
48
static MVEGenLdStFn * const ldstfns[2][2] = { \
49
{ gen_helper_mve_##ST, gen_helper_mve_##SLD }, \
50
{ NULL, gen_helper_mve_##ULD }, \
51
}; \
52
- return do_ldst(s, a, ldstfns[a->u][a->l]); \
53
+ return do_ldst(s, a, ldstfns[a->u][a->l], MSIZE); \
54
}
55
56
-DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h)
57
-DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w)
58
-DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w)
59
+DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h, MO_8)
60
+DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w, MO_8)
61
+DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w, MO_16)
62
63
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
64
{
65
--
155
--
66
2.20.1
156
2.25.1
67
157
68
158
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
This can be made redundant with result->page_size, by moving the basic
4
set of page_size from get_phys_addr_pmsav8. We still need to overwrite
5
page_size when v8m_security_lookup signals a subpage.
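The shape of the change, as a minimal standalone sketch; the struct and function are simplified stand-ins for GetPhysAddrResult and the real MPU lookup, and a 4K TARGET_PAGE_SIZE is assumed.

    #include <stdint.h>

    #define TARGET_PAGE_SIZE 0x1000u   /* 4K target pages assumed */

    /* Simplified stand-in for GetPhysAddrResult. */
    struct result {
        uint32_t phys;
        int prot;
        uint32_t page_size;
    };

    /* After the change the lookup owns page_size: start from a whole page
     * and shrink it to 1 when the matching region only partially covers
     * that page, instead of reporting the fact through bool *is_subpage.
     */
    void lookup(uint32_t address, uint32_t region_base, uint32_t region_limit,
                struct result *res)
    {
        uint32_t page_base = address & ~(TARGET_PAGE_SIZE - 1);
        uint32_t page_limit = page_base + TARGET_PAGE_SIZE - 1;

        res->page_size = TARGET_PAGE_SIZE;
        if (region_base > page_base || region_limit < page_limit) {
            res->page_size = 1;   /* force the core code onto a subpage */
        }
    }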
6
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220822152741.1617527-11-richard.henderson@linaro.org
9
[PMM: Update a comment that used to refer to is_subpage]
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
target/arm/internals.h | 4 ++--
14
target/arm/m_helper.c | 3 +--
15
target/arm/ptw.c | 23 ++++++++++++-----------
16
3 files changed, 15 insertions(+), 15 deletions(-)
17
18
diff --git a/target/arm/internals.h b/target/arm/internals.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/internals.h
21
+++ b/target/arm/internals.h
22
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
23
24
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
25
MMUAccessType access_type, ARMMMUIdx mmu_idx,
26
- GetPhysAddrResult *result, bool *is_subpage,
27
- ARMMMUFaultInfo *fi, uint32_t *mregion);
28
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi,
29
+ uint32_t *mregion);
30
31
void arm_log_exception(CPUState *cs);
32
33
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/m_helper.c
36
+++ b/target/arm/m_helper.c
37
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
38
if (arm_current_el(env) != 0 || alt) {
39
GetPhysAddrResult res = {};
40
ARMMMUFaultInfo fi = {};
41
- bool is_subpage;
42
43
/* We can ignore the return value as prot is always set */
44
pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
45
- &res, &is_subpage, &fi, &mregion);
46
+ &res, &fi, &mregion);
47
if (mregion == -1) {
48
mrvalid = false;
49
mregion = 0;
50
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
51
index XXXXXXX..XXXXXXX 100644
52
--- a/target/arm/ptw.c
53
+++ b/target/arm/ptw.c
54
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
55
56
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
57
MMUAccessType access_type, ARMMMUIdx mmu_idx,
58
- GetPhysAddrResult *result, bool *is_subpage,
59
- ARMMMUFaultInfo *fi, uint32_t *mregion)
60
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi,
61
+ uint32_t *mregion)
62
{
63
/*
64
* Perform a PMSAv8 MPU lookup (without also doing the SAU check
65
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
66
* mregion is (if not NULL) set to the region number which matched,
67
* or -1 if no region number is returned (MPU off, address did not
68
* hit a region, address hit in multiple regions).
69
- * We set is_subpage to true if the region hit doesn't cover the
70
- * entire TARGET_PAGE the address is within.
71
+ * If the region hit doesn't cover the entire TARGET_PAGE the address
72
+ * is within, then we set the result page_size to 1 to force the
73
+ * memory system to use a subpage.
74
*/
75
ARMCPU *cpu = env_archcpu(env);
76
bool is_user = regime_is_user(env, mmu_idx);
77
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
78
uint32_t addr_page_base = address & TARGET_PAGE_MASK;
79
uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
80
81
- *is_subpage = false;
82
+ result->page_size = TARGET_PAGE_SIZE;
83
result->phys = address;
84
result->prot = 0;
85
if (mregion) {
86
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
87
ranges_overlap(base, limit - base + 1,
88
addr_page_base,
89
TARGET_PAGE_SIZE)) {
90
- *is_subpage = true;
91
+ result->page_size = 1;
92
}
93
continue;
94
}
95
96
if (base > addr_page_base || limit < addr_page_limit) {
97
- *is_subpage = true;
98
+ result->page_size = 1;
99
}
100
101
if (matchregion != -1) {
102
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
103
uint32_t secure = regime_is_secure(env, mmu_idx);
104
V8M_SAttributes sattrs = {};
105
bool ret;
106
- bool mpu_is_subpage;
107
108
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
109
v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
110
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
111
}
112
113
ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx,
114
- result, &mpu_is_subpage, fi, NULL);
115
- result->page_size =
116
- sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
117
+ result, fi, NULL);
118
+ if (sattrs.subpage) {
119
+ result->page_size = 1;
120
+ }
121
return ret;
122
}
123
124
--
125
2.25.1
diff view generated by jsdifflib
1
The A64 AdvSIMD modified-immediate grouping uses almost the same
1
From: Richard Henderson <richard.henderson@linaro.org>
2
constant encoding that A32 Neon does; reuse asimd_imm_const() (to
3
which we add the AArch64-specific case for cmode 15 op 1) instead of
4
reimplementing it all.
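As a quick sanity check on the new cmode == 15, op == 1 path, here is a standalone sketch that repeats the same bit manipulation as the hunk below and checks two known FMOV double-precision immediates; it does not call the real asimd_imm_const().

    #include <assert.h>
    #include <stdint.h>

    /* Expand an FMOV (vector, immediate) 8-bit constant to 64 bits, the
     * same way the new cmode == 15, op == 1 case does.
     */
    static uint64_t expand_fmov64_imm8(uint32_t imm)
    {
        uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;

        if (imm & 0x80) {
            imm64 |= 0x8000000000000000ULL;   /* sign bit */
        }
        if (imm & 0x40) {
            imm64 |= 0x3fc0000000000000ULL;   /* exponents 2^-3 .. 2^0 */
        } else {
            imm64 |= 0x4000000000000000ULL;   /* exponents 2^1 .. 2^4 */
        }
        return imm64;
    }

    int main(void)
    {
        assert(expand_fmov64_imm8(0x70) == 0x3ff0000000000000ULL); /* 1.0 */
        assert(expand_fmov64_imm8(0x00) == 0x4000000000000000ULL); /* 2.0 */
        return 0;
    }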
5
2
3
Remove the use of regime_is_secure from v8m_security_lookup,
4
passing the new parameter to the lookup instead.
5
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220822152741.1617527-12-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210628135835.6690-5-peter.maydell@linaro.org
9
---
11
---
10
target/arm/translate.h | 3 +-
12
target/arm/internals.h | 2 +-
11
target/arm/translate-a64.c | 86 ++++----------------------------------
13
target/arm/m_helper.c | 9 ++++++---
12
target/arm/translate.c | 17 +++++++-
14
target/arm/ptw.c | 9 +++++----
13
3 files changed, 24 insertions(+), 82 deletions(-)
15
3 files changed, 12 insertions(+), 8 deletions(-)
14
16
15
diff --git a/target/arm/translate.h b/target/arm/translate.h
17
diff --git a/target/arm/internals.h b/target/arm/internals.h
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate.h
19
--- a/target/arm/internals.h
18
+++ b/target/arm/translate.h
20
+++ b/target/arm/internals.h
19
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
21
@@ -XXX,XX +XXX,XX @@ typedef struct V8M_SAttributes {
20
* VMVN and VBIC (when cmode < 14 && op == 1).
22
21
*
23
void v8m_security_lookup(CPUARMState *env, uint32_t address,
22
* The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
24
MMUAccessType access_type, ARMMMUIdx mmu_idx,
23
- * callers must catch this.
25
- V8M_SAttributes *sattrs);
24
+ * callers must catch this; we return the 64-bit constant value defined
26
+ bool secure, V8M_SAttributes *sattrs);
25
+ * for AArch64.
27
26
*
28
/* Cacheability and shareability attributes for a memory access */
27
* cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
29
typedef struct ARMCacheAttrs {
28
* is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
30
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
29
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
30
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/translate-a64.c
32
--- a/target/arm/m_helper.c
32
+++ b/target/arm/translate-a64.c
33
+++ b/target/arm/m_helper.c
33
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
34
@@ -XXX,XX +XXX,XX @@ static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
35
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
36
V8M_SAttributes sattrs = {};
37
38
- v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
39
+ v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
40
+ targets_secure, &sattrs);
41
if (sattrs.ns) {
42
attrs.secure = false;
43
} else if (!targets_secure) {
44
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
45
ARMMMUFaultInfo fi = {};
46
MemTxResult txres;
47
48
- v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
49
+ v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx,
50
+ regime_is_secure(env, mmu_idx), &sattrs);
51
if (!sattrs.nsc || sattrs.ns) {
52
/*
53
* This must be the second half of the insn, and it straddles a
54
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
55
}
56
57
if (env->v7m.secure) {
58
- v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
59
+ v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
60
+ targetsec, &sattrs);
61
nsr = sattrs.ns && r;
62
nsrw = sattrs.ns && rw;
63
} else {
64
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/target/arm/ptw.c
67
+++ b/target/arm/ptw.c
68
@@ -XXX,XX +XXX,XX @@ static bool v8m_is_sau_exempt(CPUARMState *env,
69
}
70
71
void v8m_security_lookup(CPUARMState *env, uint32_t address,
72
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
73
- V8M_SAttributes *sattrs)
74
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
75
+ bool is_secure, V8M_SAttributes *sattrs)
34
{
76
{
35
int rd = extract32(insn, 0, 5);
77
/*
36
int cmode = extract32(insn, 12, 4);
78
* Look up the security attributes for this address. Compare the
37
- int cmode_3_1 = extract32(cmode, 1, 3);
79
@@ -XXX,XX +XXX,XX @@ void v8m_security_lookup(CPUARMState *env, uint32_t address,
38
- int cmode_0 = extract32(cmode, 0, 1);
80
}
39
int o2 = extract32(insn, 11, 1);
81
40
uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
82
if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
41
bool is_neg = extract32(insn, 29, 1);
83
- sattrs->ns = !regime_is_secure(env, mmu_idx);
42
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
84
+ sattrs->ns = !is_secure;
43
return;
85
return;
44
}
86
}
45
87
46
- /* See AdvSIMDExpandImm() in ARM ARM */
88
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
47
- switch (cmode_3_1) {
89
bool ret;
48
- case 0: /* Replicate(Zeros(24):imm8, 2) */
90
49
- case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
91
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
50
- case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
92
- v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
51
- case 3: /* Replicate(imm8:Zeros(24), 2) */
93
+ v8m_security_lookup(env, address, access_type, mmu_idx,
52
- {
94
+ secure, &sattrs);
53
- int shift = cmode_3_1 * 8;
95
if (access_type == MMU_INST_FETCH) {
54
- imm = bitfield_replicate(abcdefgh << shift, 32);
55
- break;
56
- }
57
- case 4: /* Replicate(Zeros(8):imm8, 4) */
58
- case 5: /* Replicate(imm8:Zeros(8), 4) */
59
- {
60
- int shift = (cmode_3_1 & 0x1) * 8;
61
- imm = bitfield_replicate(abcdefgh << shift, 16);
62
- break;
63
- }
64
- case 6:
65
- if (cmode_0) {
66
- /* Replicate(Zeros(8):imm8:Ones(16), 2) */
67
- imm = (abcdefgh << 16) | 0xffff;
68
- } else {
69
- /* Replicate(Zeros(16):imm8:Ones(8), 2) */
70
- imm = (abcdefgh << 8) | 0xff;
71
- }
72
- imm = bitfield_replicate(imm, 32);
73
- break;
74
- case 7:
75
- if (!cmode_0 && !is_neg) {
76
- imm = bitfield_replicate(abcdefgh, 8);
77
- } else if (!cmode_0 && is_neg) {
78
- int i;
79
- imm = 0;
80
- for (i = 0; i < 8; i++) {
81
- if ((abcdefgh) & (1 << i)) {
82
- imm |= 0xffULL << (i * 8);
83
- }
84
- }
85
- } else if (cmode_0) {
86
- if (is_neg) {
87
- imm = (abcdefgh & 0x3f) << 48;
88
- if (abcdefgh & 0x80) {
89
- imm |= 0x8000000000000000ULL;
90
- }
91
- if (abcdefgh & 0x40) {
92
- imm |= 0x3fc0000000000000ULL;
93
- } else {
94
- imm |= 0x4000000000000000ULL;
95
- }
96
- } else {
97
- if (o2) {
98
- /* FMOV (vector, immediate) - half-precision */
99
- imm = vfp_expand_imm(MO_16, abcdefgh);
100
- /* now duplicate across the lanes */
101
- imm = bitfield_replicate(imm, 16);
102
- } else {
103
- imm = (abcdefgh & 0x3f) << 19;
104
- if (abcdefgh & 0x80) {
105
- imm |= 0x80000000;
106
- }
107
- if (abcdefgh & 0x40) {
108
- imm |= 0x3e000000;
109
- } else {
110
- imm |= 0x40000000;
111
- }
112
- imm |= (imm << 32);
113
- }
114
- }
115
- }
116
- break;
117
- default:
118
- g_assert_not_reached();
119
- }
120
-
121
- if (cmode_3_1 != 7 && is_neg) {
122
- imm = ~imm;
123
+ if (cmode == 15 && o2 && !is_neg) {
124
+ /* FMOV (vector, immediate) - half-precision */
125
+ imm = vfp_expand_imm(MO_16, abcdefgh);
126
+ /* now duplicate across the lanes */
127
+ imm = bitfield_replicate(imm, 16);
128
+ } else {
129
+ imm = asimd_imm_const(abcdefgh, cmode, is_neg);
130
}
131
132
if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
133
diff --git a/target/arm/translate.c b/target/arm/translate.c
134
index XXXXXXX..XXXXXXX 100644
135
--- a/target/arm/translate.c
136
+++ b/target/arm/translate.c
137
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
138
case 14:
139
if (op) {
140
/*
96
/*
141
- * This is the only case where the top and bottom 32 bits
97
* Instruction fetches always use the MMU bank and the
142
- * of the encoded constant differ.
143
+ * This and cmode == 15 op == 1 are the only cases where
144
+ * the top and bottom 32 bits of the encoded constant differ.
145
*/
146
uint64_t imm64 = 0;
147
int n;
148
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
149
imm |= (imm << 8) | (imm << 16) | (imm << 24);
150
break;
151
case 15:
152
+ if (op) {
153
+ /* Reserved encoding for AArch32; valid for AArch64 */
154
+ uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
155
+ if (imm & 0x80) {
156
+ imm64 |= 0x8000000000000000ULL;
157
+ }
158
+ if (imm & 0x40) {
159
+ imm64 |= 0x3fc0000000000000ULL;
160
+ } else {
161
+ imm64 |= 0x4000000000000000ULL;
162
+ }
163
+ return imm64;
164
+ }
165
imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
166
| ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
167
break;
168
--
98
--
169
2.20.1
99
2.25.1
170
100
171
101
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Remove the use of regime_is_secure from pmsav8_mpu_lookup,
4
passing the new parameter to the lookup instead.
5
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220822152741.1617527-13-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/internals.h | 4 ++--
13
target/arm/m_helper.c | 2 +-
14
target/arm/ptw.c | 7 +++----
15
3 files changed, 6 insertions(+), 7 deletions(-)
16
17
diff --git a/target/arm/internals.h b/target/arm/internals.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/internals.h
20
+++ b/target/arm/internals.h
21
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
22
23
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
24
MMUAccessType access_type, ARMMMUIdx mmu_idx,
25
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi,
26
- uint32_t *mregion);
27
+ bool is_secure, GetPhysAddrResult *result,
28
+ ARMMMUFaultInfo *fi, uint32_t *mregion);
29
30
void arm_log_exception(CPUState *cs);
31
32
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/m_helper.c
35
+++ b/target/arm/m_helper.c
36
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
37
ARMMMUFaultInfo fi = {};
38
39
/* We can ignore the return value as prot is always set */
40
- pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
41
+ pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec,
42
&res, &fi, &mregion);
43
if (mregion == -1) {
44
mrvalid = false;
45
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/arm/ptw.c
48
+++ b/target/arm/ptw.c
49
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
50
51
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
52
MMUAccessType access_type, ARMMMUIdx mmu_idx,
53
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi,
54
- uint32_t *mregion)
55
+ bool secure, GetPhysAddrResult *result,
56
+ ARMMMUFaultInfo *fi, uint32_t *mregion)
57
{
58
/*
59
* Perform a PMSAv8 MPU lookup (without also doing the SAU check
60
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
61
*/
62
ARMCPU *cpu = env_archcpu(env);
63
bool is_user = regime_is_user(env, mmu_idx);
64
- uint32_t secure = regime_is_secure(env, mmu_idx);
65
int n;
66
int matchregion = -1;
67
bool hit = false;
68
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
69
}
70
}
71
72
- ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx,
73
+ ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
74
result, fi, NULL);
75
if (sattrs.subpage) {
76
result->page_size = 1;
77
--
2.25.1
1
From: Maxim Uvarov <maxim.uvarov@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
QEMU has two types of functions: shutdown and reboot. The shutdown
3
Remove the use of regime_is_secure from get_phys_addr_v5,
4
function has to be used for machine shutdown. Otherwise we cause
4
passing the new parameter to the lookup instead.
5
a reset with a bogus "cause" value, when we intended a shutdown.
6
5
7
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
[PMM: Folded in definition of local is_secure in get_phys_addr(),
9
since I dropped the earlier patch that would have provided it]
10
Message-id: 20220822152741.1617527-14-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Message-id: 20210625111842.3790-3-maxim.uvarov@linaro.org
10
[PMM: tweaked commit message]
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
13
---
13
hw/gpio/gpio_pwr.c | 2 +-
14
target/arm/ptw.c | 14 +++++++-------
14
1 file changed, 1 insertion(+), 1 deletion(-)
15
1 file changed, 7 insertions(+), 7 deletions(-)
15
16
16
diff --git a/hw/gpio/gpio_pwr.c b/hw/gpio/gpio_pwr.c
17
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
17
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/gpio/gpio_pwr.c
19
--- a/target/arm/ptw.c
19
+++ b/hw/gpio/gpio_pwr.c
20
+++ b/target/arm/ptw.c
20
@@ -XXX,XX +XXX,XX @@ static void gpio_pwr_reset(void *opaque, int n, int level)
21
@@ -XXX,XX +XXX,XX @@ static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
21
static void gpio_pwr_shutdown(void *opaque, int n, int level)
22
23
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
24
MMUAccessType access_type, ARMMMUIdx mmu_idx,
25
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
26
+ bool is_secure, GetPhysAddrResult *result,
27
+ ARMMMUFaultInfo *fi)
22
{
28
{
23
if (level) {
29
int level = 1;
24
- qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
30
uint32_t table;
25
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
31
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
32
fi->type = ARMFault_Translation;
33
goto do_fault;
34
}
35
- desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
36
- mmu_idx, fi);
37
+ desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
38
if (fi->type != ARMFault_None) {
39
goto do_fault;
40
}
41
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
42
/* Fine pagetable. */
43
table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
44
}
45
- desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
46
- mmu_idx, fi);
47
+ desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
48
if (fi->type != ARMFault_None) {
49
goto do_fault;
50
}
51
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
52
GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
53
{
54
ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
55
+ bool is_secure = regime_is_secure(env, mmu_idx);
56
57
if (mmu_idx != s1_mmu_idx) {
58
/*
59
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
60
* cannot upgrade an non-secure translation regime's attributes
61
* to secure.
62
*/
63
- result->attrs.secure = regime_is_secure(env, mmu_idx);
64
+ result->attrs.secure = is_secure;
65
result->attrs.user = regime_is_user(env, mmu_idx);
66
67
/*
68
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
69
result, fi);
70
} else {
71
return get_phys_addr_v5(env, address, access_type, mmu_idx,
72
- result, fi);
73
+ is_secure, result, fi);
26
}
74
}
27
}
75
}
28
76
29
--
77
--
30
2.20.1
78
2.25.1
31
79
32
80
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>

Remove the use of regime_is_secure from get_phys_addr_v6,
passing the new parameter to the lookup instead.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220822152741.1617527-15-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/ptw.c
18
+++ b/target/arm/ptw.c
19
@@ -XXX,XX +XXX,XX @@ do_fault:
20
21
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
22
MMUAccessType access_type, ARMMMUIdx mmu_idx,
23
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
24
+ bool is_secure, GetPhysAddrResult *result,
25
+ ARMMMUFaultInfo *fi)
26
{
27
ARMCPU *cpu = env_archcpu(env);
28
int level = 1;
29
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
30
fi->type = ARMFault_Translation;
31
goto do_fault;
32
}
33
- desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
34
- mmu_idx, fi);
35
+ desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
36
if (fi->type != ARMFault_None) {
37
goto do_fault;
38
}
39
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
40
ns = extract32(desc, 3, 1);
41
/* Lookup l2 entry. */
42
table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
43
- desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
44
- mmu_idx, fi);
45
+ desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
46
if (fi->type != ARMFault_None) {
47
goto do_fault;
48
}
49
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
50
result, fi);
51
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
52
return get_phys_addr_v6(env, address, access_type, mmu_idx,
53
- result, fi);
54
+ is_secure, result, fi);
55
} else {
56
return get_phys_addr_v5(env, address, access_type, mmu_idx,
57
is_secure, result, fi);
58
--
59
2.25.1
60
61
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Add a test booting and quickly shutdown a raspi2 machine,
3
Remove the use of regime_is_secure from get_phys_addr_pmsav8.
4
to test the power management model:
4
Since we already had a local variable named secure, use that.
5
5
6
(1/1) tests/acceptance/boot_linux_console.py:BootLinuxConsole.test_arm_raspi2_initrd:
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
console: [ 0.000000] Booting Linux on physical CPU 0xf00
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
console: [ 0.000000] Linux version 4.14.98-v7+ (dom@dom-XPS-13-9370) (gcc version 4.9.3 (crosstool-NG crosstool-ng-1.22.0-88-g8460611)) #1200 SMP Tue Feb 12 20:27:48 GMT 2019
8
Message-id: 20220822152741.1617527-16-richard.henderson@linaro.org
9
console: [ 0.000000] CPU: ARMv7 Processor [410fc075] revision 5 (ARMv7), cr=10c5387d
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
console: [ 0.000000] CPU: div instructions available: patching division code
11
console: [ 0.000000] CPU: PIPT / VIPT nonaliasing data cache, VIPT aliasing instruction cache
12
console: [ 0.000000] OF: fdt: Machine model: Raspberry Pi 2 Model B
13
...
14
console: Boot successful.
15
console: cat /proc/cpuinfo
16
console: / # cat /proc/cpuinfo
17
...
18
console: processor : 3
19
console: model name : ARMv7 Processor rev 5 (v7l)
20
console: BogoMIPS : 125.00
21
console: Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm
22
console: CPU implementer : 0x41
23
console: CPU architecture: 7
24
console: CPU variant : 0x0
25
console: CPU part : 0xc07
26
console: CPU revision : 5
27
console: Hardware : BCM2835
28
console: Revision : 0000
29
console: Serial : 0000000000000000
30
console: cat /proc/iomem
31
console: / # cat /proc/iomem
32
console: 00000000-3bffffff : System RAM
33
console: 00008000-00afffff : Kernel code
34
console: 00c00000-00d468ef : Kernel data
35
console: 3f006000-3f006fff : dwc_otg
36
console: 3f007000-3f007eff : /soc/dma@7e007000
37
console: 3f00b880-3f00b8bf : /soc/mailbox@7e00b880
38
console: 3f100000-3f100027 : /soc/watchdog@7e100000
39
console: 3f101000-3f102fff : /soc/cprman@7e101000
40
console: 3f200000-3f2000b3 : /soc/gpio@7e200000
41
PASS (24.59 s)
42
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0
43
JOB TIME : 25.02 s
44
45
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
46
Reviewed-by: Wainer dos Santos Moschetta <wainersm@redhat.com>
47
Message-id: 20210531113837.1689775-1-f4bug@amsat.org
48
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
49
---
11
---
50
tests/acceptance/boot_linux_console.py | 43 ++++++++++++++++++++++++++
12
target/arm/ptw.c | 5 ++---
51
1 file changed, 43 insertions(+)
13
1 file changed, 2 insertions(+), 3 deletions(-)
52
14
53
diff --git a/tests/acceptance/boot_linux_console.py b/tests/acceptance/boot_linux_console.py
15
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
54
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
55
--- a/tests/acceptance/boot_linux_console.py
17
--- a/target/arm/ptw.c
56
+++ b/tests/acceptance/boot_linux_console.py
18
+++ b/target/arm/ptw.c
57
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ void v8m_security_lookup(CPUARMState *env, uint32_t address,
58
from avocado import skip
20
59
from avocado import skipUnless
21
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
60
from avocado_qemu import Test
22
MMUAccessType access_type, ARMMMUIdx mmu_idx,
61
+from avocado_qemu import exec_command
23
- GetPhysAddrResult *result,
62
from avocado_qemu import exec_command_and_wait_for_pattern
24
+ bool secure, GetPhysAddrResult *result,
63
from avocado_qemu import interrupt_interactive_console_until_pattern
25
ARMMMUFaultInfo *fi)
64
from avocado_qemu import wait_for_console_pattern
26
{
65
@@ -XXX,XX +XXX,XX @@ def test_arm_raspi2_uart0(self):
27
- uint32_t secure = regime_is_secure(env, mmu_idx);
66
"""
28
V8M_SAttributes sattrs = {};
67
self.do_test_arm_raspi2(0)
29
bool ret;
68
30
69
+ def test_arm_raspi2_initrd(self):
31
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
70
+ """
32
if (arm_feature(env, ARM_FEATURE_V8)) {
71
+ :avocado: tags=arch:arm
33
/* PMSAv8 */
72
+ :avocado: tags=machine:raspi2
34
ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
73
+ """
35
- result, fi);
74
+ deb_url = ('http://archive.raspberrypi.org/debian/'
36
+ is_secure, result, fi);
75
+ 'pool/main/r/raspberrypi-firmware/'
37
} else if (arm_feature(env, ARM_FEATURE_V7)) {
76
+ 'raspberrypi-kernel_1.20190215-1_armhf.deb')
38
/* PMSAv7 */
77
+ deb_hash = 'cd284220b32128c5084037553db3c482426f3972'
39
ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
78
+ deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
79
+ kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img')
80
+ dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb')
81
+
82
+ initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
83
+ '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
84
+ 'arm/rootfs-armv7a.cpio.gz')
85
+ initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c'
86
+ initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
87
+ initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
88
+ archive.gzip_uncompress(initrd_path_gz, initrd_path)
89
+
90
+ self.vm.set_console()
91
+ kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
92
+ 'earlycon=pl011,0x3f201000 console=ttyAMA0 '
93
+ 'panic=-1 noreboot ' +
94
+ 'dwc_otg.fiq_fsm_enable=0')
95
+ self.vm.add_args('-kernel', kernel_path,
96
+ '-dtb', dtb_path,
97
+ '-initrd', initrd_path,
98
+ '-append', kernel_command_line,
99
+ '-no-reboot')
100
+ self.vm.launch()
101
+ self.wait_for_console_pattern('Boot successful.')
102
+
103
+ exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
104
+ 'BCM2835')
105
+ exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
106
+ '/soc/cprman@7e101000')
107
+ exec_command(self, 'halt')
108
+ # Wait for VM to shut down gracefully
109
+ self.vm.wait()
110
+
111
def test_arm_exynos4210_initrd(self):
112
"""
113
:avocado: tags=arch:arm
114
--
40
--
115
2.20.1
41
2.25.1
116
42
117
43
1
From: Patrick Venture <venture@google.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Adds a line-item reference to the supported quanta-q71l-bmc aspeed
3
Remove the use of regime_is_secure from pmsav7_use_background_region,
4
entry.
4
using the new parameter instead.
5
5
6
Signed-off-by: Patrick Venture <venture@google.com>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Cédric Le Goater <clg@kaod.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210615192848.1065297-2-venture@google.com
8
Message-id: 20220822152741.1617527-17-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
---
11
docs/system/arm/aspeed.rst | 1 +
12
target/arm/ptw.c | 10 +++++-----
12
1 file changed, 1 insertion(+)
13
1 file changed, 5 insertions(+), 5 deletions(-)
13
14
14
diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst
15
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/docs/system/arm/aspeed.rst
17
--- a/target/arm/ptw.c
17
+++ b/docs/system/arm/aspeed.rst
18
+++ b/target/arm/ptw.c
18
@@ -XXX,XX +XXX,XX @@ etc.
19
@@ -XXX,XX +XXX,XX @@ static bool m_is_system_region(CPUARMState *env, uint32_t address)
19
AST2400 SoC based machines :
20
}
20
21
21
- ``palmetto-bmc`` OpenPOWER Palmetto POWER8 BMC
22
static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
22
+- ``quanta-q71l-bmc`` OpenBMC Quanta BMC
23
- bool is_user)
23
24
+ bool is_secure, bool is_user)
24
AST2500 SoC based machines :
25
{
26
/*
27
* Return true if we should use the default memory map as a
28
@@ -XXX,XX +XXX,XX @@ static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
29
}
30
31
if (arm_feature(env, ARM_FEATURE_M)) {
32
- return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
33
- & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
34
+ return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
35
} else {
36
return regime_sctlr(env, mmu_idx) & SCTLR_BR;
37
}
38
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
39
{
40
ARMCPU *cpu = env_archcpu(env);
41
int n;
42
+ bool secure = regime_is_secure(env, mmu_idx);
43
bool is_user = regime_is_user(env, mmu_idx);
44
45
result->phys = address;
46
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
47
}
48
49
if (n == -1) { /* no hits */
50
- if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
51
+ if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
52
/* background fault */
53
fi->type = ARMFault_Background;
54
return true;
55
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
56
} else if (m_is_ppb_region(env, address)) {
57
hit = true;
58
} else {
59
- if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
60
+ if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
61
hit = true;
62
}
25
63
26
--
64
--
27
2.20.1
65
2.25.1
28
66
29
67
1
From: Patrick Venture <venture@google.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Add line item reference to quanta-gbs-bmc machine.
3
Remove the use of regime_is_secure from get_phys_addr_pmsav7,
4
using the new parameter instead.
4
5
5
Signed-off-by: Patrick Venture <venture@google.com>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Cédric Le Goater <clg@kaod.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210615192848.1065297-3-venture@google.com
8
Message-id: 20220822152741.1617527-19-richard.henderson@linaro.org
8
[PMM: fixed underline Sphinx warning]
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
---
11
docs/system/arm/nuvoton.rst | 5 +++--
12
target/arm/ptw.c | 5 ++---
12
1 file changed, 3 insertions(+), 2 deletions(-)
13
1 file changed, 2 insertions(+), 3 deletions(-)
13
14
14
diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst
15
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/docs/system/arm/nuvoton.rst
17
--- a/target/arm/ptw.c
17
+++ b/docs/system/arm/nuvoton.rst
18
+++ b/target/arm/ptw.c
18
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
19
-Nuvoton iBMC boards (``npcm750-evb``, ``quanta-gsj``)
20
20
-=====================================================
21
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
21
+Nuvoton iBMC boards (``*-bmc``, ``npcm750-evb``, ``quanta-gsj``)
22
MMUAccessType access_type, ARMMMUIdx mmu_idx,
22
+================================================================
23
- GetPhysAddrResult *result,
23
24
+ bool secure, GetPhysAddrResult *result,
24
The `Nuvoton iBMC`_ chips (NPCM7xx) are a family of ARM-based SoCs that are
25
ARMMMUFaultInfo *fi)
25
designed to be used as Baseboard Management Controllers (BMCs) in various
26
{
26
@@ -XXX,XX +XXX,XX @@ segment. The following machines are based on this chip :
27
ARMCPU *cpu = env_archcpu(env);
27
The NPCM730 SoC has two Cortex-A9 cores and is targeted for Data Center and
28
int n;
28
Hyperscale applications. The following machines are based on this chip :
29
- bool secure = regime_is_secure(env, mmu_idx);
29
30
bool is_user = regime_is_user(env, mmu_idx);
30
+- ``quanta-gbs-bmc`` Quanta GBS server BMC
31
31
- ``quanta-gsj`` Quanta GSJ server BMC
32
result->phys = address;
32
33
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
33
There are also two more SoCs, NPCM710 and NPCM705, which are single-core
34
} else if (arm_feature(env, ARM_FEATURE_V7)) {
35
/* PMSAv7 */
36
ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
37
- result, fi);
38
+ is_secure, result, fi);
39
} else {
40
/* Pre-v7 MPU */
41
ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
34
--
42
--
35
2.20.1
43
2.25.1
36
44
37
45
1
From: Joe Komlodi <joe.komlodi@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
If the CPU is running in default NaN mode (FPCR.DN == 1) and we execute
3
Remove the use of regime_is_secure from get_phys_addr_pmsav5.
4
FRSQRTE, FRECPE, or FRECPX with a signaling NaN, parts_silence_nan_frac() will
5
assert due to fpst->default_nan_mode being set.
6
4
7
To avoid this, we check to see what NaN mode we're running in before we call
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
floatxx_silence_nan().
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
7
Message-id: 20220822152741.1617527-21-richard.henderson@linaro.org
10
Signed-off-by: Joe Komlodi <joe.komlodi@xilinx.com>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 1624662174-175828-2-git-send-email-joe.komlodi@xilinx.com
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
10
---
16
target/arm/helper-a64.c | 12 +++++++++---
11
target/arm/ptw.c | 4 ++--
17
target/arm/vfp_helper.c | 24 ++++++++++++++++++------
12
1 file changed, 2 insertions(+), 2 deletions(-)
18
2 files changed, 27 insertions(+), 9 deletions(-)
19
13
20
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
14
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
21
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/helper-a64.c
16
--- a/target/arm/ptw.c
23
+++ b/target/arm/helper-a64.c
17
+++ b/target/arm/ptw.c
24
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
18
@@ -XXX,XX +XXX,XX @@ do_fault:
25
float16 nan = a;
19
26
if (float16_is_signaling_nan(a, fpst)) {
20
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
27
float_raise(float_flag_invalid, fpst);
21
MMUAccessType access_type, ARMMMUIdx mmu_idx,
28
- nan = float16_silence_nan(a, fpst);
22
- GetPhysAddrResult *result,
29
+ if (!fpst->default_nan_mode) {
23
+ bool is_secure, GetPhysAddrResult *result,
30
+ nan = float16_silence_nan(a, fpst);
24
ARMMMUFaultInfo *fi)
31
+ }
25
{
26
int n;
27
@@ -XXX,XX +XXX,XX @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
28
} else {
29
/* Pre-v7 MPU */
30
ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
31
- result, fi);
32
+ is_secure, result, fi);
32
}
33
}
33
if (fpst->default_nan_mode) {
34
qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
34
nan = float16_default_nan(fpst);
35
" mmu_idx %u -> %s (prot %c%c%c)\n",
35
@@ -XXX,XX +XXX,XX @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
36
float32 nan = a;
37
if (float32_is_signaling_nan(a, fpst)) {
38
float_raise(float_flag_invalid, fpst);
39
- nan = float32_silence_nan(a, fpst);
40
+ if (!fpst->default_nan_mode) {
41
+ nan = float32_silence_nan(a, fpst);
42
+ }
43
}
44
if (fpst->default_nan_mode) {
45
nan = float32_default_nan(fpst);
46
@@ -XXX,XX +XXX,XX @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
47
float64 nan = a;
48
if (float64_is_signaling_nan(a, fpst)) {
49
float_raise(float_flag_invalid, fpst);
50
- nan = float64_silence_nan(a, fpst);
51
+ if (!fpst->default_nan_mode) {
52
+ nan = float64_silence_nan(a, fpst);
53
+ }
54
}
55
if (fpst->default_nan_mode) {
56
nan = float64_default_nan(fpst);
57
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/vfp_helper.c
60
+++ b/target/arm/vfp_helper.c
61
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
62
float16 nan = f16;
63
if (float16_is_signaling_nan(f16, fpst)) {
64
float_raise(float_flag_invalid, fpst);
65
- nan = float16_silence_nan(f16, fpst);
66
+ if (!fpst->default_nan_mode) {
67
+ nan = float16_silence_nan(f16, fpst);
68
+ }
69
}
70
if (fpst->default_nan_mode) {
71
nan = float16_default_nan(fpst);
72
@@ -XXX,XX +XXX,XX @@ float32 HELPER(recpe_f32)(float32 input, void *fpstp)
73
float32 nan = f32;
74
if (float32_is_signaling_nan(f32, fpst)) {
75
float_raise(float_flag_invalid, fpst);
76
- nan = float32_silence_nan(f32, fpst);
77
+ if (!fpst->default_nan_mode) {
78
+ nan = float32_silence_nan(f32, fpst);
79
+ }
80
}
81
if (fpst->default_nan_mode) {
82
nan = float32_default_nan(fpst);
83
@@ -XXX,XX +XXX,XX @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp)
84
float64 nan = f64;
85
if (float64_is_signaling_nan(f64, fpst)) {
86
float_raise(float_flag_invalid, fpst);
87
- nan = float64_silence_nan(f64, fpst);
88
+ if (!fpst->default_nan_mode) {
89
+ nan = float64_silence_nan(f64, fpst);
90
+ }
91
}
92
if (fpst->default_nan_mode) {
93
nan = float64_default_nan(fpst);
94
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
95
float16 nan = f16;
96
if (float16_is_signaling_nan(f16, s)) {
97
float_raise(float_flag_invalid, s);
98
- nan = float16_silence_nan(f16, s);
99
+ if (!s->default_nan_mode) {
100
+ nan = float16_silence_nan(f16, fpstp);
101
+ }
102
}
103
if (s->default_nan_mode) {
104
nan = float16_default_nan(s);
105
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
106
float32 nan = f32;
107
if (float32_is_signaling_nan(f32, s)) {
108
float_raise(float_flag_invalid, s);
109
- nan = float32_silence_nan(f32, s);
110
+ if (!s->default_nan_mode) {
111
+ nan = float32_silence_nan(f32, fpstp);
112
+ }
113
}
114
if (s->default_nan_mode) {
115
nan = float32_default_nan(s);
116
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
117
float64 nan = f64;
118
if (float64_is_signaling_nan(f64, s)) {
119
float_raise(float_flag_invalid, s);
120
- nan = float64_silence_nan(f64, s);
121
+ if (!s->default_nan_mode) {
122
+ nan = float64_silence_nan(f64, fpstp);
123
+ }
124
}
125
if (s->default_nan_mode) {
126
nan = float64_default_nan(s);
127
--
36
--
128
2.20.1
37
2.25.1
129
38
130
39
New patch
1
From: Keqian Zhu <zhukeqian1@huawei.com>
1
2
3
Setting up an ARM virtual machine of the 'virt' machine type and issuing the
QMP command "query-acpi-ospm-status" causes a segmentation fault with the
following backtrace:
5
#1 0x0000aaaaab64235c in qmp_query_acpi_ospm_status (errp=errp@entry=0xfffffffff030) at ../monitor/qmp-cmds.c:312
6
#2 0x0000aaaaabfc4e20 in qmp_marshal_query_acpi_ospm_status (args=<optimized out>, ret=0xffffea4ffe90, errp=0xffffea4ffe88) at qapi/qapi-commands-acpi.c:63
7
#3 0x0000aaaaabff8ba0 in do_qmp_dispatch_bh (opaque=0xffffea4ffe98) at ../qapi/qmp-dispatch.c:128
8
#4 0x0000aaaaac02e594 in aio_bh_call (bh=0xffffe0004d80) at ../util/async.c:150
9
#5 aio_bh_poll (ctx=ctx@entry=0xaaaaad0f6040) at ../util/async.c:178
10
#6 0x0000aaaaac00bd40 in aio_dispatch (ctx=ctx@entry=0xaaaaad0f6040) at ../util/aio-posix.c:421
11
#7 0x0000aaaaac02e010 in aio_ctx_dispatch (source=0xaaaaad0f6040, callback=<optimized out>, user_data=<optimized out>) at ../util/async.c:320
12
#8 0x0000fffff76f6884 in g_main_context_dispatch () at /usr/lib64/libglib-2.0.so.0
13
#9 0x0000aaaaac0452d4 in glib_pollfds_poll () at ../util/main-loop.c:297
14
#10 os_host_main_loop_wait (timeout=0) at ../util/main-loop.c:320
15
#11 main_loop_wait (nonblocking=nonblocking@entry=0) at ../util/main-loop.c:596
16
#12 0x0000aaaaab5c9e50 in qemu_main_loop () at ../softmmu/runstate.c:734
17
#13 0x0000aaaaab185370 in qemu_main (argc=argc@entry=47, argv=argv@entry=0xfffffffff518, envp=envp@entry=0x0) at ../softmmu/main.c:38
18
#14 0x0000aaaaab16f99c in main (argc=47, argv=0xfffffffff518) at ../softmmu/main.c:47
19
20
Fixes: ebb62075021a ("hw/acpi: Add ACPI Generic Event Device Support")
21
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
22
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
23
Message-id: 20220816094957.31700-1-zhukeqian1@huawei.com
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
25
---
26
hw/acpi/generic_event_device.c | 8 ++++++++
27
1 file changed, 8 insertions(+)
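
The crash is the familiar unset-class-hook failure: the QMP handler presumably
calls through the AcpiDeviceIfClass ospm_status slot, which acpi-ged had never
filled in, so the call dereferences NULL. A generic sketch of the failure mode
and of the fix below (stand-in names, not the real QEMU API):

    #include <stdio.h>

    /* Stand-in for a QOM-style class with an optional method slot. */
    typedef struct {
        void (*ospm_status)(void *dev);   /* NULL until a device registers it */
    } AcpiDeviceClassStub;

    static void ged_ospm_status(void *dev)
    {
        (void)dev;
        printf("reporting memory hotplug OST info\n");
    }

    static void query_ospm_status(AcpiDeviceClassStub *klass, void *dev)
    {
        /* With a NULL slot this is the segfault seen in the backtrace above. */
        klass->ospm_status(dev);
    }

    int main(void)
    {
        AcpiDeviceClassStub klass = { 0 };
        klass.ospm_status = ged_ospm_status;   /* the fix: register the hook */
        query_ospm_status(&klass, NULL);
        return 0;
    }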
28
29
diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/acpi/generic_event_device.c
32
+++ b/hw/acpi/generic_event_device.c
33
@@ -XXX,XX +XXX,XX @@ static void acpi_ged_unplug_cb(HotplugHandler *hotplug_dev,
34
}
35
}
36
37
+static void acpi_ged_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
38
+{
39
+ AcpiGedState *s = ACPI_GED(adev);
40
+
41
+ acpi_memory_ospm_status(&s->memhp_state, list);
42
+}
43
+
44
static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
45
{
46
AcpiGedState *s = ACPI_GED(adev);
47
@@ -XXX,XX +XXX,XX @@ static void acpi_ged_class_init(ObjectClass *class, void *data)
48
hc->unplug_request = acpi_ged_unplug_request_cb;
49
hc->unplug = acpi_ged_unplug_cb;
50
51
+ adevc->ospm_status = acpi_ged_ospm_status;
52
adevc->send_event = acpi_ged_send_event;
53
}
54
55
--
56
2.25.1
New patch
1
From: Lucas Dietrich <ld.adecy@gmail.com>
1
2
3
The LAN9118 allows the guest to specify a level for both the TX and
4
RX FIFOs at which an interrupt will be generated. We implement the
5
RSFL_INT interrupt for the RX FIFO but are missing the handling of
6
the equivalent TSFL_INT for the TX FIFO. Add the missing test to set
7
the interrupt if the TX FIFO has exceeded the guest-specified level.
8
9
This flag is required for the Micrium lan911x ethernet driver to work.
10
11
Signed-off-by: Lucas Dietrich <ld.adecy@gmail.com>
12
[PMM: Tweaked commit message and comment]
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
16
hw/net/lan9118.c | 8 ++++++++
17
1 file changed, 8 insertions(+)
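
For reference, the level used in the new check is the TX Status Level field the
guest programs into FIFO_INT (bits [23:16], as the extraction in the patch
implies). A small standalone demonstration of the intended behaviour, with
made-up register values and an illustrative TSFL_INT bit position:

    #include <stdint.h>
    #include <stdio.h>

    #define TSFL_INT (1u << 7)                /* illustrative, not the real bit */

    int main(void)
    {
        uint32_t fifo_int = 4 << 16;          /* guest sets TX Status Level = 4 */
        uint32_t int_sts = 0;

        for (unsigned used = 1; used <= 6; used++) {
            if (used > ((fifo_int >> 16) & 0xff)) {
                int_sts |= TSFL_INT;          /* asserts once the 5th entry queues */
            }
            printf("used=%u tsfl=%u\n", used, !!(int_sts & TSFL_INT));
        }
        return 0;
    }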
18
19
diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/net/lan9118.c
22
+++ b/hw/net/lan9118.c
23
@@ -XXX,XX +XXX,XX @@ static void do_tx_packet(lan9118_state *s)
24
n = (s->tx_status_fifo_head + s->tx_status_fifo_used) & 511;
25
s->tx_status_fifo[n] = status;
26
s->tx_status_fifo_used++;
27
+
28
+ /*
29
+ * Generate TSFL interrupt if TX FIFO level exceeds the level
30
+ * specified in the FIFO_INT TX Status Level field.
31
+ */
32
+ if (s->tx_status_fifo_used > ((s->fifo_int >> 16) & 0xff)) {
33
+ s->int_sts |= TSFL_INT;
34
+ }
35
if (s->tx_status_fifo_used == 512) {
36
s->int_sts |= TSFF_INT;
37
/* TODO: Stop transmission. */
38
--
39
2.25.1
New patch
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
2
3
Replace the magic value '84' with the X_MAX definition, and '1' with Y_MAX.
4
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
7
Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Message-id: 20220819153931.3147384-2-peter.maydell@linaro.org
10
---
11
chardev/baum.c | 11 +++++++----
12
1 file changed, 7 insertions(+), 4 deletions(-)
13
14
diff --git a/chardev/baum.c b/chardev/baum.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/chardev/baum.c
17
+++ b/chardev/baum.c
18
@@ -XXX,XX +XXX,XX @@
19
20
#define BUF_SIZE 256
21
22
+#define X_MAX 84
23
+#define Y_MAX 1
24
+
25
struct BaumChardev {
26
Chardev parent;
27
28
@@ -XXX,XX +XXX,XX @@ static int baum_deferred_init(BaumChardev *baum)
29
brlapi_perror("baum: brlapi__getDisplaySize");
30
return 0;
31
}
32
- if (baum->y > 1) {
33
- baum->y = 1;
34
+ if (baum->y > Y_MAX) {
35
+ baum->y = Y_MAX;
36
}
37
- if (baum->x > 84) {
38
- baum->x = 84;
39
+ if (baum->x > X_MAX) {
40
+ baum->x = X_MAX;
41
}
42
43
con = qemu_console_lookup_by_index(0);
44
--
45
2.25.1
46
47
New patch
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
2
3
We know 'x * y' will be at most 'X_MAX * Y_MAX' (which is not
4
a big value, it is actually 84). Instead of having the compiler
5
use a variable-length array, declare an array able to hold the
6
maximum 'x * y'.
7
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
10
Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Message-id: 20220819153931.3147384-3-peter.maydell@linaro.org
13
---
14
chardev/baum.c | 8 ++++----
15
1 file changed, 4 insertions(+), 4 deletions(-)
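
A detail worth noting in the hunk below: once 'zero' is a fixed-size array,
sizeof(zero) is the maximum capacity rather than the number of cells actually
in use, which is why the memset switches to baum->x * baum->y. A minimal sketch
of the distinction (X_MAX/Y_MAX as in the patch, everything else illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define X_MAX 84
    #define Y_MAX 1

    int main(void)
    {
        unsigned x = 40, y = 1;               /* runtime display size <= maximum */
        uint8_t zero[X_MAX * Y_MAX];

        memset(zero, 0, x * y);               /* clear only the cells in use */
        printf("capacity=%zu in-use=%u\n", sizeof(zero), x * y);
        return 0;
    }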
16
17
diff --git a/chardev/baum.c b/chardev/baum.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/chardev/baum.c
20
+++ b/chardev/baum.c
21
@@ -XXX,XX +XXX,XX @@ static int baum_eat_packet(BaumChardev *baum, const uint8_t *buf, int len)
22
switch (req) {
23
case BAUM_REQ_DisplayData:
24
{
25
- uint8_t cells[baum->x * baum->y], c;
26
- uint8_t text[baum->x * baum->y];
27
- uint8_t zero[baum->x * baum->y];
28
+ uint8_t cells[X_MAX * Y_MAX], c;
29
+ uint8_t text[X_MAX * Y_MAX];
30
+ uint8_t zero[X_MAX * Y_MAX];
31
int cursor = BRLAPI_CURSOR_OFF;
32
int i;
33
34
@@ -XXX,XX +XXX,XX @@ static int baum_eat_packet(BaumChardev *baum, const uint8_t *buf, int len)
35
}
36
timer_del(baum->cellCount_timer);
37
38
- memset(zero, 0, sizeof(zero));
39
+ memset(zero, 0, baum->x * baum->y);
40
41
brlapi_writeArguments_t wa = {
42
.displayNumber = BRLAPI_DISPLAY_DEFAULT,
43
--
44
2.25.1
45
46
diff view generated by jsdifflib
New patch
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
2
3
Use autofree heap allocation instead of variable-length
4
array on the stack.
5
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
8
Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Message-id: 20220819153931.3147384-4-peter.maydell@linaro.org
11
---
12
chardev/baum.c | 3 ++-
13
1 file changed, 2 insertions(+), 1 deletion(-)
14
15
diff --git a/chardev/baum.c b/chardev/baum.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/chardev/baum.c
18
+++ b/chardev/baum.c
19
@@ -XXX,XX +XXX,XX @@ static void baum_chr_accept_input(struct Chardev *chr)
20
static void baum_write_packet(BaumChardev *baum, const uint8_t *buf, int len)
21
{
22
Chardev *chr = CHARDEV(baum);
23
- uint8_t io_buf[1 + 2 * len], *cur = io_buf;
24
+ g_autofree uint8_t *io_buf = g_malloc(1 + 2 * len);
25
+ uint8_t *cur = io_buf;
26
int room;
27
*cur++ = ESC;
28
while (len--)
29
--
30
2.25.1
31
32
1
Implement the MVE shifts by immediate, which perform shifts
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
on a single general-purpose register.
3
2
4
These patterns overlap with the long-shift-by-immediates,
3
The combined_key[... QIO_CHANNEL_WEBSOCK_GUID_LEN ...] array in
5
so we have to rearrange the grouping a little here.
4
qio_channel_websock_handshake_send_res_ok() expands to a call
5
to strlen(QIO_CHANNEL_WEBSOCK_GUID), and the compiler doesn't
6
realize the string is const, so consider combined_key[] being
7
a variable-length array.
6
8
9
To remove the variable-length array, we provide it a hint to
10
the compiler by using sizeof() - 1 instead of strlen().
11
12
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
13
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-id: 20220819153931.3147384-5-peter.maydell@linaro.org
9
Message-id: 20210628135835.6690-18-peter.maydell@linaro.org
10
---
16
---
11
target/arm/helper-mve.h | 3 ++
17
io/channel-websock.c | 2 +-
12
target/arm/translate.h | 1 +
18
1 file changed, 1 insertion(+), 1 deletion(-)
13
target/arm/t32.decode | 31 ++++++++++++++-----
14
target/arm/mve_helper.c | 10 ++++++
15
target/arm/translate.c | 68 +++++++++++++++++++++++++++++++++++++++--
16
5 files changed, 104 insertions(+), 9 deletions(-)
17
19
18
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
20
diff --git a/io/channel-websock.c b/io/channel-websock.c
19
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper-mve.h
22
--- a/io/channel-websock.c
21
+++ b/target/arm/helper-mve.h
23
+++ b/io/channel-websock.c
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
23
DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
24
DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
25
DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
26
+
27
+DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
28
+DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
29
diff --git a/target/arm/translate.h b/target/arm/translate.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/translate.h
32
+++ b/target/arm/translate.h
33
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
34
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
35
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
36
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
37
+typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
38
39
/**
40
* arm_tbflags_from_tb:
41
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/t32.decode
44
+++ b/target/arm/t32.decode
45
@@ -XXX,XX +XXX,XX @@
24
@@ -XXX,XX +XXX,XX @@
46
25
47
&mve_shl_ri rdalo rdahi shim
26
#define QIO_CHANNEL_WEBSOCK_CLIENT_KEY_LEN 24
48
&mve_shl_rr rdalo rdahi rm
27
#define QIO_CHANNEL_WEBSOCK_GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
49
+&mve_sh_ri rda shim
28
-#define QIO_CHANNEL_WEBSOCK_GUID_LEN strlen(QIO_CHANNEL_WEBSOCK_GUID)
50
29
+#define QIO_CHANNEL_WEBSOCK_GUID_LEN (sizeof(QIO_CHANNEL_WEBSOCK_GUID) - 1)
51
# rdahi: bits [3:1] from insn, bit 0 is 1
30
52
# rdalo: bits [3:1] from insn, bit 0 is 0
31
#define QIO_CHANNEL_WEBSOCK_HEADER_PROTOCOL "sec-websocket-protocol"
53
@@ -XXX,XX +XXX,XX @@
32
#define QIO_CHANNEL_WEBSOCK_HEADER_VERSION "sec-websocket-version"
54
&mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
55
@mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
56
&mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
57
+@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
58
+ &mve_sh_ri shim=%imm5_12_6
59
60
{
61
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
62
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
63
# the rest fall through (where ORR_rrri and MOV_rxri will end up
64
# handling them as r13 and r15 accesses with the same semantics as A32).
65
[
66
- LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
67
- LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
68
- ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
69
+ {
70
+ UQSHL_ri 1110101 0010 1 .... 0 ... 1111 .. 00 1111 @mve_sh_ri
71
+ LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
72
+ UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
73
+ }
74
75
- UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
76
- URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
77
- SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
78
- SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
79
+ {
80
+ URSHR_ri 1110101 0010 1 .... 0 ... 1111 .. 01 1111 @mve_sh_ri
81
+ LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
82
+ URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
83
+ }
84
+
85
+ {
86
+ SRSHR_ri 1110101 0010 1 .... 0 ... 1111 .. 10 1111 @mve_sh_ri
87
+ ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
88
+ SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
89
+ }
90
+
91
+ {
92
+ SQSHL_ri 1110101 0010 1 .... 0 ... 1111 .. 11 1111 @mve_sh_ri
93
+ SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
94
+ }
95
96
LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
97
ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
98
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/arm/mve_helper.c
101
+++ b/target/arm/mve_helper.c
102
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
103
{
104
return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
105
}
106
+
107
+uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
108
+{
109
+ return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
110
+}
111
+
112
+uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
113
+{
114
+ return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
115
+}
116
diff --git a/target/arm/translate.c b/target/arm/translate.c
117
index XXXXXXX..XXXXXXX 100644
118
--- a/target/arm/translate.c
119
+++ b/target/arm/translate.c
120
@@ -XXX,XX +XXX,XX @@ static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
121
122
static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
123
{
124
- TCGv_i32 t = tcg_temp_new_i32();
125
+ TCGv_i32 t;
126
127
+ /* Handle shift by the input size for the benefit of trans_SRSHR_ri */
128
+ if (sh == 32) {
129
+ tcg_gen_movi_i32(d, 0);
130
+ return;
131
+ }
132
+ t = tcg_temp_new_i32();
133
tcg_gen_extract_i32(t, a, sh - 1, 1);
134
tcg_gen_sari_i32(d, a, sh);
135
tcg_gen_add_i32(d, d, t);
136
@@ -XXX,XX +XXX,XX @@ static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
137
138
static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
139
{
140
- TCGv_i32 t = tcg_temp_new_i32();
141
+ TCGv_i32 t;
142
143
+ /* Handle shift by the input size for the benefit of trans_URSHR_ri */
144
+ if (sh == 32) {
145
+ tcg_gen_extract_i32(d, a, sh - 1, 1);
146
+ return;
147
+ }
148
+ t = tcg_temp_new_i32();
149
tcg_gen_extract_i32(t, a, sh - 1, 1);
150
tcg_gen_shri_i32(d, a, sh);
151
tcg_gen_add_i32(d, d, t);
152
@@ -XXX,XX +XXX,XX @@ static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
153
return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
154
}
155
156
+static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
157
+{
158
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
159
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
160
+ return false;
161
+ }
162
+ if (!dc_isar_feature(aa32_mve, s) ||
163
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
164
+ a->rda == 13 || a->rda == 15) {
165
+ /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
166
+ unallocated_encoding(s);
167
+ return true;
168
+ }
169
+
170
+ if (a->shim == 0) {
171
+ a->shim = 32;
172
+ }
173
+ fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
174
+
175
+ return true;
176
+}
177
+
178
+static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
179
+{
180
+ return do_mve_sh_ri(s, a, gen_urshr32_i32);
181
+}
182
+
183
+static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
184
+{
185
+ return do_mve_sh_ri(s, a, gen_srshr32_i32);
186
+}
187
+
188
+static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
189
+{
190
+ gen_helper_mve_sqshl(r, cpu_env, n, tcg_constant_i32(shift));
191
+}
192
+
193
+static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
194
+{
195
+ return do_mve_sh_ri(s, a, gen_mve_sqshl);
196
+}
197
+
198
+static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
199
+{
200
+ gen_helper_mve_uqshl(r, cpu_env, n, tcg_constant_i32(shift));
201
+}
202
+
203
+static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
204
+{
205
+ return do_mve_sh_ri(s, a, gen_mve_uqshl);
206
+}
207
+
208
/*
209
* Multiply and multiply accumulate
210
*/
211
--
33
--
212
2.20.1
34
2.25.1
213
35
214
36
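A note on the idiom in the io/channel-websock change above: sizeof applied to a
string literal is an integer constant expression (the length including the
trailing NUL), so sizeof(GUID) - 1 can size an array statically, whereas a
strlen() call is not a constant expression and forces a variable-length array.
A minimal illustration (the 24 stands in for QIO_CHANNEL_WEBSOCK_CLIENT_KEY_LEN):

    #include <stdio.h>
    #include <string.h>

    #define GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
    #define GUID_LEN (sizeof(GUID) - 1)       /* constant expression */

    int main(void)
    {
        char combined_key[24 + GUID_LEN + 1]; /* fixed-size array, not a VLA */

        (void)combined_key;
        printf("%zu == %zu\n", GUID_LEN, strlen(GUID));
        return 0;
    }
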
1
Implement the MVE saturating shift-right-and-narrow insns
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
VQSHRN, VQSHRUN, VQRSHRN and VQRSHRUN.
3
2
4
do_srshr() is borrowed from sve_helper.c.
3
The compiler isn't clever enough to figure 'min_buf_size'
4
is a constant, so help it by using a definition instead.
5
5
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Acked-by: Jason Wang <jasowang@redhat.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20220819153931.3147384-6-peter.maydell@linaro.org
8
Message-id: 20210628135835.6690-13-peter.maydell@linaro.org
9
---
11
---
10
target/arm/helper-mve.h | 30 +++++++++++
12
hw/net/e1000e_core.c | 7 ++++---
11
target/arm/mve.decode | 28 ++++++++++
13
1 file changed, 4 insertions(+), 3 deletions(-)
12
target/arm/mve_helper.c | 104 +++++++++++++++++++++++++++++++++++++
13
target/arm/translate-mve.c | 12 +++++
14
4 files changed, 174 insertions(+)
15
14
16
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
17
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/helper-mve.h
17
--- a/hw/net/e1000e_core.c
19
+++ b/target/arm/helper-mve.h
18
+++ b/hw/net/e1000e_core.c
20
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
@@ -XXX,XX +XXX,XX @@ e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt)
21
DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
22
DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
+
25
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
26
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
27
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
29
+
30
+DEF_HELPER_FLAGS_4(mve_vqshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
31
+DEF_HELPER_FLAGS_4(mve_vqshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
+DEF_HELPER_FLAGS_4(mve_vqshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
+DEF_HELPER_FLAGS_4(mve_vqshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
34
+
35
+DEF_HELPER_FLAGS_4(mve_vqshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
36
+DEF_HELPER_FLAGS_4(mve_vqshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
37
+DEF_HELPER_FLAGS_4(mve_vqshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
38
+DEF_HELPER_FLAGS_4(mve_vqshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
39
+
40
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
41
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
42
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
43
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
44
+
45
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
46
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
47
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
48
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
49
+
50
+DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
51
+DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
52
+DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
53
+DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
54
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/arm/mve.decode
57
+++ b/target/arm/mve.decode
58
@@ -XXX,XX +XXX,XX @@ VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
59
VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
60
VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
61
VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
62
+
63
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
64
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
65
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
66
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
67
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
68
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
69
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
70
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
71
+
72
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
73
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
74
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
75
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
76
+
77
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
78
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
79
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
80
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
81
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
82
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
83
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
84
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
85
+
86
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
87
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
88
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
89
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
90
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
91
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/mve_helper.c
93
+++ b/target/arm/mve_helper.c
94
@@ -XXX,XX +XXX,XX @@ static inline uint64_t do_urshr(uint64_t x, unsigned sh)
95
}
20
}
96
}
21
}
97
22
98
+static inline int64_t do_srshr(int64_t x, unsigned sh)
23
+/* Min. octets in an ethernet frame sans FCS */
99
+{
24
+#define MIN_BUF_SIZE 60
100
+ if (likely(sh < 64)) {
101
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
102
+ } else {
103
+ /* Rounding the sign bit always produces 0. */
104
+ return 0;
105
+ }
106
+}
107
+
25
+
108
DO_VSHRN_ALL(vshrn, DO_SHR)
26
ssize_t
109
DO_VSHRN_ALL(vrshrn, do_urshr)
27
e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt)
110
+
28
{
111
+static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
29
static const int maximum_ethernet_hdr_len = (14 + 4);
112
+ bool *satp)
30
- /* Min. octets in an ethernet frame sans FCS */
113
+{
31
- static const int min_buf_size = 60;
114
+ if (val > max) {
32
115
+ *satp = true;
33
uint32_t n = 0;
116
+ return max;
34
- uint8_t min_buf[min_buf_size];
117
+ } else if (val < min) {
35
+ uint8_t min_buf[MIN_BUF_SIZE];
118
+ *satp = true;
36
struct iovec min_iov;
119
+ return min;
37
uint8_t *filter_buf;
120
+ } else {
38
size_t size, orig_size;
121
+ return val;
122
+ }
123
+}
124
+
125
+/* Saturating narrowing right shifts */
126
+#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
127
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
128
+ void *vm, uint32_t shift) \
129
+ { \
130
+ LTYPE *m = vm; \
131
+ TYPE *d = vd; \
132
+ uint16_t mask = mve_element_mask(env); \
133
+ bool qc = false; \
134
+ unsigned le; \
135
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
136
+ bool sat = false; \
137
+ TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
138
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
139
+ qc |= sat && (mask & 1 << (TOP * ESIZE)); \
140
+ } \
141
+ if (qc) { \
142
+ env->vfp.qc[0] = qc; \
143
+ } \
144
+ mve_advance_vpt(env); \
145
+ }
146
+
147
+#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \
148
+ DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
149
+ DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)
150
+
151
+#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \
152
+ DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
153
+ DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)
154
+
155
+#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \
156
+ DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
157
+ DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)
158
+
159
+#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \
160
+ DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
161
+ DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
162
+
163
+#define DO_SHRN_SB(N, M, SATP) \
164
+ do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
165
+#define DO_SHRN_UB(N, M, SATP) \
166
+ do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
167
+#define DO_SHRUN_B(N, M, SATP) \
168
+ do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)
169
+
170
+#define DO_SHRN_SH(N, M, SATP) \
171
+ do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
172
+#define DO_SHRN_UH(N, M, SATP) \
173
+ do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
174
+#define DO_SHRUN_H(N, M, SATP) \
175
+ do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)
176
+
177
+#define DO_RSHRN_SB(N, M, SATP) \
178
+ do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
179
+#define DO_RSHRN_UB(N, M, SATP) \
180
+ do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
181
+#define DO_RSHRUN_B(N, M, SATP) \
182
+ do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)
183
+
184
+#define DO_RSHRN_SH(N, M, SATP) \
185
+ do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
186
+#define DO_RSHRN_UH(N, M, SATP) \
187
+ do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
188
+#define DO_RSHRUN_H(N, M, SATP) \
189
+ do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)
190
+
191
+DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
192
+DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
193
+DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
194
+DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
195
+DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
196
+DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)
197
+
198
+DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
199
+DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
200
+DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
201
+DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
202
+DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
203
+DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
204
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
205
index XXXXXXX..XXXXXXX 100644
206
--- a/target/arm/translate-mve.c
207
+++ b/target/arm/translate-mve.c
208
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VSHRNB, vshrnb)
209
DO_2SHIFT_N(VSHRNT, vshrnt)
210
DO_2SHIFT_N(VRSHRNB, vrshrnb)
211
DO_2SHIFT_N(VRSHRNT, vrshrnt)
212
+DO_2SHIFT_N(VQSHRNB_S, vqshrnb_s)
213
+DO_2SHIFT_N(VQSHRNT_S, vqshrnt_s)
214
+DO_2SHIFT_N(VQSHRNB_U, vqshrnb_u)
215
+DO_2SHIFT_N(VQSHRNT_U, vqshrnt_u)
216
+DO_2SHIFT_N(VQSHRUNB, vqshrunb)
217
+DO_2SHIFT_N(VQSHRUNT, vqshrunt)
218
+DO_2SHIFT_N(VQRSHRNB_S, vqrshrnb_s)
219
+DO_2SHIFT_N(VQRSHRNT_S, vqrshrnt_s)
220
+DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
221
+DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
222
+DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
223
+DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
224
--
39
--
225
2.20.1
40
2.25.1
226
41
227
42
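The same C rule is behind the e1000e change above: a 'static const int' is not
an integer constant expression in C, so an array dimensioned with it is
formally a VLA, while a #define (or enum constant) is a true compile-time
constant. A short illustration, independent of the real device code:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN_BUF_SIZE 60                   /* integer constant expression */

    int main(void)
    {
        static const int min_buf_size = 60;
        uint8_t vla_in_c[min_buf_size];       /* formally a VLA in C */
        uint8_t fixed[MIN_BUF_SIZE];          /* a true fixed-size array */

        printf("%zu %zu\n", sizeof(vla_in_c), sizeof(fixed));
        return 0;
    }
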
1
The function asimd_imm_const() in translate-neon.c is an
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
implementation of the pseudocode AdvSIMDExpandImm(), which we will
3
also want for MVE. Move the implementation to translate.c, with a
4
prototype in translate.h.
5
2
3
Use autofree heap allocation instead of variable-length
4
array on the stack.
5
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Acked-by: David Gibson <david@gibson.dropbear.id.au>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Message-id: 20210628135835.6690-4-peter.maydell@linaro.org
10
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
11
Message-id: 20220819153931.3147384-7-peter.maydell@linaro.org
9
---
12
---
10
target/arm/translate.h | 16 ++++++++++
13
hw/ppc/pnv.c | 4 ++--
11
target/arm/translate-neon.c | 63 -------------------------------------
14
hw/ppc/spapr.c | 8 ++++----
12
target/arm/translate.c | 57 +++++++++++++++++++++++++++++++++
15
hw/ppc/spapr_pci_nvlink2.c | 2 +-
13
3 files changed, 73 insertions(+), 63 deletions(-)
16
3 files changed, 7 insertions(+), 7 deletions(-)
14
17
15
diff --git a/target/arm/translate.h b/target/arm/translate.h
18
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
16
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate.h
20
--- a/hw/ppc/pnv.c
18
+++ b/target/arm/translate.h
21
+++ b/hw/ppc/pnv.c
19
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
22
@@ -XXX,XX +XXX,XX @@ static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt)
20
return opc | s->be_data;
23
int smt_threads = CPU_CORE(pc)->nr_threads;
24
CPUPPCState *env = &cpu->env;
25
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
26
- uint32_t servers_prop[smt_threads];
27
+ g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads);
28
int i;
29
uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
30
0xffffffff, 0xffffffff};
31
@@ -XXX,XX +XXX,XX @@ static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt)
32
servers_prop[i] = cpu_to_be32(pc->pir + i);
33
}
34
_FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
35
- servers_prop, sizeof(servers_prop))));
36
+ servers_prop, sizeof(*servers_prop) * smt_threads)));
21
}
37
}
22
38
23
+/**
39
static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t pir,
24
+ * asimd_imm_const: Expand an encoded SIMD constant value
40
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
25
+ *
26
+ * Expand a SIMD constant value. This is essentially the pseudocode
27
+ * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
28
+ * VMVN and VBIC (when cmode < 14 && op == 1).
29
+ *
30
+ * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
31
+ * callers must catch this.
32
+ *
33
+ * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
34
+ * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
35
+ * we produce an immediate constant value of 0 in these cases.
36
+ */
37
+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
38
+
39
#endif /* TARGET_ARM_TRANSLATE_H */
40
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
41
index XXXXXXX..XXXXXXX 100644
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/translate-neon.c
42
--- a/hw/ppc/spapr.c
43
+++ b/target/arm/translate-neon.c
43
+++ b/hw/ppc/spapr.c
44
@@ -XXX,XX +XXX,XX @@ DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh)
44
@@ -XXX,XX +XXX,XX @@ static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
45
DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_hs)
45
int smt_threads)
46
DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_hu)
47
48
-static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
49
-{
50
- /*
51
- * Expand the encoded constant.
52
- * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
53
- * We choose to not special-case this and will behave as if a
54
- * valid constant encoding of 0 had been given.
55
- * cmode = 15 op = 1 must UNDEF; we assume decode has handled that.
56
- */
57
- switch (cmode) {
58
- case 0: case 1:
59
- /* no-op */
60
- break;
61
- case 2: case 3:
62
- imm <<= 8;
63
- break;
64
- case 4: case 5:
65
- imm <<= 16;
66
- break;
67
- case 6: case 7:
68
- imm <<= 24;
69
- break;
70
- case 8: case 9:
71
- imm |= imm << 16;
72
- break;
73
- case 10: case 11:
74
- imm = (imm << 8) | (imm << 24);
75
- break;
76
- case 12:
77
- imm = (imm << 8) | 0xff;
78
- break;
79
- case 13:
80
- imm = (imm << 16) | 0xffff;
81
- break;
82
- case 14:
83
- if (op) {
84
- /*
85
- * This is the only case where the top and bottom 32 bits
86
- * of the encoded constant differ.
87
- */
88
- uint64_t imm64 = 0;
89
- int n;
90
-
91
- for (n = 0; n < 8; n++) {
92
- if (imm & (1 << n)) {
93
- imm64 |= (0xffULL << (n * 8));
94
- }
95
- }
96
- return imm64;
97
- }
98
- imm |= (imm << 8) | (imm << 16) | (imm << 24);
99
- break;
100
- case 15:
101
- imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
102
- | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
103
- break;
104
- }
105
- if (op) {
106
- imm = ~imm;
107
- }
108
- return dup_const(MO_32, imm);
109
-}
110
-
111
static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
112
GVecGen2iFn *fn)
113
{
46
{
114
diff --git a/target/arm/translate.c b/target/arm/translate.c
47
int i, ret = 0;
48
- uint32_t servers_prop[smt_threads];
49
- uint32_t gservers_prop[smt_threads * 2];
50
+ g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads);
51
+ g_autofree uint32_t *gservers_prop = g_new(uint32_t, smt_threads * 2);
52
int index = spapr_get_vcpu_id(cpu);
53
54
if (cpu->compat_pvr) {
55
@@ -XXX,XX +XXX,XX @@ static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
56
gservers_prop[i*2 + 1] = 0;
57
}
58
ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
59
- servers_prop, sizeof(servers_prop));
60
+ servers_prop, sizeof(*servers_prop) * smt_threads);
61
if (ret < 0) {
62
return ret;
63
}
64
ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
65
- gservers_prop, sizeof(gservers_prop));
66
+ gservers_prop, sizeof(*gservers_prop) * smt_threads * 2);
67
68
return ret;
69
}
70
diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
115
index XXXXXXX..XXXXXXX 100644
71
index XXXXXXX..XXXXXXX 100644
116
--- a/target/arm/translate.c
72
--- a/hw/ppc/spapr_pci_nvlink2.c
117
+++ b/target/arm/translate.c
73
+++ b/hw/ppc/spapr_pci_nvlink2.c
118
@@ -XXX,XX +XXX,XX @@ void arm_translate_init(void)
74
@@ -XXX,XX +XXX,XX @@ void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
119
a64_translate_init();
75
continue;
120
}
76
}
121
77
if (dev == nvslot->gpdev) {
122
+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
78
- uint32_t npus[nvslot->linknum];
123
+{
79
+ g_autofree uint32_t *npus = g_new(uint32_t, nvslot->linknum);
124
+ /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */
80
125
+ switch (cmode) {
81
for (j = 0; j < nvslot->linknum; ++j) {
126
+ case 0: case 1:
82
PCIDevice *npdev = nvslot->links[j].npdev;
127
+ /* no-op */
128
+ break;
129
+ case 2: case 3:
130
+ imm <<= 8;
131
+ break;
132
+ case 4: case 5:
133
+ imm <<= 16;
134
+ break;
135
+ case 6: case 7:
136
+ imm <<= 24;
137
+ break;
138
+ case 8: case 9:
139
+ imm |= imm << 16;
140
+ break;
141
+ case 10: case 11:
142
+ imm = (imm << 8) | (imm << 24);
143
+ break;
144
+ case 12:
145
+ imm = (imm << 8) | 0xff;
146
+ break;
147
+ case 13:
148
+ imm = (imm << 16) | 0xffff;
149
+ break;
150
+ case 14:
151
+ if (op) {
152
+ /*
153
+ * This is the only case where the top and bottom 32 bits
154
+ * of the encoded constant differ.
155
+ */
156
+ uint64_t imm64 = 0;
157
+ int n;
158
+
159
+ for (n = 0; n < 8; n++) {
160
+ if (imm & (1 << n)) {
161
+ imm64 |= (0xffULL << (n * 8));
162
+ }
163
+ }
164
+ return imm64;
165
+ }
166
+ imm |= (imm << 8) | (imm << 16) | (imm << 24);
167
+ break;
168
+ case 15:
169
+ imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
170
+ | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
171
+ break;
172
+ }
173
+ if (op) {
174
+ imm = ~imm;
175
+ }
176
+ return dup_const(MO_32, imm);
177
+}
178
+
179
/* Generate a label used for skipping this instruction */
180
void arm_gen_condlabel(DisasContext *s)
181
{
182
--
83
--
183
2.20.1
84
2.25.1
184
85
185
86
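
As a quick illustration of the AdvSIMDExpandImm behaviour discussed above, the cmode == 14, op == 1 case expands each bit of the 8-bit immediate into an all-ones or all-zeroes byte of the 64-bit result. The standalone sketch below covers only that case; the name expand_bytes is invented for the example, but the loop is the same as in asimd_imm_const().

    #include <stdint.h>
    #include <stdio.h>

    /* cmode == 14, op == 1: bit n of imm selects 0xff or 0x00 for byte n. */
    static uint64_t expand_bytes(uint32_t imm)
    {
        uint64_t imm64 = 0;
        for (int n = 0; n < 8; n++) {
            if (imm & (1 << n)) {
                imm64 |= 0xffULL << (n * 8);
            }
        }
        return imm64;
    }

    int main(void)
    {
        /* 0xa5 has bits 0, 2, 5 and 7 set -> 0xff00ff0000ff00ff */
        printf("0x%016llx\n", (unsigned long long)expand_bytes(0xa5));
        return 0;
    }
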
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
2
3
Use autofree heap allocation instead of variable-length
4
array on the stack.
5
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Acked-by: David Gibson <david@gibson.dropbear.id.au>
8
Reviewed-by: Greg Kurz <groug@kaod.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Message-id: 20220819153931.3147384-8-peter.maydell@linaro.org
11
---
12
hw/intc/xics.c | 2 +-
13
1 file changed, 1 insertion(+), 1 deletion(-)
14
15
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/intc/xics.c
18
+++ b/hw/intc/xics.c
19
@@ -XXX,XX +XXX,XX @@ static void ics_reset_irq(ICSIRQState *irq)
20
static void ics_reset(DeviceState *dev)
21
{
22
ICSState *ics = ICS(dev);
23
+ g_autofree uint8_t *flags = g_malloc(ics->nr_irqs);
24
int i;
25
- uint8_t flags[ics->nr_irqs];
26
27
for (i = 0; i < ics->nr_irqs; i++) {
28
flags[i] = ics->irqs[i].flags;
29
--
30
2.25.1
31
32
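
The conversion pattern used in this and the other VLA patches in the series is the same: a g_autofree pointer obtained from g_new()/g_malloc() replaces the variable-length array and is released automatically when it goes out of scope. A minimal sketch, with an invented function and buffer name:

    #include <glib.h>

    static void fill_buffer(unsigned n)
    {
        /* Heap allocation, freed when 'buf' leaves scope, instead of
         * "guint32 buf[n];" on the stack. */
        g_autofree guint32 *buf = g_new(guint32, n);

        for (unsigned i = 0; i < n; i++) {
            buf[i] = i;
        }
    }
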
1
Implement the MVE shifts by register, which perform
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
shifts on a single general-purpose register.
3
2
3
Use autofree heap allocation instead of variable-length array on
4
the stack. Replace the snprintf() call by g_strdup_printf().
5
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220819153931.3147384-9-peter.maydell@linaro.org
6
Message-id: 20210628135835.6690-19-peter.maydell@linaro.org
7
---
10
---
8
target/arm/helper-mve.h | 2 ++
11
hw/i386/multiboot.c | 5 ++---
9
target/arm/translate.h | 1 +
12
1 file changed, 2 insertions(+), 3 deletions(-)
10
target/arm/t32.decode | 18 ++++++++++++++----
11
target/arm/mve_helper.c | 10 ++++++++++
12
target/arm/translate.c | 30 ++++++++++++++++++++++++++++++
13
5 files changed, 57 insertions(+), 4 deletions(-)
14
13
15
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
diff --git a/hw/i386/multiboot.c b/hw/i386/multiboot.c
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-mve.h
16
--- a/hw/i386/multiboot.c
18
+++ b/target/arm/helper-mve.h
17
+++ b/hw/i386/multiboot.c
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
18
@@ -XXX,XX +XXX,XX @@ int load_multiboot(X86MachineState *x86ms,
20
19
uint8_t *mb_bootinfo_data;
21
DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
20
uint32_t cmdline_len;
22
DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
21
GList *mods = NULL;
23
+DEF_HELPER_FLAGS_3(mve_uqrshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
22
+ g_autofree char *kcmdline = NULL;
24
+DEF_HELPER_FLAGS_3(mve_sqrshr, TCG_CALL_NO_RWG, i32, env, i32, i32)
23
25
diff --git a/target/arm/translate.h b/target/arm/translate.h
24
/* Ok, let's see if it is a multiboot image.
26
index XXXXXXX..XXXXXXX 100644
25
The header is 12x32bit long, so the latest entry may be 8192 - 48. */
27
--- a/target/arm/translate.h
26
@@ -XXX,XX +XXX,XX @@ int load_multiboot(X86MachineState *x86ms,
28
+++ b/target/arm/translate.h
29
@@ -XXX,XX +XXX,XX @@ typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
30
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
31
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
32
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
33
+typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
34
35
/**
36
* arm_tbflags_from_tb:
37
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/t32.decode
40
+++ b/target/arm/t32.decode
41
@@ -XXX,XX +XXX,XX @@
42
&mve_shl_ri rdalo rdahi shim
43
&mve_shl_rr rdalo rdahi rm
44
&mve_sh_ri rda shim
45
+&mve_sh_rr rda rm
46
47
# rdahi: bits [3:1] from insn, bit 0 is 1
48
# rdalo: bits [3:1] from insn, bit 0 is 0
49
@@ -XXX,XX +XXX,XX @@
50
&mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
51
@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
52
&mve_sh_ri shim=%imm5_12_6
53
+@mve_sh_rr ....... .... . rda:4 rm:4 .... .... .... &mve_sh_rr
54
55
{
56
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
57
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
58
SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
59
}
27
}
60
28
61
- LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
29
/* Commandline support */
62
- ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
30
- char kcmdline[strlen(kernel_filename) + strlen(kernel_cmdline) + 2];
63
- UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
31
- snprintf(kcmdline, sizeof(kcmdline), "%s %s",
64
- SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
32
- kernel_filename, kernel_cmdline);
65
+ {
33
+ kcmdline = g_strdup_printf("%s %s", kernel_filename, kernel_cmdline);
66
+ UQRSHL_rr 1110101 0010 1 .... .... 1111 0000 1101 @mve_sh_rr
34
stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline));
67
+ LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
35
68
+ UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
36
stl_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs, bootloader_name));
69
+ }
70
+
71
+ {
72
+ SQRSHR_rr 1110101 0010 1 .... .... 1111 0010 1101 @mve_sh_rr
73
+ ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
74
+ SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
75
+ }
76
+
77
UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
78
SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
79
]
80
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/target/arm/mve_helper.c
83
+++ b/target/arm/mve_helper.c
84
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
85
{
86
return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
87
}
88
+
89
+uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
90
+{
91
+ return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
92
+}
93
+
94
+uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
95
+{
96
+ return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
97
+}
98
diff --git a/target/arm/translate.c b/target/arm/translate.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/arm/translate.c
101
+++ b/target/arm/translate.c
102
@@ -XXX,XX +XXX,XX @@ static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
103
return do_mve_sh_ri(s, a, gen_mve_uqshl);
104
}
105
106
+static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
107
+{
108
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
109
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
110
+ return false;
111
+ }
112
+ if (!dc_isar_feature(aa32_mve, s) ||
113
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
114
+ a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
115
+ a->rm == a->rda) {
116
+ /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
117
+ unallocated_encoding(s);
118
+ return true;
119
+ }
120
+
121
+ /* The helper takes care of the sign-extension of the low 8 bits of Rm */
122
+ fn(cpu_R[a->rda], cpu_env, cpu_R[a->rda], cpu_R[a->rm]);
123
+ return true;
124
+}
125
+
126
+static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
127
+{
128
+ return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
129
+}
130
+
131
+static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
132
+{
133
+ return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
134
+}
135
+
136
/*
137
* Multiply and multiply accumulate
138
*/
139
--
37
--
140
2.20.1
38
2.25.1
141
39
142
40
1
The initial implementation of the MVE VRMLALDAVH and VRMLSLDAVH
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
insns had some bugs:
3
* the 32x32 multiply of elements was being done as 32x32->32,
4
not 32x32->64
5
* we were incorrectly maintaining the accumulator in its full
6
72-bit form across all 4 beats of the insn; in the pseudocode
7
it is squashed back into the 64 bits of the RdaHi:RdaLo
8
registers after each beat
9
2
10
In particular, fixing the second of these allows us to recast
3
The compiler isn't clever enough to figure 'width' is a constant,
11
the implementation to avoid 128-bit arithmetic entirely.
4
so help it by using a definition instead.
12
5
13
Since the element size here is always 4, we can also drop the
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
14
parameterization of ESIZE to make the code a little more readable.
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Message-id: 20220819153931.3147384-10-peter.maydell@linaro.org
10
---
11
hw/usb/hcd-ohci.c | 7 ++++---
12
1 file changed, 4 insertions(+), 3 deletions(-)
15
13
16
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
14
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
Message-id: 20210628135835.6690-3-peter.maydell@linaro.org
20
---
21
target/arm/mve_helper.c | 38 +++++++++++++++++++++-----------------
22
1 file changed, 21 insertions(+), 17 deletions(-)
23
24
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
25
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/mve_helper.c
16
--- a/hw/usb/hcd-ohci.c
27
+++ b/target/arm/mve_helper.c
17
+++ b/hw/usb/hcd-ohci.c
28
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed)
29
*/
19
return 1;
30
20
}
31
#include "qemu/osdep.h"
21
32
-#include "qemu/int128.h"
22
+#define HEX_CHAR_PER_LINE 16
33
#include "cpu.h"
23
+
34
#include "internals.h"
24
static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len)
35
#include "vec_internal.h"
25
{
36
@@ -XXX,XX +XXX,XX @@ DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
26
bool print16;
37
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
27
bool printall;
38
28
- const int width = 16;
39
/*
29
int i;
40
- * Rounding multiply add long dual accumulate high: we must keep
30
- char tmp[3 * width + 1];
41
- * a 72-bit internal accumulator value and return the top 64 bits.
31
+ char tmp[3 * HEX_CHAR_PER_LINE + 1];
42
+ * Rounding multiply add long dual accumulate high. In the pseudocode
32
char *p = tmp;
43
+ * this is implemented with a 72-bit internal accumulator value of which
33
44
+ * the top 64 bits are returned. We optimize this to avoid having to
34
print16 = !!trace_event_get_state_backends(TRACE_USB_OHCI_TD_PKT_SHORT);
45
+ * use 128-bit arithmetic -- we can do this because the 74-bit accumulator
35
@@ -XXX,XX +XXX,XX @@ static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len)
46
+ * is squashed back into 64-bits after each beat.
47
*/
48
-#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128) \
49
+#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \
50
uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
51
void *vm, uint64_t a) \
52
{ \
53
uint16_t mask = mve_element_mask(env); \
54
unsigned e; \
55
TYPE *n = vn, *m = vm; \
56
- Int128 acc = int128_lshift(TO128(a), 8); \
57
- for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
58
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
59
if (mask & 1) { \
60
+ LTYPE mul; \
61
if (e & 1) { \
62
- acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] * \
63
- m[H##ESIZE(e)])); \
64
+ mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \
65
+ if (SUB) { \
66
+ mul = -mul; \
67
+ } \
68
} else { \
69
- acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \
70
- m[H##ESIZE(e)])); \
71
+ mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \
72
} \
73
- acc = int128_add(acc, int128_make64(1 << 7)); \
74
+ mul = (mul >> 8) + ((mul >> 7) & 1); \
75
+ a += mul; \
76
} \
77
} \
78
mve_advance_vpt(env); \
79
- return int128_getlo(int128_rshift(acc, 8)); \
80
+ return a; \
81
}
36
}
82
37
83
-DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64)
38
for (i = 0; ; i++) {
84
-DO_LDAVH(vrmlaldavhxsw, 4, int32_t, true, int128_add, int128_add, int128_makes64)
39
- if (i && (!(i % width) || (i == len))) {
85
+DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
40
+ if (i && (!(i % HEX_CHAR_PER_LINE) || (i == len))) {
86
+DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)
41
if (!printall) {
87
42
trace_usb_ohci_td_pkt_short(msg, tmp);
88
-DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64)
43
break;
89
+DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)
90
91
-DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
92
-DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
93
+DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
94
+DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)
95
96
/* Vector add across vector */
97
#define DO_VADDV(OP, ESIZE, TYPE) \
98
--
44
--
99
2.20.1
45
2.25.1
100
46
101
47
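
To make the accumulator argument above concrete, here is a standalone sketch of one beat of the signed VRMLALDAVH computation as the reworked helper performs it: a 32x32->64 multiply, a rounding shift right by 8, then a plain 64-bit accumulate, so no 128-bit intermediate is needed. The function name is invented for the example; the arithmetic mirrors the macro in the diff.

    #include <stdint.h>

    /* One beat of a rounding multiply-add long dual accumulate high,
     * kept entirely in 64-bit arithmetic. */
    static uint64_t rmlaldavh_beat(uint64_t acc, int32_t n, int32_t m)
    {
        int64_t mul = (int64_t)n * m;          /* 32x32->64 multiply */
        mul = (mul >> 8) + ((mul >> 7) & 1);   /* rounding shift right by 8 */
        return acc + (uint64_t)mul;            /* accumulator squashed back to 64 bits */
    }
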
1
The MVE extension to v8.1M includes some new shift instructions which
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
sit entirely within the non-coprocessor part of the encoding space
3
and which operate only on general-purpose registers. They take up
4
the space which was previously UNPREDICTABLE MOVS and ORRS encodings
5
with Rm == 13 or 15.
6
2
7
Implement the long shifts by immediate, which perform shifts on a
3
Use autofree heap allocation instead of variable-length
8
pair of general-purpose registers treated as a 64-bit quantity, with
4
array on the stack.
9
an immediate shift count between 1 and 32.
10
5
11
Awkwardly, because the MOVS and ORRS trans functions do not UNDEF for
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
12
the Rm==13,15 case, we need to explicitly emit code to UNDEF for the
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
cases where v8.1M now requires that. (Trying to change MOVS and ORRS
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
is too difficult, because the functions that generate the code are
9
Message-id: 20220819153931.3147384-11-peter.maydell@linaro.org
15
shared between a dozen different kinds of arithmetic or logical
10
---
16
instruction for all A32, T16 and T32 encodings, and for some insns
11
ui/curses.c | 2 +-
17
and some encodings Rm==13,15 are valid.)
12
1 file changed, 1 insertion(+), 1 deletion(-)
18
13
19
We make the helper functions we need for UQSHLL and SQSHLL take
14
diff --git a/ui/curses.c b/ui/curses.c
20
a 32-bit value which the helper casts to int8_t because we'll need
21
these helpers also for the shift-by-register insns, where the shift
22
count might be < 0 or > 32.
23
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
25
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
26
Message-id: 20210628135835.6690-16-peter.maydell@linaro.org
27
---
28
target/arm/helper-mve.h | 3 ++
29
target/arm/translate.h | 1 +
30
target/arm/t32.decode | 28 +++++++++++++
31
target/arm/mve_helper.c | 10 +++++
32
target/arm/translate.c | 90 +++++++++++++++++++++++++++++++++++++++++
33
5 files changed, 132 insertions(+)
34
35
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
36
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/helper-mve.h
16
--- a/ui/curses.c
38
+++ b/target/arm/helper-mve.h
17
+++ b/ui/curses.c
39
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
18
@@ -XXX,XX +XXX,XX @@ static void curses_update(DisplayChangeListener *dcl,
40
DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
int x, int y, int w, int h)
41
42
DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
43
+
44
+DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
45
+DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
46
diff --git a/target/arm/translate.h b/target/arm/translate.h
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/translate.h
49
+++ b/target/arm/translate.h
50
@@ -XXX,XX +XXX,XX @@ typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
51
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
52
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
53
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
54
+typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
55
56
/**
57
* arm_tbflags_from_tb:
58
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/t32.decode
61
+++ b/target/arm/t32.decode
62
@@ -XXX,XX +XXX,XX @@
63
&mcr !extern cp opc1 crn crm opc2 rt
64
&mcrr !extern cp opc1 crm rt rt2
65
66
+&mve_shl_ri rdalo rdahi shim
67
+
68
+# rdahi: bits [3:1] from insn, bit 0 is 1
69
+# rdalo: bits [3:1] from insn, bit 0 is 0
70
+%rdahi_9 9:3 !function=times_2_plus_1
71
+%rdalo_17 17:3 !function=times_2
72
+
73
# Data-processing (register)
74
75
%imm5_12_6 12:3 6:2
76
@@ -XXX,XX +XXX,XX @@
77
@S_xrr_shi ....... .... . rn:4 .... .... .. shty:2 rm:4 \
78
&s_rrr_shi shim=%imm5_12_6 s=1 rd=0
79
80
+@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
81
+ &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
82
+
83
{
20
{
84
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
21
console_ch_t *line;
85
AND_rrri 1110101 0000 . .... 0 ... .... .... .... @s_rrr_shi
22
- cchar_t curses_line[width];
86
}
23
+ g_autofree cchar_t *curses_line = g_new(cchar_t, width);
87
BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
24
wchar_t wch[CCHARW_MAX];
88
{
25
attr_t attrs;
89
+ # The v8.1M MVE shift insns overlap in encoding with MOVS/ORRS
26
short colors;
90
+ # and are distinguished by having Rm==13 or 15. Those are UNPREDICTABLE
91
+ # cases for MOVS/ORRS. We decode the MVE cases first, ensuring that
92
+ # they explicitly call unallocated_encoding() for cases that must UNDEF
93
+ # (eg "using a new shift insn on a v8.1M CPU without MVE"), and letting
94
+ # the rest fall through (where ORR_rrri and MOV_rxri will end up
95
+ # handling them as r13 and r15 accesses with the same semantics as A32).
96
+ [
97
+ LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
98
+ LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
99
+ ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
100
+
101
+ UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
102
+ URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
103
+ SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
104
+ SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
105
+ ]
106
+
107
MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
108
ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
109
}
110
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
111
index XXXXXXX..XXXXXXX 100644
112
--- a/target/arm/mve_helper.c
113
+++ b/target/arm/mve_helper.c
114
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
115
mve_advance_vpt(env);
116
return rdm;
117
}
118
+
119
+uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
120
+{
121
+ return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
122
+}
123
+
124
+uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
125
+{
126
+ return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
127
+}
128
diff --git a/target/arm/translate.c b/target/arm/translate.c
129
index XXXXXXX..XXXXXXX 100644
130
--- a/target/arm/translate.c
131
+++ b/target/arm/translate.c
132
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
133
return true;
134
}
135
136
+/*
137
+ * v8.1M MVE wide-shifts
138
+ */
139
+static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
140
+ WideShiftImmFn *fn)
141
+{
142
+ TCGv_i64 rda;
143
+ TCGv_i32 rdalo, rdahi;
144
+
145
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
146
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
147
+ return false;
148
+ }
149
+ if (a->rdahi == 15) {
150
+ /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
151
+ return false;
152
+ }
153
+ if (!dc_isar_feature(aa32_mve, s) ||
154
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
155
+ a->rdahi == 13) {
156
+ /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
157
+ unallocated_encoding(s);
158
+ return true;
159
+ }
160
+
161
+ if (a->shim == 0) {
162
+ a->shim = 32;
163
+ }
164
+
165
+ rda = tcg_temp_new_i64();
166
+ rdalo = load_reg(s, a->rdalo);
167
+ rdahi = load_reg(s, a->rdahi);
168
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
169
+
170
+ fn(rda, rda, a->shim);
171
+
172
+ tcg_gen_extrl_i64_i32(rdalo, rda);
173
+ tcg_gen_extrh_i64_i32(rdahi, rda);
174
+ store_reg(s, a->rdalo, rdalo);
175
+ store_reg(s, a->rdahi, rdahi);
176
+ tcg_temp_free_i64(rda);
177
+
178
+ return true;
179
+}
180
+
181
+static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
182
+{
183
+ return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
184
+}
185
+
186
+static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
187
+{
188
+ return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
189
+}
190
+
191
+static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
192
+{
193
+ return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
194
+}
195
+
196
+static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
197
+{
198
+ gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift));
199
+}
200
+
201
+static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
202
+{
203
+ return do_mve_shl_ri(s, a, gen_mve_sqshll);
204
+}
205
+
206
+static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
207
+{
208
+ gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift));
209
+}
210
+
211
+static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
212
+{
213
+ return do_mve_shl_ri(s, a, gen_mve_uqshll);
214
+}
215
+
216
+static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
217
+{
218
+ return do_mve_shl_ri(s, a, gen_srshr64_i64);
219
+}
220
+
221
+static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
222
+{
223
+ return do_mve_shl_ri(s, a, gen_urshr64_i64);
224
+}
225
+
226
/*
227
* Multiply and multiply accumulate
228
*/
229
--
27
--
230
2.20.1
28
2.25.1
231
29
232
30
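
The "pair of general-purpose registers treated as a 64-bit quantity" wording above corresponds directly to concatenating RdaLo and RdaHi, shifting the 64-bit value, and splitting it back, which is what the generated TCG ops do. A plain-C sketch of that dataflow (the name wide_lsll is invented; saturation and flag handling are omitted):

    #include <stdint.h>

    /* Plain-C view of LSLL: RdaLo:RdaHi form one 64-bit value. */
    static void wide_lsll(uint32_t *rdalo, uint32_t *rdahi, unsigned shim)
    {
        uint64_t rda = ((uint64_t)*rdahi << 32) | *rdalo;   /* concat the pair */

        rda <<= shim;                      /* immediate shift count 1..32 */
        *rdalo = (uint32_t)rda;            /* low half back to RdaLo */
        *rdahi = (uint32_t)(rda >> 32);    /* high half back to RdaHi */
    }
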
1
Implement the MVE VSHLC insn, which performs a shift left of the
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
entire vector with carry in bits provided from a general purpose
3
register and carry out bits written back to that register.
4
2
3
Use autofree heap allocation instead of variable-length
4
array on the stack.
5
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220819153931.3147384-12-peter.maydell@linaro.org
7
Message-id: 20210628135835.6690-14-peter.maydell@linaro.org
8
---
10
---
9
target/arm/helper-mve.h | 2 ++
11
tests/unit/test-vmstate.c | 7 +++----
10
target/arm/mve.decode | 2 ++
12
1 file changed, 3 insertions(+), 4 deletions(-)
11
target/arm/mve_helper.c | 38 ++++++++++++++++++++++++++++++++++++++
12
target/arm/translate-mve.c | 30 ++++++++++++++++++++++++++++++
13
4 files changed, 72 insertions(+)
14
13
15
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-mve.h
16
--- a/tests/unit/test-vmstate.c
18
+++ b/target/arm/helper-mve.h
17
+++ b/tests/unit/test-vmstate.c
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
18
@@ -XXX,XX +XXX,XX @@ static void save_buffer(const uint8_t *buf, size_t buf_size)
20
DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
static void compare_vmstate(const uint8_t *wire, size_t size)
21
DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
20
{
22
DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
21
QEMUFile *f = open_test_file(false);
23
+
22
- uint8_t result[size];
24
+DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
23
+ g_autofree uint8_t *result = g_malloc(size);
25
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
24
26
index XXXXXXX..XXXXXXX 100644
25
/* read back as binary */
27
--- a/target/arm/mve.decode
26
28
+++ b/target/arm/mve.decode
27
- g_assert_cmpint(qemu_get_buffer(f, result, sizeof(result)), ==,
29
@@ -XXX,XX +XXX,XX @@ VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
28
- sizeof(result));
30
VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
29
+ g_assert_cmpint(qemu_get_buffer(f, result, size), ==, size);
31
VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
30
g_assert(!qemu_file_get_error(f));
32
VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
31
33
+
32
/* Compare that what is on the file is the same that what we
34
+VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd
33
expected to be there */
35
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
34
- SUCCESS(memcmp(result, wire, sizeof(result)));
36
index XXXXXXX..XXXXXXX 100644
35
+ SUCCESS(memcmp(result, wire, size));
37
--- a/target/arm/mve_helper.c
36
38
+++ b/target/arm/mve_helper.c
37
/* Must reach EOF */
39
@@ -XXX,XX +XXX,XX @@ DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
38
qemu_get_byte(f);
40
DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
41
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
42
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
43
+
44
+uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
45
+ uint32_t shift)
46
+{
47
+ uint32_t *d = vd;
48
+ uint16_t mask = mve_element_mask(env);
49
+ unsigned e;
50
+ uint32_t r;
51
+
52
+ /*
53
+ * For each 32-bit element, we shift it left, bringing in the
54
+ * low 'shift' bits of rdm at the bottom. Bits shifted out at
55
+ * the top become the new rdm, if the predicate mask permits.
56
+ * The final rdm value is returned to update the register.
57
+ * shift == 0 here means "shift by 32 bits".
58
+ */
59
+ if (shift == 0) {
60
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
61
+ r = rdm;
62
+ if (mask & 1) {
63
+ rdm = d[H4(e)];
64
+ }
65
+ mergemask(&d[H4(e)], r, mask);
66
+ }
67
+ } else {
68
+ uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);
69
+
70
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
71
+ r = (d[H4(e)] << shift) | (rdm & shiftmask);
72
+ if (mask & 1) {
73
+ rdm = d[H4(e)] >> (32 - shift);
74
+ }
75
+ mergemask(&d[H4(e)], r, mask);
76
+ }
77
+ }
78
+ mve_advance_vpt(env);
79
+ return rdm;
80
+}
81
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
82
index XXXXXXX..XXXXXXX 100644
83
--- a/target/arm/translate-mve.c
84
+++ b/target/arm/translate-mve.c
85
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
86
DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
87
DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
88
DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
89
+
90
+static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
91
+{
92
+ /*
93
+ * Whole Vector Left Shift with Carry. The carry is taken
94
+ * from a general purpose register and written back there.
95
+ * An imm of 0 means "shift by 32".
96
+ */
97
+ TCGv_ptr qd;
98
+ TCGv_i32 rdm;
99
+
100
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
101
+ return false;
102
+ }
103
+ if (a->rdm == 13 || a->rdm == 15) {
104
+ /* CONSTRAINED UNPREDICTABLE: we UNDEF */
105
+ return false;
106
+ }
107
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
108
+ return true;
109
+ }
110
+
111
+ qd = mve_qreg_ptr(a->qd);
112
+ rdm = load_reg(s, a->rdm);
113
+ gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm));
114
+ store_reg(s, a->rdm, rdm);
115
+ tcg_temp_free_ptr(qd);
116
+ mve_update_eci(s);
117
+ return true;
118
+}
119
--
39
--
120
2.20.1
40
2.25.1
121
41
122
42
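
A compact way to see what the VSHLC helper above computes, ignoring predication and assuming a shift count in the 1..31 range (the real insn treats an immediate of 0 as a shift by 32): the bits shifted out of the top of each element become the carry into the next element, and the final carry is written back to the general-purpose register. The function name is invented for the example.

    #include <stdint.h>

    /* Whole-vector shift left with carry, over four 32-bit elements. */
    static uint32_t vshlc_sketch(uint32_t d[4], uint32_t rdm, unsigned shift)
    {
        for (unsigned e = 0; e < 4; e++) {
            uint32_t carry_out = d[e] >> (32 - shift);            /* bits leaving the top */
            d[e] = (d[e] << shift) | (rdm & ((1u << shift) - 1)); /* carry in at the bottom */
            rdm = carry_out;
        }
        return rdm;   /* new value for the general-purpose register */
    }
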
1
Implement the MVE shift-right-and-narrow insns VSHRN and VRSHRN.
1
Shellcheck correctly reports that we set python_version and never use
2
2
it. This is a leftover from commit f9332757898a7: we used to use
3
do_urshr() is borrowed from sve_helper.c.
3
python_version purely as part of the summary information printed
4
at the end of a configure run, and that commit changed to printing
5
the information from meson (which looks up the python version
6
itself). Remove the unused variable.
4
7
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
7
Message-id: 20210628135835.6690-12-peter.maydell@linaro.org
10
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Message-id: 20220825150703.4074125-2-peter.maydell@linaro.org
8
---
12
---
9
target/arm/helper-mve.h | 10 ++++++++++
13
configure | 3 ---
10
target/arm/mve.decode | 11 +++++++++++
14
1 file changed, 3 deletions(-)
11
target/arm/mve_helper.c | 40 ++++++++++++++++++++++++++++++++++++++
12
target/arm/translate-mve.c | 15 ++++++++++++++
13
4 files changed, 76 insertions(+)
14
15
15
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
16
diff --git a/configure b/configure
16
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100755
17
--- a/target/arm/helper-mve.h
18
--- a/configure
18
+++ b/target/arm/helper-mve.h
19
+++ b/configure
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
20
@@ -XXX,XX +XXX,XX @@ if ! $python -c 'import sys; sys.exit(sys.version_info < (3,6))'; then
20
DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
21
"Use --python=/path/to/python to specify a supported Python."
21
DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
22
fi
22
DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
23
+
24
-# Preserve python version since some functionality is dependent on it
24
+DEF_HELPER_FLAGS_4(mve_vshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
-python_version=$($python -c 'import sys; print("%d.%d.%d" % (sys.version_info[0], sys.version_info[1], sys.version_info[2]))' 2>/dev/null)
25
+DEF_HELPER_FLAGS_4(mve_vshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
26
-
26
+DEF_HELPER_FLAGS_4(mve_vshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
27
# Suppress writing compiled files
27
+DEF_HELPER_FLAGS_4(mve_vshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
python="$python -B"
28
+
29
29
+DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
30
+DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
31
+DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
+DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/mve.decode
36
+++ b/target/arm/mve.decode
37
@@ -XXX,XX +XXX,XX @@ VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
38
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
39
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
40
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
41
+
42
+# Narrowing shifts (which only support b and h sizes)
43
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
44
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
45
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
46
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
47
+
48
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
49
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
50
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
51
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
52
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/arm/mve_helper.c
55
+++ b/target/arm/mve_helper.c
56
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
57
58
DO_VSHLL_ALL(vshllb, false)
59
DO_VSHLL_ALL(vshllt, true)
60
+
61
+/*
62
+ * Narrowing right shifts, taking a double sized input, shifting it
63
+ * and putting the result in either the top or bottom half of the output.
64
+ * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
65
+ */
66
+#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
67
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
68
+ void *vm, uint32_t shift) \
69
+ { \
70
+ LTYPE *m = vm; \
71
+ TYPE *d = vd; \
72
+ uint16_t mask = mve_element_mask(env); \
73
+ unsigned le; \
74
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
75
+ TYPE r = FN(m[H##LESIZE(le)], shift); \
76
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
77
+ } \
78
+ mve_advance_vpt(env); \
79
+ }
80
+
81
+#define DO_VSHRN_ALL(OP, FN) \
82
+ DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \
83
+ DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \
84
+ DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \
85
+ DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)
86
+
87
+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
88
+{
89
+ if (likely(sh < 64)) {
90
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
91
+ } else if (sh == 64) {
92
+ return x >> 63;
93
+ } else {
94
+ return 0;
95
+ }
96
+}
97
+
98
+DO_VSHRN_ALL(vshrn, DO_SHR)
99
+DO_VSHRN_ALL(vrshrn, do_urshr)
100
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
101
index XXXXXXX..XXXXXXX 100644
102
--- a/target/arm/translate-mve.c
103
+++ b/target/arm/translate-mve.c
104
@@ -XXX,XX +XXX,XX @@ DO_VSHLL(VSHLL_BS, vshllbs)
105
DO_VSHLL(VSHLL_BU, vshllbu)
106
DO_VSHLL(VSHLL_TS, vshllts)
107
DO_VSHLL(VSHLL_TU, vshlltu)
108
+
109
+#define DO_2SHIFT_N(INSN, FN) \
110
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
111
+ { \
112
+ static MVEGenTwoOpShiftFn * const fns[] = { \
113
+ gen_helper_mve_##FN##b, \
114
+ gen_helper_mve_##FN##h, \
115
+ }; \
116
+ return do_2shift(s, a, fns[a->size], false); \
117
+ }
118
+
119
+DO_2SHIFT_N(VSHRNB, vshrnb)
120
+DO_2SHIFT_N(VSHRNT, vshrnt)
121
+DO_2SHIFT_N(VRSHRNB, vrshrnb)
122
+DO_2SHIFT_N(VRSHRNT, vrshrnt)
123
--
30
--
124
2.20.1
31
2.25.1
125
32
126
33
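
The do_urshr() helper above differs from a plain right shift by adding back the last bit shifted out, i.e. it rounds the result to nearest. A small usage sketch, simplified to the 0 < sh < 64 case:

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as do_urshr() in the patch: rounding unsigned shift right. */
    static uint64_t urshr(uint64_t x, unsigned sh)
    {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)urshr(7, 2));   /* 7/4 = 1.75 -> 2 */
        printf("%llu\n", (unsigned long long)urshr(5, 2));   /* 5/4 = 1.25 -> 1 */
        return 0;
    }
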
1
Implement the MVE VSRI and VSLI insns, which perform a
1
The meson_args variable was added in commit 3b4da13293482134b, but
2
shift-and-insert operation.
2
was not used in that commit and isn't used today. Delete the
3
unnecessary assignment.
3
4
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
6
Message-id: 20210628135835.6690-11-peter.maydell@linaro.org
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Message-id: 20220825150703.4074125-3-peter.maydell@linaro.org
7
---
9
---
8
target/arm/helper-mve.h | 8 ++++++++
10
configure | 1 -
9
target/arm/mve.decode | 9 ++++++++
11
1 file changed, 1 deletion(-)
10
target/arm/mve_helper.c | 42 ++++++++++++++++++++++++++++++++++++++
11
target/arm/translate-mve.c | 3 +++
12
4 files changed, 62 insertions(+)
13
12
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
13
diff --git a/configure b/configure
15
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100755
16
--- a/target/arm/helper-mve.h
15
--- a/configure
17
+++ b/target/arm/helper-mve.h
16
+++ b/configure
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
17
@@ -XXX,XX +XXX,XX @@ pie=""
19
DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
18
coroutine=""
20
DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
plugins="$default_feature"
21
DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
20
meson=""
22
+
21
-meson_args=""
23
+DEF_HELPER_FLAGS_4(mve_vsrib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
22
ninja=""
24
+DEF_HELPER_FLAGS_4(mve_vsrih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
bindir="bin"
25
+DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
skip_meson=no
26
+
27
+DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
+DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
29
+DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
30
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/mve.decode
33
+++ b/target/arm/mve.decode
34
@@ -XXX,XX +XXX,XX @@ VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
35
36
VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
37
VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
38
+
39
+# Shift-and-insert
40
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_b
41
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_h
42
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
43
+
44
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
45
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
46
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
47
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/mve_helper.c
50
+++ b/target/arm/mve_helper.c
51
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
52
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
53
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
54
55
+/* Shift-and-insert; we always work with 64 bits at a time */
56
+#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \
57
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
58
+ void *vm, uint32_t shift) \
59
+ { \
60
+ uint64_t *d = vd, *m = vm; \
61
+ uint16_t mask; \
62
+ uint64_t shiftmask; \
63
+ unsigned e; \
64
+ if (shift == 0 || shift == ESIZE * 8) { \
65
+ /* \
66
+ * Only VSLI can shift by 0; only VSRI can shift by <dt>. \
67
+ * The generic logic would give the right answer for 0 but \
68
+ * fails for <dt>. \
69
+ */ \
70
+ goto done; \
71
+ } \
72
+ assert(shift < ESIZE * 8); \
73
+ mask = mve_element_mask(env); \
74
+ /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \
75
+ shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \
76
+ for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
77
+ uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \
78
+ (d[H8(e)] & ~shiftmask); \
79
+ mergemask(&d[H8(e)], r, mask); \
80
+ } \
81
+done: \
82
+ mve_advance_vpt(env); \
83
+ }
84
+
85
+#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
86
+#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
87
+#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
88
+#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))
89
+
90
+DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
91
+DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
92
+DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
93
+DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
94
+DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
95
+DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
96
+
97
/*
98
* Long shifts taking half-sized inputs from top or bottom of the input
99
* vector and producing a double-width result. ESIZE, TYPE are for
100
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
101
index XXXXXXX..XXXXXXX 100644
102
--- a/target/arm/translate-mve.c
103
+++ b/target/arm/translate-mve.c
104
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHRI_U, vshli_u, true)
105
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
106
DO_2SHIFT(VRSHRI_U, vrshli_u, true)
107
108
+DO_2SHIFT(VSRI, vsri, false)
109
+DO_2SHIFT(VSLI, vsli, false)
110
+
111
#define DO_VSHLL(INSN, FN) \
112
static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
113
{ \
114
--
25
--
115
2.20.1
26
2.25.1
116
27
117
28
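
"Shift-and-insert" above means that the shifted source only replaces the destination bits that the shift actually produces; the rest of the destination is preserved. A one-element sketch of the VSLI case for shift counts 1..7 (names invented for the example; the helpers in the diff do this 64 bits at a time under the predicate mask):

    #include <stdint.h>

    /* VSLI on a single byte: insert the left-shifted source under a mask. */
    static uint8_t vsli_byte(uint8_t d, uint8_t m, unsigned shift)
    {
        uint8_t mask = (uint8_t)(0xffu << shift);   /* bits written by the shift */
        return (uint8_t)(((m << shift) & mask) | (d & (uint8_t)~mask));
    }
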
1
1
This commit adds quotes in some places which:
2
* are spotted by shellcheck
3
* are obviously incorrect
4
* are easy to fix just by adding the quotes
5
6
It doesn't attempt to fix all of the places shellcheck finds errors,
7
or even all the ones which are easy to fix. It's just a random
8
sampling which is hopefully easy to review and which cuts
9
down the size of the problem for next time somebody wants to
10
try to look at shellcheck errors.
11
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
14
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
15
Message-id: 20220825150703.4074125-4-peter.maydell@linaro.org
16
---
17
configure | 64 +++++++++++++++++++++++++++----------------------------
18
1 file changed, 32 insertions(+), 32 deletions(-)
19
20
diff --git a/configure b/configure
21
index XXXXXXX..XXXXXXX 100755
22
--- a/configure
23
+++ b/configure
24
@@ -XXX,XX +XXX,XX @@ GNUmakefile: ;
25
26
EOF
27
cd build
28
- exec $source_path/configure "$@"
29
+ exec "$source_path/configure" "$@"
30
fi
31
32
# Temporary directory used for files created while
33
@@ -XXX,XX +XXX,XX @@ meson_option_build_array() {
34
printf ']\n'
35
}
36
37
-. $source_path/scripts/meson-buildoptions.sh
38
+. "$source_path/scripts/meson-buildoptions.sh"
39
40
meson_options=
41
meson_option_add() {
42
@@ -XXX,XX +XXX,XX @@ for opt do
43
case "$opt" in
44
--help|-h) show_help=yes
45
;;
46
- --version|-V) exec cat $source_path/VERSION
47
+ --version|-V) exec cat "$source_path/VERSION"
48
;;
49
--prefix=*) prefix="$optarg"
50
;;
51
@@ -XXX,XX +XXX,XX @@ default_target_list=""
52
mak_wilds=""
53
54
if [ "$linux_user" != no ]; then
55
- if [ "$targetos" = linux ] && [ -d $source_path/linux-user/include/host/$cpu ]; then
56
+ if [ "$targetos" = linux ] && [ -d "$source_path/linux-user/include/host/$cpu" ]; then
57
linux_user=yes
58
elif [ "$linux_user" = yes ]; then
59
error_exit "linux-user not supported on this architecture"
60
@@ -XXX,XX +XXX,XX @@ if [ "$bsd_user" != no ]; then
61
if [ "$bsd_user" = "" ]; then
62
test $targetos = freebsd && bsd_user=yes
63
fi
64
- if [ "$bsd_user" = yes ] && ! [ -d $source_path/bsd-user/$targetos ]; then
65
+ if [ "$bsd_user" = yes ] && ! [ -d "$source_path/bsd-user/$targetos" ]; then
66
error_exit "bsd-user not supported on this host OS"
67
fi
68
fi
69
@@ -XXX,XX +XXX,XX @@ python="$python -B"
70
if test -z "$meson"; then
71
if test "$explicit_python" = no && has meson && version_ge "$(meson --version)" 0.59.3; then
72
meson=meson
73
- elif test $git_submodules_action != 'ignore' ; then
74
+ elif test "$git_submodules_action" != 'ignore' ; then
75
meson=git
76
elif test -e "${source_path}/meson/meson.py" ; then
77
meson=internal
78
@@ -XXX,XX +XXX,XX @@ esac
79
container="no"
80
if test $use_containers = "yes"; then
81
if has "docker" || has "podman"; then
82
- container=$($python $source_path/tests/docker/docker.py probe)
83
+ container=$($python "$source_path"/tests/docker/docker.py probe)
84
fi
85
fi
86
87
@@ -XXX,XX +XXX,XX @@ if test "$QEMU_GA_DISTRO" = ""; then
88
QEMU_GA_DISTRO=Linux
89
fi
90
if test "$QEMU_GA_VERSION" = ""; then
91
- QEMU_GA_VERSION=$(cat $source_path/VERSION)
92
+ QEMU_GA_VERSION=$(cat "$source_path"/VERSION)
93
fi
94
95
96
@@ -XXX,XX +XXX,XX @@ fi
97
for target in $target_list; do
98
target_dir="$target"
99
target_name=$(echo $target | cut -d '-' -f 1)$EXESUF
100
- mkdir -p $target_dir
101
+ mkdir -p "$target_dir"
102
case $target in
103
*-user) symlink "../qemu-$target_name" "$target_dir/qemu-$target_name" ;;
104
*) symlink "../qemu-system-$target_name" "$target_dir/qemu-system-$target_name" ;;
105
@@ -XXX,XX +XXX,XX @@ for target in $target_list; do
106
config_target_mak=tests/tcg/config-$target.mak
107
108
echo "# Automatically generated by configure - do not modify" > $config_target_mak
109
- echo "TARGET_NAME=$arch" >> $config_target_mak
110
+ echo "TARGET_NAME=$arch" >> "$config_target_mak"
111
case $target in
112
xtensa*-linux-user)
113
# the toolchain is not complete with headers, only build softmmu tests
114
continue
115
;;
116
*-softmmu)
117
- test -f $source_path/tests/tcg/$arch/Makefile.softmmu-target || continue
118
+ test -f "$source_path/tests/tcg/$arch/Makefile.softmmu-target" || continue
119
qemu="qemu-system-$arch"
120
;;
121
*-linux-user|*-bsd-user)
122
@@ -XXX,XX +XXX,XX @@ for target in $target_list; do
123
# compilers is a requirememt for adding a new test that needs a
124
# compiler feature.
125
126
- echo "BUILD_STATIC=$build_static" >> $config_target_mak
127
- write_target_makefile >> $config_target_mak
128
+ echo "BUILD_STATIC=$build_static" >> "$config_target_mak"
129
+ write_target_makefile >> "$config_target_mak"
130
case $target in
131
aarch64-*)
132
if do_compiler "$target_cc" $target_cflags \
133
-march=armv8.1-a+sve -o $TMPE $TMPC; then
134
- echo "CROSS_CC_HAS_SVE=y" >> $config_target_mak
135
+ echo "CROSS_CC_HAS_SVE=y" >> "$config_target_mak"
136
fi
137
if do_compiler "$target_cc" $target_cflags \
138
-march=armv8.1-a+sve2 -o $TMPE $TMPC; then
139
- echo "CROSS_CC_HAS_SVE2=y" >> $config_target_mak
140
+ echo "CROSS_CC_HAS_SVE2=y" >> "$config_target_mak"
141
fi
142
if do_compiler "$target_cc" $target_cflags \
143
-march=armv8.3-a -o $TMPE $TMPC; then
144
- echo "CROSS_CC_HAS_ARMV8_3=y" >> $config_target_mak
145
+ echo "CROSS_CC_HAS_ARMV8_3=y" >> "$config_target_mak"
146
fi
147
if do_compiler "$target_cc" $target_cflags \
148
-mbranch-protection=standard -o $TMPE $TMPC; then
149
- echo "CROSS_CC_HAS_ARMV8_BTI=y" >> $config_target_mak
150
+ echo "CROSS_CC_HAS_ARMV8_BTI=y" >> "$config_target_mak"
151
fi
152
if do_compiler "$target_cc" $target_cflags \
153
-march=armv8.5-a+memtag -o $TMPE $TMPC; then
154
- echo "CROSS_CC_HAS_ARMV8_MTE=y" >> $config_target_mak
155
+ echo "CROSS_CC_HAS_ARMV8_MTE=y" >> "$config_target_mak"
156
fi
157
;;
158
ppc*)
159
if do_compiler "$target_cc" $target_cflags \
160
-mpower8-vector -o $TMPE $TMPC; then
161
- echo "CROSS_CC_HAS_POWER8_VECTOR=y" >> $config_target_mak
162
+ echo "CROSS_CC_HAS_POWER8_VECTOR=y" >> "$config_target_mak"
163
fi
164
if do_compiler "$target_cc" $target_cflags \
165
-mpower10 -o $TMPE $TMPC; then
166
- echo "CROSS_CC_HAS_POWER10=y" >> $config_target_mak
167
+ echo "CROSS_CC_HAS_POWER10=y" >> "$config_target_mak"
168
fi
169
;;
170
i386-linux-user)
171
if do_compiler "$target_cc" $target_cflags \
172
-Werror -fno-pie -o $TMPE $TMPC; then
173
- echo "CROSS_CC_HAS_I386_NOPIE=y" >> $config_target_mak
174
+ echo "CROSS_CC_HAS_I386_NOPIE=y" >> "$config_target_mak"
175
fi
176
;;
177
esac
178
elif test -n "$container_image"; then
179
echo "build-tcg-tests-$target: docker-image-$container_image" >> $makefile
180
- echo "BUILD_STATIC=y" >> $config_target_mak
181
- write_container_target_makefile >> $config_target_mak
182
+ echo "BUILD_STATIC=y" >> "$config_target_mak"
183
+ write_container_target_makefile >> "$config_target_mak"
184
case $target in
185
aarch64-*)
186
- echo "CROSS_CC_HAS_SVE=y" >> $config_target_mak
187
- echo "CROSS_CC_HAS_SVE2=y" >> $config_target_mak
188
- echo "CROSS_CC_HAS_ARMV8_3=y" >> $config_target_mak
189
- echo "CROSS_CC_HAS_ARMV8_BTI=y" >> $config_target_mak
190
- echo "CROSS_CC_HAS_ARMV8_MTE=y" >> $config_target_mak
191
+ echo "CROSS_CC_HAS_SVE=y" >> "$config_target_mak"
192
+ echo "CROSS_CC_HAS_SVE2=y" >> "$config_target_mak"
193
+ echo "CROSS_CC_HAS_ARMV8_3=y" >> "$config_target_mak"
194
+ echo "CROSS_CC_HAS_ARMV8_BTI=y" >> "$config_target_mak"
195
+ echo "CROSS_CC_HAS_ARMV8_MTE=y" >> "$config_target_mak"
196
;;
197
ppc*)
198
- echo "CROSS_CC_HAS_POWER8_VECTOR=y" >> $config_target_mak
199
- echo "CROSS_CC_HAS_POWER10=y" >> $config_target_mak
200
+ echo "CROSS_CC_HAS_POWER8_VECTOR=y" >> "$config_target_mak"
201
+ echo "CROSS_CC_HAS_POWER10=y" >> "$config_target_mak"
202
;;
203
i386-linux-user)
204
- echo "CROSS_CC_HAS_I386_NOPIE=y" >> $config_target_mak
205
+ echo "CROSS_CC_HAS_I386_NOPIE=y" >> "$config_target_mak"
206
;;
207
esac
208
got_cross_cc=yes
209
fi
210
if test $got_cross_cc = yes; then
211
mkdir -p tests/tcg/$target
212
- echo "QEMU=$PWD/$qemu" >> $config_target_mak
213
+ echo "QEMU=$PWD/$qemu" >> "$config_target_mak"
214
echo "run-tcg-tests-$target: $qemu\$(EXESUF)" >> $makefile
215
tcg_tests_targets="$tcg_tests_targets $target"
216
fi
217
--
218
2.25.1
219
220
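(For illustration only, not part of the patch: the quoting added above follows
the usual shell rule that unquoted expansions are subject to word splitting
and globbing. A minimal sketch with a hypothetical value containing a space:

    dir="build dir"       # hypothetical path containing a space
    mkdir -p $dir         # expands to two arguments: 'build' and 'dir'
    mkdir -p "$dir"       # a single argument, the intended directory

Quoting "$config_target_mak" in the redirections above guards against the
same class of surprise.)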
1
Implement the MVE VSHLL (vector shift left long) insn. This has two
1
Shellcheck warns that in
2
encodings: the T1 encoding is the usual shift-by-immediate format,
2
rm -f */config-devices.mak.d
3
and the T2 encoding is a special case where the shift count is always
3
the glob might expand to something with a '-' in it, which would
4
equal to the element size.
4
then be misinterpreted as an option to rm. Fix this by adding './'.
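(For illustration only, not part of the patch: a file whose name starts with
'-' shows the hazard.

    touch -- '-i'         # create a file literally named '-i'
    rm -f *               # glob expands to '-i', which rm parses as an option
    rm -f ./*             # expands to './-i', unambiguously a filename

With the './' prefix the expansion can never look like an option to rm.)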
5
5
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
8
Message-id: 20210628135835.6690-10-peter.maydell@linaro.org
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Message-id: 20220825150703.4074125-5-peter.maydell@linaro.org
9
---
10
---
10
target/arm/helper-mve.h | 9 +++++++
11
configure | 2 +-
11
target/arm/mve.decode | 53 +++++++++++++++++++++++++++++++++++---
12
1 file changed, 1 insertion(+), 1 deletion(-)
12
target/arm/mve_helper.c | 32 +++++++++++++++++++++++
13
target/arm/translate-mve.c | 15 +++++++++++
14
4 files changed, 105 insertions(+), 4 deletions(-)
15
13
16
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
diff --git a/configure b/configure
17
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100755
18
--- a/target/arm/helper-mve.h
16
--- a/configure
19
+++ b/target/arm/helper-mve.h
17
+++ b/configure
20
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
18
@@ -XXX,XX +XXX,XX @@ exit 0
21
DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
19
fi
22
DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
20
23
DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
21
# Remove old dependency files to make sure that they get properly regenerated
24
+
22
-rm -f */config-devices.mak.d
25
+DEF_HELPER_FLAGS_4(mve_vshllbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
+rm -f ./*/config-devices.mak.d
26
+DEF_HELPER_FLAGS_4(mve_vshllbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
27
+DEF_HELPER_FLAGS_4(mve_vshllbub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
if test -z "$python"
28
+DEF_HELPER_FLAGS_4(mve_vshllbuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
26
then
29
+DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
30
+DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
31
+DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
+DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/mve.decode
36
+++ b/target/arm/mve.decode
37
@@ -XXX,XX +XXX,XX @@
38
@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
39
@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2
40
41
+@2_shll_b .... .... ... 01 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
42
+@2_shll_h .... .... ... 1 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
43
+# VSHLL encoding T2 where shift == esize
44
+@2_shll_esize_b .... .... .... 00 .. .... .... .... .... &2shift \
45
+ qd=%qd qm=%qm size=0 shift=8
46
+@2_shll_esize_h .... .... .... 01 .. .... .... .... .... &2shift \
47
+ qd=%qd qm=%qm size=1 shift=16
48
+
49
# Right shifts are encoded as N - shift, where N is the element size in bits.
50
%rshift_i5 16:5 !function=rsub_32
51
%rshift_i4 16:4 !function=rsub_16
52
@@ -XXX,XX +XXX,XX @@ VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
53
VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
54
VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
55
56
-VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
57
-VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
58
+# The VSHLL T2 encoding is not a @2op pattern, but is here because it
59
+# overlaps what would be size=0b11 VMULH/VRMULH
60
+{
61
+ VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
62
+ VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
63
64
-VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
65
-VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
66
+ VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
67
+}
68
+
69
+{
70
+ VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
71
+ VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
72
+
73
+ VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
74
+}
75
+
76
+{
77
+ VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
78
+ VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
79
+
80
+ VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
81
+}
82
+
83
+{
84
+ VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
85
+ VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
86
+
87
+ VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
88
+}
89
90
VMAX_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
91
VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
92
@@ -XXX,XX +XXX,XX @@ VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
93
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
94
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
95
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
96
+
97
+# VSHLL T1 encoding; the T2 VSHLL encoding is elsewhere in this file
98
+VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
99
+VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h
100
+
101
+VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
102
+VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h
103
+
104
+VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
105
+VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
106
+
107
+VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
108
+VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
109
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
110
index XXXXXXX..XXXXXXX 100644
111
--- a/target/arm/mve_helper.c
112
+++ b/target/arm/mve_helper.c
113
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
114
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
115
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
116
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
117
+
118
+/*
119
+ * Long shifts taking half-sized inputs from top or bottom of the input
120
+ * vector and producing a double-width result. ESIZE, TYPE are for
121
+ * the input, and LESIZE, LTYPE for the output.
122
+ * Unlike the normal shift helpers, we do not handle negative shift counts,
123
+ * because the long shift is strictly left-only.
124
+ */
125
+#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
126
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
127
+ void *vm, uint32_t shift) \
128
+ { \
129
+ LTYPE *d = vd; \
130
+ TYPE *m = vm; \
131
+ uint16_t mask = mve_element_mask(env); \
132
+ unsigned le; \
133
+ assert(shift <= 16); \
134
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
135
+ LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \
136
+ mergemask(&d[H##LESIZE(le)], r, mask); \
137
+ } \
138
+ mve_advance_vpt(env); \
139
+ }
140
+
141
+#define DO_VSHLL_ALL(OP, TOP) \
142
+ DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \
143
+ DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \
144
+ DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \
145
+ DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \
146
+
147
+DO_VSHLL_ALL(vshllb, false)
148
+DO_VSHLL_ALL(vshllt, true)
149
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
150
index XXXXXXX..XXXXXXX 100644
151
--- a/target/arm/translate-mve.c
152
+++ b/target/arm/translate-mve.c
153
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHRI_S, vshli_s, true)
154
DO_2SHIFT(VSHRI_U, vshli_u, true)
155
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
156
DO_2SHIFT(VRSHRI_U, vrshli_u, true)
157
+
158
+#define DO_VSHLL(INSN, FN) \
159
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
160
+ { \
161
+ static MVEGenTwoOpShiftFn * const fns[] = { \
162
+ gen_helper_mve_##FN##b, \
163
+ gen_helper_mve_##FN##h, \
164
+ }; \
165
+ return do_2shift(s, a, fns[a->size], false); \
166
+ }
167
+
168
+DO_VSHLL(VSHLL_BS, vshllbs)
169
+DO_VSHLL(VSHLL_BU, vshllbu)
170
+DO_VSHLL(VSHLL_TS, vshllts)
171
+DO_VSHLL(VSHLL_TU, vshlltu)
172
--
27
--
173
2.20.1
28
2.25.1
174
29
175
30
1
Use dup_const() instead of bitfield_replicate() in
1
There's only one place in configure where we use `...` to execute a
2
disas_simd_mod_imm().
2
command and capture the result. Switch to $() to match the rest of
3
3
the script. This silences a shellcheck warning.
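(For illustration only: both forms capture a command's output, but $() is
easier to read and to nest, which is why shellcheck flags the backtick form.

    ver=`uname -r`        # legacy backtick form
    ver=$(uname -r)       # equivalent, and nests without extra escaping

Both assignments produce the same value; only the syntax differs.)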
4
(We can't replace the other use of bitfield_replicate() in this file,
5
in logic_imm_decode_wmask(), because that location needs to handle 2
6
and 4 bit elements, which dup_const() cannot.)
7
4
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
10
Message-id: 20210628135835.6690-6-peter.maydell@linaro.org
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Message-id: 20220825150703.4074125-6-peter.maydell@linaro.org
11
---
9
---
12
target/arm/translate-a64.c | 2 +-
10
configure | 2 +-
13
1 file changed, 1 insertion(+), 1 deletion(-)
11
1 file changed, 1 insertion(+), 1 deletion(-)
14
12
15
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
13
diff --git a/configure b/configure
16
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100755
17
--- a/target/arm/translate-a64.c
15
--- a/configure
18
+++ b/target/arm/translate-a64.c
16
+++ b/configure
19
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
17
@@ -XXX,XX +XXX,XX @@ LINKS="$LINKS python"
20
/* FMOV (vector, immediate) - half-precision */
18
LINKS="$LINKS contrib/plugins/Makefile "
21
imm = vfp_expand_imm(MO_16, abcdefgh);
19
for f in $LINKS ; do
22
/* now duplicate across the lanes */
20
if [ -e "$source_path/$f" ]; then
23
- imm = bitfield_replicate(imm, 16);
21
- mkdir -p `dirname ./$f`
24
+ imm = dup_const(MO_16, imm);
22
+ mkdir -p "$(dirname ./"$f")"
25
} else {
23
symlink "$source_path/$f" "$f"
26
imm = asimd_imm_const(abcdefgh, cmode, is_neg);
24
fi
27
}
25
done
28
--
26
--
29
2.20.1
27
2.25.1
30
28
31
29
1
Implement the MVE shift-vector-left-by-immediate insns VSHL, VQSHL
1
Shellcheck warns that we have one place where we run a command and
2
and VQSHLU.
2
then check if it failed using $?; this is better written to simply
3
3
check the command in the 'if' statement directly.
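(For illustration only, using a hypothetical directory name: testing the
command directly avoids relying on $? still holding the right value.

    mkdir -p "$tmpdir"
    if [ $? -ne 0 ]; then            # old pattern: check the saved status
        echo "cannot create $tmpdir"; exit 1
    fi

    if ! mkdir -p "$tmpdir"; then    # new pattern: check the command itself
        echo "cannot create $tmpdir"; exit 1
    fi

The second form is also immune to anything slipping in between the command
and the status check.)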
4
The size-and-immediate encoding here is the same as Neon, and we
5
handle it the same way neon-dp.decode does.
6
4
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
9
Message-id: 20210628135835.6690-8-peter.maydell@linaro.org
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Message-id: 20220825150703.4074125-7-peter.maydell@linaro.org
10
---
9
---
11
target/arm/helper-mve.h | 16 +++++++++++
10
configure | 3 +--
12
target/arm/mve.decode | 23 +++++++++++++++
11
1 file changed, 1 insertion(+), 2 deletions(-)
13
target/arm/mve_helper.c | 57 ++++++++++++++++++++++++++++++++++++++
14
target/arm/translate-mve.c | 51 ++++++++++++++++++++++++++++++++++
15
4 files changed, 147 insertions(+)
16
12
17
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
13
diff --git a/configure b/configure
18
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100755
19
--- a/target/arm/helper-mve.h
15
--- a/configure
20
+++ b/target/arm/helper-mve.h
16
+++ b/configure
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
17
@@ -XXX,XX +XXX,XX @@ fi
22
DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
18
# it when configure exits.)
23
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
19
TMPDIR1="config-temp"
24
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
20
rm -rf "${TMPDIR1}"
25
+
21
-mkdir -p "${TMPDIR1}"
26
+DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
22
-if [ $? -ne 0 ]; then
27
+DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
+if ! mkdir -p "${TMPDIR1}"; then
28
+DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
echo "ERROR: failed to create temporary directory"
29
+
25
exit 1
30
+DEF_HELPER_FLAGS_4(mve_vqshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
26
fi
31
+DEF_HELPER_FLAGS_4(mve_vqshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
+DEF_HELPER_FLAGS_4(mve_vqshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
+
34
+DEF_HELPER_FLAGS_4(mve_vqshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
35
+DEF_HELPER_FLAGS_4(mve_vqshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
36
+DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
37
+
38
+DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
39
+DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
40
+DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
41
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/mve.decode
44
+++ b/target/arm/mve.decode
45
@@ -XXX,XX +XXX,XX @@
46
&2op qd qm qn size
47
&2scalar qd qn rm size
48
&1imm qd imm cmode op
49
+&2shift qd qm shift size
50
51
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
52
# Note that both Rn and Qd are 3 bits only (no D bit)
53
@@ -XXX,XX +XXX,XX @@
54
@2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
55
@2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
56
57
+@2_shl_b .... .... .. 001 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
58
+@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
59
+@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2
60
+
61
# Vector loads and stores
62
63
# Widening loads and narrowing stores:
64
@@ -XXX,XX +XXX,XX @@ VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
65
# So we have a single decode line and check the cmode/op in the
66
# trans function.
67
Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
68
+
69
+# Shifts by immediate
70
+
71
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
72
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
73
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
74
+
75
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
76
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
77
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
78
+
79
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
80
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
81
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
82
+
83
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
84
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
85
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w
86
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/target/arm/mve_helper.c
89
+++ b/target/arm/mve_helper.c
90
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
91
WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
92
#define DO_UQRSHL_OP(N, M, satp) \
93
WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
94
+#define DO_SUQSHL_OP(N, M, satp) \
95
+ WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)
96
97
DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
98
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
99
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvsw, 4, uint32_t)
100
DO_VADDV(vaddvub, 1, uint8_t)
101
DO_VADDV(vaddvuh, 2, uint16_t)
102
DO_VADDV(vaddvuw, 4, uint32_t)
103
+
104
+/* Shifts by immediate */
105
+#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
106
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
107
+ void *vm, uint32_t shift) \
108
+ { \
109
+ TYPE *d = vd, *m = vm; \
110
+ uint16_t mask = mve_element_mask(env); \
111
+ unsigned e; \
112
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
113
+ mergemask(&d[H##ESIZE(e)], \
114
+ FN(m[H##ESIZE(e)], shift), mask); \
115
+ } \
116
+ mve_advance_vpt(env); \
117
+ }
118
+
119
+#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \
120
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
121
+ void *vm, uint32_t shift) \
122
+ { \
123
+ TYPE *d = vd, *m = vm; \
124
+ uint16_t mask = mve_element_mask(env); \
125
+ unsigned e; \
126
+ bool qc = false; \
127
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
128
+ bool sat = false; \
129
+ mergemask(&d[H##ESIZE(e)], \
130
+ FN(m[H##ESIZE(e)], shift, &sat), mask); \
131
+ qc |= sat & mask & 1; \
132
+ } \
133
+ if (qc) { \
134
+ env->vfp.qc[0] = qc; \
135
+ } \
136
+ mve_advance_vpt(env); \
137
+ }
138
+
139
+/* provide unsigned 2-op shift helpers for all sizes */
140
+#define DO_2SHIFT_U(OP, FN) \
141
+ DO_2SHIFT(OP##b, 1, uint8_t, FN) \
142
+ DO_2SHIFT(OP##h, 2, uint16_t, FN) \
143
+ DO_2SHIFT(OP##w, 4, uint32_t, FN)
144
+
145
+#define DO_2SHIFT_SAT_U(OP, FN) \
146
+ DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
147
+ DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \
148
+ DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
149
+#define DO_2SHIFT_SAT_S(OP, FN) \
150
+ DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \
151
+ DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \
152
+ DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)
153
+
154
+DO_2SHIFT_U(vshli_u, DO_VSHLU)
155
+DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
156
+DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
157
+DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
158
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
159
index XXXXXXX..XXXXXXX 100644
160
--- a/target/arm/translate-mve.c
161
+++ b/target/arm/translate-mve.c
162
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
163
typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
164
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
165
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
166
+typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
167
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
168
typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
169
typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
170
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
171
}
172
return do_1imm(s, a, fn);
173
}
174
+
175
+static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
176
+ bool negateshift)
177
+{
178
+ TCGv_ptr qd, qm;
179
+ int shift = a->shift;
180
+
181
+ if (!dc_isar_feature(aa32_mve, s) ||
182
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
183
+ !fn) {
184
+ return false;
185
+ }
186
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
187
+ return true;
188
+ }
189
+
190
+ /*
191
+ * When we handle a right shift insn using a left-shift helper
192
+ * which permits a negative shift count to indicate a right-shift,
193
+ * we must negate the shift count.
194
+ */
195
+ if (negateshift) {
196
+ shift = -shift;
197
+ }
198
+
199
+ qd = mve_qreg_ptr(a->qd);
200
+ qm = mve_qreg_ptr(a->qm);
201
+ fn(cpu_env, qd, qm, tcg_constant_i32(shift));
202
+ tcg_temp_free_ptr(qd);
203
+ tcg_temp_free_ptr(qm);
204
+ mve_update_eci(s);
205
+ return true;
206
+}
207
+
208
+#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \
209
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
210
+ { \
211
+ static MVEGenTwoOpShiftFn * const fns[] = { \
212
+ gen_helper_mve_##FN##b, \
213
+ gen_helper_mve_##FN##h, \
214
+ gen_helper_mve_##FN##w, \
215
+ NULL, \
216
+ }; \
217
+ return do_2shift(s, a, fns[a->size], NEGATESHIFT); \
218
+ }
219
+
220
+DO_2SHIFT(VSHLI, vshli_u, false)
221
+DO_2SHIFT(VQSHLI_S, vqshli_s, false)
222
+DO_2SHIFT(VQSHLI_U, vqshli_u, false)
223
+DO_2SHIFT(VQSHLUI, vqshlui_s, false)
224
--
27
--
225
2.20.1
28
2.25.1
226
29
227
30
1
Implement the MVE logical-immediate insns (VMOV, VMVN,
1
We use the non-POSIX 'local' keyword in just two places in configure;
2
VORR and VBIC). These have essentially the same encoding
2
rewrite to avoid it.
3
as their Neon equivalents, and we implement the decode
3
4
in the same way.
4
In do_compiler(), just drop the 'local' keyword. The variable
5
'compiler' is only used elsewhere in the do_compiler_werror()
6
function, which already uses the variable as a normal non-local one.
7
8
In probe_target_compiler(), $try and $t are both local; make them
9
normal variables and use a more obviously distinct variable name
10
for $t.
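(For illustration only, with hypothetical function and variable names: 'local'
is a shell extension rather than part of POSIX sh, so some shells do not
accept it. The rewrite uses ordinary variables whose names do not clash with
anything else in the script.

    # before: uses the non-POSIX 'local' keyword
    pick() {
        local t
        t="native"
        echo "$t"
    }

    # after: a plain variable with a name unlikely to clash elsewhere
    pick() {
        picktry="native"
        echo "$picktry"
    }

The patch applies the same idea to do_compiler() and probe_target_compiler().)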
5
11
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
8
Message-id: 20210628135835.6690-7-peter.maydell@linaro.org
14
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
15
Message-id: 20220825150703.4074125-8-peter.maydell@linaro.org
9
---
16
---
10
target/arm/helper-mve.h | 4 +++
17
configure | 7 +++----
11
target/arm/mve.decode | 17 +++++++++++++
18
1 file changed, 3 insertions(+), 4 deletions(-)
12
target/arm/mve_helper.c | 24 ++++++++++++++++++
13
target/arm/translate-mve.c | 50 ++++++++++++++++++++++++++++++++++++++
14
4 files changed, 95 insertions(+)
15
19
16
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
20
diff --git a/configure b/configure
17
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100755
18
--- a/target/arm/helper-mve.h
22
--- a/configure
19
+++ b/target/arm/helper-mve.h
23
+++ b/configure
20
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
24
@@ -XXX,XX +XXX,XX @@ error_exit() {
21
DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
25
do_compiler() {
22
DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
26
# Run the compiler, capturing its output to the log. First argument
23
DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
27
# is compiler binary to execute.
24
+
28
- local compiler="$1"
25
+DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
29
+ compiler="$1"
26
+DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
30
shift
27
+DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
31
if test -n "$BASH_VERSION"; then eval '
28
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
32
echo >>config.log "
29
index XXXXXXX..XXXXXXX 100644
33
@@ -XXX,XX +XXX,XX @@ probe_target_compiler() {
30
--- a/target/arm/mve.decode
34
: ${container_cross_strip:=${container_cross_prefix}strip}
31
+++ b/target/arm/mve.decode
35
done
32
@@ -XXX,XX +XXX,XX @@
36
33
# VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
37
- local t try
34
%size_28 28:1 !function=plus_1
38
try=cross
35
39
case "$target_arch:$cpu" in
36
+# 1imm format immediate
40
aarch64_be:aarch64 | \
37
+%imm_28_16_0 28:1 16:3 0:4
41
@@ -XXX,XX +XXX,XX @@ probe_target_compiler() {
38
+
42
try='native cross' ;;
39
&vldr_vstr rn qd imm p a w size l u
43
esac
40
&1op qd qm size
44
eval "target_cflags=\${cross_cc_cflags_$target_arch}"
41
&2op qd qm qn size
45
- for t in $try; do
42
&2scalar qd qn rm size
46
- case $t in
43
+&1imm qd imm cmode op
47
+ for thistry in $try; do
44
48
+ case $thistry in
45
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
49
native)
46
# Note that both Rn and Qd are 3 bits only (no D bit)
50
target_cc=$cc
47
@@ -XXX,XX +XXX,XX @@
51
target_ccas=$ccas
48
@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
49
@2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
50
size=%size_28
51
+@1imm .... .... .... .... .... cmode:4 .. op:1 . .... &1imm qd=%qd imm=%imm_28_16_0
52
53
# The _rev suffix indicates that Vn and Vm are reversed. This is
54
# the case for shifts. In the Arm ARM these insns are documented
55
@@ -XXX,XX +XXX,XX @@ VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rd
56
# Predicate operations
57
%mask_22_13 22:1 13:3
58
VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
59
+
60
+# Logical immediate operations (1 reg and modified-immediate)
61
+
62
+# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
63
+# not in a way we can conveniently represent in decodetree without
64
+# a lot of repetition:
65
+# VORR: op=0, (cmode & 1) && cmode < 12
66
+# VBIC: op=1, (cmode & 1) && cmode < 12
67
+# VMOV: everything else
68
+# So we have a single decode line and check the cmode/op in the
69
+# trans function.
70
+Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
71
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
72
index XXXXXXX..XXXXXXX 100644
73
--- a/target/arm/mve_helper.c
74
+++ b/target/arm/mve_helper.c
75
@@ -XXX,XX +XXX,XX @@ DO_1OP(vnegw, 4, int32_t, DO_NEG)
76
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
77
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
78
79
+/*
80
+ * 1 operand immediates: Vda is destination and possibly also one source.
81
+ * All these insns work at 64-bit widths.
82
+ */
83
+#define DO_1OP_IMM(OP, FN) \
84
+ void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \
85
+ { \
86
+ uint64_t *da = vda; \
87
+ uint16_t mask = mve_element_mask(env); \
88
+ unsigned e; \
89
+ for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
90
+ mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \
91
+ } \
92
+ mve_advance_vpt(env); \
93
+ }
94
+
95
+#define DO_MOVI(N, I) (I)
96
+#define DO_ANDI(N, I) ((N) & (I))
97
+#define DO_ORRI(N, I) ((N) | (I))
98
+
99
+DO_1OP_IMM(vmovi, DO_MOVI)
100
+DO_1OP_IMM(vandi, DO_ANDI)
101
+DO_1OP_IMM(vorri, DO_ORRI)
102
+
103
#define DO_2OP(OP, ESIZE, TYPE, FN) \
104
void HELPER(glue(mve_, OP))(CPUARMState *env, \
105
void *vd, void *vn, void *vm) \
106
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
107
index XXXXXXX..XXXXXXX 100644
108
--- a/target/arm/translate-mve.c
109
+++ b/target/arm/translate-mve.c
110
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
111
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
112
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
113
typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
114
+typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
115
116
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
117
static inline long mve_qreg_offset(unsigned reg)
118
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
119
mve_update_eci(s);
120
return true;
121
}
122
+
123
+static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
124
+{
125
+ TCGv_ptr qd;
126
+ uint64_t imm;
127
+
128
+ if (!dc_isar_feature(aa32_mve, s) ||
129
+ !mve_check_qreg_bank(s, a->qd) ||
130
+ !fn) {
131
+ return false;
132
+ }
133
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
134
+ return true;
135
+ }
136
+
137
+ imm = asimd_imm_const(a->imm, a->cmode, a->op);
138
+
139
+ qd = mve_qreg_ptr(a->qd);
140
+ fn(cpu_env, qd, tcg_constant_i64(imm));
141
+ tcg_temp_free_ptr(qd);
142
+ mve_update_eci(s);
143
+ return true;
144
+}
145
+
146
+static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
147
+{
148
+ /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
149
+ MVEGenOneOpImmFn *fn;
150
+
151
+ if ((a->cmode & 1) && a->cmode < 12) {
152
+ if (a->op) {
153
+ /*
154
+ * For op=1, the immediate will be inverted by asimd_imm_const(),
155
+ * so the VBIC becomes a logical AND operation.
156
+ */
157
+ fn = gen_helper_mve_vandi;
158
+ } else {
159
+ fn = gen_helper_mve_vorri;
160
+ }
161
+ } else {
162
+ /* There is one unallocated cmode/op combination in this space */
163
+ if (a->cmode == 15 && a->op == 1) {
164
+ return false;
165
+ }
166
+ /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */
167
+ fn = gen_helper_mve_vmovi;
168
+ }
169
+ return do_1imm(s, a, fn);
170
+}
171
--
52
--
172
2.20.1
53
2.25.1
173
54
174
55