The following changes since commit 5a67d7735d4162630769ef495cf813244fc850df:

  Merge remote-tracking branch 'remotes/berrange-gitlab/tags/tls-deps-pull-request' into staging (2021-07-02 08:22:39 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210702

for you to fetch changes up to 04ea4d3cfd0a21b248ece8eb7a9436a3d9898dd8:

  target/arm: Implement MVE shifts by register (2021-07-02 11:48:38 +0100)

----------------------------------------------------------------
target-arm queue:
 * more MVE instructions
 * hw/gpio/gpio_pwr: use shutdown function for reboot
 * target/arm: Check NaN mode before silencing NaN
 * tests: Boot and halt a Linux guest on the Raspberry Pi 2 machine
 * hw/arm: Add basic power management to raspi.
 * docs/system/arm: Add quanta-gbs-bmc, quanta-q7l1-bmc

----------------------------------------------------------------
Joe Komlodi (1):
      target/arm: Check NaN mode before silencing NaN

Maxim Uvarov (1):
      hw/gpio/gpio_pwr: use shutdown function for reboot

Nolan Leake (1):
      hw/arm: Add basic power management to raspi.

Patrick Venture (2):
      docs/system/arm: Add quanta-q7l1-bmc reference
      docs/system/arm: Add quanta-gbs-bmc reference

Peter Maydell (18):
      target/arm: Fix MVE widening/narrowing VLDR/VSTR offset calculation
      target/arm: Fix bugs in MVE VRMLALDAVH, VRMLSLDAVH
      target/arm: Make asimd_imm_const() public
      target/arm: Use asimd_imm_const for A64 decode
      target/arm: Use dup_const() instead of bitfield_replicate()
      target/arm: Implement MVE logical immediate insns
      target/arm: Implement MVE vector shift left by immediate insns
      target/arm: Implement MVE vector shift right by immediate insns
      target/arm: Implement MVE VSHLL
      target/arm: Implement MVE VSRI, VSLI
      target/arm: Implement MVE VSHRN, VRSHRN
      target/arm: Implement MVE saturating narrowing shifts
      target/arm: Implement MVE VSHLC
      target/arm: Implement MVE VADDLV
      target/arm: Implement MVE long shifts by immediate
      target/arm: Implement MVE long shifts by register
      target/arm: Implement MVE shifts by immediate
      target/arm: Implement MVE shifts by register

Philippe Mathieu-Daudé (1):
      tests: Boot and halt a Linux guest on the Raspberry Pi 2 machine

 docs/system/arm/aspeed.rst             |   1 +
 docs/system/arm/nuvoton.rst            |   5 +-
 include/hw/arm/bcm2835_peripherals.h   |   3 +-
 include/hw/misc/bcm2835_powermgt.h     |  29 ++
 target/arm/helper-mve.h                | 108 +++++++
 target/arm/translate.h                 |  41 +++
 target/arm/mve.decode                  | 177 ++++++++++-
 target/arm/t32.decode                  |  71 ++++-
 hw/arm/bcm2835_peripherals.c           |  13 +-
 hw/gpio/gpio_pwr.c                     |   2 +-
 hw/misc/bcm2835_powermgt.c             | 160 ++++++++++
 target/arm/helper-a64.c                |  12 +-
 target/arm/mve_helper.c                | 524 +++++++++++++++++++++++++++++++--
 target/arm/translate-a64.c             |  86 +-----
 target/arm/translate-mve.c             | 261 +++++++++++++++-
 target/arm/translate-neon.c            |  81 -----
 target/arm/translate.c                 | 327 +++++++++++++++++++-
 target/arm/vfp_helper.c                |  24 +-
 hw/misc/meson.build                    |   1 +
 tests/acceptance/boot_linux_console.py |  43 +++
 20 files changed, 1760 insertions(+), 209 deletions(-)
 create mode 100644 include/hw/misc/bcm2835_powermgt.h
 create mode 100644 hw/misc/bcm2835_powermgt.c

From: Patrick Venture <venture@google.com>

Adds a line-item reference to the supported quanta-q71l-bmc aspeed
entry.

Signed-off-by: Patrick Venture <venture@google.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20210615192848.1065297-2-venture@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/aspeed.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/aspeed.rst
+++ b/docs/system/arm/aspeed.rst
@@ -XXX,XX +XXX,XX @@ etc.
 AST2400 SoC based machines :

 - ``palmetto-bmc``         OpenPOWER Palmetto POWER8 BMC
+- ``quanta-q71l-bmc``      OpenBMC Quanta BMC

 AST2500 SoC based machines :

--
2.20.1

From: Patrick Venture <venture@google.com>

Add a line-item reference to the quanta-gbs-bmc machine.

Signed-off-by: Patrick Venture <venture@google.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20210615192848.1065297-3-venture@google.com
[PMM: fixed underline Sphinx warning]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/nuvoton.rst | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/nuvoton.rst
+++ b/docs/system/arm/nuvoton.rst
@@ -XXX,XX +XXX,XX @@
-Nuvoton iBMC boards (``npcm750-evb``, ``quanta-gsj``)
-=====================================================
+Nuvoton iBMC boards (``*-bmc``, ``npcm750-evb``, ``quanta-gsj``)
+================================================================

 The `Nuvoton iBMC`_ chips (NPCM7xx) are a family of ARM-based SoCs that are
 designed to be used as Baseboard Management Controllers (BMCs) in various
@@ -XXX,XX +XXX,XX @@ segment. The following machines are based on this chip :
 The NPCM730 SoC has two Cortex-A9 cores and is targeted for Data Center and
 Hyperscale applications. The following machines are based on this chip :

+- ``quanta-gbs-bmc``    Quanta GBS server BMC
 - ``quanta-gsj``        Quanta GSJ server BMC

 There are also two more SoCs, NPCM710 and NPCM705, which are single-core
--
2.20.1

From: Nolan Leake <nolan@sigbus.net>

This is just enough to make reboot and poweroff work. Works for
linux, u-boot, and the arm trusted firmware. Not tested, but should
work for plan9, and bare-metal/hobby OSes, since they seem to generally
do what linux does for reset.

The watchdog timer functionality is not yet implemented.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/64
Signed-off-by: Nolan Leake <nolan@sigbus.net>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210625210209.1870217-1-nolan@sigbus.net
[PMM: tweaked commit title; fixed region size to 0x200;
 moved header file to include/]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/bcm2835_peripherals.h |   3 +-
 include/hw/misc/bcm2835_powermgt.h   |  29 +++++
 hw/arm/bcm2835_peripherals.c         |  13 ++-
 hw/misc/bcm2835_powermgt.c           | 160 +++++++++++++++++++++
 hw/misc/meson.build                  |   1 +
 5 files changed, 204 insertions(+), 2 deletions(-)
 create mode 100644 include/hw/misc/bcm2835_powermgt.h
 create mode 100644 hw/misc/bcm2835_powermgt.c

diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/bcm2835_peripherals.h
+++ b/include/hw/arm/bcm2835_peripherals.h
@@ -XXX,XX +XXX,XX @@
 #include "hw/misc/bcm2835_mphi.h"
 #include "hw/misc/bcm2835_thermal.h"
 #include "hw/misc/bcm2835_cprman.h"
+#include "hw/misc/bcm2835_powermgt.h"
 #include "hw/sd/sdhci.h"
 #include "hw/sd/bcm2835_sdhost.h"
 #include "hw/gpio/bcm2835_gpio.h"
@@ -XXX,XX +XXX,XX @@ struct BCM2835PeripheralState {
     BCM2835MphiState mphi;
     UnimplementedDeviceState txp;
     UnimplementedDeviceState armtmr;
-    UnimplementedDeviceState powermgt;
+    BCM2835PowerMgtState powermgt;
     BCM2835CprmanState cprman;
     PL011State uart0;
     BCM2835AuxState aux;
diff --git a/include/hw/misc/bcm2835_powermgt.h b/include/hw/misc/bcm2835_powermgt.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/hw/misc/bcm2835_powermgt.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * BCM2835 Power Management emulation
+ *
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef BCM2835_POWERMGT_H
+#define BCM2835_POWERMGT_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2835_POWERMGT "bcm2835-powermgt"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PowerMgtState, BCM2835_POWERMGT)
+
+struct BCM2835PowerMgtState {
+    SysBusDevice busdev;
+    MemoryRegion iomem;
+
+    uint32_t rstc;
+    uint32_t rsts;
+    uint32_t wdog;
+};
+
+#endif
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/bcm2835_peripherals.c
+++ b/hw/arm/bcm2835_peripherals.c
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_init(Object *obj)

     object_property_add_const_link(OBJECT(&s->dwc2), "dma-mr",
                                    OBJECT(&s->gpu_bus_mr));
+
+    /* Power Management */
+    object_initialize_child(obj, "powermgt", &s->powermgt,
+                            TYPE_BCM2835_POWERMGT);
 }

 static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
                        qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
                                               INTERRUPT_USB));

+    /* Power Management */
+    if (!sysbus_realize(SYS_BUS_DEVICE(&s->powermgt), errp)) {
+        return;
+    }
+
+    memory_region_add_subregion(&s->peri_mr, PM_OFFSET,
+                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->powermgt), 0));
+
     create_unimp(s, &s->txp, "bcm2835-txp", TXP_OFFSET, 0x1000);
     create_unimp(s, &s->armtmr, "bcm2835-sp804", ARMCTRL_TIMER0_1_OFFSET, 0x40);
-    create_unimp(s, &s->powermgt, "bcm2835-powermgt", PM_OFFSET, 0x114);
     create_unimp(s, &s->i2s, "bcm2835-i2s", I2S_OFFSET, 0x100);
     create_unimp(s, &s->smi, "bcm2835-smi", SMI_OFFSET, 0x100);
     create_unimp(s, &s->spi[0], "bcm2835-spi0", SPI0_OFFSET, 0x20);
diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/misc/bcm2835_powermgt.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * BCM2835 Power Management emulation
+ *
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/misc/bcm2835_powermgt.h"
+#include "migration/vmstate.h"
+#include "sysemu/runstate.h"
+
+#define PASSWORD 0x5a000000
+#define PASSWORD_MASK 0xff000000
+
+#define R_RSTC 0x1c
+#define V_RSTC_RESET 0x20
+#define R_RSTS 0x20
+#define V_RSTS_POWEROFF 0x555 /* Linux uses partition 63 to indicate halt. */
+#define R_WDOG 0x24
+
+static uint64_t bcm2835_powermgt_read(void *opaque, hwaddr offset,
+                                      unsigned size)
+{
+    BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
+    uint32_t res = 0;
+
+    switch (offset) {
+    case R_RSTC:
+        res = s->rstc;
+        break;
+    case R_RSTS:
+        res = s->rsts;
+        break;
+    case R_WDOG:
+        res = s->wdog;
+        break;
+
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_read: Unknown offset 0x%08"HWADDR_PRIx
+                      "\n", offset);
+        res = 0;
+        break;
+    }
+
+    return res;
+}
+
+static void bcm2835_powermgt_write(void *opaque, hwaddr offset,
+                                   uint64_t value, unsigned size)
+{
+    BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
+
+    if ((value & PASSWORD_MASK) != PASSWORD) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "bcm2835_powermgt_write: Bad password 0x%"PRIx64
+                      " at offset 0x%08"HWADDR_PRIx"\n",
+                      value, offset);
+        return;
+    }
+
+    value = value & ~PASSWORD_MASK;
+
+    switch (offset) {
+    case R_RSTC:
+        s->rstc = value;
+        if (value & V_RSTC_RESET) {
+            if ((s->rsts & 0xfff) == V_RSTS_POWEROFF) {
+                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+            } else {
+                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+            }
+        }
+        break;
+    case R_RSTS:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_write: RSTS\n");
+        s->rsts = value;
+        break;
+    case R_WDOG:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_write: WDOG\n");
+        s->wdog = value;
+        break;
+
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_write: Unknown offset 0x%08"HWADDR_PRIx
+                      "\n", offset);
+        break;
+    }
+}
+
+static const MemoryRegionOps bcm2835_powermgt_ops = {
+    .read = bcm2835_powermgt_read,
+    .write = bcm2835_powermgt_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl.min_access_size = 4,
+    .impl.max_access_size = 4,
+};
+
+static const VMStateDescription vmstate_bcm2835_powermgt = {
+    .name = TYPE_BCM2835_POWERMGT,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(rstc, BCM2835PowerMgtState),
+        VMSTATE_UINT32(rsts, BCM2835PowerMgtState),
+        VMSTATE_UINT32(wdog, BCM2835PowerMgtState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void bcm2835_powermgt_init(Object *obj)
+{
+    BCM2835PowerMgtState *s = BCM2835_POWERMGT(obj);
+
+    memory_region_init_io(&s->iomem, obj, &bcm2835_powermgt_ops, s,
+                          TYPE_BCM2835_POWERMGT, 0x200);
+    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
+}
+
+static void bcm2835_powermgt_reset(DeviceState *dev)
+{
+    BCM2835PowerMgtState *s = BCM2835_POWERMGT(dev);
+
+    /* https://elinux.org/BCM2835_registers#PM */
+    s->rstc = 0x00000102;
+    s->rsts = 0x00001000;
+    s->wdog = 0x00000000;
+}
+
+static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = bcm2835_powermgt_reset;
+    dc->vmsd = &vmstate_bcm2835_powermgt;
+}
+
+static TypeInfo bcm2835_powermgt_info = {
+    .name = TYPE_BCM2835_POWERMGT,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(BCM2835PowerMgtState),
+    .class_init = bcm2835_powermgt_class_init,
+    .instance_init = bcm2835_powermgt_init,
+};
+
+static void bcm2835_powermgt_register_types(void)
+{
+    type_register_static(&bcm2835_powermgt_info);
+}
+
+type_init(bcm2835_powermgt_register_types)
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files(
   'bcm2835_rng.c',
   'bcm2835_thermal.c',
   'bcm2835_cprman.c',
+  'bcm2835_powermgt.c',
 ))
 softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c'))
 softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c', 'zynq-xadc.c'))
--
2.20.1

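For reference, a minimal bare-metal sketch of the guest-side sequence the
model above responds to. This is an illustration only, not part of the
patch: the register offsets and the 0x5a password byte come from the model
code, while the 0x3f100000 base address is the raspi2 mapping of the PM
block and is an assumption here.

    #include <stdint.h>

    #define PM_BASE       0x3f100000u  /* raspi2 PM block; assumed base */
    #define PM_RSTC       (PM_BASE + 0x1c)
    #define PM_RSTS       (PM_BASE + 0x20)
    #define PM_PASSWORD   0x5a000000u
    #define PM_RSTC_RESET 0x20u

    static void mmio_write32(uintptr_t addr, uint32_t value)
    {
        *(volatile uint32_t *)addr = value;
    }

    void guest_reboot(void)
    {
        /* Writes without the 0x5a password byte are ignored. */
        mmio_write32(PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);
    }

    void guest_poweroff(void)
    {
        /* "Partition 63" (0x555) in RSTS marks the reset as a halt. */
        mmio_write32(PM_RSTS, PM_PASSWORD | 0x555);
        mmio_write32(PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);
    }
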
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Add a test that boots and quickly shuts down a raspi2 machine, to
exercise the power management model:

(1/1) tests/acceptance/boot_linux_console.py:BootLinuxConsole.test_arm_raspi2_initrd:
console: [    0.000000] Booting Linux on physical CPU 0xf00
console: [    0.000000] Linux version 4.14.98-v7+ (dom@dom-XPS-13-9370) (gcc version 4.9.3 (crosstool-NG crosstool-ng-1.22.0-88-g8460611)) #1200 SMP Tue Feb 12 20:27:48 GMT 2019
console: [    0.000000] CPU: ARMv7 Processor [410fc075] revision 5 (ARMv7), cr=10c5387d
console: [    0.000000] CPU: div instructions available: patching division code
console: [    0.000000] CPU: PIPT / VIPT nonaliasing data cache, VIPT aliasing instruction cache
console: [    0.000000] OF: fdt: Machine model: Raspberry Pi 2 Model B
...
console: Boot successful.
console: cat /proc/cpuinfo
console: / # cat /proc/cpuinfo
...
console: processor       : 3
console: model name      : ARMv7 Processor rev 5 (v7l)
console: BogoMIPS        : 125.00
console: Features        : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm
console: CPU implementer : 0x41
console: CPU architecture: 7
console: CPU variant     : 0x0
console: CPU part        : 0xc07
console: CPU revision    : 5
console: Hardware        : BCM2835
console: Revision        : 0000
console: Serial          : 0000000000000000
console: cat /proc/iomem
console: / # cat /proc/iomem
console: 00000000-3bffffff : System RAM
console: 00008000-00afffff : Kernel code
console: 00c00000-00d468ef : Kernel data
console: 3f006000-3f006fff : dwc_otg
console: 3f007000-3f007eff : /soc/dma@7e007000
console: 3f00b880-3f00b8bf : /soc/mailbox@7e00b880
console: 3f100000-3f100027 : /soc/watchdog@7e100000
console: 3f101000-3f102fff : /soc/cprman@7e101000
console: 3f200000-3f2000b3 : /soc/gpio@7e200000
PASS (24.59 s)
RESULTS    : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0
JOB TIME   : 25.02 s

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Wainer dos Santos Moschetta <wainersm@redhat.com>
Message-id: 20210531113837.1689775-1-f4bug@amsat.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/acceptance/boot_linux_console.py | 43 ++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/tests/acceptance/boot_linux_console.py b/tests/acceptance/boot_linux_console.py
index XXXXXXX..XXXXXXX 100644
--- a/tests/acceptance/boot_linux_console.py
+++ b/tests/acceptance/boot_linux_console.py
@@ -XXX,XX +XXX,XX @@
 from avocado import skip
 from avocado import skipUnless
 from avocado_qemu import Test
+from avocado_qemu import exec_command
 from avocado_qemu import exec_command_and_wait_for_pattern
 from avocado_qemu import interrupt_interactive_console_until_pattern
 from avocado_qemu import wait_for_console_pattern
@@ -XXX,XX +XXX,XX @@ def test_arm_raspi2_uart0(self):
         """
         self.do_test_arm_raspi2(0)

+    def test_arm_raspi2_initrd(self):
+        """
+        :avocado: tags=arch:arm
+        :avocado: tags=machine:raspi2
+        """
+        deb_url = ('http://archive.raspberrypi.org/debian/'
+                   'pool/main/r/raspberrypi-firmware/'
+                   'raspberrypi-kernel_1.20190215-1_armhf.deb')
+        deb_hash = 'cd284220b32128c5084037553db3c482426f3972'
+        deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
+        kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img')
+        dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb')
+
+        initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
+                      '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
+                      'arm/rootfs-armv7a.cpio.gz')
+        initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c'
+        initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
+        initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
+        archive.gzip_uncompress(initrd_path_gz, initrd_path)
+
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'earlycon=pl011,0x3f201000 console=ttyAMA0 '
+                               'panic=-1 noreboot ' +
+                               'dwc_otg.fiq_fsm_enable=0')
+        self.vm.add_args('-kernel', kernel_path,
+                         '-dtb', dtb_path,
+                         '-initrd', initrd_path,
+                         '-append', kernel_command_line,
+                         '-no-reboot')
+        self.vm.launch()
+        self.wait_for_console_pattern('Boot successful.')
+
+        exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+                                                'BCM2835')
+        exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
+                                                '/soc/cprman@7e101000')
+        exec_command(self, 'halt')
+        # Wait for VM to shut down gracefully
+        self.vm.wait()
+
     def test_arm_exynos4210_initrd(self):
         """
         :avocado: tags=arch:arm
--
2.20.1

From: Joe Komlodi <joe.komlodi@xilinx.com>

If the CPU is running in default NaN mode (FPCR.DN == 1) and we execute
FRSQRTE, FRECPE, or FRECPX with a signaling NaN, parts_silence_nan_frac() will
assert due to fpst->default_nan_mode being set.

To avoid this, we check to see what NaN mode we're running in before we call
floatxx_silence_nan().

Signed-off-by: Joe Komlodi <joe.komlodi@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1624662174-175828-2-git-send-email-joe.komlodi@xilinx.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-a64.c | 12 +++++++++---
 target/arm/vfp_helper.c | 24 ++++++++++++++++------
 2 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
     float16 nan = a;
     if (float16_is_signaling_nan(a, fpst)) {
         float_raise(float_flag_invalid, fpst);
-        nan = float16_silence_nan(a, fpst);
+        if (!fpst->default_nan_mode) {
+            nan = float16_silence_nan(a, fpst);
+        }
     }
     if (fpst->default_nan_mode) {
         nan = float16_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
     float32 nan = a;
     if (float32_is_signaling_nan(a, fpst)) {
         float_raise(float_flag_invalid, fpst);
-        nan = float32_silence_nan(a, fpst);
+        if (!fpst->default_nan_mode) {
+            nan = float32_silence_nan(a, fpst);
+        }
     }
     if (fpst->default_nan_mode) {
         nan = float32_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
     float64 nan = a;
     if (float64_is_signaling_nan(a, fpst)) {
         float_raise(float_flag_invalid, fpst);
-        nan = float64_silence_nan(a, fpst);
+        if (!fpst->default_nan_mode) {
+            nan = float64_silence_nan(a, fpst);
+        }
     }
     if (fpst->default_nan_mode) {
         nan = float64_default_nan(fpst);
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
     float16 nan = f16;
     if (float16_is_signaling_nan(f16, fpst)) {
         float_raise(float_flag_invalid, fpst);
-        nan = float16_silence_nan(f16, fpst);
+        if (!fpst->default_nan_mode) {
+            nan = float16_silence_nan(f16, fpst);
+        }
     }
     if (fpst->default_nan_mode) {
         nan = float16_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float32 HELPER(recpe_f32)(float32 input, void *fpstp)
     float32 nan = f32;
     if (float32_is_signaling_nan(f32, fpst)) {
         float_raise(float_flag_invalid, fpst);
-        nan = float32_silence_nan(f32, fpst);
+        if (!fpst->default_nan_mode) {
+            nan = float32_silence_nan(f32, fpst);
+        }
     }
     if (fpst->default_nan_mode) {
         nan = float32_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp)
     float64 nan = f64;
     if (float64_is_signaling_nan(f64, fpst)) {
         float_raise(float_flag_invalid, fpst);
-        nan = float64_silence_nan(f64, fpst);
+        if (!fpst->default_nan_mode) {
+            nan = float64_silence_nan(f64, fpst);
+        }
     }
     if (fpst->default_nan_mode) {
         nan = float64_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
     float16 nan = f16;
     if (float16_is_signaling_nan(f16, s)) {
         float_raise(float_flag_invalid, s);
-        nan = float16_silence_nan(f16, s);
+        if (!s->default_nan_mode) {
+            nan = float16_silence_nan(f16, fpstp);
+        }
     }
     if (s->default_nan_mode) {
         nan = float16_default_nan(s);
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
     float32 nan = f32;
     if (float32_is_signaling_nan(f32, s)) {
         float_raise(float_flag_invalid, s);
-        nan = float32_silence_nan(f32, s);
+        if (!s->default_nan_mode) {
+            nan = float32_silence_nan(f32, fpstp);
+        }
     }
     if (s->default_nan_mode) {
         nan = float32_default_nan(s);
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
     float64 nan = f64;
     if (float64_is_signaling_nan(f64, s)) {
         float_raise(float_flag_invalid, s);
-        nan = float64_silence_nan(f64, s);
+        if (!s->default_nan_mode) {
+            nan = float64_silence_nan(f64, fpstp);
+        }
     }
     if (s->default_nan_mode) {
         nan = float64_default_nan(s);
--
2.20.1

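The pattern the patch above applies at each call site, condensed into a
single standalone function for clarity. The helper name is invented; the
patch itself open-codes this at every site, and the softfloat calls are
the ones used in the diff:

    /* Hypothetical distillation of the fix, float32 case */
    static float32 recip_handle_nan(float32 a, float_status *fpst)
    {
        float32 nan = a;
        if (float32_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            if (!fpst->default_nan_mode) {
                /* Only silence the NaN when we will actually use it;
                 * parts_silence_nan_frac() asserts in default-NaN mode. */
                nan = float32_silence_nan(a, fpst);
            }
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    }
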
From: Maxim Uvarov <maxim.uvarov@linaro.org>

QEMU has two types of these functions: shutdown and reboot. The
shutdown function has to be used for machine shutdown; otherwise we
cause a reset with a bogus "cause" value when we intended a shutdown.

Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210625111842.3790-3-maxim.uvarov@linaro.org
[PMM: tweaked commit message]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/gpio/gpio_pwr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/gpio/gpio_pwr.c b/hw/gpio/gpio_pwr.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/gpio/gpio_pwr.c
+++ b/hw/gpio/gpio_pwr.c
@@ -XXX,XX +XXX,XX @@ static void gpio_pwr_reset(void *opaque, int n, int level)
 static void gpio_pwr_shutdown(void *opaque, int n, int level)
 {
     if (level) {
-        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
     }
 }

--
2.20.1

In do_ldst(), the calculation of the offset needs to be based on the
size of the memory access, not the size of the elements in the
vector. This meant we were getting it wrong for the widening and
narrowing variants of the various VLDR and VSTR insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-2-peter.maydell@linaro.org
---
 target/arm/translate-mve.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool mve_skip_first_beat(DisasContext *s)
     }
 }

-static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
+static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn,
+                    unsigned msize)
 {
     TCGv_i32 addr;
     uint32_t offset;
@@ -XXX,XX +XXX,XX @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
         return true;
     }

-    offset = a->imm << a->size;
+    offset = a->imm << msize;
     if (!a->a) {
         offset = -offset;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
         { gen_helper_mve_vstrw, gen_helper_mve_vldrw },
         { NULL, NULL }
     };
-    return do_ldst(s, a, ldstfns[a->size][a->l]);
+    return do_ldst(s, a, ldstfns[a->size][a->l], a->size);
 }

-#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST)                  \
+#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST, MSIZE)           \
     static bool trans_##OP(DisasContext *s, arg_VLDR_VSTR *a)   \
     {                                                           \
         static MVEGenLdStFn * const ldstfns[2][2] = {           \
             { gen_helper_mve_##ST, gen_helper_mve_##SLD },      \
             { NULL, gen_helper_mve_##ULD },                     \
         };                                                      \
-        return do_ldst(s, a, ldstfns[a->u][a->l]);              \
+        return do_ldst(s, a, ldstfns[a->u][a->l], MSIZE);       \
     }

-DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h)
-DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w)
-DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w)
+DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h, MO_8)
+DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w, MO_8)
+DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w, MO_16)

 static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
 {
--
2.20.1

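A worked instance of the bug fixed above, with invented numbers for
illustration:

    /*
     * VLDRB.U16 Qd, [Rn, #imm] is a widening load: it reads bytes from
     * memory and expands them into halfword vector elements.  With
     * imm = 4 the correct address offset is scaled by the memory access
     * size, 4 << MO_8 == 4 bytes.  Scaling by the element size instead
     * (4 << MO_16 == 8), as the old code did, doubled the offset.
     */
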
The initial implementation of the MVE VRMLALDAVH and VRMLSLDAVH
insns had some bugs:
 * the 32x32 multiply of elements was being done as 32x32->32,
   not 32x32->64
 * we were incorrectly maintaining the accumulator in its full
   72-bit form across all 4 beats of the insn; in the pseudocode
   it is squashed back into the 64 bits of the RdaHi:RdaLo
   registers after each beat

In particular, fixing the second of these allows us to recast
the implementation to avoid 128-bit arithmetic entirely.

Since the element size here is always 4, we can also drop the
parameterization of ESIZE to make the code a little more readable.

Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-3-peter.maydell@linaro.org
---
 target/arm/mve_helper.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@
  */

 #include "qemu/osdep.h"
-#include "qemu/int128.h"
 #include "cpu.h"
 #include "internals.h"
 #include "vec_internal.h"
@@ -XXX,XX +XXX,XX @@ DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
 DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)

 /*
- * Rounding multiply add long dual accumulate high: we must keep
- * a 72-bit internal accumulator value and return the top 64 bits.
+ * Rounding multiply add long dual accumulate high. In the pseudocode
+ * this is implemented with a 72-bit internal accumulator value of which
+ * the top 64 bits are returned. We optimize this to avoid having to
+ * use 128-bit arithmetic -- we can do this because the 72-bit accumulator
+ * is squashed back into 64-bits after each beat.
  */
-#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128)          \
+#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB)                             \
     uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn,          \
                                     void *vm, uint64_t a)                \
     {                                                                    \
         uint16_t mask = mve_element_mask(env);                           \
         unsigned e;                                                      \
         TYPE *n = vn, *m = vm;                                           \
-        Int128 acc = int128_lshift(TO128(a), 8);                         \
-        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {               \
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) {                       \
             if (mask & 1) {                                              \
+                LTYPE mul;                                               \
                 if (e & 1) {                                             \
-                    acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] *  \
-                                            m[H##ESIZE(e)]));            \
+                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)];         \
+                    if (SUB) {                                           \
+                        mul = -mul;                                      \
+                    }                                                    \
                 } else {                                                 \
-                    acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \
-                                             m[H##ESIZE(e)]));           \
+                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)];         \
                 }                                                        \
-                acc = int128_add(acc, int128_make64(1 << 7));            \
+                mul = (mul >> 8) + ((mul >> 7) & 1);                     \
+                a += mul;                                                \
             }                                                            \
         }                                                                \
         mve_advance_vpt(env);                                            \
-        return int128_getlo(int128_rshift(acc, 8));                     \
+        return a;                                                        \
     }

-DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64)
-DO_LDAVH(vrmlaldavhxsw, 4, int32_t, true, int128_add, int128_add, int128_makes64)
+DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
+DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)

-DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64)
+DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)

-DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
-DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
+DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
+DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)

 /* Vector add across vector */
 #define DO_VADDV(OP, ESIZE, TYPE) \
--
2.20.1

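The equivalence between the new per-beat rounding and the pseudocode's
add-128-then-shift form can be checked with a small worked case (numbers
invented for illustration):

    /*
     * Per beat, (mul + 128) >> 8  ==  (mul >> 8) + ((mul >> 7) & 1).
     * For mul = 384 (0x180):
     *   (384 + 128) >> 8              = 2
     *   (384 >> 8) + ((384 >> 7) & 1) = 1 + 1 = 2
     * For mul = -384 (with arithmetic shifts):
     *   (-384 + 128) >> 8               = -1
     *   (-384 >> 8) + ((-384 >> 7) & 1) = -2 + 1 = -1
     */
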
The function asimd_imm_const() in translate-neon.c is an
implementation of the pseudocode AdvSIMDExpandImm(), which we will
also want for MVE. Move the implementation to translate.c, with a
prototype in translate.h.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-4-peter.maydell@linaro.org
---
 target/arm/translate.h      | 16 ++++++++++
 target/arm/translate-neon.c | 63 -------------------------------------
 target/arm/translate.c      | 57 +++++++++++++++++++++++++++++++++
 3 files changed, 73 insertions(+), 63 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
     return opc | s->be_data;
 }

+/**
+ * asimd_imm_const: Expand an encoded SIMD constant value
+ *
+ * Expand a SIMD constant value. This is essentially the pseudocode
+ * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
+ * VMVN and VBIC (when cmode < 14 && op == 1).
+ *
+ * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
+ * callers must catch this.
+ *
+ * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
+ * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
+ * we produce an immediate constant value of 0 in these cases.
+ */
+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/translate-neon.c
@@ -XXX,XX +XXX,XX @@ DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh)
 DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_hs)
 DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_hu)

-static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
-{
-    /*
-     * Expand the encoded constant.
-     * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
-     * We choose to not special-case this and will behave as if a
-     * valid constant encoding of 0 had been given.
-     * cmode = 15 op = 1 must UNDEF; we assume decode has handled that.
-     */
-    switch (cmode) {
-    case 0: case 1:
-        /* no-op */
-        break;
-    case 2: case 3:
-        imm <<= 8;
-        break;
-    case 4: case 5:
-        imm <<= 16;
-        break;
-    case 6: case 7:
-        imm <<= 24;
-        break;
-    case 8: case 9:
-        imm |= imm << 16;
-        break;
-    case 10: case 11:
-        imm = (imm << 8) | (imm << 24);
-        break;
-    case 12:
-        imm = (imm << 8) | 0xff;
-        break;
-    case 13:
-        imm = (imm << 16) | 0xffff;
-        break;
-    case 14:
-        if (op) {
-            /*
-             * This is the only case where the top and bottom 32 bits
-             * of the encoded constant differ.
-             */
-            uint64_t imm64 = 0;
-            int n;
-
-            for (n = 0; n < 8; n++) {
-                if (imm & (1 << n)) {
-                    imm64 |= (0xffULL << (n * 8));
-                }
-            }
-            return imm64;
-        }
-        imm |= (imm << 8) | (imm << 16) | (imm << 24);
-        break;
-    case 15:
-        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
-            | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
-        break;
-    }
-    if (op) {
-        imm = ~imm;
-    }
-    return dup_const(MO_32, imm);
-}
-
 static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
                         GVecGen2iFn *fn)
 {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ void arm_translate_init(void)
     a64_translate_init();
 }

+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
+{
+    /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */
+    switch (cmode) {
+    case 0: case 1:
+        /* no-op */
+        break;
+    case 2: case 3:
+        imm <<= 8;
+        break;
+    case 4: case 5:
+        imm <<= 16;
+        break;
+    case 6: case 7:
+        imm <<= 24;
+        break;
+    case 8: case 9:
+        imm |= imm << 16;
+        break;
+    case 10: case 11:
+        imm = (imm << 8) | (imm << 24);
+        break;
+    case 12:
+        imm = (imm << 8) | 0xff;
+        break;
+    case 13:
+        imm = (imm << 16) | 0xffff;
+        break;
+    case 14:
+        if (op) {
+            /*
+             * This is the only case where the top and bottom 32 bits
+             * of the encoded constant differ.
+             */
+            uint64_t imm64 = 0;
+            int n;
+
+            for (n = 0; n < 8; n++) {
+                if (imm & (1 << n)) {
+                    imm64 |= (0xffULL << (n * 8));
+                }
+            }
+            return imm64;
+        }
+        imm |= (imm << 8) | (imm << 16) | (imm << 24);
+        break;
+    case 15:
+        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
+            | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
+        break;
+    }
+    if (op) {
+        imm = ~imm;
+    }
+    return dup_const(MO_32, imm);
+}
+
 /* Generate a label used for skipping this instruction */
 void arm_gen_condlabel(DisasContext *s)
 {
--
2.20.1

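A worked example of the expansion performed by asimd_imm_const() (values
invented for illustration):

    /*
     * cmode = 12, op = 0, imm = 0xab:
     *   case 12: imm = (0xab << 8) | 0xff = 0xabff
     *   dup_const(MO_32, 0xabff)          = 0x0000abff0000abffULL
     * With op = 1 the 32-bit pattern is inverted before replication,
     * which is what the VMVN/VBIC forms need.
     */
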
The A64 AdvSIMD modified-immediate grouping uses almost the same
constant encoding that A32 Neon does; reuse asimd_imm_const() (to
which we add the AArch64-specific case for cmode 15 op 1) instead of
reimplementing it all.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-5-peter.maydell@linaro.org
---
 target/arm/translate.h     |  3 +-
 target/arm/translate-a64.c | 86 ++++----------------------------------
 target/arm/translate.c     | 17 +++++++-
 3 files changed, 24 insertions(+), 82 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
  * VMVN and VBIC (when cmode < 14 && op == 1).
  *
  * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
- * callers must catch this.
+ * callers must catch this; we return the 64-bit constant value defined
+ * for AArch64.
  *
  * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
  * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
 {
     int rd = extract32(insn, 0, 5);
     int cmode = extract32(insn, 12, 4);
-    int cmode_3_1 = extract32(cmode, 1, 3);
-    int cmode_0 = extract32(cmode, 0, 1);
     int o2 = extract32(insn, 11, 1);
     uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
     bool is_neg = extract32(insn, 29, 1);
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
         return;
     }

-    /* See AdvSIMDExpandImm() in ARM ARM */
-    switch (cmode_3_1) {
-    case 0: /* Replicate(Zeros(24):imm8, 2) */
-    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
-    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
-    case 3: /* Replicate(imm8:Zeros(24), 2) */
-    {
-        int shift = cmode_3_1 * 8;
-        imm = bitfield_replicate(abcdefgh << shift, 32);
-        break;
-    }
-    case 4: /* Replicate(Zeros(8):imm8, 4) */
-    case 5: /* Replicate(imm8:Zeros(8), 4) */
-    {
-        int shift = (cmode_3_1 & 0x1) * 8;
-        imm = bitfield_replicate(abcdefgh << shift, 16);
-        break;
-    }
-    case 6:
-        if (cmode_0) {
-            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
-            imm = (abcdefgh << 16) | 0xffff;
-        } else {
-            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
-            imm = (abcdefgh << 8) | 0xff;
-        }
-        imm = bitfield_replicate(imm, 32);
-        break;
-    case 7:
-        if (!cmode_0 && !is_neg) {
-            imm = bitfield_replicate(abcdefgh, 8);
-        } else if (!cmode_0 && is_neg) {
-            int i;
-            imm = 0;
-            for (i = 0; i < 8; i++) {
-                if ((abcdefgh) & (1 << i)) {
-                    imm |= 0xffULL << (i * 8);
-                }
-            }
-        } else if (cmode_0) {
-            if (is_neg) {
-                imm = (abcdefgh & 0x3f) << 48;
-                if (abcdefgh & 0x80) {
-                    imm |= 0x8000000000000000ULL;
-                }
-                if (abcdefgh & 0x40) {
-                    imm |= 0x3fc0000000000000ULL;
-                } else {
-                    imm |= 0x4000000000000000ULL;
-                }
-            } else {
-                if (o2) {
-                    /* FMOV (vector, immediate) - half-precision */
-                    imm = vfp_expand_imm(MO_16, abcdefgh);
-                    /* now duplicate across the lanes */
-                    imm = bitfield_replicate(imm, 16);
-                } else {
-                    imm = (abcdefgh & 0x3f) << 19;
-                    if (abcdefgh & 0x80) {
-                        imm |= 0x80000000;
-                    }
-                    if (abcdefgh & 0x40) {
-                        imm |= 0x3e000000;
-                    } else {
-                        imm |= 0x40000000;
-                    }
-                    imm |= (imm << 32);
-                }
-            }
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    if (cmode_3_1 != 7 && is_neg) {
-        imm = ~imm;
+    if (cmode == 15 && o2 && !is_neg) {
+        /* FMOV (vector, immediate) - half-precision */
+        imm = vfp_expand_imm(MO_16, abcdefgh);
+        /* now duplicate across the lanes */
+        imm = bitfield_replicate(imm, 16);
+    } else {
+        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
     }

     if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
     case 14:
         if (op) {
             /*
-             * This is the only case where the top and bottom 32 bits
-             * of the encoded constant differ.
+             * This and cmode == 15 op == 1 are the only cases where
+             * the top and bottom 32 bits of the encoded constant differ.
              */
             uint64_t imm64 = 0;
             int n;
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
         imm |= (imm << 8) | (imm << 16) | (imm << 24);
         break;
     case 15:
+        if (op) {
+            /* Reserved encoding for AArch32; valid for AArch64 */
+            uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
+            if (imm & 0x80) {
+                imm64 |= 0x8000000000000000ULL;
+            }
+            if (imm & 0x40) {
+                imm64 |= 0x3fc0000000000000ULL;
+            } else {
+                imm64 |= 0x4000000000000000ULL;
+            }
+            return imm64;
+        }
         imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
             | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
         break;
--
2.20.1

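A worked example of the new cmode == 15, op == 1 path (value chosen for
illustration):

    /*
     * abcdefgh = 0x70: (0x70 & 0x3f) << 48 = 0x0030000000000000;
     * bit 6 is set, so |= 0x3fc0000000000000, giving 0x3ff0000000000000,
     * the IEEE double-precision constant 1.0 -- i.e. the immediate for
     * FMOV (vector, immediate) of +1.0.
     */
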
1
Use dup_const() instead of bitfield_replicate() in
2
disas_simd_mod_imm().
1
3
4
(We can't replace the other use of bitfield_replicate() in this file,
5
in logic_imm_decode_wmask(), because that location needs to handle 2
6
and 4 bit elements, which dup_const() cannot.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-6-peter.maydell@linaro.org
---
 target/arm/translate-a64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
             /* FMOV (vector, immediate) - half-precision */
             imm = vfp_expand_imm(MO_16, abcdefgh);
             /* now duplicate across the lanes */
-            imm = bitfield_replicate(imm, 16);
+            imm = dup_const(MO_16, imm);
         } else {
             imm = asimd_imm_const(abcdefgh, cmode, is_neg);
         }
--
2.20.1

Implement the MVE logical-immediate insns (VMOV, VMVN,
VORR and VBIC). These have essentially the same encoding
as their Neon equivalents, and we implement the decode
in the same way.
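
The cmode/op carve-up described in the mve.decode comment below can be
summed up by this sketch (a hypothetical classifier for illustration,
not the actual trans_Vimm_1r() code):

    /* Sketch of the 1-reg-and-modified-immediate decode space */
    static const char *classify_1imm(int cmode, int op)
    {
        if ((cmode & 1) && cmode < 12) {
            return op ? "VBIC" : "VORR";
        }
        if (cmode == 15 && op == 1) {
            return "UNALLOCATED";
        }
        return "VMOV/VMVN"; /* asimd_imm_const() picks which */
    }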

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-7-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  4 +++
 target/arm/mve.decode      | 17 +++++++++++++
 target/arm/mve_helper.c    | 24 ++++++++++++++++++
 target/arm/translate-mve.c | 50 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 95 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 # VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
 %size_28 28:1 !function=plus_1

+# 1imm format immediate
+%imm_28_16_0 28:1 16:3 0:4
+
 &vldr_vstr rn qd imm p a w size l u
 &1op qd qm size
 &2op qd qm qn size
 &2scalar qd qn rm size
+&1imm qd imm cmode op

 @vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
 # Note that both Rn and Qd are 3 bits only (no D bit)
@@ -XXX,XX +XXX,XX @@
 @2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
 @2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
           size=%size_28
+@1imm .... .... .... .... .... cmode:4 .. op:1 . .... &1imm qd=%qd imm=%imm_28_16_0

 # The _rev suffix indicates that Vn and Vm are reversed. This is
 # the case for shifts. In the Arm ARM these insns are documented
@@ -XXX,XX +XXX,XX @@ VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rd
 # Predicate operations
 %mask_22_13 22:1 13:3
 VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
+
+# Logical immediate operations (1 reg and modified-immediate)
+
+# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
+# not in a way we can conveniently represent in decodetree without
+# a lot of repetition:
+# VORR: op=0, (cmode & 1) && cmode < 12
+# VBIC: op=1, (cmode & 1) && cmode < 12
+# VMOV: everything else
+# So we have a single decode line and check the cmode/op in the
+# trans function.
+Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vnegw, 4, int32_t, DO_NEG)
 DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
 DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)

+/*
+ * 1 operand immediates: Vda is destination and possibly also one source.
+ * All these insns work at 64-bit widths.
+ */
+#define DO_1OP_IMM(OP, FN) \
+    void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \
+    { \
+        uint64_t *da = vda; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
+            mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_MOVI(N, I) (I)
+#define DO_ANDI(N, I) ((N) & (I))
+#define DO_ORRI(N, I) ((N) | (I))
+
+DO_1OP_IMM(vmovi, DO_MOVI)
+DO_1OP_IMM(vandi, DO_ANDI)
+DO_1OP_IMM(vorri, DO_ORRI)
+
 #define DO_2OP(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                 void *vd, void *vn, void *vm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
 typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);

 /* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
 static inline long mve_qreg_offset(unsigned reg)
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
     mve_update_eci(s);
     return true;
 }
+
+static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
+{
+    TCGv_ptr qd;
+    uint64_t imm;
+
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !mve_check_qreg_bank(s, a->qd) ||
+        !fn) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    imm = asimd_imm_const(a->imm, a->cmode, a->op);
+
+    qd = mve_qreg_ptr(a->qd);
+    fn(cpu_env, qd, tcg_constant_i64(imm));
+    tcg_temp_free_ptr(qd);
+    mve_update_eci(s);
+    return true;
+}
+
+static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
+{
+    /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
+    MVEGenOneOpImmFn *fn;
+
+    if ((a->cmode & 1) && a->cmode < 12) {
+        if (a->op) {
+            /*
+             * For op=1, the immediate will be inverted by asimd_imm_const(),
+             * so the VBIC becomes a logical AND operation.
+             */
+            fn = gen_helper_mve_vandi;
+        } else {
+            fn = gen_helper_mve_vorri;
+        }
+    } else {
+        /* There is one unallocated cmode/op combination in this space */
+        if (a->cmode == 15 && a->op == 1) {
+            return false;
+        }
+        /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */
+        fn = gen_helper_mve_vmovi;
+    }
+    return do_1imm(s, a, fn);
+}
--
2.20.1

Implement the MVE shift-vector-left-by-immediate insns VSHL, VQSHL
and VQSHLU.

The size-and-immediate encoding here is the same as Neon, and we
handle it the same way neon-dp.decode does.
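
In that scheme the position of the leading 1 in the immediate field
selects the element size, with the bits below it giving the shift
count. A rough stand-alone sketch (a hypothetical helper to illustrate
the @2_shl_b/h/w patterns below, not the decodetree mechanics):

    /* imm6 is insn bits [21:16]; values below 8 are not shift encodings */
    static void decode_shl_imm6(unsigned imm6, unsigned *esize, unsigned *shift)
    {
        if (imm6 & 0x20) {          /* 1xxxxx: 32-bit elements */
            *esize = 32; *shift = imm6 & 0x1f;
        } else if (imm6 & 0x10) {   /* 01xxxx: 16-bit elements */
            *esize = 16; *shift = imm6 & 0xf;
        } else {                    /* 001xxx: 8-bit elements */
            *esize = 8; *shift = imm6 & 0x7;
        }
    }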

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-8-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 16 +++++++++++
 target/arm/mve.decode      | 23 +++++++++++++++
 target/arm/mve_helper.c    | 57 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 51 ++++++++++++++++++++++++++++++++++
 4 files changed, 147 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
 DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
 DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 &2op qd qm qn size
 &2scalar qd qn rm size
 &1imm qd imm cmode op
+&2shift qd qm shift size

 @vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
 # Note that both Rn and Qd are 3 bits only (no D bit)
@@ -XXX,XX +XXX,XX @@
 @2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
 @2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn

+@2_shl_b .... .... .. 001 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
+@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
+@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2
+
 # Vector loads and stores

 # Widening loads and narrowing stores:
@@ -XXX,XX +XXX,XX @@ VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
 # So we have a single decode line and check the cmode/op in the
 # trans function.
 Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
+
+# Shifts by immediate
+
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
+
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
+
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
+
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
     WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
 #define DO_UQRSHL_OP(N, M, satp) \
     WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
+#define DO_SUQSHL_OP(N, M, satp) \
+    WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)

 DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
 DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvsw, 4, uint32_t)
 DO_VADDV(vaddvub, 1, uint8_t)
 DO_VADDV(vaddvuh, 2, uint16_t)
 DO_VADDV(vaddvuw, 4, uint32_t)
+
+/* Shifts by immediate */
+#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        TYPE *d = vd, *m = vm; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+            mergemask(&d[H##ESIZE(e)], \
+                      FN(m[H##ESIZE(e)], shift), mask); \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        TYPE *d = vd, *m = vm; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        bool qc = false; \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+            bool sat = false; \
+            mergemask(&d[H##ESIZE(e)], \
+                      FN(m[H##ESIZE(e)], shift, &sat), mask); \
+            qc |= sat & mask & 1; \
+        } \
+        if (qc) { \
+            env->vfp.qc[0] = qc; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+/* provide unsigned 2-op shift helpers for all sizes */
+#define DO_2SHIFT_U(OP, FN) \
+    DO_2SHIFT(OP##b, 1, uint8_t, FN) \
+    DO_2SHIFT(OP##h, 2, uint16_t, FN) \
+    DO_2SHIFT(OP##w, 4, uint32_t, FN)
+
+#define DO_2SHIFT_SAT_U(OP, FN) \
+    DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
+    DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \
+    DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
+#define DO_2SHIFT_SAT_S(OP, FN) \
+    DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \
+    DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \
+    DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)
+
+DO_2SHIFT_U(vshli_u, DO_VSHLU)
+DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
+DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
+DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
 typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
     }
     return do_1imm(s, a, fn);
 }
+
+static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
+                      bool negateshift)
+{
+    TCGv_ptr qd, qm;
+    int shift = a->shift;
+
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !mve_check_qreg_bank(s, a->qd | a->qm) ||
+        !fn) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    /*
+     * When we handle a right shift insn using a left-shift helper
+     * which permits a negative shift count to indicate a right-shift,
+     * we must negate the shift count.
+     */
+    if (negateshift) {
+        shift = -shift;
+    }
+
+    qd = mve_qreg_ptr(a->qd);
+    qm = mve_qreg_ptr(a->qm);
+    fn(cpu_env, qd, qm, tcg_constant_i32(shift));
+    tcg_temp_free_ptr(qd);
+    tcg_temp_free_ptr(qm);
+    mve_update_eci(s);
+    return true;
+}
+
+#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \
+    static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+    { \
+        static MVEGenTwoOpShiftFn * const fns[] = { \
+            gen_helper_mve_##FN##b, \
+            gen_helper_mve_##FN##h, \
+            gen_helper_mve_##FN##w, \
+            NULL, \
+        }; \
+        return do_2shift(s, a, fns[a->size], NEGATESHIFT); \
+    }
+
+DO_2SHIFT(VSHLI, vshli_u, false)
+DO_2SHIFT(VQSHLI_S, vqshli_s, false)
+DO_2SHIFT(VQSHLI_U, vqshli_u, false)
+DO_2SHIFT(VQSHLUI, vqshlui_s, false)
--
2.20.1

Implement the MVE vector shift right by immediate insns VSHRI and
VRSHRI. As with Neon, we implement these by using helper functions
which perform left shifts but allow negative shift counts to indicate
right shifts.
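
On a single scalar element, that convention looks like this sketch (a
hypothetical helper for illustration, not QEMU's do_sqrshl_bhs()):

    #include <stdint.h>

    /* A left-shift helper where a negative count means shift right */
    static int64_t do_shl_signed(int64_t n, int shift)
    {
        return shift >= 0 ? n << shift : n >> -shift;
    }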

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-9-peter.maydell@linaro.org
---
 target/arm/helper-mve.h     | 12 ++++++++++++
 target/arm/translate.h      | 20 ++++++++++++++++++++
 target/arm/mve.decode       | 28 ++++++++++++++++++++++++++++
 target/arm/mve_helper.c     |  7 +++++++
 target/arm/translate-mve.c  |  5 +++++
 target/arm/translate-neon.c | 18 ------------------
 6 files changed, 72 insertions(+), 18 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
 DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
 DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)

+DEF_HELPER_FLAGS_4(mve_vshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline int times_2_plus_1(DisasContext *s, int x)
     return x * 2 + 1;
 }

+static inline int rsub_64(DisasContext *s, int x)
+{
+    return 64 - x;
+}
+
+static inline int rsub_32(DisasContext *s, int x)
+{
+    return 32 - x;
+}
+
+static inline int rsub_16(DisasContext *s, int x)
+{
+    return 16 - x;
+}
+
+static inline int rsub_8(DisasContext *s, int x)
+{
+    return 8 - x;
+}
+
 static inline int arm_dc_feature(DisasContext *dc, int feature)
 {
     return (dc->features & (1ULL << feature)) != 0;
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 @2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
 @2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2

+# Right shifts are encoded as N - shift, where N is the element size in bits.
+%rshift_i5 16:5 !function=rsub_32
+%rshift_i4 16:4 !function=rsub_16
+%rshift_i3 16:3 !function=rsub_8
+
+@2_shr_b .... .... .. 001 ... .... .... .... .... &2shift qd=%qd qm=%qm \
+         size=0 shift=%rshift_i3
+@2_shr_h .... .... .. 01 .... .... .... .... .... &2shift qd=%qd qm=%qm \
+         size=1 shift=%rshift_i4
+@2_shr_w .... .... .. 1 ..... .... .... .... .... &2shift qd=%qd qm=%qm \
+         size=2 shift=%rshift_i5
+
 # Vector loads and stores

 # Widening loads and narrowing stores:
@@ -XXX,XX +XXX,XX @@ VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
 VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
 VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
 VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w
+
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w
+
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w
+
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
+
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvuw, 4, uint32_t)
     DO_2SHIFT(OP##b, 1, uint8_t, FN) \
     DO_2SHIFT(OP##h, 2, uint16_t, FN) \
     DO_2SHIFT(OP##w, 4, uint32_t, FN)
+#define DO_2SHIFT_S(OP, FN) \
+    DO_2SHIFT(OP##b, 1, int8_t, FN) \
+    DO_2SHIFT(OP##h, 2, int16_t, FN) \
+    DO_2SHIFT(OP##w, 4, int32_t, FN)

 #define DO_2SHIFT_SAT_U(OP, FN) \
     DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvuw, 4, uint32_t)
     DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)

 DO_2SHIFT_U(vshli_u, DO_VSHLU)
+DO_2SHIFT_S(vshli_s, DO_VSHLS)
 DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
 DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
 DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
+DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
+DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHLI, vshli_u, false)
 DO_2SHIFT(VQSHLI_S, vqshli_s, false)
 DO_2SHIFT(VQSHLI_U, vqshli_u, false)
 DO_2SHIFT(VQSHLUI, vqshlui_s, false)
+/* These right shifts use a left-shift helper with negated shift count */
+DO_2SHIFT(VSHRI_S, vshli_s, true)
+DO_2SHIFT(VSHRI_U, vshli_u, true)
+DO_2SHIFT(VRSHRI_S, vrshli_s, true)
+DO_2SHIFT(VRSHRI_U, vrshli_u, true)
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/translate-neon.c
@@ -XXX,XX +XXX,XX @@ static inline int plus1(DisasContext *s, int x)
     return x + 1;
 }

-static inline int rsub_64(DisasContext *s, int x)
-{
-    return 64 - x;
-}
-
-static inline int rsub_32(DisasContext *s, int x)
-{
-    return 32 - x;
-}
-static inline int rsub_16(DisasContext *s, int x)
-{
-    return 16 - x;
-}
-static inline int rsub_8(DisasContext *s, int x)
-{
-    return 8 - x;
-}
-
 static inline int neon_3same_fp_size(DisasContext *s, int x)
 {
     /* Convert 0==fp32, 1==fp16 into a MO_* value */
--
2.20.1

Implement the MVE VSHLL (vector shift left long) insn. This has two
encodings: the T1 encoding is the usual shift-by-immediate format,
and the T2 encoding is a special case where the shift count is always
equal to the element size.
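
For a single unsigned byte element, the T2 case reduces to this sketch
(a hand-written illustration, not the patch's helper):

    #include <stdint.h>

    /* VSHLL T2 on a uint8_t element: shifting left by the element
     * size (8) places the input in the top half of the widened result */
    static uint16_t vshll_t2_u8(uint8_t m)
    {
        return (uint16_t)m << 8;
    }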

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-10-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  9 +++++++
 target/arm/mve.decode      | 53 +++++++++++++++++++++++++++++++++++---
 target/arm/mve_helper.c    | 32 +++++++++++++++++++++++
 target/arm/translate-mve.c | 15 +++++++++++
 4 files changed, 105 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshllbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 @2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
 @2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2

+@2_shll_b .... .... ... 01 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
+@2_shll_h .... .... ... 1 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
+# VSHLL encoding T2 where shift == esize
+@2_shll_esize_b .... .... .... 00 .. .... .... .... .... &2shift \
+                qd=%qd qm=%qm size=0 shift=8
+@2_shll_esize_h .... .... .... 01 .. .... .... .... .... &2shift \
+                qd=%qd qm=%qm size=1 shift=16
+
 # Right shifts are encoded as N - shift, where N is the element size in bits.
 %rshift_i5 16:5 !function=rsub_32
 %rshift_i4 16:4 !function=rsub_16
@@ -XXX,XX +XXX,XX @@ VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
 VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
 VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op

-VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
-VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+# The VSHLL T2 encoding is not a @2op pattern, but is here because it
+# overlaps what would be size=0b11 VMULH/VRMULH
+{
+  VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
+  VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h

-VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
-VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+  VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+  VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
+  VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+  VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+  VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
+  VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+  VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+  VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
+  VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+  VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+}

 VMAX_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
 VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
@@ -XXX,XX +XXX,XX @@ VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
 VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
 VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
 VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
+
+# VSHLL T1 encoding; the T2 VSHLL encoding is elsewhere in this file
+VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
 DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
 DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
 DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
+
+/*
+ * Long shifts taking half-sized inputs from top or bottom of the input
+ * vector and producing a double-width result. ESIZE, TYPE are for
+ * the input, and LESIZE, LTYPE for the output.
+ * Unlike the normal shift helpers, we do not handle negative shift counts,
+ * because the long shift is strictly left-only.
+ */
+#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        LTYPE *d = vd; \
+        TYPE *m = vm; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned le; \
+        assert(shift <= 16); \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \
+            mergemask(&d[H##LESIZE(le)], r, mask); \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_VSHLL_ALL(OP, TOP) \
+    DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \
+    DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \
+    DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \
+    DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \
+
+DO_VSHLL_ALL(vshllb, false)
+DO_VSHLL_ALL(vshllt, true)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHRI_S, vshli_s, true)
 DO_2SHIFT(VSHRI_U, vshli_u, true)
 DO_2SHIFT(VRSHRI_S, vrshli_s, true)
 DO_2SHIFT(VRSHRI_U, vrshli_u, true)
+
+#define DO_VSHLL(INSN, FN) \
+    static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+    { \
+        static MVEGenTwoOpShiftFn * const fns[] = { \
+            gen_helper_mve_##FN##b, \
+            gen_helper_mve_##FN##h, \
+        }; \
+        return do_2shift(s, a, fns[a->size], false); \
+    }
+
+DO_VSHLL(VSHLL_BS, vshllbs)
+DO_VSHLL(VSHLL_BU, vshllbu)
+DO_VSHLL(VSHLL_TS, vshllts)
+DO_VSHLL(VSHLL_TU, vshlltu)
--
2.20.1

Implement the MVE VSRI and VSLI insns, which perform a
shift-and-insert operation.
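
On a single 32-bit element, VSLI's semantics reduce to the following
sketch (illustrative only; the patch's helper works on 64 bits at a
time, with predication):

    #include <stdint.h>

    /* Insert (m << shift) into d, preserving d's low 'shift' bits;
     * shift is 0..31 here */
    static uint32_t vsli_u32_sketch(uint32_t d, uint32_t m, unsigned shift)
    {
        uint32_t writemask = ~0u << shift;
        return (d & ~writemask) | ((m << shift) & writemask);
    }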

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-11-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++++++
 target/arm/mve.decode      |  9 ++++++++
 target/arm/mve_helper.c    | 42 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  3 +++
 4 files changed, 62 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vsrib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsrih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h

 VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
 VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
+
+# Shift-and-insert
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_b
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_h
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
+
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
 DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
 DO_2SHIFT_S(vrshli_s, DO_VRSHLS)

+/* Shift-and-insert; we always work with 64 bits at a time */
+#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        uint64_t *d = vd, *m = vm; \
+        uint16_t mask; \
+        uint64_t shiftmask; \
+        unsigned e; \
+        if (shift == 0 || shift == ESIZE * 8) { \
+            /* \
+             * Only VSLI can shift by 0; only VSRI can shift by <dt>. \
+             * The generic logic would give the right answer for 0 but \
+             * fails for <dt>. \
+             */ \
+            goto done; \
+        } \
+        assert(shift < ESIZE * 8); \
+        mask = mve_element_mask(env); \
+        /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \
+        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \
+        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
+            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \
+                         (d[H8(e)] & ~shiftmask); \
+            mergemask(&d[H8(e)], r, mask); \
+        } \
+done: \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
+#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
+#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
+#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))
+
+DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
+
 /*
  * Long shifts taking half-sized inputs from top or bottom of the input
  * vector and producing a double-width result. ESIZE, TYPE are for
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHRI_U, vshli_u, true)
 DO_2SHIFT(VRSHRI_S, vrshli_s, true)
 DO_2SHIFT(VRSHRI_U, vrshli_u, true)

+DO_2SHIFT(VSRI, vsri, false)
+DO_2SHIFT(VSLI, vsli, false)
+
 #define DO_VSHLL(INSN, FN) \
     static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
     { \
--
2.20.1
116
117
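For intuition, the per-lane operation that DO_2SHIFT_INSERT expands to can
be modelled by a scalar C sketch of one 8-bit VSRI lane (illustrative only;
vsri_byte is not a name from the patch):

    /* Insert the shifted-in bits of src into dst, keeping dst's top bits */
    static uint8_t vsri_byte(uint8_t dst, uint8_t src, unsigned shift)
    {
        uint8_t mask = 0xff >> shift;               /* SHR_MASK(8, shift) */
        return ((src >> shift) & mask) | (dst & ~mask);
    }

VSLI is the mirror image: it shifts left and uses SHL_MASK, so the inserted
bits occupy the top of the lane and the low bits of dst are kept.
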
Implement the MVE shift-right-and-narrow insn VSHRN and VRSHRN.

do_urshr() is borrowed from sve_helper.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-12-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 10 ++++++++++
 target/arm/mve.decode | 11 +++++++++++
 target/arm/mve_helper.c | 40 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 15 ++++++++++++++
 4 files changed, 76 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
 VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
 VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
 VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
+
+# Narrowing shifts (which only support b and h sizes)
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
+
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)

 DO_VSHLL_ALL(vshllb, false)
 DO_VSHLL_ALL(vshllt, true)
+
+/*
+ * Narrowing right shifts, taking a double sized input, shifting it
+ * and putting the result in either the top or bottom half of the output.
+ * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
+ */
+#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        LTYPE *m = vm; \
+        TYPE *d = vd; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned le; \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            TYPE r = FN(m[H##LESIZE(le)], shift); \
+            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_VSHRN_ALL(OP, FN) \
+    DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \
+    DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \
+    DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \
+    DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)
+
+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
+{
+    if (likely(sh < 64)) {
+        return (x >> sh) + ((x >> (sh - 1)) & 1);
+    } else if (sh == 64) {
+        return x >> 63;
+    } else {
+        return 0;
+    }
+}
+
+DO_VSHRN_ALL(vshrn, DO_SHR)
+DO_VSHRN_ALL(vrshrn, do_urshr)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_VSHLL(VSHLL_BS, vshllbs)
 DO_VSHLL(VSHLL_BU, vshllbu)
 DO_VSHLL(VSHLL_TS, vshllts)
 DO_VSHLL(VSHLL_TU, vshlltu)
+
+#define DO_2SHIFT_N(INSN, FN) \
+    static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+    { \
+        static MVEGenTwoOpShiftFn * const fns[] = { \
+            gen_helper_mve_##FN##b, \
+            gen_helper_mve_##FN##h, \
+        }; \
+        return do_2shift(s, a, fns[a->size], false); \
+    }
+
+DO_2SHIFT_N(VSHRNB, vshrnb)
+DO_2SHIFT_N(VSHRNT, vshrnt)
+DO_2SHIFT_N(VRSHRNB, vrshrnb)
+DO_2SHIFT_N(VRSHRNT, vrshrnt)
--
2.20.1

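The rounding in do_urshr() works by adding back the last bit shifted out,
i.e. round-half-up. Two concrete values as a sanity check (illustrative,
not part of the patch):

    do_urshr(7, 1);   /* 3.5 rounds to 4: (7 >> 1) + ((7 >> 0) & 1) == 4 */
    do_urshr(6, 1);   /* exactly 3:      (6 >> 1) + ((6 >> 0) & 1) == 3 */
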
Implement the MVE saturating shift-right-and-narrow insns
VQSHRN, VQSHRUN, VQRSHRN and VQRSHRUN.

do_srshr() is borrowed from sve_helper.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-13-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 30 +++++++++++
 target/arm/mve.decode | 28 ++++++++++
 target/arm/mve_helper.c | 104 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 12 +++++
 4 files changed, 174 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
 VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
 VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
 VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
+
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
+
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
+
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
+
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static inline uint64_t do_urshr(uint64_t x, unsigned sh)
     }
 }

+static inline int64_t do_srshr(int64_t x, unsigned sh)
+{
+    if (likely(sh < 64)) {
+        return (x >> sh) + ((x >> (sh - 1)) & 1);
+    } else {
+        /* Rounding the sign bit always produces 0. */
+        return 0;
+    }
+}
+
 DO_VSHRN_ALL(vshrn, DO_SHR)
 DO_VSHRN_ALL(vrshrn, do_urshr)
+
+static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
+                                 bool *satp)
+{
+    if (val > max) {
+        *satp = true;
+        return max;
+    } else if (val < min) {
+        *satp = true;
+        return min;
+    } else {
+        return val;
+    }
+}
+
+/* Saturating narrowing right shifts */
+#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        LTYPE *m = vm; \
+        TYPE *d = vd; \
+        uint16_t mask = mve_element_mask(env); \
+        bool qc = false; \
+        unsigned le; \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            bool sat = false; \
+            TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
+            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
+            qc |= sat && (mask & 1 << (TOP * ESIZE)); \
+        } \
+        if (qc) { \
+            env->vfp.qc[0] = qc; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)
+
+#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)
+
+#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)
+
+#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
+
+#define DO_SHRN_SB(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
+#define DO_SHRN_UB(N, M, SATP) \
+    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
+#define DO_SHRUN_B(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)
+
+#define DO_SHRN_SH(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
+#define DO_SHRN_UH(N, M, SATP) \
+    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
+#define DO_SHRUN_H(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)
+
+#define DO_RSHRN_SB(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
+#define DO_RSHRN_UB(N, M, SATP) \
+    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
+#define DO_RSHRUN_B(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)
+
+#define DO_RSHRN_SH(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
+#define DO_RSHRN_UH(N, M, SATP) \
+    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
+#define DO_RSHRUN_H(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)
+
+DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
+DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
+DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
+DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
+DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
+DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)
+
+DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
+DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
+DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
+DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
+DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
+DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VSHRNB, vshrnb)
 DO_2SHIFT_N(VSHRNT, vshrnt)
 DO_2SHIFT_N(VRSHRNB, vrshrnb)
 DO_2SHIFT_N(VRSHRNT, vrshrnt)
+DO_2SHIFT_N(VQSHRNB_S, vqshrnb_s)
+DO_2SHIFT_N(VQSHRNT_S, vqshrnt_s)
+DO_2SHIFT_N(VQSHRNB_U, vqshrnb_u)
+DO_2SHIFT_N(VQSHRNT_U, vqshrnt_u)
+DO_2SHIFT_N(VQSHRUNB, vqshrunb)
+DO_2SHIFT_N(VQSHRUNT, vqshrunt)
+DO_2SHIFT_N(VQRSHRNB_S, vqrshrnb_s)
+DO_2SHIFT_N(VQRSHRNT_S, vqrshrnt_s)
+DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
+DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
+DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
+DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
--
2.20.1

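What one lane of a saturating narrow does, in terms of the helpers added
above (an illustrative fragment, assuming the patch's definitions are in
scope):

    bool sat = false;
    /* VQSHRNB_S narrowing a 16-bit lane holding 300, with shift 1: */
    int8_t r = do_sat_bhs((int64_t)300 >> 1, INT8_MIN, INT8_MAX, &sat);
    /* 150 does not fit in int8_t, so r == 127 and sat is set; the
     * DO_VSHRN_SAT macro then latches that into the QC flag. */
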
Implement the MVE VSHLC insn, which performs a shift left of the
entire vector with carry in bits provided from a general purpose
register and carry out bits written back to that register.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-14-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 2 ++
 target/arm/mve.decode | 2 ++
 target/arm/mve_helper.c | 38 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 30 ++++++++++++++++++++++++++++++
 4 files changed, 72 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
 VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
 VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
 VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
+
+VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
 DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
 DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
 DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
+
+uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
+                           uint32_t shift)
+{
+    uint32_t *d = vd;
+    uint16_t mask = mve_element_mask(env);
+    unsigned e;
+    uint32_t r;
+
+    /*
+     * For each 32-bit element, we shift it left, bringing in the
+     * low 'shift' bits of rdm at the bottom. Bits shifted out at
+     * the top become the new rdm, if the predicate mask permits.
+     * The final rdm value is returned to update the register.
+     * shift == 0 here means "shift by 32 bits".
+     */
+    if (shift == 0) {
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+            r = rdm;
+            if (mask & 1) {
+                rdm = d[H4(e)];
+            }
+            mergemask(&d[H4(e)], r, mask);
+        }
+    } else {
+        uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);
+
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+            r = (d[H4(e)] << shift) | (rdm & shiftmask);
+            if (mask & 1) {
+                rdm = d[H4(e)] >> (32 - shift);
+            }
+            mergemask(&d[H4(e)], r, mask);
+        }
+    }
+    mve_advance_vpt(env);
+    return rdm;
+}
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
 DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
 DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
 DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
+
+static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
+{
+    /*
+     * Whole Vector Left Shift with Carry. The carry is taken
+     * from a general purpose register and written back there.
+     * An imm of 0 means "shift by 32".
+     */
+    TCGv_ptr qd;
+    TCGv_i32 rdm;
+
+    if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
+        return false;
+    }
+    if (a->rdm == 13 || a->rdm == 15) {
+        /* CONSTRAINED UNPREDICTABLE: we UNDEF */
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    qd = mve_qreg_ptr(a->qd);
+    rdm = load_reg(s, a->rdm);
+    gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm));
+    store_reg(s, a->rdm, rdm);
+    tcg_temp_free_ptr(qd);
+    mve_update_eci(s);
+    return true;
+}
--
2.20.1

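One beat of VSHLC on a single 32-bit element, with predication ignored, can
be sketched like this (illustrative; only valid for shift in 1..31, since
shift == 0 encodes "shift by 32" and takes the helper's special-case path):

    static uint32_t vshlc_beat(uint32_t d, uint32_t *rdm, unsigned shift)
    {
        uint32_t r = (d << shift) | (*rdm & ((1u << shift) - 1));
        *rdm = d >> (32 - shift);  /* bits shifted out become the new carry */
        return r;
    }
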
Implement the MVE VADDLV insn; this is similar to VADDV, except
that it accumulates 32-bit elements into a 64-bit accumulator
stored in a pair of general-purpose registers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-15-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 3 ++
 target/arm/mve.decode | 6 +++-
 target/arm/mve_helper.c | 19 ++++++++++++
 target/arm/translate-mve.c | 63 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)

+DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64)
+
 DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
 DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
 DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
 VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar

 # Vector add across vector
-VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
+{
+  VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
+  VADDLV 111 u:1 1110 1 ... 1001 ... 0 1111 00 a:1 0 qm:3 0 \
+         rdahi=%rdahi rdalo=%rdalo
+}

 # Predicate operations
 %mask_22_13 22:1 13:3
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvub, 1, uint8_t)
 DO_VADDV(vaddvuh, 2, uint16_t)
 DO_VADDV(vaddvuw, 4, uint32_t)

+#define DO_VADDLV(OP, TYPE, LTYPE) \
+    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
+                                    uint64_t ra) \
+    { \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        TYPE *m = vm; \
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
+            if (mask & 1) { \
+                ra += (LTYPE)m[H4(e)]; \
+            } \
+        } \
+        mve_advance_vpt(env); \
+        return ra; \
+    } \
+
+DO_VADDLV(vaddlv_s, int32_t, int64_t)
+DO_VADDLV(vaddlv_u, uint32_t, uint64_t)
+
 /* Shifts by immediate */
 #define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
     return true;
 }

+static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a)
+{
+    /*
+     * Vector Add Long Across Vector: accumulate the 32-bit
+     * elements of the vector into a 64-bit result stored in
+     * a pair of general-purpose registers.
+     * No need to check Qm's bank: it is only 3 bits in decode.
+     */
+    TCGv_ptr qm;
+    TCGv_i64 rda;
+    TCGv_i32 rdalo, rdahi;
+
+    if (!dc_isar_feature(aa32_mve, s)) {
+        return false;
+    }
+    /*
+     * rdahi == 13 is UNPREDICTABLE; rdahi == 15 is a related
+     * encoding; rdalo always has bit 0 clear so cannot be 13 or 15.
+     */
+    if (a->rdahi == 13 || a->rdahi == 15) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    /*
+     * This insn is subject to beat-wise execution. Partial execution
+     * of an A=0 (no-accumulate) insn which does not execute the first
+     * beat must start with the current value of RdaHi:RdaLo, not zero.
+     */
+    if (a->a || mve_skip_first_beat(s)) {
+        /* Accumulate input from RdaHi:RdaLo */
+        rda = tcg_temp_new_i64();
+        rdalo = load_reg(s, a->rdalo);
+        rdahi = load_reg(s, a->rdahi);
+        tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+        tcg_temp_free_i32(rdalo);
+        tcg_temp_free_i32(rdahi);
+    } else {
+        /* Accumulate starting at zero */
+        rda = tcg_const_i64(0);
+    }
+
+    qm = mve_qreg_ptr(a->qm);
+    if (a->u) {
+        gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda);
+    } else {
+        gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda);
+    }
+    tcg_temp_free_ptr(qm);
+
+    rdalo = tcg_temp_new_i32();
+    rdahi = tcg_temp_new_i32();
+    tcg_gen_extrl_i64_i32(rdalo, rda);
+    tcg_gen_extrh_i64_i32(rdahi, rda);
+    store_reg(s, a->rdalo, rdalo);
+    store_reg(s, a->rdahi, rdahi);
+    tcg_temp_free_i64(rda);
+    mve_update_eci(s);
+    return true;
+}
+
 static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
 {
     TCGv_ptr qd;
--
2.20.1

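With predication and ECI ignored, VADDLV reduces to a four-lane widening
sum (an illustrative sketch, not the patch's code):

    static uint64_t vaddlv_u(const uint32_t q[4], uint64_t ra)
    {
        for (unsigned e = 0; e < 4; e++) {
            ra += q[e];    /* the signed form adds (int64_t)(int32_t)q[e] */
        }
        return ra;         /* written back as the RdaHi:RdaLo pair */
    }
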
The MVE extension to v8.1M includes some new shift instructions which
sit entirely within the non-coprocessor part of the encoding space
and which operate only on general-purpose registers. They take up
the space which was previously UNPREDICTABLE MOVS and ORRS encodings
with Rm == 13 or 15.

Implement the long shifts by immediate, which perform shifts on a
pair of general-purpose registers treated as a 64-bit quantity, with
an immediate shift count between 1 and 32.

Awkwardly, because the MOVS and ORRS trans functions do not UNDEF for
the Rm==13,15 case, we need to explicitly emit code to UNDEF for the
cases where v8.1M now requires that. (Trying to change MOVS and ORRS
is too difficult, because the functions that generate the code are
shared between a dozen different kinds of arithmetic or logical
instruction for all A32, T16 and T32 encodings, and for some insns
and some encodings Rm==13,15 are valid.)

We make the helper functions we need for UQSHLL and SQSHLL take
a 32-bit value which the helper casts to int8_t because we'll need
these helpers also for the shift-by-register insns, where the shift
count might be < 0 or > 32.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-16-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 3 ++
 target/arm/translate.h | 1 +
 target/arm/t32.decode | 28 +++++++++++++
 target/arm/mve_helper.c | 10 +++++
 target/arm/translate.c | 90 +++++++++++++++++++++++++++++++++++++++++
 5 files changed, 132 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)

 DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
 typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
+typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);

 /**
  * arm_tbflags_from_tb:
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -XXX,XX +XXX,XX @@
 &mcr !extern cp opc1 crn crm opc2 rt
 &mcrr !extern cp opc1 crm rt rt2

+&mve_shl_ri rdalo rdahi shim
+
+# rdahi: bits [3:1] from insn, bit 0 is 1
+# rdalo: bits [3:1] from insn, bit 0 is 0
+%rdahi_9 9:3 !function=times_2_plus_1
+%rdalo_17 17:3 !function=times_2
+
 # Data-processing (register)

 %imm5_12_6 12:3 6:2
@@ -XXX,XX +XXX,XX @@
 @S_xrr_shi ....... .... . rn:4 .... .... .. shty:2 rm:4 \
            &s_rrr_shi shim=%imm5_12_6 s=1 rd=0

+@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
+            &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
+
 {
   TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
   AND_rrri 1110101 0000 . .... 0 ... .... .... .... @s_rrr_shi
 }
 BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
 {
+  # The v8.1M MVE shift insns overlap in encoding with MOVS/ORRS
+  # and are distinguished by having Rm==13 or 15. Those are UNPREDICTABLE
+  # cases for MOVS/ORRS. We decode the MVE cases first, ensuring that
+  # they explicitly call unallocated_encoding() for cases that must UNDEF
+  # (eg "using a new shift insn on a v8.1M CPU without MVE"), and letting
+  # the rest fall through (where ORR_rrri and MOV_rxri will end up
+  # handling them as r13 and r15 accesses with the same semantics as A32).
+  [
+    LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
+    LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
+    ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
+
+    UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
+    URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
+    SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
+    SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+  ]
+
   MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
   ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
 }
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
     mve_advance_vpt(env);
     return rdm;
 }
+
+uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
+}
+
+uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
     return true;
 }

+/*
+ * v8.1M MVE wide-shifts
+ */
+static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
+                          WideShiftImmFn *fn)
+{
+    TCGv_i64 rda;
+    TCGv_i32 rdalo, rdahi;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (a->rdahi == 15) {
+        /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rdahi == 13) {
+        /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    if (a->shim == 0) {
+        a->shim = 32;
+    }
+
+    rda = tcg_temp_new_i64();
+    rdalo = load_reg(s, a->rdalo);
+    rdahi = load_reg(s, a->rdahi);
+    tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+
+    fn(rda, rda, a->shim);
+
+    tcg_gen_extrl_i64_i32(rdalo, rda);
+    tcg_gen_extrh_i64_i32(rdahi, rda);
+    store_reg(s, a->rdalo, rdalo);
+    store_reg(s, a->rdahi, rdahi);
+    tcg_temp_free_i64(rda);
+
+    return true;
+}
+
+static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
+}
+
+static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
+}
+
+static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
+}
+
+static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
+{
+    gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_mve_sqshll);
+}
+
+static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
+{
+    gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_mve_uqshll);
+}
+
+static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_srshr64_i64);
+}
+
+static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_urshr64_i64);
+}
+
 /*
  * Multiply and multiply accumulate
  */
--
2.20.1

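The RdaHi:RdaLo plumbing that do_mve_shl_ri() emits as TCG ops corresponds
to this plain C shape (illustrative; wide_shift and fn are hypothetical
names, not from the patch):

    static uint64_t wide_shift(uint32_t *rdahi, uint32_t *rdalo,
                               uint64_t (*fn)(uint64_t, int), int shim)
    {
        uint64_t rda = ((uint64_t)*rdahi << 32) | *rdalo; /* concat */
        rda = fn(rda, shim);               /* e.g. rda << shim for LSLL */
        *rdalo = (uint32_t)rda;            /* tcg_gen_extrl_i64_i32 */
        *rdahi = (uint32_t)(rda >> 32);    /* tcg_gen_extrh_i64_i32 */
        return rda;
    }
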
Implement the MVE long shifts by register, which perform shifts on a
pair of general-purpose registers treated as a 64-bit quantity, with
the shift count in another general-purpose register, which might be
either positive or negative.

Like the long-shifts-by-immediate, these encodings sit in the space
that was previously the UNPREDICTABLE MOVS/ORRS with Rm==13,15.
Because LSLL_rr and ASRL_rr overlap with both MOV_rxri/ORR_rrri and
also with CSEL (as one of the previously-UNPREDICTABLE Rm==13 cases),
we have to move the CSEL pattern into the same decodetree group.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-17-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 6 +++
 target/arm/translate.h | 1 +
 target/arm/t32.decode | 16 +++++--
 target/arm/mve_helper.c | 93 +++++++++++++++++++++++++++++++++++++++++
 target/arm/translate.c | 69 ++++++++++++++++++++++++++++++
 5 files changed, 182 insertions(+), 3 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)

 DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)

+DEF_HELPER_FLAGS_3(mve_sshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_ushll, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
 typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
+typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);

 /**
  * arm_tbflags_from_tb:
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -XXX,XX +XXX,XX @@
 &mcrr !extern cp opc1 crm rt rt2

 &mve_shl_ri rdalo rdahi shim
+&mve_shl_rr rdalo rdahi rm

 # rdahi: bits [3:1] from insn, bit 0 is 1
 # rdalo: bits [3:1] from insn, bit 0 is 0
@@ -XXX,XX +XXX,XX @@

 @mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
             &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
+@mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
+            &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9

 {
   TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
     URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
     SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
     SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+
+    LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
+    ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
+    UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
+    SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
+    UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
+    SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
   ]

   MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
   ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
+
+  # v8.1M CSEL and friends
+  CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
 }
 {
   MVN_rxri 1110101 0011 . 1111 0 ... .... .... .... @s_rxr_shi
@@ -XXX,XX +XXX,XX @@ SBC_rrri 1110101 1011 . .... 0 ... .... .... .... @s_rrr_shi
 }
 RSB_rrri 1110101 1110 . .... 0 ... .... .... .... @s_rrr_shi

-# v8.1M CSEL and friends
-CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
-
 # Data-processing (register-shifted register)

 MOV_rxrr 1111 1010 0 shty:2 s:1 rm:4 1111 rd:4 0000 rs:4 \
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
     return rdm;
 }

+uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
+}
+
+uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_uqrshl_d(n, (int8_t)shift, false, NULL);
+}
+
 uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
 {
     return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
 {
     return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
 }
+
+uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
+}
+
+uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
+}
+
+/* Operate on 64-bit values, but saturate at 48 bits */
+static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
+                                    bool round, uint32_t *sat)
+{
+    if (shift <= -48) {
+        /* Rounding the sign bit always produces 0. */
+        if (round) {
+            return 0;
+        }
+        return src >> 63;
+    } else if (shift < 0) {
+        if (round) {
+            src >>= -shift - 1;
+            return (src >> 1) + (src & 1);
+        }
+        return src >> -shift;
+    } else if (shift < 48) {
+        int64_t val = src << shift;
+        int64_t extval = sextract64(val, 0, 48);
+        if (!sat || val == extval) {
+            return extval;
+        }
+    } else if (!sat || src == 0) {
+        return 0;
+    }
+
+    *sat = 1;
+    return (1ULL << 47) - (src >= 0);
+}
+
+/* Operate on 64-bit values, but saturate at 48 bits */
+static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
+                                     bool round, uint32_t *sat)
+{
+    uint64_t val, extval;
+
+    if (shift <= -(48 + round)) {
+        return 0;
+    } else if (shift < 0) {
+        if (round) {
+            val = src >> (-shift - 1);
+            val = (val >> 1) + (val & 1);
+        } else {
+            val = src >> -shift;
+        }
+        extval = extract64(val, 0, 48);
+        if (!sat || val == extval) {
+            return extval;
+        }
+    } else if (shift < 48) {
+        uint64_t val = src << shift;
+        uint64_t extval = extract64(val, 0, 48);
+        if (!sat || val == extval) {
+            return extval;
+        }
+    } else if (!sat || src == 0) {
+        return 0;
+    }
+
+    *sat = 1;
+    return MAKE_64BIT_MASK(0, 48);
+}
+
+uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
+}
+
+uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
     return do_mve_shl_ri(s, a, gen_urshr64_i64);
 }

+static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
+{
+    TCGv_i64 rda;
+    TCGv_i32 rdalo, rdahi;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (a->rdahi == 15) {
+        /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
+        a->rm == a->rdahi || a->rm == a->rdalo) {
+        /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    rda = tcg_temp_new_i64();
+    rdalo = load_reg(s, a->rdalo);
+    rdahi = load_reg(s, a->rdahi);
+    tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+
+    /* The helper takes care of the sign-extension of the low 8 bits of Rm */
+    fn(rda, cpu_env, rda, cpu_R[a->rm]);
+
+    tcg_gen_extrl_i64_i32(rdalo, rda);
+    tcg_gen_extrh_i64_i32(rdahi, rda);
+    store_reg(s, a->rdalo, rdalo);
+    store_reg(s, a->rdahi, rdahi);
+    tcg_temp_free_i64(rda);
+
+    return true;
+}
+
+static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
+}
+
+static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
+}
+
+static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
+}
+
+static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
+}
+
+static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
+}
+
+static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
+}
+
 /*
  * Multiply and multiply accumulate
  */
--
2.20.1

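The 48-bit variants clip at the 48-bit boundary rather than at 64 bits;
for example, using the patch's own helper (illustrative):

    uint32_t sat = 0;
    /* (1 << 47) shifted left by 1 is 1 << 48, which no longer fits ... */
    uint64_t r = do_uqrshl48_d(1ULL << 47, 1, false, &sat);
    /* ... so r == MAKE_64BIT_MASK(0, 48) and sat == 1 */
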
1
QEMU supports a 48-bit physical address range, but we don't currently
1
Implement the MVE shifts by immediate, which perform shifts
2
expose it in the '-cpu max' ID registers (you get the same range as
2
on a single general-purpose register.
3
Cortex-A57, which is 44 bits).
3
4
4
These patterns overlap with the long-shift-by-immediates,
5
Set the ID_AA64MMFR0.PARange field to indicate 48 bits.
5
so we have to rearrange the grouping a little here.
6
6
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20201001160116.18095-1-peter.maydell@linaro.org
9
Message-id: 20210628135835.6690-18-peter.maydell@linaro.org
10
---
10
---
11
target/arm/cpu64.c | 4 ++++
11
target/arm/helper-mve.h | 3 ++
12
1 file changed, 4 insertions(+)
12
target/arm/translate.h | 1 +
13
13
target/arm/t32.decode | 31 ++++++++++++++-----
14
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
14
target/arm/mve_helper.c | 10 ++++++
15
index XXXXXXX..XXXXXXX 100644
15
target/arm/translate.c | 68 +++++++++++++++++++++++++++++++++++++++--
16
--- a/target/arm/cpu64.c
16
5 files changed, 104 insertions(+), 9 deletions(-)
17
+++ b/target/arm/cpu64.c
17
18
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
18
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
19
t = FIELD_DP64(t, ID_AA64PFR1, MTE, 2);
19
index XXXXXXX..XXXXXXX 100644
20
cpu->isar.id_aa64pfr1 = t;
20
--- a/target/arm/helper-mve.h
21
21
+++ b/target/arm/helper-mve.h
22
+ t = cpu->isar.id_aa64mmfr0;
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
23
+ t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 5); /* PARange: 48 bits */
23
DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
24
+ cpu->isar.id_aa64mmfr0 = t;
24
DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
25
+
25
DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
26
t = cpu->isar.id_aa64mmfr1;
26
+
27
t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
27
+DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
28
t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
28
+DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
29
diff --git a/target/arm/translate.h b/target/arm/translate.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/translate.h
32
+++ b/target/arm/translate.h
33
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
34
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
35
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
36
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
37
+typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
38
39
/**
40
* arm_tbflags_from_tb:
41
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/t32.decode
44
+++ b/target/arm/t32.decode
45
@@ -XXX,XX +XXX,XX @@
46
47
&mve_shl_ri rdalo rdahi shim
48
&mve_shl_rr rdalo rdahi rm
49
+&mve_sh_ri rda shim
50
51
# rdahi: bits [3:1] from insn, bit 0 is 1
52
# rdalo: bits [3:1] from insn, bit 0 is 0
53
@@ -XXX,XX +XXX,XX @@
54
&mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
55
@mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
56
&mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
57
+@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
58
+ &mve_sh_ri shim=%imm5_12_6
59
60
{
61
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
62
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
63
# the rest fall through (where ORR_rrri and MOV_rxri will end up
64
# handling them as r13 and r15 accesses with the same semantics as A32).
65
[
66
- LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
67
- LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
68
- ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
69
+ {
70
+ UQSHL_ri 1110101 0010 1 .... 0 ... 1111 .. 00 1111 @mve_sh_ri
71
+ LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
72
+ UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
73
+ }
74
75
- UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
76
- URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
77
- SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
- SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+ {
+ URSHR_ri 1110101 0010 1 .... 0 ... 1111 .. 01 1111 @mve_sh_ri
+ LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
+ URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
+ }
+
+ {
+ SRSHR_ri 1110101 0010 1 .... 0 ... 1111 .. 10 1111 @mve_sh_ri
+ ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
+ SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
+ }
+
+ {
+ SQSHL_ri 1110101 0010 1 .... 0 ... 1111 .. 11 1111 @mve_sh_ri
+ SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+ }

LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
{
    return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
}
+
+uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
+
+uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)

static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t;

+    /* Handle shift by the input size for the benefit of trans_SRSHR_ri */
+    if (sh == 32) {
+        tcg_gen_movi_i32(d, 0);
+        return;
+    }
+    t = tcg_temp_new_i32();
    tcg_gen_extract_i32(t, a, sh - 1, 1);
    tcg_gen_sari_i32(d, a, sh);
    tcg_gen_add_i32(d, d, t);
@@ -XXX,XX +XXX,XX @@ static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)

static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t;

+    /* Handle shift by the input size for the benefit of trans_URSHR_ri */
+    if (sh == 32) {
+        tcg_gen_extract_i32(d, a, sh - 1, 1);
+        return;
+    }
+    t = tcg_temp_new_i32();
    tcg_gen_extract_i32(t, a, sh - 1, 1);
    tcg_gen_shri_i32(d, a, sh);
    tcg_gen_add_i32(d, d, t);
@@ -XXX,XX +XXX,XX @@ static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
    return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
}

+static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
+{
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rda == 13 || a->rda == 15) {
+        /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    if (a->shim == 0) {
+        a->shim = 32;
+    }
+    fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
+
+    return true;
+}
+
+static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_urshr32_i32);
+}
+
+static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_srshr32_i32);
+}
+
+static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+    gen_helper_mve_sqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_mve_sqshl);
+}
+
+static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+    gen_helper_mve_uqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_mve_uqshl);
+}
+
/*
 * Multiply and multiply accumulate
 */
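
A note on the sh == 32 special cases above: the immediate fields encode
a shift count of 32 as shim == 0 (hence the a->shim == 0 fixup in
do_mve_sh_ri()), and the existing rounding-shift generators had
previously only been called with 1 <= sh <= 31. In the decode patterns,
the new @mve_sh_ri scalar forms occupy the encodings where the
long-shift rdahi field would be 0b1111, which is why each sits in a
decodetree overlap group with the corresponding @mve_shl_ri long
shifts. As a rough illustration, with invented names rather than QEMU
code, the arithmetic of the two fast paths is:

    #include <stdint.h>

    /* Rounding right shift by sh, valid for 1 <= sh <= 32; the 64-bit
     * arithmetic sidesteps C's undefined 32-bit shift by 32. */
    static uint32_t urshr32_ref(uint32_t x, int sh)
    {
        /* Add the rounding bit (bit sh - 1), then shift. For sh == 32
         * this reduces to bit 31 of x, which is exactly what the
         * tcg_gen_extract_i32() fast path produces. */
        return (uint32_t)(((uint64_t)x + (1ULL << (sh - 1))) >> sh);
    }

    static int32_t srshr32_ref(int32_t x, int sh)
    {
        /* Same rounding bit, but an arithmetic shift. For sh == 32 the
         * rounding bit cancels the sign extension, so the result is
         * always 0, matching the tcg_gen_movi_i32(d, 0) fast path. */
        return (int32_t)(((int64_t)x + (1LL << (sh - 1))) >> sh);
    }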
--
2.20.1

From: Andrew Jones <drjones@redhat.com>

Move the KVM PMU setup part of fdt_add_pmu_nodes() to
virt_cpu_post_init(), which is a more appropriate location. Now
fdt_add_pmu_nodes() is also named more appropriately, because it
no longer does anything but fdt node creation.

No functional change intended.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Andrew Jones <drjones@redhat.com>
Message-id: 20201001061718.101915-5-drjones@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/arm/virt.c | 34 ++++++++++++++++++----------------
1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -XXX,XX +XXX,XX @@ static void fdt_add_gic_node(VirtMachineState *vms)

static void fdt_add_pmu_nodes(const VirtMachineState *vms)
{
-    CPUState *cpu;
-    ARMCPU *armcpu;
+    ARMCPU *armcpu = ARM_CPU(first_cpu);
    uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI;

-    CPU_FOREACH(cpu) {
-        armcpu = ARM_CPU(cpu);
-        if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
-            return;
-        }
-        if (kvm_enabled()) {
-            if (kvm_irqchip_in_kernel()) {
-                kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ));
-            }
-            kvm_arm_pmu_init(cpu);
-        }
+    if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
+        assert(!object_property_get_bool(OBJECT(armcpu), "pmu", NULL));
+        return;
    }

    if (vms->gic_version == VIRT_GIC_VERSION_2) {
@@ -XXX,XX +XXX,XX @@ static void fdt_add_pmu_nodes(const VirtMachineState *vms)
                             (1 << vms->smp_cpus) - 1);
    }

-    armcpu = ARM_CPU(qemu_get_cpu(0));
    qemu_fdt_add_subnode(vms->fdt, "/pmu");
    if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) {
        const char compat[] = "arm,armv8-pmuv3";
@@ -XXX,XX +XXX,XX @@ static void finalize_gic_version(VirtMachineState *vms)
 */
static void virt_cpu_post_init(VirtMachineState *vms)
{
-    bool aarch64;
+    bool aarch64, pmu;
+    CPUState *cpu;

    aarch64 = object_property_get_bool(OBJECT(first_cpu), "aarch64", NULL);
+    pmu = object_property_get_bool(OBJECT(first_cpu), "pmu", NULL);

-    if (!kvm_enabled()) {
+    if (kvm_enabled()) {
+        CPU_FOREACH(cpu) {
+            if (pmu) {
+                assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU));
+                if (kvm_irqchip_in_kernel()) {
+                    kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ));
+                }
+                kvm_arm_pmu_init(cpu);
+            }
+        }
+    } else {
        if (aarch64 && vms->highmem) {
            int requested_pa_size = 64 - clz64(vms->highest_gpa);
            int pamax = arm_pamax(ARM_CPU(first_cpu));
--
2.20.1

Implement the MVE shifts by register, which perform
shifts on a single general-purpose register.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-19-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 2 ++
target/arm/translate.h | 1 +
target/arm/t32.decode | 18 ++++++++++++++----
target/arm/mve_helper.c | 10 ++++++++++
target/arm/translate.c | 30 ++++++++++++++++++++++++++++++
5 files changed, 57 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)

DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_uqrshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_sqrshr, TCG_CALL_NO_RWG, i32, env, i32, i32)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
+typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);

/**
 * arm_tbflags_from_tb:
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -XXX,XX +XXX,XX @@
&mve_shl_ri rdalo rdahi shim
&mve_shl_rr rdalo rdahi rm
&mve_sh_ri rda shim
+&mve_sh_rr rda rm

# rdahi: bits [3:1] from insn, bit 0 is 1
# rdalo: bits [3:1] from insn, bit 0 is 0
@@ -XXX,XX +XXX,XX @@
&mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
&mve_sh_ri shim=%imm5_12_6
+@mve_sh_rr ....... .... . rda:4 rm:4 .... .... .... &mve_sh_rr

{
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
}

- LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
- ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
- UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
- SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
+ {
+ UQRSHL_rr 1110101 0010 1 .... .... 1111 0000 1101 @mve_sh_rr
+ LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
+ UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
+ }
+
+ {
+ SQRSHR_rr 1110101 0010 1 .... .... 1111 0010 1101 @mve_sh_rr
+ ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
+ SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
+ }
+
UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
]
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
}
+
+uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
+}
+
+uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
    return do_mve_sh_ri(s, a, gen_mve_uqshl);
}

+static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
+{
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
+        a->rm == a->rda) {
+        /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    /* The helper takes care of the sign-extension of the low 8 bits of Rm */
+    fn(cpu_R[a->rda], cpu_env, cpu_R[a->rda], cpu_R[a->rm]);
+    return true;
+}
+
+static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
+{
+    return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
+}
+
+static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
+{
+    return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
+}
+
/*
 * Multiply and multiply accumulate
 */
--
2.20.1
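
For reference: UQRSHL and SQRSHR shift the value in Rda by a count
taken from another general-purpose register Rm. As the helpers above
show, the low 8 bits of Rm are treated as a signed count (so either
instruction can shift in the opposite direction), results saturate
into Rda with QF set on overflow, and right shifts round. The sketch
below is a rough reference model of the unsigned case only, not
QEMU's do_uqrshl_bhs(); the function name is invented and the
handling of counts larger than 32 is a simplification:

    #include <stdbool.h>
    #include <stdint.h>

    /* Rough model of a 32-bit unsigned saturating rounding shift.
     * Positive counts shift left and saturate, setting the Q flag;
     * negative counts shift right with rounding (cf. mve_uqrshl,
     * which receives the sign-extended low byte of Rm as 'sh'). */
    static uint32_t uqrshl32_ref(uint32_t x, int8_t sh, bool *qf)
    {
        if (sh >= 0) {
            /* Left shift; anything shifted past bit 31 saturates. */
            uint64_t r = sh > 31 ? (x ? ~(uint64_t)0 : 0)
                                 : (uint64_t)x << sh;
            if (r > UINT32_MAX) {
                *qf = true;
                return UINT32_MAX;
            }
            return (uint32_t)r;
        } else {
            /* Right shift with rounding; no saturation needed. */
            int rsh = -sh;
            if (rsh > 32) {
                return 0;
            }
            return (uint32_t)(((uint64_t)x + (1ULL << (rsh - 1))) >> rsh);
        }
    }

SQRSHR is the signed counterpart in the opposite direction; as the
mve_sqrshr helper shows, it is implemented by negating the count and
reusing the saturating shift-left logic.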