The following changes since commit 5a67d7735d4162630769ef495cf813244fc850df:

  Merge remote-tracking branch 'remotes/berrange-gitlab/tags/tls-deps-pull-request' into staging (2021-07-02 08:22:39 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210702

for you to fetch changes up to 04ea4d3cfd0a21b248ece8eb7a9436a3d9898dd8:

  target/arm: Implement MVE shifts by register (2021-07-02 11:48:38 +0100)

----------------------------------------------------------------
target-arm queue:
 * more MVE instructions
 * hw/gpio/gpio_pwr: use shutdown function for reboot
 * target/arm: Check NaN mode before silencing NaN
 * tests: Boot and halt a Linux guest on the Raspberry Pi 2 machine
 * hw/arm: Add basic power management to raspi.
 * docs/system/arm: Add quanta-gbs-bmc, quanta-q7l1-bmc

----------------------------------------------------------------
Joe Komlodi (1):
      target/arm: Check NaN mode before silencing NaN

Maxim Uvarov (1):
      hw/gpio/gpio_pwr: use shutdown function for reboot

Nolan Leake (1):
      hw/arm: Add basic power management to raspi.

Patrick Venture (2):
      docs/system/arm: Add quanta-q7l1-bmc reference
      docs/system/arm: Add quanta-gbs-bmc reference

Peter Maydell (18):
      target/arm: Fix MVE widening/narrowing VLDR/VSTR offset calculation
      target/arm: Fix bugs in MVE VRMLALDAVH, VRMLSLDAVH
      target/arm: Make asimd_imm_const() public
      target/arm: Use asimd_imm_const for A64 decode
      target/arm: Use dup_const() instead of bitfield_replicate()
      target/arm: Implement MVE logical immediate insns
      target/arm: Implement MVE vector shift left by immediate insns
      target/arm: Implement MVE vector shift right by immediate insns
      target/arm: Implement MVE VSHLL
      target/arm: Implement MVE VSRI, VSLI
      target/arm: Implement MVE VSHRN, VRSHRN
      target/arm: Implement MVE saturating narrowing shifts
      target/arm: Implement MVE VSHLC
      target/arm: Implement MVE VADDLV
      target/arm: Implement MVE long shifts by immediate
      target/arm: Implement MVE long shifts by register
      target/arm: Implement MVE shifts by immediate
      target/arm: Implement MVE shifts by register

Philippe Mathieu-Daudé (1):
      tests: Boot and halt a Linux guest on the Raspberry Pi 2 machine

 docs/system/arm/aspeed.rst             |   1 +
 docs/system/arm/nuvoton.rst            |   5 +-
 include/hw/arm/bcm2835_peripherals.h   |   3 +-
 include/hw/misc/bcm2835_powermgt.h     |  29 ++
 target/arm/helper-mve.h                | 108 +++++++
 target/arm/translate.h                 |  41 +++
 target/arm/mve.decode                  | 177 ++++++++++-
 target/arm/t32.decode                  |  71 ++++-
 hw/arm/bcm2835_peripherals.c           |  13 +-
 hw/gpio/gpio_pwr.c                     |   2 +-
 hw/misc/bcm2835_powermgt.c             | 160 ++++++++++
 target/arm/helper-a64.c                |  12 +-
 target/arm/mve_helper.c                | 524 +++++++++++++++++++++++++++++++--
 target/arm/translate-a64.c             |  86 +-----
 target/arm/translate-mve.c             | 261 +++++++++++++++-
 target/arm/translate-neon.c            |  81 -----
 target/arm/translate.c                 | 327 +++++++++++++++++++-
 target/arm/vfp_helper.c                |  24 +-
 hw/misc/meson.build                    |   1 +
 tests/acceptance/boot_linux_console.py |  43 +++
 20 files changed, 1760 insertions(+), 209 deletions(-)
 create mode 100644 include/hw/misc/bcm2835_powermgt.h
 create mode 100644 hw/misc/bcm2835_powermgt.c

From: Patrick Venture <venture@google.com>

Adds a line-item reference to the supported quanta-q71l-bmc aspeed
entry.

Signed-off-by: Patrick Venture <venture@google.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20210615192848.1065297-2-venture@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/aspeed.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/aspeed.rst
+++ b/docs/system/arm/aspeed.rst
@@ -XXX,XX +XXX,XX @@ etc.
 AST2400 SoC based machines :
 
 - ``palmetto-bmc``               OpenPOWER Palmetto POWER8 BMC
+- ``quanta-q71l-bmc``            OpenBMC Quanta BMC
 
 AST2500 SoC based machines :
 
-- 
2.20.1

From: Patrick Venture <venture@google.com>

Add line item reference to quanta-gbs-bmc machine.

Signed-off-by: Patrick Venture <venture@google.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20210615192848.1065297-3-venture@google.com
[PMM: fixed underline Sphinx warning]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/nuvoton.rst | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/nuvoton.rst
+++ b/docs/system/arm/nuvoton.rst
@@ -XXX,XX +XXX,XX @@
-Nuvoton iBMC boards (``npcm750-evb``, ``quanta-gsj``)
-=====================================================
+Nuvoton iBMC boards (``*-bmc``, ``npcm750-evb``, ``quanta-gsj``)
+================================================================
 
 The `Nuvoton iBMC`_ chips (NPCM7xx) are a family of ARM-based SoCs that are
 designed to be used as Baseboard Management Controllers (BMCs) in various
@@ -XXX,XX +XXX,XX @@ segment. The following machines are based on this chip :
 The NPCM730 SoC has two Cortex-A9 cores and is targeted for Data Center and
 Hyperscale applications. The following machines are based on this chip :
 
+- ``quanta-gbs-bmc``    Quanta GBS server BMC
 - ``quanta-gsj``        Quanta GSJ server BMC
 
 There are also two more SoCs, NPCM710 and NPCM705, which are single-core
-- 
2.20.1

From: Nolan Leake <nolan@sigbus.net>

This is just enough to make reboot and poweroff work. Works for
linux, u-boot, and the arm trusted firmware. Not tested, but should
work for plan9, and bare-metal/hobby OSes, since they seem to generally
do what linux does for reset.

The watchdog timer functionality is not yet implemented.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/64
Signed-off-by: Nolan Leake <nolan@sigbus.net>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210625210209.1870217-1-nolan@sigbus.net
[PMM: tweaked commit title; fixed region size to 0x200;
 moved header file to include/]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/bcm2835_peripherals.h |   3 +-
 include/hw/misc/bcm2835_powermgt.h   |  29 +++++
 hw/arm/bcm2835_peripherals.c         |  13 ++-
 hw/misc/bcm2835_powermgt.c           | 160 +++++++++++++++++++++++++++
 hw/misc/meson.build                  |   1 +
 5 files changed, 204 insertions(+), 2 deletions(-)
 create mode 100644 include/hw/misc/bcm2835_powermgt.h
 create mode 100644 hw/misc/bcm2835_powermgt.c

diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/bcm2835_peripherals.h
+++ b/include/hw/arm/bcm2835_peripherals.h
@@ -XXX,XX +XXX,XX @@
 #include "hw/misc/bcm2835_mphi.h"
 #include "hw/misc/bcm2835_thermal.h"
 #include "hw/misc/bcm2835_cprman.h"
+#include "hw/misc/bcm2835_powermgt.h"
 #include "hw/sd/sdhci.h"
 #include "hw/sd/bcm2835_sdhost.h"
 #include "hw/gpio/bcm2835_gpio.h"
@@ -XXX,XX +XXX,XX @@ struct BCM2835PeripheralState {
     BCM2835MphiState mphi;
     UnimplementedDeviceState txp;
     UnimplementedDeviceState armtmr;
-    UnimplementedDeviceState powermgt;
+    BCM2835PowerMgtState powermgt;
     BCM2835CprmanState cprman;
     PL011State uart0;
     BCM2835AuxState aux;
diff --git a/include/hw/misc/bcm2835_powermgt.h b/include/hw/misc/bcm2835_powermgt.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/hw/misc/bcm2835_powermgt.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * BCM2835 Power Management emulation
+ *
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef BCM2835_POWERMGT_H
+#define BCM2835_POWERMGT_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_BCM2835_POWERMGT "bcm2835-powermgt"
+OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PowerMgtState, BCM2835_POWERMGT)
+
+struct BCM2835PowerMgtState {
+    SysBusDevice busdev;
+    MemoryRegion iomem;
+
+    uint32_t rstc;
+    uint32_t rsts;
+    uint32_t wdog;
+};
+
+#endif
diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/bcm2835_peripherals.c
+++ b/hw/arm/bcm2835_peripherals.c
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_init(Object *obj)
 
     object_property_add_const_link(OBJECT(&s->dwc2), "dma-mr",
                                    OBJECT(&s->gpu_bus_mr));
+
+    /* Power Management */
+    object_initialize_child(obj, "powermgt", &s->powermgt,
+                            TYPE_BCM2835_POWERMGT);
 }
 
 static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
@@ -XXX,XX +XXX,XX @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
                 qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
                                        INTERRUPT_USB));
 
+    /* Power Management */
+    if (!sysbus_realize(SYS_BUS_DEVICE(&s->powermgt), errp)) {
+        return;
+    }
+
+    memory_region_add_subregion(&s->peri_mr, PM_OFFSET,
+                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->powermgt), 0));
+
     create_unimp(s, &s->txp, "bcm2835-txp", TXP_OFFSET, 0x1000);
     create_unimp(s, &s->armtmr, "bcm2835-sp804", ARMCTRL_TIMER0_1_OFFSET, 0x40);
-    create_unimp(s, &s->powermgt, "bcm2835-powermgt", PM_OFFSET, 0x114);
     create_unimp(s, &s->i2s, "bcm2835-i2s", I2S_OFFSET, 0x100);
     create_unimp(s, &s->smi, "bcm2835-smi", SMI_OFFSET, 0x100);
     create_unimp(s, &s->spi[0], "bcm2835-spi0", SPI0_OFFSET, 0x20);
diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/misc/bcm2835_powermgt.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * BCM2835 Power Management emulation
+ *
+ * Copyright (C) 2017 Marcin Chojnacki <marcinch7@gmail.com>
+ * Copyright (C) 2021 Nolan Leake <nolan@sigbus.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/misc/bcm2835_powermgt.h"
+#include "migration/vmstate.h"
+#include "sysemu/runstate.h"
+
+#define PASSWORD 0x5a000000
+#define PASSWORD_MASK 0xff000000
+
+#define R_RSTC 0x1c
+#define V_RSTC_RESET 0x20
+#define R_RSTS 0x20
+#define V_RSTS_POWEROFF 0x555 /* Linux uses partition 63 to indicate halt. */
+#define R_WDOG 0x24
+
+static uint64_t bcm2835_powermgt_read(void *opaque, hwaddr offset,
+                                      unsigned size)
+{
+    BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
+    uint32_t res = 0;
+
+    switch (offset) {
+    case R_RSTC:
+        res = s->rstc;
+        break;
+    case R_RSTS:
+        res = s->rsts;
+        break;
+    case R_WDOG:
+        res = s->wdog;
+        break;
+
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_read: Unknown offset 0x%08"HWADDR_PRIx
+                      "\n", offset);
+        res = 0;
+        break;
+    }
+
+    return res;
+}
+
+static void bcm2835_powermgt_write(void *opaque, hwaddr offset,
+                                   uint64_t value, unsigned size)
+{
+    BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque;
+
+    if ((value & PASSWORD_MASK) != PASSWORD) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "bcm2835_powermgt_write: Bad password 0x%"PRIx64
+                      " at offset 0x%08"HWADDR_PRIx"\n",
+                      value, offset);
+        return;
+    }
+
+    value = value & ~PASSWORD_MASK;
+
+    switch (offset) {
+    case R_RSTC:
+        s->rstc = value;
+        if (value & V_RSTC_RESET) {
+            if ((s->rsts & 0xfff) == V_RSTS_POWEROFF) {
+                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+            } else {
+                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+            }
+        }
+        break;
+    case R_RSTS:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_write: RSTS\n");
+        s->rsts = value;
+        break;
+    case R_WDOG:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_write: WDOG\n");
+        s->wdog = value;
+        break;
+
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "bcm2835_powermgt_write: Unknown offset 0x%08"HWADDR_PRIx
+                      "\n", offset);
+        break;
+    }
+}
+
+static const MemoryRegionOps bcm2835_powermgt_ops = {
+    .read = bcm2835_powermgt_read,
+    .write = bcm2835_powermgt_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl.min_access_size = 4,
+    .impl.max_access_size = 4,
+};
+
+static const VMStateDescription vmstate_bcm2835_powermgt = {
+    .name = TYPE_BCM2835_POWERMGT,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(rstc, BCM2835PowerMgtState),
+        VMSTATE_UINT32(rsts, BCM2835PowerMgtState),
+        VMSTATE_UINT32(wdog, BCM2835PowerMgtState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void bcm2835_powermgt_init(Object *obj)
+{
+    BCM2835PowerMgtState *s = BCM2835_POWERMGT(obj);
+
+    memory_region_init_io(&s->iomem, obj, &bcm2835_powermgt_ops, s,
+                          TYPE_BCM2835_POWERMGT, 0x200);
+    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
+}
+
+static void bcm2835_powermgt_reset(DeviceState *dev)
+{
+    BCM2835PowerMgtState *s = BCM2835_POWERMGT(dev);
+
+    /* https://elinux.org/BCM2835_registers#PM */
+    s->rstc = 0x00000102;
+    s->rsts = 0x00001000;
+    s->wdog = 0x00000000;
+}
+
+static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = bcm2835_powermgt_reset;
+    dc->vmsd = &vmstate_bcm2835_powermgt;
+}
+
+static TypeInfo bcm2835_powermgt_info = {
+    .name = TYPE_BCM2835_POWERMGT,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(BCM2835PowerMgtState),
+    .class_init = bcm2835_powermgt_class_init,
+    .instance_init = bcm2835_powermgt_init,
+};
+
+static void bcm2835_powermgt_register_types(void)
+{
+    type_register_static(&bcm2835_powermgt_info);
+}
+
+type_init(bcm2835_powermgt_register_types)
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files(
   'bcm2835_rng.c',
   'bcm2835_thermal.c',
   'bcm2835_cprman.c',
+  'bcm2835_powermgt.c',
 ))
 softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c'))
 softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c', 'zynq-xadc.c'))
-- 
2.20.1

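For context, here is a hypothetical bare-metal guest-side sketch of the
protocol the model above implements on a raspi2. The 0x3f100000 base comes
from the machine's iomem layout shown in the next patch's test log;
mmio_write32() and the function names are illustrative only, and real
firmware read-modify-writes RSTS rather than overwriting it:

#include <stdint.h>

#define PM_BASE          0x3f100000u   /* raspi2 peripherals + PM_OFFSET */
#define PM_RSTC          (PM_BASE + 0x1c)
#define PM_RSTS          (PM_BASE + 0x20)
#define PM_PASSWORD      0x5a000000u   /* required in bits [31:24] */
#define PM_RSTC_RESET    0x20u
#define PM_RSTS_POWEROFF 0x555u        /* Linux's "partition 63" halt magic */

static inline void mmio_write32(uintptr_t addr, uint32_t val)
{
    *(volatile uint32_t *)addr = val;
}

static void guest_reboot(void)
{
    /* RSTS does not hold 0x555, so the reset bit causes a reboot. */
    mmio_write32(PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);
}

static void guest_poweroff(void)
{
    /* Mark "partition 63" in RSTS, then trigger the reset path. */
    mmio_write32(PM_RSTS, PM_PASSWORD | PM_RSTS_POWEROFF);
    mmio_write32(PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);
}

Writes without the 0x5a password in the top byte are ignored and logged as
guest errors, which is why both writes carry PM_PASSWORD.
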
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Add a test that boots and quickly shuts down a raspi2 machine,
to test the power management model:

(1/1) tests/acceptance/boot_linux_console.py:BootLinuxConsole.test_arm_raspi2_initrd:
console: [ 0.000000] Booting Linux on physical CPU 0xf00
console: [ 0.000000] Linux version 4.14.98-v7+ (dom@dom-XPS-13-9370) (gcc version 4.9.3 (crosstool-NG crosstool-ng-1.22.0-88-g8460611)) #1200 SMP Tue Feb 12 20:27:48 GMT 2019
console: [ 0.000000] CPU: ARMv7 Processor [410fc075] revision 5 (ARMv7), cr=10c5387d
console: [ 0.000000] CPU: div instructions available: patching division code
console: [ 0.000000] CPU: PIPT / VIPT nonaliasing data cache, VIPT aliasing instruction cache
console: [ 0.000000] OF: fdt: Machine model: Raspberry Pi 2 Model B
...
console: Boot successful.
console: cat /proc/cpuinfo
console: / # cat /proc/cpuinfo
...
console: processor : 3
console: model name : ARMv7 Processor rev 5 (v7l)
console: BogoMIPS : 125.00
console: Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm
console: CPU implementer : 0x41
console: CPU architecture: 7
console: CPU variant : 0x0
console: CPU part : 0xc07
console: CPU revision : 5
console: Hardware : BCM2835
console: Revision : 0000
console: Serial : 0000000000000000
console: cat /proc/iomem
console: / # cat /proc/iomem
console: 00000000-3bffffff : System RAM
console: 00008000-00afffff : Kernel code
console: 00c00000-00d468ef : Kernel data
console: 3f006000-3f006fff : dwc_otg
console: 3f007000-3f007eff : /soc/dma@7e007000
console: 3f00b880-3f00b8bf : /soc/mailbox@7e00b880
console: 3f100000-3f100027 : /soc/watchdog@7e100000
console: 3f101000-3f102fff : /soc/cprman@7e101000
console: 3f200000-3f2000b3 : /soc/gpio@7e200000
PASS (24.59 s)
RESULTS    : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0
JOB TIME   : 25.02 s

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Wainer dos Santos Moschetta <wainersm@redhat.com>
Message-id: 20210531113837.1689775-1-f4bug@amsat.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/acceptance/boot_linux_console.py | 43 ++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/tests/acceptance/boot_linux_console.py b/tests/acceptance/boot_linux_console.py
index XXXXXXX..XXXXXXX 100644
--- a/tests/acceptance/boot_linux_console.py
+++ b/tests/acceptance/boot_linux_console.py
@@ -XXX,XX +XXX,XX @@
 from avocado import skip
 from avocado import skipUnless
 from avocado_qemu import Test
+from avocado_qemu import exec_command
 from avocado_qemu import exec_command_and_wait_for_pattern
 from avocado_qemu import interrupt_interactive_console_until_pattern
 from avocado_qemu import wait_for_console_pattern
@@ -XXX,XX +XXX,XX @@ def test_arm_raspi2_uart0(self):
         """
         self.do_test_arm_raspi2(0)
 
+    def test_arm_raspi2_initrd(self):
+        """
+        :avocado: tags=arch:arm
+        :avocado: tags=machine:raspi2
+        """
+        deb_url = ('http://archive.raspberrypi.org/debian/'
+                   'pool/main/r/raspberrypi-firmware/'
+                   'raspberrypi-kernel_1.20190215-1_armhf.deb')
+        deb_hash = 'cd284220b32128c5084037553db3c482426f3972'
+        deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash)
+        kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img')
+        dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb')
+
+        initrd_url = ('https://github.com/groeck/linux-build-test/raw/'
+                      '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/'
+                      'arm/rootfs-armv7a.cpio.gz')
+        initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c'
+        initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
+        initrd_path = os.path.join(self.workdir, 'rootfs.cpio')
+        archive.gzip_uncompress(initrd_path_gz, initrd_path)
+
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'earlycon=pl011,0x3f201000 console=ttyAMA0 '
+                               'panic=-1 noreboot ' +
+                               'dwc_otg.fiq_fsm_enable=0')
+        self.vm.add_args('-kernel', kernel_path,
+                         '-dtb', dtb_path,
+                         '-initrd', initrd_path,
+                         '-append', kernel_command_line,
+                         '-no-reboot')
+        self.vm.launch()
+        self.wait_for_console_pattern('Boot successful.')
+
+        exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+                                                'BCM2835')
+        exec_command_and_wait_for_pattern(self, 'cat /proc/iomem',
+                                                '/soc/cprman@7e101000')
+        exec_command(self, 'halt')
+        # Wait for VM to shut down gracefully
+        self.vm.wait()
+
     def test_arm_exynos4210_initrd(self):
         """
         :avocado: tags=arch:arm
-- 
2.20.1

From: Joe Komlodi <joe.komlodi@xilinx.com>

If the CPU is running in default NaN mode (FPCR.DN == 1) and we execute
FRSQRTE, FRECPE, or FRECPX with a signaling NaN, parts_silence_nan_frac() will
assert due to fpst->default_nan_mode being set.

To avoid this, we check to see what NaN mode we're running in before we call
floatxx_silence_nan().

Signed-off-by: Joe Komlodi <joe.komlodi@xilinx.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1624662174-175828-2-git-send-email-joe.komlodi@xilinx.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-a64.c | 12 +++++++++---
 target/arm/vfp_helper.c | 24 ++++++++++++++++++------
 2 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
         float16 nan = a;
         if (float16_is_signaling_nan(a, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float16_silence_nan(a, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float16_silence_nan(a, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float16_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
         float32 nan = a;
         if (float32_is_signaling_nan(a, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float32_silence_nan(a, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float32_silence_nan(a, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float32_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
         float64 nan = a;
         if (float64_is_signaling_nan(a, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float64_silence_nan(a, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float64_silence_nan(a, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float64_default_nan(fpst);
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
         float16 nan = f16;
         if (float16_is_signaling_nan(f16, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float16_silence_nan(f16, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float16_silence_nan(f16, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float16_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float32 HELPER(recpe_f32)(float32 input, void *fpstp)
         float32 nan = f32;
         if (float32_is_signaling_nan(f32, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float32_silence_nan(f32, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float32_silence_nan(f32, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float32_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp)
         float64 nan = f64;
         if (float64_is_signaling_nan(f64, fpst)) {
             float_raise(float_flag_invalid, fpst);
-            nan = float64_silence_nan(f64, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float64_silence_nan(f64, fpst);
+            }
         }
         if (fpst->default_nan_mode) {
             nan = float64_default_nan(fpst);
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
         float16 nan = f16;
         if (float16_is_signaling_nan(f16, s)) {
             float_raise(float_flag_invalid, s);
-            nan = float16_silence_nan(f16, s);
+            if (!s->default_nan_mode) {
+                nan = float16_silence_nan(f16, fpstp);
+            }
         }
         if (s->default_nan_mode) {
             nan = float16_default_nan(s);
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
         float32 nan = f32;
         if (float32_is_signaling_nan(f32, s)) {
             float_raise(float_flag_invalid, s);
-            nan = float32_silence_nan(f32, s);
+            if (!s->default_nan_mode) {
+                nan = float32_silence_nan(f32, fpstp);
+            }
         }
         if (s->default_nan_mode) {
             nan = float32_default_nan(s);
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
         float64 nan = f64;
         if (float64_is_signaling_nan(f64, s)) {
             float_raise(float_flag_invalid, s);
-            nan = float64_silence_nan(f64, s);
+            if (!s->default_nan_mode) {
+                nan = float64_silence_nan(f64, fpstp);
+            }
         }
         if (s->default_nan_mode) {
             nan = float64_default_nan(s);
-- 
2.20.1

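The same three-step pattern recurs in every hunk above. As a condensed
minimal sketch, assuming QEMU's internal softfloat API (handle_snan() is an
illustrative name, not a QEMU function):

#include "qemu/osdep.h"
#include "fpu/softfloat.h"

static float16 handle_snan(float16 a, float_status *fpst)
{
    float16 nan = a;

    if (float16_is_signaling_nan(a, fpst)) {
        float_raise(float_flag_invalid, fpst);
        /* Silencing would assert in default-NaN mode, so guard it. */
        if (!fpst->default_nan_mode) {
            nan = float16_silence_nan(a, fpst);
        }
    }
    if (fpst->default_nan_mode) {
        /* The silenced value is unused here; return the default NaN. */
        nan = float16_default_nan(fpst);
    }
    return nan;
}

Note that the invalid-operation flag is still raised unconditionally; only
the call into float16_silence_nan(), which trips the
parts_silence_nan_frac() assertion, is skipped when FPCR.DN is set.
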
From: Maxim Uvarov <maxim.uvarov@linaro.org>

qemu has 2 types of functions: shutdown and reboot. The shutdown
function has to be used for machine shutdown. Otherwise we cause
a reset with a bogus "cause" value, when we intended a shutdown.

Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210625111842.3790-3-maxim.uvarov@linaro.org
[PMM: tweaked commit message]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/gpio/gpio_pwr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/gpio/gpio_pwr.c b/hw/gpio/gpio_pwr.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/gpio/gpio_pwr.c
+++ b/hw/gpio/gpio_pwr.c
@@ -XXX,XX +XXX,XX @@ static void gpio_pwr_reset(void *opaque, int n, int level)
 static void gpio_pwr_shutdown(void *opaque, int n, int level)
 {
     if (level) {
-        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
     }
 }
 
-- 
2.20.1

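To make the distinction concrete, here is a minimal sketch of the two
request APIs from QEMU's include/sysemu/runstate.h that the fix above
chooses between (the two handler function names are illustrative, not QEMU
code):

#include "qemu/osdep.h"
#include "sysemu/runstate.h"

/* Guest asked to power off: stop (or exit) rather than reset. */
static void power_button_pressed(void)
{
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
}

/* Guest asked to reboot: request a system reset, with a reset cause. */
static void reset_button_pressed(void)
{
    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
}

The bug was exactly the mismatched pairing: calling
qemu_system_reset_request() with SHUTDOWN_CAUSE_GUEST_SHUTDOWN reboots the
guest while recording a bogus cause.
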
In do_ldst(), the calculation of the offset needs to be based on the
size of the memory access, not the size of the elements in the
vector. This meant we were getting it wrong for the widening and
narrowing variants of the various VLDR and VSTR insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-2-peter.maydell@linaro.org
---
 target/arm/translate-mve.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool mve_skip_first_beat(DisasContext *s)
     }
 }
 
-static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
+static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn,
+                    unsigned msize)
 {
     TCGv_i32 addr;
     uint32_t offset;
@@ -XXX,XX +XXX,XX @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
         return true;
     }
 
-    offset = a->imm << a->size;
+    offset = a->imm << msize;
     if (!a->a) {
         offset = -offset;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
         { gen_helper_mve_vstrw, gen_helper_mve_vldrw },
         { NULL, NULL }
     };
-    return do_ldst(s, a, ldstfns[a->size][a->l]);
+    return do_ldst(s, a, ldstfns[a->size][a->l], a->size);
 }
 
-#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST)                          \
+#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST, MSIZE)                   \
     static bool trans_##OP(DisasContext *s, arg_VLDR_VSTR *a)           \
     {                                                                   \
         static MVEGenLdStFn * const ldstfns[2][2] = {                   \
             { gen_helper_mve_##ST, gen_helper_mve_##SLD },              \
             { NULL, gen_helper_mve_##ULD },                             \
         };                                                              \
-        return do_ldst(s, a, ldstfns[a->u][a->l]);                      \
+        return do_ldst(s, a, ldstfns[a->u][a->l], MSIZE);               \
     }
 
-DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h)
-DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w)
-DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w)
+DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h, MO_8)
+DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w, MO_8)
+DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w, MO_16)
 
 static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
 {
-- 
2.20.1

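A worked example of why the memory-access size is the right scale factor:
VLDRB.U16 with imm = 4 performs byte-sized memory accesses that are widened
into halfword vector elements, so the address offset must advance by 4
bytes, not 4 halfwords. A standalone sketch of the arithmetic (plain C,
using QEMU's MemOp shift encodings as constants; not QEMU code):

#include <stdio.h>

/* QEMU encodes access sizes as log2 shift amounts: MO_8 == 0, MO_16 == 1. */
enum { MO_8 = 0, MO_16 = 1 };

int main(void)
{
    unsigned imm = 4;
    unsigned esize = MO_16; /* element size: what the buggy code scaled by */
    unsigned msize = MO_8;  /* memory access size: the correct scale */

    printf("buggy offset: %u bytes\n", imm << esize); /* prints 8 */
    printf("fixed offset: %u bytes\n", imm << msize); /* prints 4 */
    return 0;
}
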
It is implementation defined whether a multiply-add of
(0,inf,qnan) or (inf,0,qnan) raises InvalidOperation or
not, so we let the target-specific pickNaNMulAdd function
handle this. This means that we must do the "return the
default NaN in default NaN mode" check after the call,
not before. Correct the ordering, and restore the comment
from the old propagateFloat64MulAddNaN() that warned about
this corner case.

This fixes a regression from 2.11 for Arm guests where we would
incorrectly fail to set the Invalid flag for these cases.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20180504100547.14621-1-peter.maydell@linaro.org
---
 fpu/softfloat.c | 52 ++++++++++++++++++++++++++++---------------------
 1 file changed, 30 insertions(+), 22 deletions(-)

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s)
 static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,
                                   bool inf_zero, float_status *s)
 {
+    int which;
+
     if (is_snan(a.cls) || is_snan(b.cls) || is_snan(c.cls)) {
         s->float_exception_flags |= float_flag_invalid;
     }
 
-    if (s->default_nan_mode) {
-        a.cls = float_class_dnan;
-    } else {
-        switch (pickNaNMulAdd(is_qnan(a.cls), is_snan(a.cls),
-                              is_qnan(b.cls), is_snan(b.cls),
-                              is_qnan(c.cls), is_snan(c.cls),
-                              inf_zero, s)) {
-        case 0:
-            break;
-        case 1:
-            a = b;
-            break;
-        case 2:
-            a = c;
-            break;
-        case 3:
-            a.cls = float_class_dnan;
-            return a;
-        default:
-            g_assert_not_reached();
-        }
+    which = pickNaNMulAdd(is_qnan(a.cls), is_snan(a.cls),
+                          is_qnan(b.cls), is_snan(b.cls),
+                          is_qnan(c.cls), is_snan(c.cls),
+                          inf_zero, s);
 
-        a.cls = float_class_msnan;
+    if (s->default_nan_mode) {
+        /* Note that this check is after pickNaNMulAdd so that function
+         * has an opportunity to set the Invalid flag.
+         */
+        a.cls = float_class_dnan;
+        return a;
     }
+
+    switch (which) {
+    case 0:
+        break;
+    case 1:
+        a = b;
+        break;
+    case 2:
+        a = c;
+        break;
+    case 3:
+        a.cls = float_class_dnan;
+        return a;
+    default:
+        g_assert_not_reached();
+    }
+    a.cls = float_class_msnan;
+
     return a;
 }
 
--
2.17.0

The initial implementation of the MVE VRMLALDAVH and VRMLSLDAVH
insns had some bugs:
 * the 32x32 multiply of elements was being done as 32x32->32,
   not 32x32->64
 * we were incorrectly maintaining the accumulator in its full
   72-bit form across all 4 beats of the insn; in the pseudocode
   it is squashed back into the 64 bits of the RdaHi:RdaLo
   registers after each beat

In particular, fixing the second of these allows us to recast
the implementation to avoid 128-bit arithmetic entirely.

Since the element size here is always 4, we can also drop the
parameterization of ESIZE to make the code a little more readable.

Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-3-peter.maydell@linaro.org
---
 target/arm/mve_helper.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@
  */
 
 #include "qemu/osdep.h"
-#include "qemu/int128.h"
 #include "cpu.h"
 #include "internals.h"
 #include "vec_internal.h"
@@ -XXX,XX +XXX,XX @@ DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
 DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
 
 /*
- * Rounding multiply add long dual accumulate high: we must keep
- * a 72-bit internal accumulator value and return the top 64 bits.
+ * Rounding multiply add long dual accumulate high. In the pseudocode
+ * this is implemented with a 72-bit internal accumulator value of which
+ * the top 64 bits are returned. We optimize this to avoid having to
+ * use 128-bit arithmetic -- we can do this because the 74-bit accumulator
+ * is squashed back into 64-bits after each beat.
  */
-#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128) \
+#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \
     uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                     void *vm, uint64_t a) \
     { \
         uint16_t mask = mve_element_mask(env); \
         unsigned e; \
         TYPE *n = vn, *m = vm; \
-        Int128 acc = int128_lshift(TO128(a), 8); \
-        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
             if (mask & 1) { \
+                LTYPE mul; \
                 if (e & 1) { \
-                    acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] * \
-                                            m[H##ESIZE(e)])); \
+                    mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \
+                    if (SUB) { \
+                        mul = -mul; \
+                    } \
                 } else { \
-                    acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \
-                                             m[H##ESIZE(e)])); \
+                    mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \
                 } \
-                acc = int128_add(acc, int128_make64(1 << 7)); \
+                mul = (mul >> 8) + ((mul >> 7) & 1); \
+                a += mul; \
             } \
         } \
         mve_advance_vpt(env); \
-        return int128_getlo(int128_rshift(acc, 8)); \
+        return a; \
     }
 
-DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64)
-DO_LDAVH(vrmlaldavhxsw, 4, int32_t, true, int128_add, int128_add, int128_makes64)
+DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
+DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)
 
-DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64)
+DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)
 
-DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
-DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
+DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
+DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)
 
 /* Vector add across vector */
 #define DO_VADDV(OP, ESIZE, TYPE) \
--
2.20.1
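The 128-bit-free recast above relies on the per-beat rounding identity (mul >> 8) + ((mul >> 7) & 1) == (mul + (1 << 7)) >> 8. A minimal sketch that checks this against a wide-arithmetic reference (assuming the compiler provides __int128 and arithmetic right shift of signed values, as QEMU's supported hosts do; this is illustrative code, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Reference: add the rounding constant in wide arithmetic, take top bits */
    static int64_t round_ref(int64_t mul)
    {
        __int128 acc = (__int128)mul + (1 << 7);
        return (int64_t)(acc >> 8);
    }

    /* The 64-bit-only form used in DO_LDAVH above */
    static int64_t round_opt(int64_t mul)
    {
        return (mul >> 8) + ((mul >> 7) & 1);
    }

    int main(void)
    {
        int64_t tests[] = { 0, 1, 127, 128, 255, -1, -127, -128, -129,
                            INT64_MAX / 2, INT64_MIN / 2 };
        for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
            assert(round_ref(tests[i]) == round_opt(tests[i]));
        }
        return 0;
    }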
From: Richard Henderson <richard.henderson@linaro.org>

The generic expanders replace nearly identical code in the translator.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180508151437.4232-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 46 ++++++++++++--------------------------
 1 file changed, 14 insertions(+), 32 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
         tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
         break;
     case 0x0a: /* SMAXV / UMAXV */
-        tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
-                            tcg_res,
-                            tcg_res, tcg_elt, tcg_res, tcg_elt);
+        if (is_u) {
+            tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
+        } else {
+            tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
+        }
         break;
     case 0x1a: /* SMINV / UMINV */
-        tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
-                            tcg_res,
-                            tcg_res, tcg_elt, tcg_res, tcg_elt);
-        break;
+        if (is_u) {
+            tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
+        } else {
+            tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
+        }
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
     }
 }
 
-/* Helper functions for 32 bit comparisons */
-static void gen_max_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
-{
-    tcg_gen_movcond_i32(TCG_COND_GE, res, op1, op2, op1, op2);
-}
-
-static void gen_max_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
-{
-    tcg_gen_movcond_i32(TCG_COND_GEU, res, op1, op2, op1, op2);
-}
-
-static void gen_min_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
-{
-    tcg_gen_movcond_i32(TCG_COND_LE, res, op1, op2, op1, op2);
-}
-
-static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
-{
-    tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
-}
-
 /* Pairwise op subgroup of C3.6.16.
  *
  * This is called directly or via the handle_3same_float for float pairwise
@@ -XXX,XX +XXX,XX @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
         static NeonGenTwoOpFn * const fns[3][2] = {
             { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
             { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
-            { gen_max_s32, gen_max_u32 },
+            { tcg_gen_smax_i32, tcg_gen_umax_i32 },
         };
         genfn = fns[size][u];
         break;
@@ -XXX,XX +XXX,XX @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
         static NeonGenTwoOpFn * const fns[3][2] = {
             { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
             { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
-            { gen_min_s32, gen_min_u32 },
+            { tcg_gen_smin_i32, tcg_gen_umin_i32 },
         };
         genfn = fns[size][u];
         break;
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
         static NeonGenTwoOpFn * const fns[3][2] = {
             { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
             { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
-            { gen_max_s32, gen_max_u32 },
+            { tcg_gen_smax_i32, tcg_gen_umax_i32 },
         };
         genfn = fns[size][u];
         break;
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
         static NeonGenTwoOpFn * const fns[3][2] = {
             { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
             { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
-            { gen_min_s32, gen_min_u32 },
+            { tcg_gen_smin_i32, tcg_gen_umin_i32 },
         };
         genfn = fns[size][u];
         break;
--
2.17.0

The function asimd_imm_const() in translate-neon.c is an
implementation of the pseudocode AdvSIMDExpandImm(), which we will
also want for MVE. Move the implementation to translate.c, with a
prototype in translate.h.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-4-peter.maydell@linaro.org
---
 target/arm/translate.h      | 16 ++++++++++
 target/arm/translate-neon.c | 63 -------------------------------------
 target/arm/translate.c      | 57 +++++++++++++++++++++++++++++++++
 3 files changed, 73 insertions(+), 63 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
     return opc | s->be_data;
 }
 
+/**
+ * asimd_imm_const: Expand an encoded SIMD constant value
+ *
+ * Expand a SIMD constant value. This is essentially the pseudocode
+ * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
+ * VMVN and VBIC (when cmode < 14 && op == 1).
+ *
+ * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
+ * callers must catch this.
+ *
+ * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
+ * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
+ * we produce an immediate constant value of 0 in these cases.
+ */
+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/translate-neon.c
@@ -XXX,XX +XXX,XX @@ DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh)
 DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_hs)
 DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_hu)
 
-static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
-{
-    /*
-     * Expand the encoded constant.
-     * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
-     * We choose to not special-case this and will behave as if a
-     * valid constant encoding of 0 had been given.
-     * cmode = 15 op = 1 must UNDEF; we assume decode has handled that.
-     */
-    switch (cmode) {
-    case 0: case 1:
-        /* no-op */
-        break;
-    case 2: case 3:
-        imm <<= 8;
-        break;
-    case 4: case 5:
-        imm <<= 16;
-        break;
-    case 6: case 7:
-        imm <<= 24;
-        break;
-    case 8: case 9:
-        imm |= imm << 16;
-        break;
-    case 10: case 11:
-        imm = (imm << 8) | (imm << 24);
-        break;
-    case 12:
-        imm = (imm << 8) | 0xff;
-        break;
-    case 13:
-        imm = (imm << 16) | 0xffff;
-        break;
-    case 14:
-        if (op) {
-            /*
-             * This is the only case where the top and bottom 32 bits
-             * of the encoded constant differ.
-             */
-            uint64_t imm64 = 0;
-            int n;
-
-            for (n = 0; n < 8; n++) {
-                if (imm & (1 << n)) {
-                    imm64 |= (0xffULL << (n * 8));
-                }
-            }
-            return imm64;
-        }
-        imm |= (imm << 8) | (imm << 16) | (imm << 24);
-        break;
-    case 15:
-        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
-            | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
-        break;
-    }
-    if (op) {
-        imm = ~imm;
-    }
-    return dup_const(MO_32, imm);
-}
-
 static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
                         GVecGen2iFn *fn)
 {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ void arm_translate_init(void)
     a64_translate_init();
 }
 
+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
+{
+    /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */
+    switch (cmode) {
+    case 0: case 1:
+        /* no-op */
+        break;
+    case 2: case 3:
+        imm <<= 8;
+        break;
+    case 4: case 5:
+        imm <<= 16;
+        break;
+    case 6: case 7:
+        imm <<= 24;
+        break;
+    case 8: case 9:
+        imm |= imm << 16;
+        break;
+    case 10: case 11:
+        imm = (imm << 8) | (imm << 24);
+        break;
+    case 12:
+        imm = (imm << 8) | 0xff;
+        break;
+    case 13:
+        imm = (imm << 16) | 0xffff;
+        break;
+    case 14:
+        if (op) {
+            /*
+             * This is the only case where the top and bottom 32 bits
+             * of the encoded constant differ.
+             */
+            uint64_t imm64 = 0;
+            int n;
+
+            for (n = 0; n < 8; n++) {
+                if (imm & (1 << n)) {
+                    imm64 |= (0xffULL << (n * 8));
+                }
+            }
+            return imm64;
+        }
+        imm |= (imm << 8) | (imm << 16) | (imm << 24);
+        break;
+    case 15:
+        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
+            | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
+        break;
+    }
+    if (op) {
+        imm = ~imm;
+    }
+    return dup_const(MO_32, imm);
+}
+
 /* Generate a label used for skipping this instruction */
 void arm_gen_condlabel(DisasContext *s)
 {
--
2.20.1
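For a feel of what asimd_imm_const() produces, here is a hedged sketch of two of the cmode expansions (values computed per the AdvSIMDExpandImm pseudocode; dup32() is an invented stand-in for dup_const(MO_32, ...), not QEMU's function):

    #include <assert.h>
    #include <stdint.h>

    /* Replicate a 32-bit value across both halves of a 64-bit word */
    static uint64_t dup32(uint32_t x)
    {
        return ((uint64_t)x << 32) | x;
    }

    int main(void)
    {
        uint32_t imm = 0xab;

        /* cmode 12: (imm8 << 8) | 0xff, replicated to both 32-bit lanes */
        assert(dup32((imm << 8) | 0xff) == 0x0000abff0000abffULL);

        /* cmode 13: (imm8 << 16) | 0xffff, likewise */
        assert(dup32((imm << 16) | 0xffff) == 0x00abffff00abffffULL);
        return 0;
    }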
From: Richard Henderson <richard.henderson@linaro.org>

This implements all of the v8.1-Atomics instructions except
for compare-and-swap, which is decoded elsewhere.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180508151437.4232-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 38 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
 typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
 typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
+typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);
 
 /* Note that the gvec expanders operate on offsets + sizes. */
 typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
     int rn = extract32(insn, 5, 5);
     int o3_opc = extract32(insn, 12, 4);
     int feature = ARM_FEATURE_V8_ATOMICS;
+    TCGv_i64 tcg_rn, tcg_rs;
+    AtomicThreeOpFn *fn;
 
     if (is_vector) {
         unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
     }
     switch (o3_opc) {
     case 000: /* LDADD */
+        fn = tcg_gen_atomic_fetch_add_i64;
+        break;
     case 001: /* LDCLR */
+        fn = tcg_gen_atomic_fetch_and_i64;
+        break;
     case 002: /* LDEOR */
+        fn = tcg_gen_atomic_fetch_xor_i64;
+        break;
     case 003: /* LDSET */
+        fn = tcg_gen_atomic_fetch_or_i64;
+        break;
     case 004: /* LDSMAX */
+        fn = tcg_gen_atomic_fetch_smax_i64;
+        break;
     case 005: /* LDSMIN */
+        fn = tcg_gen_atomic_fetch_smin_i64;
+        break;
     case 006: /* LDUMAX */
+        fn = tcg_gen_atomic_fetch_umax_i64;
+        break;
     case 007: /* LDUMIN */
+        fn = tcg_gen_atomic_fetch_umin_i64;
+        break;
     case 010: /* SWP */
+        fn = tcg_gen_atomic_xchg_i64;
+        break;
     default:
         unallocated_encoding(s);
         return;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
         return;
     }
 
-    (void)rs;
-    (void)rn;
+    if (rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+    tcg_rn = cpu_reg_sp(s, rn);
+    tcg_rs = read_cpu_reg(s, rs, true);
+
+    if (o3_opc == 1) { /* LDCLR */
+        tcg_gen_not_i64(tcg_rs, tcg_rs);
+    }
+
+    /* The tcg atomic primitives are all full barriers.  Therefore we
+     * can ignore the Acquire and Release bits of this instruction.
+     */
+    fn(cpu_reg(s, rt), tcg_rn, tcg_rs, get_mem_index(s),
+       s->be_data | size | MO_ALIGN);
 }
 
 /* Load/store register (all forms) */
--
2.17.0

The A64 AdvSIMD modified-immediate grouping uses almost the same
constant encoding that A32 Neon does; reuse asimd_imm_const() (to
which we add the AArch64-specific case for cmode 15 op 1) instead of
reimplementing it all.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-5-peter.maydell@linaro.org
---
 target/arm/translate.h     |  3 +-
 target/arm/translate-a64.c | 86 ++++----------------------------------
 target/arm/translate.c     | 17 +++++++-
 3 files changed, 24 insertions(+), 82 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
  * VMVN and VBIC (when cmode < 14 && op == 1).
  *
  * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
- * callers must catch this.
+ * callers must catch this; we return the 64-bit constant value defined
+ * for AArch64.
  *
  * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
  * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
 {
     int rd = extract32(insn, 0, 5);
     int cmode = extract32(insn, 12, 4);
-    int cmode_3_1 = extract32(cmode, 1, 3);
-    int cmode_0 = extract32(cmode, 0, 1);
     int o2 = extract32(insn, 11, 1);
     uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
     bool is_neg = extract32(insn, 29, 1);
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
         return;
     }
 
-    /* See AdvSIMDExpandImm() in ARM ARM */
-    switch (cmode_3_1) {
-    case 0: /* Replicate(Zeros(24):imm8, 2) */
-    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
-    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
-    case 3: /* Replicate(imm8:Zeros(24), 2) */
-    {
-        int shift = cmode_3_1 * 8;
-        imm = bitfield_replicate(abcdefgh << shift, 32);
-        break;
-    }
-    case 4: /* Replicate(Zeros(8):imm8, 4) */
-    case 5: /* Replicate(imm8:Zeros(8), 4) */
-    {
-        int shift = (cmode_3_1 & 0x1) * 8;
-        imm = bitfield_replicate(abcdefgh << shift, 16);
-        break;
-    }
-    case 6:
-        if (cmode_0) {
-            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
-            imm = (abcdefgh << 16) | 0xffff;
-        } else {
-            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
-            imm = (abcdefgh << 8) | 0xff;
-        }
-        imm = bitfield_replicate(imm, 32);
-        break;
-    case 7:
-        if (!cmode_0 && !is_neg) {
-            imm = bitfield_replicate(abcdefgh, 8);
-        } else if (!cmode_0 && is_neg) {
-            int i;
-            imm = 0;
-            for (i = 0; i < 8; i++) {
-                if ((abcdefgh) & (1 << i)) {
-                    imm |= 0xffULL << (i * 8);
-                }
-            }
-        } else if (cmode_0) {
-            if (is_neg) {
-                imm = (abcdefgh & 0x3f) << 48;
-                if (abcdefgh & 0x80) {
-                    imm |= 0x8000000000000000ULL;
-                }
-                if (abcdefgh & 0x40) {
-                    imm |= 0x3fc0000000000000ULL;
-                } else {
-                    imm |= 0x4000000000000000ULL;
-                }
-            } else {
-                if (o2) {
-                    /* FMOV (vector, immediate) - half-precision */
-                    imm = vfp_expand_imm(MO_16, abcdefgh);
-                    /* now duplicate across the lanes */
-                    imm = bitfield_replicate(imm, 16);
-                } else {
-                    imm = (abcdefgh & 0x3f) << 19;
-                    if (abcdefgh & 0x80) {
-                        imm |= 0x80000000;
-                    }
-                    if (abcdefgh & 0x40) {
-                        imm |= 0x3e000000;
-                    } else {
-                        imm |= 0x40000000;
-                    }
-                    imm |= (imm << 32);
-                }
-            }
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    if (cmode_3_1 != 7 && is_neg) {
-        imm = ~imm;
+    if (cmode == 15 && o2 && !is_neg) {
+        /* FMOV (vector, immediate) - half-precision */
+        imm = vfp_expand_imm(MO_16, abcdefgh);
+        /* now duplicate across the lanes */
+        imm = bitfield_replicate(imm, 16);
+    } else {
+        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
     }
 
     if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
     case 14:
         if (op) {
             /*
-             * This is the only case where the top and bottom 32 bits
-             * of the encoded constant differ.
+             * This and cmode == 15 op == 1 are the only cases where
+             * the top and bottom 32 bits of the encoded constant differ.
              */
             uint64_t imm64 = 0;
             int n;
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
         imm |= (imm << 8) | (imm << 16) | (imm << 24);
         break;
     case 15:
+        if (op) {
+            /* Reserved encoding for AArch32; valid for AArch64 */
+            uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
+            if (imm & 0x80) {
+                imm64 |= 0x8000000000000000ULL;
+            }
+            if (imm & 0x40) {
+                imm64 |= 0x3fc0000000000000ULL;
+            } else {
+                imm64 |= 0x4000000000000000ULL;
+            }
+            return imm64;
+        }
         imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
             | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
         break;
--
2.20.1
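The LDCLR special case above (inverting tcg_rs before using a fetch-and) follows from LDCLR's definition as an atomic bit-clear: mem = mem AND NOT(rs), returning the old value. A minimal C11 sketch of the same mapping (illustrative only, not QEMU code):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    /* LDCLR maps onto an atomic fetch-and with an inverted operand */
    static uint64_t ldclr(_Atomic uint64_t *mem, uint64_t rs)
    {
        return atomic_fetch_and(mem, ~rs);
    }

    int main(void)
    {
        _Atomic uint64_t mem = 0xffULL;
        uint64_t old = ldclr(&mem, 0x0fULL);
        assert(old == 0xff && atomic_load(&mem) == 0xf0);
        return 0;
    }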
From: Richard Henderson <richard.henderson@linaro.org>

Use write_fp_dreg and clear_vec_high to zero the bits
that need zeroing for these cases.

Cc: qemu-stable@nongnu.org
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180502221552.3873-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
 
     if (itof) {
         TCGv_i64 tcg_rn = cpu_reg(s, rn);
+        TCGv_i64 tmp;
 
         switch (type) {
         case 0:
-        {
             /* 32 bit */
-            TCGv_i64 tmp = tcg_temp_new_i64();
+            tmp = tcg_temp_new_i64();
             tcg_gen_ext32u_i64(tmp, tcg_rn);
-            tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(s, rd, MO_64));
-            tcg_gen_movi_i64(tmp, 0);
-            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
+            write_fp_dreg(s, rd, tmp);
             tcg_temp_free_i64(tmp);
             break;
-        }
         case 1:
-        {
             /* 64 bit */
-            TCGv_i64 tmp = tcg_const_i64(0);
-            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(s, rd, MO_64));
-            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
-            tcg_temp_free_i64(tmp);
+            write_fp_dreg(s, rd, tcg_rn);
             break;
-        }
         case 2:
             /* 64 bit to top half. */
             tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
+            clear_vec_high(s, true, rd);
             break;
         }
     } else {
--
2.17.0

Use dup_const() instead of bitfield_replicate() in
disas_simd_mod_imm().

(We can't replace the other use of bitfield_replicate() in this file,
in logic_imm_decode_wmask(), because that location needs to handle 2
and 4 bit elements, which dup_const() cannot.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-6-peter.maydell@linaro.org
---
 target/arm/translate-a64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
         /* FMOV (vector, immediate) - half-precision */
         imm = vfp_expand_imm(MO_16, abcdefgh);
         /* now duplicate across the lanes */
-        imm = bitfield_replicate(imm, 16);
+        imm = dup_const(MO_16, imm);
     } else {
         imm = asimd_imm_const(abcdefgh, cmode, is_neg);
     }
--
2.20.1
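What dup_const(MO_16, imm) computes in the hunk above is simply the 16-bit immediate replicated into all four 16-bit lanes of a 64-bit value. A tiny sketch of that semantics (the multiplication trick is one conventional way to write it; this is illustrative code, not QEMU's implementation):

    #include <assert.h>
    #include <stdint.h>

    /* Copy a 16-bit value into all four lanes of a 64-bit word */
    static uint64_t dup_const_16(uint16_t x)
    {
        return 0x0001000100010001ULL * x;
    }

    int main(void)
    {
        /* 0x3c00 is fp16 1.0, the kind of value FMOV (vector) produces */
        assert(dup_const_16(0x3c00) == 0x3c003c003c003c00ULL);
        return 0;
    }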
From: Richard Henderson <richard.henderson@linaro.org>

While we have some of the scalar paths for FCVT for fp16,
we failed to decode the fp16 version of these instructions.

Cc: qemu-stable@nongnu.org
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180502221552.3873-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 65 +++++++++++++++++++++++++++-----------
 1 file changed, 46 insertions(+), 19 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                          bool is_q, bool is_u,
                                          int immh, int immb, int rn, int rd)
 {
-    bool is_double = extract32(immh, 3, 1);
     int immhb = immh << 3 | immb;
-    int fracbits = (is_double ? 128 : 64) - immhb;
-    int pass;
+    int pass, size, fracbits;
     TCGv_ptr tcg_fpstatus;
     TCGv_i32 tcg_rmode, tcg_shift;
 
-    if (!extract32(immh, 2, 2)) {
-        unallocated_encoding(s);
-        return;
-    }
-
-    if (!is_scalar && !is_q && is_double) {
+    if (immh & 0x8) {
+        size = MO_64;
+        if (!is_scalar && !is_q) {
+            unallocated_encoding(s);
+            return;
+        }
+    } else if (immh & 0x4) {
+        size = MO_32;
+    } else if (immh & 0x2) {
+        size = MO_16;
+        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+            unallocated_encoding(s);
+            return;
+        }
+    } else {
+        /* Should have split out AdvSIMD modified immediate earlier. */
+        assert(immh == 1);
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
     assert(!(is_scalar && is_q));
 
     tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
-    tcg_fpstatus = get_fpstatus_ptr(false);
+    tcg_fpstatus = get_fpstatus_ptr(size == MO_16);
     gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+    fracbits = (16 << size) - immhb;
     tcg_shift = tcg_const_i32(fracbits);
 
-    if (is_double) {
+    if (size == MO_64) {
         int maxpass = is_scalar ? 1 : 2;
 
         for (pass = 0; pass < maxpass; pass++) {
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
         }
         clear_vec_high(s, is_q, rd);
     } else {
-        int maxpass = is_scalar ? 1 : is_q ? 4 : 2;
+        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
+        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
+
+        switch (size) {
+        case MO_16:
+            if (is_u) {
+                fn = gen_helper_vfp_toulh;
+            } else {
+                fn = gen_helper_vfp_toslh;
+            }
+            break;
+        case MO_32:
+            if (is_u) {
+                fn = gen_helper_vfp_touls;
+            } else {
+                fn = gen_helper_vfp_tosls;
+            }
+            break;
+        default:
+            g_assert_not_reached();
+        }
+
         for (pass = 0; pass < maxpass; pass++) {
             TCGv_i32 tcg_op = tcg_temp_new_i32();
 
-            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
-            if (is_u) {
-                gen_helper_vfp_touls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
-            } else {
-                gen_helper_vfp_tosls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
-            }
+            read_vec_element_i32(s, tcg_op, rn, pass, size);
+            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
             if (is_scalar) {
                 write_fp_sreg(s, rd, tcg_op);
             } else {
-                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
+                write_vec_element_i32(s, tcg_op, rd, pass, size);
             }
             tcg_temp_free_i32(tcg_op);
         }
--
2.17.0

Implement the MVE logical-immediate insns (VMOV, VMVN,
VORR and VBIC). These have essentially the same encoding
as their Neon equivalents, and we implement the decode
in the same way.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-7-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  4 +++
 target/arm/mve.decode      | 17 +++++++++++++
 target/arm/mve_helper.c    | 24 ++++++++++++++++++
 target/arm/translate-mve.c | 50 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 95 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 # VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
 %size_28 28:1 !function=plus_1
 
+# 1imm format immediate
+%imm_28_16_0 28:1 16:3 0:4
+
 &vldr_vstr rn qd imm p a w size l u
 &1op qd qm size
 &2op qd qm qn size
 &2scalar qd qn rm size
+&1imm qd imm cmode op
 
 @vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
 # Note that both Rn and Qd are 3 bits only (no D bit)
@@ -XXX,XX +XXX,XX @@
 @2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
 @2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
      size=%size_28
+@1imm .... .... .... .... .... cmode:4 .. op:1 . .... &1imm qd=%qd imm=%imm_28_16_0
 
 # The _rev suffix indicates that Vn and Vm are reversed. This is
 # the case for shifts. In the Arm ARM these insns are documented
@@ -XXX,XX +XXX,XX @@ VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rd
 # Predicate operations
 %mask_22_13 22:1 13:3
 VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
+
+# Logical immediate operations (1 reg and modified-immediate)
+
+# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
+# not in a way we can conveniently represent in decodetree without
+# a lot of repetition:
+# VORR: op=0, (cmode & 1) && cmode < 12
+# VBIC: op=1, (cmode & 1) && cmode < 12
+# VMOV: everything else
+# So we have a single decode line and check the cmode/op in the
+# trans function.
+Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vnegw, 4, int32_t, DO_NEG)
 DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
 DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
 
+/*
+ * 1 operand immediates: Vda is destination and possibly also one source.
+ * All these insns work at 64-bit widths.
+ */
+#define DO_1OP_IMM(OP, FN) \
+    void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \
+    { \
+        uint64_t *da = vda; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
+            mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_MOVI(N, I) (I)
+#define DO_ANDI(N, I) ((N) & (I))
+#define DO_ORRI(N, I) ((N) | (I))
+
+DO_1OP_IMM(vmovi, DO_MOVI)
+DO_1OP_IMM(vandi, DO_ANDI)
+DO_1OP_IMM(vorri, DO_ORRI)
+
 #define DO_2OP(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                 void *vd, void *vn, void *vm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
 typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
 
 /* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
 static inline long mve_qreg_offset(unsigned reg)
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
     mve_update_eci(s);
     return true;
 }
+
+static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
+{
+    TCGv_ptr qd;
+    uint64_t imm;
+
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !mve_check_qreg_bank(s, a->qd) ||
+        !fn) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    imm = asimd_imm_const(a->imm, a->cmode, a->op);
+
+    qd = mve_qreg_ptr(a->qd);
+    fn(cpu_env, qd, tcg_constant_i64(imm));
+    tcg_temp_free_ptr(qd);
+    mve_update_eci(s);
+    return true;
+}
+
+static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
+{
+    /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
+    MVEGenOneOpImmFn *fn;
+
+    if ((a->cmode & 1) && a->cmode < 12) {
+        if (a->op) {
+            /*
+             * For op=1, the immediate will be inverted by asimd_imm_const(),
+             * so the VBIC becomes a logical AND operation.
+             */
+            fn = gen_helper_mve_vandi;
+        } else {
+            fn = gen_helper_mve_vorri;
+        }
+    } else {
+        /* There is one unallocated cmode/op combination in this space */
+        if (a->cmode == 15 && a->op == 1) {
+            return false;
+        }
+        /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */
+        fn = gen_helper_mve_vmovi;
+    }
+    return do_1imm(s, a, fn);
+}
--
2.20.1
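The VORR/VBIC/VMOV/VMVN split that trans_Vimm_1r() implements can be summarised in a few lines. A hedged sketch of just the cmode/op classification (the function and its return strings are invented here for illustration):

    #include <assert.h>
    #include <string.h>

    /* Invented helper: names the insn selected by the cmode/op bits */
    static const char *mve_1imm_kind(int cmode, int op)
    {
        if ((cmode & 1) && cmode < 12) {
            return op ? "VBIC" : "VORR";
        }
        if (cmode == 15 && op == 1) {
            return "reserved";   /* the one unallocated combination */
        }
        return "VMOV/VMVN";      /* asimd_imm_const() folds in the NOT */
    }

    int main(void)
    {
        assert(strcmp(mve_1imm_kind(1, 0), "VORR") == 0);
        assert(strcmp(mve_1imm_kind(3, 1), "VBIC") == 0);
        assert(strcmp(mve_1imm_kind(14, 0), "VMOV/VMVN") == 0);
        assert(strcmp(mve_1imm_kind(15, 1), "reserved") == 0);
        return 0;
    }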
1
From: Richard Henderson <richard.henderson@linaro.org>
1
Implement the MVE shift-vector-left-by-immediate insns VSHL, VQSHL
2
2
and VQSHLU.
3
Given that this atomic operation will be used by both risc-v
3
4
and aarch64, let's not duplicate code across the two targets.
4
The size-and-immediate encoding here is the same as Neon, and we
5
5
handle it the same way neon-dp.decode does.
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20180508151437.4232-5-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210628135835.6690-8-peter.maydell@linaro.org
10
---
10
---
11
accel/tcg/atomic_template.h | 71 +++++++++++++++++++++++++++++++++++++
11
target/arm/helper-mve.h | 16 +++++++++++
12
accel/tcg/tcg-runtime.h | 8 +++++
12
target/arm/mve.decode | 23 +++++++++++++++
13
tcg/tcg-op.h | 34 ++++++++++++++++++
13
target/arm/mve_helper.c | 57 ++++++++++++++++++++++++++++++++++++++
14
tcg/tcg.h | 8 +++++
14
target/arm/translate-mve.c | 51 ++++++++++++++++++++++++++++++++++
15
tcg/tcg-op.c | 8 +++++
15
4 files changed, 147 insertions(+)
16
5 files changed, 129 insertions(+)
16
17
17
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
18
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
18
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper-mve.h
20
--- a/accel/tcg/atomic_template.h
20
+++ b/target/arm/helper-mve.h
21
+++ b/accel/tcg/atomic_template.h
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
22
DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
23
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
24
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
25
+
26
+DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
27
+DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
28
+DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
29
+
30
+DEF_HELPER_FLAGS_4(mve_vqshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
31
+DEF_HELPER_FLAGS_4(mve_vqshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
+DEF_HELPER_FLAGS_4(mve_vqshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
+
34
+DEF_HELPER_FLAGS_4(mve_vqshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
35
+DEF_HELPER_FLAGS_4(mve_vqshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
36
+DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
37
+
38
+DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
39
+DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
40
+DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
41
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/mve.decode
44
+++ b/target/arm/mve.decode
22
@@ -XXX,XX +XXX,XX @@
45
@@ -XXX,XX +XXX,XX @@
23
#elif DATA_SIZE == 8
46
&2op qd qm qn size
24
# define SUFFIX q
47
&2scalar qd qn rm size
25
# define DATA_TYPE uint64_t
48
&1imm qd imm cmode op
26
+# define SDATA_TYPE int64_t
49
+&2shift qd qm shift size
27
# define BSWAP bswap64
50
28
#elif DATA_SIZE == 4
51
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
29
# define SUFFIX l
52
# Note that both Rn and Qd are 3 bits only (no D bit)
30
# define DATA_TYPE uint32_t
53
@@ -XXX,XX +XXX,XX @@
31
+# define SDATA_TYPE int32_t
54
@2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
32
# define BSWAP bswap32
55
@2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
33
#elif DATA_SIZE == 2
56
34
# define SUFFIX w
57
+@2_shl_b .... .... .. 001 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
35
# define DATA_TYPE uint16_t
58
+@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
36
+# define SDATA_TYPE int16_t
59
+@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2
37
# define BSWAP bswap16
60
+
38
#elif DATA_SIZE == 1
61
# Vector loads and stores
39
# define SUFFIX b
62
40
# define DATA_TYPE uint8_t
63
# Widening loads and narrowing stores:
41
+# define SDATA_TYPE int8_t
64
@@ -XXX,XX +XXX,XX @@ VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
42
# define BSWAP
65
# So we have a single decode line and check the cmode/op in the
43
#else
66
# trans function.
44
# error unsupported data size
67
Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
45
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER(or_fetch)
68
+
46
GEN_ATOMIC_HELPER(xor_fetch)
69
+# Shifts by immediate
47
70
+
48
#undef GEN_ATOMIC_HELPER
71
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
49
+
72
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
50
+/* These helpers are, as a whole, full barriers. Within the helper,
73
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
51
+ * the leading barrier is explicit and the trailing barrier is within
74
+
52
+ * cmpxchg primitive.
75
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
53
+ */
76
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
54
+#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
77
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
55
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
78
+
56
+ ABI_TYPE xval EXTRA_ARGS) \
79
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
57
+{ \
80
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
58
+ ATOMIC_MMU_DECLS; \
81
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
59
+ XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
82
+
60
+ XDATA_TYPE cmp, old, new, val = xval; \
83
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
61
+ smp_mb(); \
84
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
62
+ cmp = atomic_read__nocheck(haddr); \
85
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w
63
+ do { \
86
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
64
+ old = cmp; new = FN(old, val); \
87
index XXXXXXX..XXXXXXX 100644
65
+ cmp = atomic_cmpxchg__nocheck(haddr, old, new); \
88
--- a/target/arm/mve_helper.c
66
+ } while (cmp != old); \
89
+++ b/target/arm/mve_helper.c
67
+ ATOMIC_MMU_CLEANUP; \
90
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
68
+ return RET; \
91
WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
92
#define DO_UQRSHL_OP(N, M, satp) \
93
WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
94
+#define DO_SUQSHL_OP(N, M, satp) \
95
+ WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)
96
97
DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
98
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
99
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvsw, 4, uint32_t)
100
DO_VADDV(vaddvub, 1, uint8_t)
101
DO_VADDV(vaddvuh, 2, uint16_t)
102
DO_VADDV(vaddvuw, 4, uint32_t)
103
+
104
+/* Shifts by immediate */
105
+#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
106
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
107
+ void *vm, uint32_t shift) \
108
+ { \
109
+ TYPE *d = vd, *m = vm; \
110
+ uint16_t mask = mve_element_mask(env); \
111
+ unsigned e; \
112
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
113
+ mergemask(&d[H##ESIZE(e)], \
114
+ FN(m[H##ESIZE(e)], shift), mask); \
115
+ } \
116
+ mve_advance_vpt(env); \
117
+ }
118
+
119
+#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \
120
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
121
+ void *vm, uint32_t shift) \
122
+ { \
123
+ TYPE *d = vd, *m = vm; \
124
+ uint16_t mask = mve_element_mask(env); \
125
+ unsigned e; \
126
+ bool qc = false; \
127
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
128
+ bool sat = false; \
129
+ mergemask(&d[H##ESIZE(e)], \
130
+ FN(m[H##ESIZE(e)], shift, &sat), mask); \
131
+ qc |= sat & mask & 1; \
132
+ } \
133
+ if (qc) { \
134
+ env->vfp.qc[0] = qc; \
135
+ } \
136
+ mve_advance_vpt(env); \
137
+ }
138
+
139
+/* provide unsigned 2-op shift helpers for all sizes */
140
+#define DO_2SHIFT_U(OP, FN) \
141
+ DO_2SHIFT(OP##b, 1, uint8_t, FN) \
142
+ DO_2SHIFT(OP##h, 2, uint16_t, FN) \
143
+ DO_2SHIFT(OP##w, 4, uint32_t, FN)
144
+
145
+#define DO_2SHIFT_SAT_U(OP, FN) \
146
+ DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
147
+ DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \
148
+ DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
149
+#define DO_2SHIFT_SAT_S(OP, FN) \
150
+ DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \
151
+ DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \
152
+ DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)
153
+
154
+DO_2SHIFT_U(vshli_u, DO_VSHLU)
155
+DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
156
+DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
157
+DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
158
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
159
index XXXXXXX..XXXXXXX 100644
160
--- a/target/arm/translate-mve.c
161
+++ b/target/arm/translate-mve.c
162
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
163
typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
164
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
165
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
166
+typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
167
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
168
typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
169
typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
170
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
171
}
172
return do_1imm(s, a, fn);
173
}
174
+
175
+static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
176
+ bool negateshift)
177
+{
178
+ TCGv_ptr qd, qm;
179
+ int shift = a->shift;
180
+
181
+ if (!dc_isar_feature(aa32_mve, s) ||
182
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
183
+ !fn) {
184
+ return false;
185
+ }
186
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
187
+ return true;
188
+ }
189
+
190
+ /*
191
+ * When we handle a right shift insn using a left-shift helper
192
+ * which permits a negative shift count to indicate a right-shift,
193
+ * we must negate the shift count.
194
+ */
195
+ if (negateshift) {
196
+ shift = -shift;
197
+ }
198
+
199
+ qd = mve_qreg_ptr(a->qd);
200
+ qm = mve_qreg_ptr(a->qm);
201
+ fn(cpu_env, qd, qm, tcg_constant_i32(shift));
202
+ tcg_temp_free_ptr(qd);
203
+ tcg_temp_free_ptr(qm);
204
+ mve_update_eci(s);
205
+ return true;
69
+}
206
+}
70
+
207
+
71
+GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
208
+#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \
72
+GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
209
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
73
+GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
210
+ { \
74
+GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
211
+ static MVEGenTwoOpShiftFn * const fns[] = { \
75
+
212
+ gen_helper_mve_##FN##b, \
76
+GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
213
+ gen_helper_mve_##FN##h, \
77
+GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
214
+ gen_helper_mve_##FN##w, \
78
+GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
215
+ NULL, \
79
+GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
216
+ }; \
80
+
217
+ return do_2shift(s, a, fns[a->size], NEGATESHIFT); \
81
+#undef GEN_ATOMIC_HELPER_FN
218
+ }
82
#endif /* DATA SIZE >= 16 */
219
+
83
220
+DO_2SHIFT(VSHLI, vshli_u, false)
84
#undef END
221
+DO_2SHIFT(VQSHLI_S, vqshli_s, false)
85
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr,
222
+DO_2SHIFT(VQSHLI_U, vqshli_u, false)
86
ldo = ldn;
223
+DO_2SHIFT(VQSHLUI, vqshlui_s, false)
87
}
88
}
89
+
90
+/* These helpers are, as a whole, full barriers. Within the helper,
91
+ * the leading barrier is explicit and the trailing barrier is within
92
+ * cmpxchg primitive.
93
+ */
94
+#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
95
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
96
+ ABI_TYPE xval EXTRA_ARGS) \
97
+{ \
98
+ ATOMIC_MMU_DECLS; \
99
+ XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
100
+ XDATA_TYPE ldo, ldn, old, new, val = xval; \
101
+ smp_mb(); \
102
+ ldn = atomic_read__nocheck(haddr); \
103
+ do { \
104
+ ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
105
+ ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
106
+ } while (ldo != ldn); \
107
+ ATOMIC_MMU_CLEANUP; \
108
+ return RET; \
109
+}
110
+
111
+GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
112
+GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
113
+GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
114
+GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
115
+
116
+GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
117
+GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
118
+GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
119
+GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
120
+
121
+#undef GEN_ATOMIC_HELPER_FN
122
#endif /* DATA_SIZE >= 16 */
123
124
#undef END
125
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr,
126
#undef BSWAP
127
#undef ABI_TYPE
128
#undef DATA_TYPE
129
+#undef SDATA_TYPE
130
#undef SUFFIX
131
#undef DATA_SIZE
132
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
133
index XXXXXXX..XXXXXXX 100644
134
--- a/accel/tcg/tcg-runtime.h
135
+++ b/accel/tcg/tcg-runtime.h
136
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPERS(fetch_add)
137
GEN_ATOMIC_HELPERS(fetch_and)
138
GEN_ATOMIC_HELPERS(fetch_or)
139
GEN_ATOMIC_HELPERS(fetch_xor)
140
+GEN_ATOMIC_HELPERS(fetch_smin)
141
+GEN_ATOMIC_HELPERS(fetch_umin)
142
+GEN_ATOMIC_HELPERS(fetch_smax)
143
+GEN_ATOMIC_HELPERS(fetch_umax)
144
145
GEN_ATOMIC_HELPERS(add_fetch)
146
GEN_ATOMIC_HELPERS(and_fetch)
147
GEN_ATOMIC_HELPERS(or_fetch)
148
GEN_ATOMIC_HELPERS(xor_fetch)
149
+GEN_ATOMIC_HELPERS(smin_fetch)
150
+GEN_ATOMIC_HELPERS(umin_fetch)
151
+GEN_ATOMIC_HELPERS(smax_fetch)
152
+GEN_ATOMIC_HELPERS(umax_fetch)
153
154
GEN_ATOMIC_HELPERS(xchg)
155
156
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
157
index XXXXXXX..XXXXXXX 100644
158
--- a/tcg/tcg-op.h
159
+++ b/tcg/tcg-op.h
160
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
161
162
void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
163
void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
164
+
165
void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
166
void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
167
void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
168
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
169
void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
170
void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
171
void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
172
+void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
173
+void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
174
+void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
175
+void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
176
+void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
177
+void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
178
+void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
179
+void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
180
+
181
void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
182
void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
183
void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
184
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
185
void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
186
void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
+void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
+void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);

void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i64
#define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i64
#define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i64
+#define tcg_gen_atomic_fetch_smin_tl tcg_gen_atomic_fetch_smin_i64
+#define tcg_gen_atomic_fetch_umin_tl tcg_gen_atomic_fetch_umin_i64
+#define tcg_gen_atomic_fetch_smax_tl tcg_gen_atomic_fetch_smax_i64
+#define tcg_gen_atomic_fetch_umax_tl tcg_gen_atomic_fetch_umax_i64
#define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i64
#define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i64
#define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i64
#define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i64
+#define tcg_gen_atomic_smin_fetch_tl tcg_gen_atomic_smin_fetch_i64
+#define tcg_gen_atomic_umin_fetch_tl tcg_gen_atomic_umin_fetch_i64
+#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i64
+#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i64
#define tcg_gen_dup_tl_vec tcg_gen_dup_i64_vec
#else
#define tcg_gen_movi_tl tcg_gen_movi_i32
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i32
#define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i32
#define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i32
+#define tcg_gen_atomic_fetch_smin_tl tcg_gen_atomic_fetch_smin_i32
+#define tcg_gen_atomic_fetch_umin_tl tcg_gen_atomic_fetch_umin_i32
+#define tcg_gen_atomic_fetch_smax_tl tcg_gen_atomic_fetch_smax_i32
+#define tcg_gen_atomic_fetch_umax_tl tcg_gen_atomic_fetch_umax_i32
#define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i32
#define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i32
#define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i32
#define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i32
+#define tcg_gen_atomic_smin_fetch_tl tcg_gen_atomic_smin_fetch_i32
+#define tcg_gen_atomic_umin_fetch_tl tcg_gen_atomic_umin_fetch_i32
+#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i32
+#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i32
#define tcg_gen_dup_tl_vec tcg_gen_dup_i32_vec
#endif

diff --git a/tcg/tcg.h b/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
+GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
+GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
+GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
+GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
+GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
+GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
+GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
+GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
+GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
+GEN_ATOMIC_HELPER(umax_fetch, umax, 1)

static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
--
2.17.0
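
A note for readers new to these helpers: when the host has no native
atomic minimum/maximum instruction, a fetch-and-min degenerates to a
compare-and-swap loop. The standalone C11 sketch below restates that
idea; the function name and scalar shape are illustrative only, not
QEMU's (the real code generates per-width helpers from the
GEN_ATOMIC_HELPER macros above):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Returns the value seen before the operation, like fetch_umin. */
    static uint32_t fetch_umin_sketch(_Atomic uint32_t *p, uint32_t v)
    {
        uint32_t old = atomic_load(p);
        /*
         * Retry until either the stored value is already <= v, or our
         * CAS installs v; a failed CAS reloads 'old' for the next try.
         */
        while (old > v && !atomic_compare_exchange_weak(p, &old, v)) {
        }
        return old;
    }

The fetch_*/\*_fetch naming above distinguishes whether the helper
returns the old value (as this sketch does) or the newly computed one.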
From: Richard Henderson <richard.henderson@linaro.org>

These operations are re-invented by several targets so far.
Several supported hosts have insns for these, so place the
expanders out-of-line for a future introduction of tcg opcodes.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180508151437.4232-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tcg/tcg-op.h | 16 ++++++++++++++++
 tcg/tcg-op.c | 40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+)

diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_smin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_smax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_umax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);

static inline void tcg_gen_discard_i32(TCGv_i32 arg)
{
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg);
void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg);
void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg);
void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_smin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_smax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_umax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);

#if TCG_TARGET_REG_BITS == 64
static inline void tcg_gen_discard_i64(TCGv_i64 arg)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i64
#define tcg_gen_muls2_tl tcg_gen_muls2_i64
#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i64
+#define tcg_gen_smin_tl tcg_gen_smin_i64
+#define tcg_gen_umin_tl tcg_gen_umin_i64
+#define tcg_gen_smax_tl tcg_gen_smax_i64
+#define tcg_gen_umax_tl tcg_gen_umax_i64
#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i64
#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i64
#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i64
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i32
#define tcg_gen_muls2_tl tcg_gen_muls2_i32
#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i32
+#define tcg_gen_smin_tl tcg_gen_smin_i32
+#define tcg_gen_umin_tl tcg_gen_umin_i32
+#define tcg_gen_smax_tl tcg_gen_smax_i32
+#define tcg_gen_umax_tl tcg_gen_umax_i32
#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i32
#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i32
#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i32
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
    }
}

+void tcg_gen_smin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+    tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, a, b);
+}
+
+void tcg_gen_umin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+    tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, a, b);
+}
+
+void tcg_gen_smax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+    tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, b, a);
+}
+
+void tcg_gen_umax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
+{
+    tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, b, a);
+}
+
/* 64-bit ops */

#if TCG_TARGET_REG_BITS == 32
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
    tcg_temp_free_i64(t2);
}

+void tcg_gen_smin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+    tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, a, b);
+}
+
+void tcg_gen_umin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+    tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, a, b);
+}
+
+void tcg_gen_smax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+    tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, b, a);
+}
+
+void tcg_gen_umax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
+{
+    tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, b, a);
+}
+
/* Size changing operations. */

void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
--
2.17.0

Implement the MVE vector shift right by immediate insns VSHRI and
VRSHRI. As with Neon, we implement these by using helper functions
which perform left shifts but allow negative shift counts to indicate
right shifts.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-9-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 12 ++++++++++++
 target/arm/translate.h | 20 ++++++++++++++++++++
 target/arm/mve.decode | 28 ++++++++++++++++++++++++++++
 target/arm/mve_helper.c | 7 +++++++
 target/arm/translate-mve.c | 5 +++++
 target/arm/translate-neon.c | 18 ------------------
 6 files changed, 72 insertions(+), 18 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)

+DEF_HELPER_FLAGS_4(mve_vshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline int times_2_plus_1(DisasContext *s, int x)
    return x * 2 + 1;
}

+static inline int rsub_64(DisasContext *s, int x)
+{
+    return 64 - x;
+}
+
+static inline int rsub_32(DisasContext *s, int x)
+{
+    return 32 - x;
+}
+
+static inline int rsub_16(DisasContext *s, int x)
+{
+    return 16 - x;
+}
+
+static inline int rsub_8(DisasContext *s, int x)
+{
+    return 8 - x;
+}
+
static inline int arm_dc_feature(DisasContext *dc, int feature)
{
    return (dc->features & (1ULL << feature)) != 0;
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2

+# Right shifts are encoded as N - shift, where N is the element size in bits.
+%rshift_i5 16:5 !function=rsub_32
+%rshift_i4 16:4 !function=rsub_16
+%rshift_i3 16:3 !function=rsub_8
+
+@2_shr_b .... .... .. 001 ... .... .... .... .... &2shift qd=%qd qm=%qm \
+         size=0 shift=%rshift_i3
+@2_shr_h .... .... .. 01 .... .... .... .... .... &2shift qd=%qd qm=%qm \
+         size=1 shift=%rshift_i4
+@2_shr_w .... .... .. 1 ..... .... .... .... .... &2shift qd=%qd qm=%qm \
+         size=2 shift=%rshift_i5
+
# Vector loads and stores

# Widening loads and narrowing stores:
@@ -XXX,XX +XXX,XX @@ VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w
+
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w
+
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w
+
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
+
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvuw, 4, uint32_t)
    DO_2SHIFT(OP##b, 1, uint8_t, FN) \
    DO_2SHIFT(OP##h, 2, uint16_t, FN) \
    DO_2SHIFT(OP##w, 4, uint32_t, FN)
+#define DO_2SHIFT_S(OP, FN) \
+    DO_2SHIFT(OP##b, 1, int8_t, FN) \
+    DO_2SHIFT(OP##h, 2, int16_t, FN) \
+    DO_2SHIFT(OP##w, 4, int32_t, FN)

#define DO_2SHIFT_SAT_U(OP, FN) \
    DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvuw, 4, uint32_t)
    DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)

DO_2SHIFT_U(vshli_u, DO_VSHLU)
+DO_2SHIFT_S(vshli_s, DO_VSHLS)
DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
+DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
+DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHLI, vshli_u, false)
DO_2SHIFT(VQSHLI_S, vqshli_s, false)
DO_2SHIFT(VQSHLI_U, vqshli_u, false)
DO_2SHIFT(VQSHLUI, vqshlui_s, false)
+/* These right shifts use a left-shift helper with negated shift count */
+DO_2SHIFT(VSHRI_S, vshli_s, true)
+DO_2SHIFT(VSHRI_U, vshli_u, true)
+DO_2SHIFT(VRSHRI_S, vrshli_s, true)
+DO_2SHIFT(VRSHRI_U, vrshli_u, true)
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/translate-neon.c
@@ -XXX,XX +XXX,XX @@ static inline int plus1(DisasContext *s, int x)
    return x + 1;
}

-static inline int rsub_64(DisasContext *s, int x)
-{
-    return 64 - x;
-}
-
-static inline int rsub_32(DisasContext *s, int x)
-{
-    return 32 - x;
-}
-
-static inline int rsub_16(DisasContext *s, int x)
-{
-    return 16 - x;
-}
-
-static inline int rsub_8(DisasContext *s, int x)
-{
-    return 8 - x;
-}
-
static inline int neon_3same_fp_size(DisasContext *s, int x)
{
    /* Convert 0==fp32, 1==fp16 into a MO_* value */
--
2.20.1
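
The "left shift with negative count" convention used by the MVE patch
above can be restated in scalar C. This is a sketch only: the name is
made up, and the real helpers operate on whole vectors under the MVE
predicate mask. It shows how one function covers both directions once
the decoder (via the rsub_N() functions) has turned the encoded
immediate into esize - imm and the translator has negated it:

    #include <stdint.h>

    /* Signed byte element; a negative 'shift' means shift right. */
    static int8_t vshl_s8_sketch(int8_t n, int shift)
    {
        if (shift >= 8) {
            return 0;               /* every bit shifted out */
        } else if (shift >= 0) {
            return (int8_t)(n << shift);
        } else if (shift > -8) {
            return n >> -shift;     /* arithmetic right shift */
        } else {
            return n >> 7;          /* sign fill: 0 or -1 */
        }
    }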
From: Richard Henderson <richard.henderson@linaro.org>

The instruction "ucvtf v0.4h, v0.4h, #2", with input 0x8000u,
overflows the intermediate float16 to infinity before we have a
chance to scale the output. Use float64 as the intermediate type
so that no input argument (uint32_t in this case) can overflow
or round before scaling. Given the declared argument, the signed
int32_t function has the same problem.

When converting from float16 to integer, using u/int32_t instead
of u/int16_t means that the bounding is incorrect.

Cc: qemu-stable@nongnu.org
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180502221552.3873-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.h | 4 +--
 target/arm/helper.c | 53 ++++++++++++++++++++++++++++++++++++--
 target/arm/translate-a64.c | 4 +--
 3 files changed, 55 insertions(+), 6 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, ptr)
-DEF_HELPER_3(vfp_toulh, i32, f16, i32, ptr)
-DEF_HELPER_3(vfp_toslh, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_touhh, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_toshh, i32, f16, i32, ptr)
DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_tosqs, i64, f32, i32, ptr)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
-VFP_CONV_FIX_A64(sl, h, 16, 32, int32)
-VFP_CONV_FIX_A64(ul, h, 16, 32, uint32)
+
#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
+#undef VFP_CONV_FIX_A64
+
+/* Conversion to/from f16 can overflow to infinity before/after scaling.
+ * Therefore we convert to f64 (which does not round), scale,
+ * and then convert f64 to f16 (which may round).
+ */
+
+static float16 do_postscale_fp16(float64 f, int shift, float_status *fpst)
+{
+    return float64_to_float16(float64_scalbn(f, -shift, fpst), true, fpst);
+}
+
+float16 HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
+{
+    return do_postscale_fp16(int32_to_float64(x, fpst), shift, fpst);
+}
+
+float16 HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
+{
+    return do_postscale_fp16(uint32_to_float64(x, fpst), shift, fpst);
+}
+
+static float64 do_prescale_fp16(float16 f, int shift, float_status *fpst)
+{
+    if (unlikely(float16_is_any_nan(f))) {
+        float_raise(float_flag_invalid, fpst);
+        return 0;
+    } else {
+        int old_exc_flags = get_float_exception_flags(fpst);
+        float64 ret;
+
+        ret = float16_to_float64(f, true, fpst);
+        ret = float64_scalbn(ret, shift, fpst);
+        old_exc_flags |= get_float_exception_flags(fpst)
+            & float_flag_input_denormal;
+        set_float_exception_flags(old_exc_flags, fpst);
+
+        return ret;
+    }
+}
+
+uint32_t HELPER(vfp_toshh)(float16 x, uint32_t shift, void *fpst)
+{
+    return float64_to_int16(do_prescale_fp16(x, shift, fpst), fpst);
+}
+
+uint32_t HELPER(vfp_touhh)(float16 x, uint32_t shift, void *fpst)
+{
+    return float64_to_uint16(do_prescale_fp16(x, shift, fpst), fpst);
+}

/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
    switch (size) {
    case MO_16:
        if (is_u) {
-            fn = gen_helper_vfp_toulh;
+            fn = gen_helper_vfp_touhh;
        } else {
-            fn = gen_helper_vfp_toslh;
+            fn = gen_helper_vfp_toshh;
        }
        break;
    case MO_32:
--
2.17.0

Implement the MVE VHLL (vector shift left long) insn. This has two
encodings: the T1 encoding is the usual shift-by-immediate format,
and the T2 encoding is a special case where the shift count is always
equal to the element size.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-10-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 9 +++++++
 target/arm/mve.decode | 53 +++++++++++++++++++++++++++++++++++---
 target/arm/mve_helper.c | 32 +++++++++++++++++++++++
 target/arm/translate-mve.c | 15 +++++++++++
 4 files changed, 105 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshllbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2

+@2_shll_b .... .... ... 01 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
+@2_shll_h .... .... ... 1 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
+# VSHLL encoding T2 where shift == esize
+@2_shll_esize_b .... .... .... 00 .. .... .... .... .... &2shift \
+                qd=%qd qm=%qm size=0 shift=8
+@2_shll_esize_h .... .... .... 01 .. .... .... .... .... &2shift \
+                qd=%qd qm=%qm size=1 shift=16
+
# Right shifts are encoded as N - shift, where N is the element size in bits.
%rshift_i5 16:5 !function=rsub_32
%rshift_i4 16:4 !function=rsub_16
@@ -XXX,XX +XXX,XX @@ VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op

-VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
-VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
-VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
-VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+# The VSHLL T2 encoding is not a @2op pattern, but is here because it
+# overlaps what would be size=0b11 VMULH/VRMULH
+{
+  VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
+  VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+  VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+  VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
+  VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+  VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+  VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
+  VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+  VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+  VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
+  VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+  VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+}

VMAX_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
@@ -XXX,XX +XXX,XX @@ VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
+
+# VSHLL T1 encoding; the T2 VSHLL encoding is elsewhere in this file
+VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
+
+/*
+ * Long shifts taking half-sized inputs from top or bottom of the input
+ * vector and producing a double-width result. ESIZE, TYPE are for
+ * the input, and LESIZE, LTYPE for the output.
+ * Unlike the normal shift helpers, we do not handle negative shift counts,
+ * because the long shift is strictly left-only.
+ */
+#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        LTYPE *d = vd; \
+        TYPE *m = vm; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned le; \
+        assert(shift <= 16); \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \
+            mergemask(&d[H##LESIZE(le)], r, mask); \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_VSHLL_ALL(OP, TOP) \
+    DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \
+    DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \
+    DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \
+    DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \
+
+DO_VSHLL_ALL(vshllb, false)
+DO_VSHLL_ALL(vshllt, true)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHRI_S, vshli_s, true)
DO_2SHIFT(VSHRI_U, vshli_u, true)
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
DO_2SHIFT(VRSHRI_U, vrshli_u, true)
+
+#define DO_VSHLL(INSN, FN) \
+    static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+    { \
+        static MVEGenTwoOpShiftFn * const fns[] = { \
+            gen_helper_mve_##FN##b, \
+            gen_helper_mve_##FN##h, \
+        }; \
+        return do_2shift(s, a, fns[a->size], false); \
+    }
+
+DO_VSHLL(VSHLL_BS, vshllbs)
+DO_VSHLL(VSHLL_BU, vshllbu)
+DO_VSHLL(VSHLL_TS, vshllts)
+DO_VSHLL(VSHLL_TU, vshlltu)
--
2.20.1
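
The widen-then-scale fix in the float16 patch above is worth
restating outside softfloat terms. The sketch below uses the host
'double' purely for illustration (an assumption for brevity; QEMU's
helpers use softfloat float64 routines, never host FP), with 'float'
standing in for float16:

    #include <math.h>
    #include <stdint.h>

    static float ucvtf_fixed_sketch(uint32_t x, int fbits)
    {
        double wide = (double)x;    /* exact: every uint32_t fits in a double */
        wide = ldexp(wide, -fbits); /* scale by 2^-fbits, still exact */
        return (float)wide;         /* round exactly once, at the end */
    }

Converting to the narrow type first would round, or overflow to
infinity as in the 0x8000 example, before the scaling ever happened.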
Some versions of gcc produce a spurious warning if the result of
__atomic_compare_exchange_n() is not used and the type involved
is a signed 8 bit value:
  error: value computed is not used [-Werror=unused-value]
This has been seen on at least
gcc (Ubuntu 5.4.0-6ubuntu1~16.04.9) 5.4.0 20160609

Work around this by using an explicit cast to void to indicate
that we don't care about the return value.

We don't currently use our atomic_cmpxchg() macro on any signed
8 bit types, but the upcoming support for the Arm v8.1-Atomics
will require it.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
---
 include/qemu/atomic.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -XXX,XX +XXX,XX @@
/* Returns the eventual value, failed or not */
#define atomic_cmpxchg__nocheck(ptr, old, new) ({ \
    typeof_strip_qual(*ptr) _old = (old); \
-    __atomic_compare_exchange_n(ptr, &_old, new, false, \
+    (void)__atomic_compare_exchange_n(ptr, &_old, new, false, \
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
    _old; \
})
--
2.17.0

Implement the MVE VSRI and VSLI insns, which perform a
shift-and-insert operation.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-11-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 8 ++++++++
 target/arm/mve.decode | 9 ++++++++
 target/arm/mve_helper.c | 42 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 3 +++
 4 files changed, 62 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vsrib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsrih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h

VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
+
+# Shift-and-insert
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_b
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_h
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
+
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
DO_2SHIFT_S(vrshli_s, DO_VRSHLS)

+/* Shift-and-insert; we always work with 64 bits at a time */
+#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        uint64_t *d = vd, *m = vm; \
+        uint16_t mask; \
+        uint64_t shiftmask; \
+        unsigned e; \
+        if (shift == 0 || shift == ESIZE * 8) { \
+            /* \
+             * Only VSLI can shift by 0; only VSRI can shift by <dt>. \
+             * The generic logic would give the right answer for 0 but \
+             * fails for <dt>. \
+             */ \
+            goto done; \
+        } \
+        assert(shift < ESIZE * 8); \
+        mask = mve_element_mask(env); \
+        /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \
+        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \
+        for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
+            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \
+                         (d[H8(e)] & ~shiftmask); \
+            mergemask(&d[H8(e)], r, mask); \
+        } \
+done: \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
+#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
+#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
+#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))
+
+DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
+
/*
 * Long shifts taking half-sized inputs from top or bottom of the input
 * vector and producing a double-width result. ESIZE, TYPE are for
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT(VSHRI_U, vshli_u, true)
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
DO_2SHIFT(VRSHRI_U, vrshli_u, true)

+DO_2SHIFT(VSRI, vsri, false)
+DO_2SHIFT(VSLI, vsli, false)
+
#define DO_VSHLL(INSN, FN) \
    static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
    { \
--
2.20.1
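
The one-line atomic.h change above is easy to miss, so here is a
minimal standalone reproduction of the pattern (the function name is
invented for the example; only the (void) cast is the point):

    #include <stdbool.h>
    #include <stdint.h>

    static int8_t cmpxchg_s8(int8_t *ptr, int8_t old, int8_t new)
    {
        int8_t expected = old;
        /*
         * We only want 'expected' (the value actually seen in memory);
         * the boolean success result is deliberately unused. Casting
         * it to void documents that, and silences gcc's spurious
         * "value computed is not used" warning on signed 8-bit types.
         */
        (void)__atomic_compare_exchange_n(ptr, &expected, new, false,
                                          __ATOMIC_SEQ_CST,
                                          __ATOMIC_SEQ_CST);
        return expected;
    }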
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180508151437.4232-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-a64.h | 2 +
 target/arm/helper-a64.c | 43 ++++++++++++++
 target/arm/translate-a64.c | 119 ++++++++++++++++++++++++++++++++++++-
 3 files changed, 161 insertions(+), 3 deletions(-)

diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.h
+++ b/target/arm/helper-a64.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(paired_cmpxchg64_le_parallel, TCG_CALL_NO_WG,
DEF_HELPER_FLAGS_4(paired_cmpxchg64_be, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(paired_cmpxchg64_be_parallel, TCG_CALL_NO_WG,
                   i64, env, i64, i64, i64)
+DEF_HELPER_5(casp_le_parallel, void, env, i32, i64, i64, i64)
+DEF_HELPER_5(casp_be_parallel, void, env, i32, i64, i64, i64)
DEF_HELPER_FLAGS_3(advsimd_maxh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
DEF_HELPER_FLAGS_3(advsimd_minh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
DEF_HELPER_FLAGS_3(advsimd_maxnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
    return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, true, GETPC());
}

+/* Writes back the old data into Rs. */
+void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
+                              uint64_t new_lo, uint64_t new_hi)
+{
+    uintptr_t ra = GETPC();
+#ifndef CONFIG_ATOMIC128
+    cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+#else
+    Int128 oldv, cmpv, newv;
+
+    cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
+    newv = int128_make128(new_lo, new_hi);
+
+    int mem_idx = cpu_mmu_index(env, false);
+    TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+    oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
+
+    env->xregs[rs] = int128_getlo(oldv);
+    env->xregs[rs + 1] = int128_gethi(oldv);
+#endif
+}
+
+void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
+                              uint64_t new_hi, uint64_t new_lo)
+{
+    uintptr_t ra = GETPC();
+#ifndef CONFIG_ATOMIC128
+    cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+#else
+    Int128 oldv, cmpv, newv;
+
+    cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
+    newv = int128_make128(new_lo, new_hi);
+
+    int mem_idx = cpu_mmu_index(env, false);
+    TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+
+    env->xregs[rs + 1] = int128_getlo(oldv);
+    env->xregs[rs] = int128_gethi(oldv);
+#endif
+}
+
/*
 * AdvSIMD half-precision
 */
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

+static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
+                                 int rn, int size)
+{
+    TCGv_i64 tcg_rs = cpu_reg(s, rs);
+    TCGv_i64 tcg_rt = cpu_reg(s, rt);
+    int memidx = get_mem_index(s);
+    TCGv_i64 addr = cpu_reg_sp(s, rn);
+
+    if (rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+    tcg_gen_atomic_cmpxchg_i64(tcg_rs, addr, tcg_rs, tcg_rt, memidx,
+                               size | MO_ALIGN | s->be_data);
+}
+
+static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
+                                      int rn, int size)
+{
+    TCGv_i64 s1 = cpu_reg(s, rs);
+    TCGv_i64 s2 = cpu_reg(s, rs + 1);
+    TCGv_i64 t1 = cpu_reg(s, rt);
+    TCGv_i64 t2 = cpu_reg(s, rt + 1);
+    TCGv_i64 addr = cpu_reg_sp(s, rn);
+    int memidx = get_mem_index(s);
+
+    if (rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+
+    if (size == 2) {
+        TCGv_i64 cmp = tcg_temp_new_i64();
+        TCGv_i64 val = tcg_temp_new_i64();
+
+        if (s->be_data == MO_LE) {
+            tcg_gen_concat32_i64(val, t1, t2);
+            tcg_gen_concat32_i64(cmp, s1, s2);
+        } else {
+            tcg_gen_concat32_i64(val, t2, t1);
+            tcg_gen_concat32_i64(cmp, s2, s1);
+        }
+
+        tcg_gen_atomic_cmpxchg_i64(cmp, addr, cmp, val, memidx,
+                                   MO_64 | MO_ALIGN | s->be_data);
+        tcg_temp_free_i64(val);
+
+        if (s->be_data == MO_LE) {
+            tcg_gen_extr32_i64(s1, s2, cmp);
+        } else {
+            tcg_gen_extr32_i64(s2, s1, cmp);
+        }
+        tcg_temp_free_i64(cmp);
+    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+        TCGv_i32 tcg_rs = tcg_const_i32(rs);
+
+        if (s->be_data == MO_LE) {
+            gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
+        } else {
+            gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
+        }
+        tcg_temp_free_i32(tcg_rs);
+    } else {
+        TCGv_i64 d1 = tcg_temp_new_i64();
+        TCGv_i64 d2 = tcg_temp_new_i64();
+        TCGv_i64 a2 = tcg_temp_new_i64();
+        TCGv_i64 c1 = tcg_temp_new_i64();
+        TCGv_i64 c2 = tcg_temp_new_i64();
+        TCGv_i64 zero = tcg_const_i64(0);
+
+        /* Load the two words, in memory order. */
+        tcg_gen_qemu_ld_i64(d1, addr, memidx,
+                            MO_64 | MO_ALIGN_16 | s->be_data);
+        tcg_gen_addi_i64(a2, addr, 8);
+        tcg_gen_qemu_ld_i64(d2, addr, memidx, MO_64 | s->be_data);
+
+        /* Compare the two words, also in memory order. */
+        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
+        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
+        tcg_gen_and_i64(c2, c2, c1);
+
+        /* If compare equal, write back new data, else write back old data. */
+        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
+        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
+        tcg_gen_qemu_st_i64(c1, addr, memidx, MO_64 | s->be_data);
+        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
+        tcg_temp_free_i64(a2);
+        tcg_temp_free_i64(c1);
+        tcg_temp_free_i64(c2);
+        tcg_temp_free_i64(zero);
+
+        /* Write back the data from memory to Rs. */
+        tcg_gen_mov_i64(s1, d1);
+        tcg_gen_mov_i64(s2, d2);
+        tcg_temp_free_i64(d1);
+        tcg_temp_free_i64(d2);
+    }
+}
+
/* Update the Sixty-Four bit (SF) registersize. This logic is derived
 * from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
        gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, true);
        return;
    }
-    /* CASP / CASPL */
+    if (rt2 == 31
+        && ((rt | rs) & 1) == 0
+        && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+        /* CASP / CASPL */
+        gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
+        return;
+    }
    break;

-case 0x6: case 0x7: /* CASP / LDXP */
+case 0x6: case 0x7: /* CASPA / LDXP */
    if (size & 2) { /* LDXP / LDAXP */
        if (rn == 31) {
            gen_check_sp_alignment(s);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
        }
        return;
    }
-    /* CASPA / CASPAL */
+    if (rt2 == 31
+        && ((rt | rs) & 1) == 0
+        && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+        /* CASPA / CASPAL */
+        gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
+        return;
+    }
    break;

case 0xa: /* CAS */
case 0xb: /* CASL */
case 0xe: /* CASA */
case 0xf: /* CASAL */
+    if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+        gen_compare_and_swap(s, rs, rt, rn, size);
+        return;
+    }
    break;
}
unallocated_encoding(s);
--
2.17.0

Implement the MVE shift-right-and-narrow insns VSHRN and VRSHRN.

do_urshr() is borrowed from sve_helper.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-12-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 10 ++++++++++
 target/arm/mve.decode | 11 +++++++++++
 target/arm/mve_helper.c | 40 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 15 ++++++++++++++
 4 files changed, 76 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
+
+# Narrowing shifts (which only support b and h sizes)
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
+
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)

DO_VSHLL_ALL(vshllb, false)
DO_VSHLL_ALL(vshllt, true)
+
+/*
+ * Narrowing right shifts, taking a double sized input, shifting it
+ * and putting the result in either the top or bottom half of the output.
+ * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
+ */
+#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        LTYPE *m = vm; \
+        TYPE *d = vd; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned le; \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            TYPE r = FN(m[H##LESIZE(le)], shift); \
+            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_VSHRN_ALL(OP, FN) \
+    DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \
+    DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \
+    DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \
+    DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)
+
+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
+{
+    if (likely(sh < 64)) {
+        return (x >> sh) + ((x >> (sh - 1)) & 1);
+    } else if (sh == 64) {
+        return x >> 63;
+    } else {
+        return 0;
+    }
+}
+
+DO_VSHRN_ALL(vshrn, DO_SHR)
+DO_VSHRN_ALL(vrshrn, do_urshr)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_VSHLL(VSHLL_BS, vshllbs)
DO_VSHLL(VSHLL_BU, vshllbu)
DO_VSHLL(VSHLL_TS, vshllts)
DO_VSHLL(VSHLL_TU, vshlltu)
+
+#define DO_2SHIFT_N(INSN, FN) \
+    static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+    { \
+        static MVEGenTwoOpShiftFn * const fns[] = { \
+            gen_helper_mve_##FN##b, \
+            gen_helper_mve_##FN##h, \
+        }; \
+        return do_2shift(s, a, fns[a->size], false); \
+    }
+
+DO_2SHIFT_N(VSHRNB, vshrnb)
+DO_2SHIFT_N(VSHRNT, vshrnt)
+DO_2SHIFT_N(VRSHRNB, vrshrnb)
+DO_2SHIFT_N(VRSHRNT, vrshrnt)
--
2.20.1
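
do_urshr()'s one-liner above packs the rounding into the shift: the
term ((x >> (sh - 1)) & 1) is the last bit shifted out, i.e. the 0.5
of the discarded fraction. Restated standalone (same logic as the
patch; the harness name and the worked values in the comments are the
only additions):

    #include <stdint.h>

    static uint64_t urshr_sketch(uint64_t x, unsigned sh) /* 1 <= sh <= 64 */
    {
        if (sh < 64) {
            /* e.g. x = 11 (0b1011), sh = 1: (11 >> 1) + 1 = 6 = round(5.5) */
            return (x >> sh) + ((x >> (sh - 1)) & 1);
        }
        /* sh == 64: only the rounding carry out of bit 63 survives */
        return x >> 63;
    }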
From: Richard Henderson <richard.henderson@linaro.org>

The generic expanders replace nearly identical code in the translator.

Acked-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180508151437.4232-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/xtensa/translate.c | 50 ++++++++++++++++++++++++++-------------
 1 file changed, 33 insertions(+), 17 deletions(-)

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static void translate_clamps(DisasContext *dc, const uint32_t arg[],
    TCGv_i32 tmp1 = tcg_const_i32(-1u << arg[2]);
    TCGv_i32 tmp2 = tcg_const_i32((1 << arg[2]) - 1);

-    tcg_gen_movcond_i32(TCG_COND_GT, tmp1,
-                        cpu_R[arg[1]], tmp1, cpu_R[arg[1]], tmp1);
-    tcg_gen_movcond_i32(TCG_COND_LT, cpu_R[arg[0]],
-                        tmp1, tmp2, tmp1, tmp2);
+    tcg_gen_smax_i32(tmp1, tmp1, cpu_R[arg[1]]);
+    tcg_gen_smin_i32(cpu_R[arg[0]], tmp1, tmp2);
    tcg_temp_free(tmp1);
    tcg_temp_free(tmp2);
@@ -XXX,XX +XXX,XX @@ static void translate_memw(DisasContext *dc, const uint32_t arg[],
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
}

-static void translate_minmax(DisasContext *dc, const uint32_t arg[],
-                             const uint32_t par[])
+static void translate_smin(DisasContext *dc, const uint32_t arg[],
+                           const uint32_t par[])
{
    if (gen_window_check3(dc, arg[0], arg[1], arg[2])) {
-        tcg_gen_movcond_i32(par[0], cpu_R[arg[0]],
-                            cpu_R[arg[1]], cpu_R[arg[2]],
-                            cpu_R[arg[1]], cpu_R[arg[2]]);
+        tcg_gen_smin_i32(cpu_R[arg[0]], cpu_R[arg[1]], cpu_R[arg[2]]);
+    }
+}
+
+static void translate_umin(DisasContext *dc, const uint32_t arg[],
+                           const uint32_t par[])
+{
+    if (gen_window_check3(dc, arg[0], arg[1], arg[2])) {
+        tcg_gen_umin_i32(cpu_R[arg[0]], cpu_R[arg[1]], cpu_R[arg[2]]);
+    }
+}
+
+static void translate_smax(DisasContext *dc, const uint32_t arg[],
+                           const uint32_t par[])
+{
+    if (gen_window_check3(dc, arg[0], arg[1], arg[2])) {
+        tcg_gen_smax_i32(cpu_R[arg[0]], cpu_R[arg[1]], cpu_R[arg[2]]);
+    }
+}
+
+static void translate_umax(DisasContext *dc, const uint32_t arg[],
+                           const uint32_t par[])
+{
+    if (gen_window_check3(dc, arg[0], arg[1], arg[2])) {
+        tcg_gen_umax_i32(cpu_R[arg[0]], cpu_R[arg[1]], cpu_R[arg[2]]);
    }
}

@@ -XXX,XX +XXX,XX @@ static const XtensaOpcodeOps core_ops[] = {
    .par = (const uint32_t[]){TCG_COND_NE},
}, {
    .name = "max",
-    .translate = translate_minmax,
-    .par = (const uint32_t[]){TCG_COND_GE},
+    .translate = translate_smax,
}, {
    .name = "maxu",
-    .translate = translate_minmax,
-    .par = (const uint32_t[]){TCG_COND_GEU},
+    .translate = translate_umax,
}, {
    .name = "memw",
    .translate = translate_memw,
}, {
    .name = "min",
-    .translate = translate_minmax,
-    .par = (const uint32_t[]){TCG_COND_LT},
+    .translate = translate_smin,
}, {
    .name = "minu",
-    .translate = translate_minmax,
-    .par = (const uint32_t[]){TCG_COND_LTU},
+    .translate = translate_umin,
}, {
    .name = "mov",
    .translate = translate_mov,
--
2.17.0

Implement the MVE saturating shift-right-and-narrow insns
VQSHRN, VQSHRUN, VQRSHRN and VQRSHRUN.

do_srshr() is borrowed from sve_helper.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-13-peter.maydell@linaro.org
---
 target/arm/helper-mve.h | 30 +++++++++++
 target/arm/mve.decode | 28 ++++++++++
 target/arm/mve_helper.c | 104 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 12 +++++
 4 files changed, 174 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
+
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
+
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
+
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
+
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static inline uint64_t do_urshr(uint64_t x, unsigned sh)
    }
}

+static inline int64_t do_srshr(int64_t x, unsigned sh)
+{
+    if (likely(sh < 64)) {
+        return (x >> sh) + ((x >> (sh - 1)) & 1);
+    } else {
+        /* Rounding the sign bit always produces 0. */
+        return 0;
+    }
+}
+
DO_VSHRN_ALL(vshrn, DO_SHR)
DO_VSHRN_ALL(vrshrn, do_urshr)
+
+static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
+                                 bool *satp)
+{
+    if (val > max) {
+        *satp = true;
+        return max;
+    } else if (val < min) {
+        *satp = true;
+        return min;
+    } else {
+        return val;
+    }
+}
+
+/* Saturating narrowing right shifts */
+#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+                                void *vm, uint32_t shift) \
+    { \
+        LTYPE *m = vm; \
+        TYPE *d = vd; \
+        uint16_t mask = mve_element_mask(env); \
+        bool qc = false; \
+        unsigned le; \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            bool sat = false; \
+            TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
+            mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
+            qc |= sat && (mask & 1 << (TOP * ESIZE)); \
+        } \
+        if (qc) { \
+            env->vfp.qc[0] = qc; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)
+
+#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)
+
+#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)
+
+#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \
+    DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
+    DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
+
+#define DO_SHRN_SB(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
+#define DO_SHRN_UB(N, M, SATP) \
+    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
+#define DO_SHRUN_B(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)
+
+#define DO_SHRN_SH(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
+#define DO_SHRN_UH(N, M, SATP) \
+    do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
+#define DO_SHRUN_H(N, M, SATP) \
+    do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)
+
+#define DO_RSHRN_SB(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
+#define DO_RSHRN_UB(N, M, SATP) \
+    do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
+#define DO_RSHRUN_B(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)
+
+#define DO_RSHRN_SH(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
+#define DO_RSHRN_UH(N, M, SATP) \
+    do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
+#define DO_RSHRUN_H(N, M, SATP) \
+    do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)
+
+DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
+DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
+DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
+DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
+DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
+DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)
+
+DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
+DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
+DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
+DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
+DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
+DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VSHRNB, vshrnb)
DO_2SHIFT_N(VSHRNT, vshrnt)
DO_2SHIFT_N(VRSHRNB, vrshrnb)
DO_2SHIFT_N(VRSHRNT, vrshrnt)
+DO_2SHIFT_N(VQSHRNB_S, vqshrnb_s)
+DO_2SHIFT_N(VQSHRNT_S, vqshrnt_s)
+DO_2SHIFT_N(VQSHRNB_U, vqshrnb_u)
+DO_2SHIFT_N(VQSHRNT_U, vqshrnt_u)
+DO_2SHIFT_N(VQSHRUNB, vqshrunb)
+DO_2SHIFT_N(VQSHRUNT, vqshrunt)
+DO_2SHIFT_N(VQRSHRNB_S, vqrshrnb_s)
+DO_2SHIFT_N(VQRSHRNT_S, vqrshrnt_s)
+DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
+DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
+DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
+DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
--
2.20.1
diff view generated by jsdifflib
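The saturating narrowing shifts above all reduce to the same recipe: shift at full precision, then clamp through do_sat_bhs() and record saturation. A standalone C sketch of one signed-to-signed byte lane (the function name, lack of predication, and test value are ours, purely illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of one VQSHRNB.S16 lane: shift a 16-bit element right,
     * then saturate the result into a signed byte, as DO_SHRN_SB
     * does via do_sat_bhs() in the helpers above. */
    static int8_t qshrn_s16_lane(int16_t m, unsigned shift, bool *sat)
    {
        int64_t v = (int64_t)m >> shift;  /* arithmetic shift, no bits lost */
        if (v > INT8_MAX) {
            *sat = true;                  /* would set QC in the real helper */
            return INT8_MAX;
        }
        if (v < INT8_MIN) {
            *sat = true;
            return INT8_MIN;
        }
        return (int8_t)v;
    }

    int main(void)
    {
        bool sat = false;
        /* 0x4000 >> 3 == 0x0800, too big for int8_t: clamps to 127 */
        printf("%d (sat=%d)\n", qshrn_s16_lane(0x4000, 3, &sat), sat);
        return 0;
    }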
From: Igor Mammedov <imammedo@redhat.com>

load_dtb() depends on arm_load_kernel() to figure out the place
in RAM where it should be loaded, but it's not required for
arm_load_kernel() to work. Sometimes it's necessary for
devices added with -device/device_add to be enumerated in the
DTB as well, which has led to [1] and surrounding commits adding
2 more machine_done notifiers with non-obvious ordering
to make dynamic sysbus device initialization happen in
the right order.

However, instead of moving the whole of arm_load_kernel() into
machine_done, it's sufficient to move only load_dtb() into the
virt_machine_done() notifier and remove the ArmLoadKernelNotifier/
/PlatformBusFDTNotifierParams notifiers, which saves us ~90LOC
and simplifies code flow quite a bit.
Later this would allow us to consolidate DTB generation within one
function for the 'mach-virt' board and make it reentrant so it
could generate an updated DTB in device hotplug scenarios.

While at it, rename load_dtb() to arm_load_dtb() since it's
public now.

Add an additional field skip_dtb_autoload to struct arm_boot_info
to allow manual DTB load later in mach-virt and to avoid touching
all other boards to explicitly call arm_load_dtb().

[1] ac9d32e ("hw/arm/boot: arm_load_kernel implemented as a machine init done notifier")

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Message-id: 1525691524-32265-4-git-send-email-imammedo@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/arm.h        | 45 +++++++++++++++++------
 include/hw/arm/sysbus-fdt.h | 37 ++++---------------
 hw/arm/boot.c               | 72 ++++++++++---------------------------
 hw/arm/sysbus-fdt.c         | 61 +++----------------------------
 hw/arm/virt.c               | 64 ++++++++++++++++-----------------
 5 files changed, 94 insertions(+), 185 deletions(-)

diff --git a/include/hw/arm/arm.h b/include/hw/arm/arm.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/arm.h
+++ b/include/hw/arm/arm.h
@@ -XXX,XX +XXX,XX @@ DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
  */
 void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size);
 
-/*
- * struct used as a parameter of the arm_load_kernel machine init
- * done notifier
- */
-typedef struct {
-    Notifier notifier; /* actual notifier */
-    ARMCPU *cpu; /* handle to the first cpu object */
-} ArmLoadKernelNotifier;
-
 /* arm_boot.c */
 struct arm_boot_info {
     uint64_t ram_size;
@@ -XXX,XX +XXX,XX @@ struct arm_boot_info {
     const char *initrd_filename;
     const char *dtb_filename;
     hwaddr loader_start;
+    hwaddr dtb_start;
+    hwaddr dtb_limit;
+    /* If set to True, arm_load_kernel() will not load DTB.
+     * It allows board to load DTB manually later.
+     * (default: False)
+     */
+    bool skip_dtb_autoload;
     /* multicore boards that use the default secondary core boot functions
      * need to put the address of the secondary boot code, the boot reg,
      * and the GIC address in the next 3 values, respectively. boards that
@@ -XXX,XX +XXX,XX @@ struct arm_boot_info {
      * the user it should implement this hook.
      */
     void (*modify_dtb)(const struct arm_boot_info *info, void *fdt);
-    /* machine init done notifier executing arm_load_dtb */
-    ArmLoadKernelNotifier load_kernel_notifier;
     /* Used internally by arm_boot.c */
     int is_linux;
     hwaddr initrd_start;
@@ -XXX,XX +XXX,XX @@ struct arm_boot_info {
  */
 void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info);
 
+AddressSpace *arm_boot_address_space(ARMCPU *cpu,
+                                     const struct arm_boot_info *info);
+
+/**
+ * arm_load_dtb() - load a device tree binary image into memory
+ * @addr:       the address to load the image at
+ * @binfo:      struct describing the boot environment
+ * @addr_limit: upper limit of the available memory area at @addr
+ * @as:         address space to load image to
+ *
+ * Load a device tree supplied by the machine or by the user with the
+ * '-dtb' command line option, and put it at offset @addr in target
+ * memory.
+ *
+ * If @addr_limit contains a meaningful value (i.e., it is strictly greater
+ * than @addr), the device tree is only loaded if its size does not exceed
+ * the limit.
+ *
+ * Returns: the size of the device tree image on success,
+ *          0 if the image size exceeds the limit,
+ *          -1 on errors.
+ *
+ * Note: Must not be called unless have_dtb(binfo) is true.
+ */
+int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
+                 hwaddr addr_limit, AddressSpace *as);
+
 /* Write a secure board setup routine with a dummy handler for SMCs */
 void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu,
                                             const struct arm_boot_info *info,
diff --git a/include/hw/arm/sysbus-fdt.h b/include/hw/arm/sysbus-fdt.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/sysbus-fdt.h
+++ b/include/hw/arm/sysbus-fdt.h
@@ -XXX,XX +XXX,XX @@
 #ifndef HW_ARM_SYSBUS_FDT_H
 #define HW_ARM_SYSBUS_FDT_H
 
-#include "hw/arm/arm.h"
-#include "qemu-common.h"
-#include "hw/sysbus.h"
-
-/*
- * struct that contains dimensioning parameters of the platform bus
- */
-typedef struct {
-    hwaddr platform_bus_base; /* start address of the bus */
-    hwaddr platform_bus_size; /* size of the bus */
-    int platform_bus_first_irq; /* first hwirq assigned to the bus */
-    int platform_bus_num_irqs; /* number of hwirq assigned to the bus */
-} ARMPlatformBusSystemParams;
-
-/*
- * struct that contains all relevant info to build the fdt nodes of
- * platform bus and attached dynamic sysbus devices
- * in the future might be augmented with additional info
- * such as PHY, CLK handles ...
- */
-typedef struct {
-    const ARMPlatformBusSystemParams *system_params;
-    struct arm_boot_info *binfo;
-    const char *intc; /* parent interrupt controller name */
-} ARMPlatformBusFDTParams;
+#include "exec/hwaddr.h"
 
 /**
- * arm_register_platform_bus_fdt_creator - register a machine init done
- * notifier that creates the device tree nodes of the platform bus and
- * associated dynamic sysbus devices
+ * platform_bus_add_all_fdt_nodes - create all the platform bus nodes
+ *
+ * builds the parent platform bus node and all the nodes of dynamic
+ * sysbus devices attached to it.
  */
-void arm_register_platform_bus_fdt_creator(ARMPlatformBusFDTParams *fdt_params);
-
+void platform_bus_add_all_fdt_nodes(void *fdt, const char *intc, hwaddr addr,
+                                    hwaddr bus_size, int irq_start);
 #endif
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@
 #define ARM64_TEXT_OFFSET_OFFSET    8
 #define ARM64_MAGIC_OFFSET          56
 
-static AddressSpace *arm_boot_address_space(ARMCPU *cpu,
-                                            const struct arm_boot_info *info)
+AddressSpace *arm_boot_address_space(ARMCPU *cpu,
+                                     const struct arm_boot_info *info)
 {
     /* Return the address space to use for bootloader reads and writes.
      * We prefer the secure address space if the CPU has it and we're
@@ -XXX,XX +XXX,XX @@ static void fdt_add_psci_node(void *fdt)
     qemu_fdt_setprop_cell(fdt, "/psci", "migrate", migrate_fn);
 }
 
-/**
- * load_dtb() - load a device tree binary image into memory
- * @addr:       the address to load the image at
- * @binfo:      struct describing the boot environment
- * @addr_limit: upper limit of the available memory area at @addr
- * @as:         address space to load image to
- *
- * Load a device tree supplied by the machine or by the user with the
- * '-dtb' command line option, and put it at offset @addr in target
- * memory.
- *
- * If @addr_limit contains a meaningful value (i.e., it is strictly greater
- * than @addr), the device tree is only loaded if its size does not exceed
- * the limit.
- *
- * Returns: the size of the device tree image on success,
- *          0 if the image size exceeds the limit,
- *          -1 on errors.
- *
- * Note: Must not be called unless have_dtb(binfo) is true.
- */
-static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
-                    hwaddr addr_limit, AddressSpace *as)
+int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
+                 hwaddr addr_limit, AddressSpace *as)
 {
     void *fdt = NULL;
     int size, rc;
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
     return size;
 }
 
-static void arm_load_kernel_notify(Notifier *notifier, void *data)
+void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
 {
     CPUState *cs;
     int kernel_size;
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
     int elf_machine;
     hwaddr entry;
     static const ARMInsnFixup *primary_loader;
-    ArmLoadKernelNotifier *n = DO_UPCAST(ArmLoadKernelNotifier,
-                                         notifier, notifier);
-    ARMCPU *cpu = n->cpu;
-    struct arm_boot_info *info =
-        container_of(n, struct arm_boot_info, load_kernel_notifier);
     AddressSpace *as = arm_boot_address_space(cpu, info);
 
     /* The board code is not supposed to set secure_board_setup unless
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
     assert(!(info->secure_board_setup && kvm_enabled()));
 
     info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
+    info->dtb_limit = 0;
 
     /* Load the kernel.  */
     if (!info->kernel_filename || info->firmware_loaded) {
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
          * the kernel is supposed to be loaded by the bootloader), copy the
          * DTB to the base of RAM for the bootloader to pick up.
          */
-        if (load_dtb(info->loader_start, info, 0, as) < 0) {
-            exit(1);
-        }
+        info->dtb_start = info->loader_start;
     }
 
     if (info->kernel_filename) {
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
              */
            if (elf_low_addr > info->loader_start
                || elf_high_addr < info->loader_start) {
-                /* Pass elf_low_addr as address limit to load_dtb if it may be
+                /* Set elf_low_addr as address limit for arm_load_dtb if it may be
                  * pointing into RAM, otherwise pass '0' (no limit)
                  */
                 if (elf_low_addr < info->loader_start) {
                     elf_low_addr = 0;
                 }
-                if (load_dtb(info->loader_start, info, elf_low_addr, as) < 0) {
-                    exit(1);
-                }
+                info->dtb_start = info->loader_start;
+                info->dtb_limit = elf_low_addr;
             }
         }
         entry = elf_entry;
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
          */
         if (have_dtb(info)) {
             hwaddr align;
-            hwaddr dtb_start;
 
             if (elf_machine == EM_AARCH64) {
                 /*
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
             }
 
             /* Place the DTB after the initrd in memory with alignment. */
-            dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size, align);
-            if (load_dtb(dtb_start, info, 0, as) < 0) {
-                exit(1);
-            }
-            fixupcontext[FIXUP_ARGPTR] = dtb_start;
+            info->dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size,
+                                            align);
+            fixupcontext[FIXUP_ARGPTR] = info->dtb_start;
         } else {
             fixupcontext[FIXUP_ARGPTR] = info->loader_start + KERNEL_ARGS_ADDR;
             if (info->ram_size >= (1ULL << 32)) {
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
     for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
         ARM_CPU(cs)->env.boot_info = info;
     }
-}
-
-void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
-{
-    CPUState *cs;
-
-    info->load_kernel_notifier.cpu = cpu;
-    info->load_kernel_notifier.notifier.notify = arm_load_kernel_notify;
-    qemu_add_machine_init_done_notifier(&info->load_kernel_notifier.notifier);
 
     /* CPU objects (unlike devices) are not automatically reset on system
      * reset, so we must always register a handler to do so. If we're
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
     for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
         qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
     }
+
+    if (!info->skip_dtb_autoload && have_dtb(info)) {
+        if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as) < 0) {
+            exit(1);
+        }
+    }
 }
 
 static const TypeInfo arm_linux_boot_if_info = {
diff --git a/hw/arm/sysbus-fdt.c b/hw/arm/sysbus-fdt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/sysbus-fdt.c
+++ b/hw/arm/sysbus-fdt.c
@@ -XXX,XX +XXX,XX @@ typedef struct PlatformBusFDTData {
     PlatformBusDevice *pbus;
 } PlatformBusFDTData;
 
-/*
- * struct used when calling the machine init done notifier
- * that constructs the fdt nodes of platform bus devices
- */
-typedef struct PlatformBusFDTNotifierParams {
-    Notifier notifier;
-    ARMPlatformBusFDTParams *fdt_params;
-} PlatformBusFDTNotifierParams;
-
 /* struct that associates a device type name and a node creation function */
 typedef struct NodeCreationPair {
     const char *typename;
@@ -XXX,XX +XXX,XX @@ static void add_fdt_node(SysBusDevice *sbdev, void *opaque)
     exit(1);
 }
 
-/**
- * add_all_platform_bus_fdt_nodes - create all the platform bus nodes
- *
- * builds the parent platform bus node and all the nodes of dynamic
- * sysbus devices attached to it.
- */
-static void add_all_platform_bus_fdt_nodes(ARMPlatformBusFDTParams *fdt_params)
+void platform_bus_add_all_fdt_nodes(void *fdt, const char *intc, hwaddr addr,
+                                    hwaddr bus_size, int irq_start)
 {
     const char platcomp[] = "qemu,platform\0simple-bus";
     PlatformBusDevice *pbus;
     DeviceState *dev;
     gchar *node;
-    uint64_t addr, size;
-    int irq_start, dtb_size;
-    struct arm_boot_info *info = fdt_params->binfo;
-    const ARMPlatformBusSystemParams *params = fdt_params->system_params;
-    const char *intc = fdt_params->intc;
-    void *fdt = info->get_dtb(info, &dtb_size);
-
-    /*
-     * If the user provided a dtb, we assume the dynamic sysbus nodes
-     * already are integrated there. This corresponds to a use case where
-     * the dynamic sysbus nodes are complex and their generation is not yet
-     * supported. In that case the user can take charge of the guest dt
-     * while qemu takes charge of the qom stuff.
-     */
-    if (info->dtb_filename) {
-        return;
-    }
 
     assert(fdt);
 
-    node = g_strdup_printf("/platform@%"PRIx64, params->platform_bus_base);
-    addr = params->platform_bus_base;
-    size = params->platform_bus_size;
-    irq_start = params->platform_bus_first_irq;
+    node = g_strdup_printf("/platform@%"PRIx64, addr);
 
     /* Create a /platform node that we can put all devices into */
     qemu_fdt_add_subnode(fdt, node);
@@ -XXX,XX +XXX,XX @@ static void add_all_platform_bus_fdt_nodes(ARMPlatformBusFDTParams *fdt_params)
      */
     qemu_fdt_setprop_cells(fdt, node, "#size-cells", 1);
     qemu_fdt_setprop_cells(fdt, node, "#address-cells", 1);
-    qemu_fdt_setprop_cells(fdt, node, "ranges", 0, addr >> 32, addr, size);
+    qemu_fdt_setprop_cells(fdt, node, "ranges", 0, addr >> 32, addr, bus_size);
 
     qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", intc);
 
@@ -XXX,XX +XXX,XX @@ static void add_all_platform_bus_fdt_nodes(ARMPlatformBusFDTParams *fdt_params)
 
     g_free(node);
 }
-
-static void platform_bus_fdt_notify(Notifier *notifier, void *data)
-{
-    PlatformBusFDTNotifierParams *p = DO_UPCAST(PlatformBusFDTNotifierParams,
-                                                notifier, notifier);
-
-    add_all_platform_bus_fdt_nodes(p->fdt_params);
-    g_free(p->fdt_params);
-    g_free(p);
-}
-
-void arm_register_platform_bus_fdt_creator(ARMPlatformBusFDTParams *fdt_params)
-{
-    PlatformBusFDTNotifierParams *p = g_new(PlatformBusFDTNotifierParams, 1);
-
-    p->fdt_params = fdt_params;
-    p->notifier.notify = platform_bus_fdt_notify;
-    qemu_add_machine_init_done_notifier(&p->notifier);
-}
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -XXX,XX +XXX,XX @@
 
 #define PLATFORM_BUS_NUM_IRQS 64
 
-static ARMPlatformBusSystemParams platform_bus_params;
-
 /* RAM limit in GB. Since VIRT_MEM starts at the 1GB mark, this means
  * RAM can go up to the 256GB mark, leaving 256GB of the physical
  * address space unallocated and free for future use between 256G and 512G.
@@ -XXX,XX +XXX,XX @@ static void create_platform_bus(VirtMachineState *vms, qemu_irq *pic)
     DeviceState *dev;
     SysBusDevice *s;
     int i;
-    ARMPlatformBusFDTParams *fdt_params = g_new(ARMPlatformBusFDTParams, 1);
     MemoryRegion *sysmem = get_system_memory();
 
-    platform_bus_params.platform_bus_base = vms->memmap[VIRT_PLATFORM_BUS].base;
-    platform_bus_params.platform_bus_size = vms->memmap[VIRT_PLATFORM_BUS].size;
-    platform_bus_params.platform_bus_first_irq = vms->irqmap[VIRT_PLATFORM_BUS];
-    platform_bus_params.platform_bus_num_irqs = PLATFORM_BUS_NUM_IRQS;
-
-    fdt_params->system_params = &platform_bus_params;
-    fdt_params->binfo = &vms->bootinfo;
-    fdt_params->intc = "/intc";
-    /*
-     * register a machine init done notifier that creates the device tree
-     * nodes of the platform bus and its children dynamic sysbus devices
-     */
-    arm_register_platform_bus_fdt_creator(fdt_params);
-
     dev = qdev_create(NULL, TYPE_PLATFORM_BUS_DEVICE);
     dev->id = TYPE_PLATFORM_BUS_DEVICE;
-    qdev_prop_set_uint32(dev, "num_irqs",
-                         platform_bus_params.platform_bus_num_irqs);
-    qdev_prop_set_uint32(dev, "mmio_size",
-                         platform_bus_params.platform_bus_size);
+    qdev_prop_set_uint32(dev, "num_irqs", PLATFORM_BUS_NUM_IRQS);
+    qdev_prop_set_uint32(dev, "mmio_size", vms->memmap[VIRT_PLATFORM_BUS].size);
     qdev_init_nofail(dev);
     vms->platform_bus_dev = dev;
-    s = SYS_BUS_DEVICE(dev);
 
-    for (i = 0; i < platform_bus_params.platform_bus_num_irqs; i++) {
-        int irqn = platform_bus_params.platform_bus_first_irq + i;
+    s = SYS_BUS_DEVICE(dev);
+    for (i = 0; i < PLATFORM_BUS_NUM_IRQS; i++) {
+        int irqn = vms->irqmap[VIRT_PLATFORM_BUS] + i;
         sysbus_connect_irq(s, i, pic[irqn]);
     }
 
     memory_region_add_subregion(sysmem,
-                                platform_bus_params.platform_bus_base,
+                                vms->memmap[VIRT_PLATFORM_BUS].base,
                                 sysbus_mmio_get_region(s, 0));
 }
 
@@ -XXX,XX +XXX,XX @@ void virt_machine_done(Notifier *notifier, void *data)
 {
     VirtMachineState *vms = container_of(notifier, VirtMachineState,
                                          machine_done);
+    ARMCPU *cpu = ARM_CPU(first_cpu);
+    struct arm_boot_info *info = &vms->bootinfo;
+    AddressSpace *as = arm_boot_address_space(cpu, info);
+
+    /*
+     * If the user provided a dtb, we assume the dynamic sysbus nodes
+     * already are integrated there. This corresponds to a use case where
+     * the dynamic sysbus nodes are complex and their generation is not yet
+     * supported. In that case the user can take charge of the guest dt
+     * while qemu takes charge of the qom stuff.
+     */
+    if (info->dtb_filename == NULL) {
+        platform_bus_add_all_fdt_nodes(vms->fdt, "/intc",
+                                       vms->memmap[VIRT_PLATFORM_BUS].base,
+                                       vms->memmap[VIRT_PLATFORM_BUS].size,
+                                       vms->irqmap[VIRT_PLATFORM_BUS]);
+    }
+    if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as) < 0) {
+        exit(1);
+    }
 
     virt_acpi_setup(vms);
     virt_build_smbios(vms);
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
     vms->fw_cfg = create_fw_cfg(vms, &address_space_memory);
     rom_set_fw(vms->fw_cfg);
 
-    vms->machine_done.notify = virt_machine_done;
-    qemu_add_machine_init_done_notifier(&vms->machine_done);
+    create_platform_bus(vms, pic);
 
     vms->bootinfo.ram_size = machine->ram_size;
     vms->bootinfo.kernel_filename = machine->kernel_filename;
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
     vms->bootinfo.board_id = -1;
     vms->bootinfo.loader_start = vms->memmap[VIRT_MEM].base;
     vms->bootinfo.get_dtb = machvirt_dtb;
+    vms->bootinfo.skip_dtb_autoload = true;
     vms->bootinfo.firmware_loaded = firmware_loaded;
     arm_load_kernel(ARM_CPU(first_cpu), &vms->bootinfo);
 
-    /*
-     * arm_load_kernel machine init done notifier registration must
-     * happen before the platform_bus_create call. In this latter,
-     * another notifier is registered which adds platform bus nodes.
-     * Notifiers are executed in registration reverse order.
-     */
-    create_platform_bus(vms, pic);
+    vms->machine_done.notify = virt_machine_done;
+    qemu_add_machine_init_done_notifier(&vms->machine_done);
 }
 
 static bool virt_get_secure(Object *obj, Error **errp)
--
2.17.0

Implement the MVE VSHLC insn, which performs a shift left of the
entire vector with carry in bits provided from a general purpose
register and carry out bits written back to that register.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-14-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  2 ++
 target/arm/mve.decode      |  2 ++
 target/arm/mve_helper.c    | 38 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 30 ++++++++++++++++++++++++++++++
 4 files changed, 72 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
 VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
 VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
 VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
+
+VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
 DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
 DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
 DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
+
+uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
+                           uint32_t shift)
+{
+    uint32_t *d = vd;
+    uint16_t mask = mve_element_mask(env);
+    unsigned e;
+    uint32_t r;
+
+    /*
+     * For each 32-bit element, we shift it left, bringing in the
+     * low 'shift' bits of rdm at the bottom. Bits shifted out at
+     * the top become the new rdm, if the predicate mask permits.
+     * The final rdm value is returned to update the register.
+     * shift == 0 here means "shift by 32 bits".
+     */
+    if (shift == 0) {
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+            r = rdm;
+            if (mask & 1) {
+                rdm = d[H4(e)];
+            }
+            mergemask(&d[H4(e)], r, mask);
+        }
+    } else {
+        uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);
+
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+            r = (d[H4(e)] << shift) | (rdm & shiftmask);
+            if (mask & 1) {
+                rdm = d[H4(e)] >> (32 - shift);
+            }
+            mergemask(&d[H4(e)], r, mask);
+        }
+    }
+    mve_advance_vpt(env);
+    return rdm;
+}
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
 DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
 DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
 DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
+
+static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
+{
+    /*
+     * Whole Vector Left Shift with Carry. The carry is taken
+     * from a general purpose register and written back there.
+     * An imm of 0 means "shift by 32".
+     */
+    TCGv_ptr qd;
+    TCGv_i32 rdm;
+
+    if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
+        return false;
+    }
+    if (a->rdm == 13 || a->rdm == 15) {
+        /* CONSTRAINED UNPREDICTABLE: we UNDEF */
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    qd = mve_qreg_ptr(a->qd);
+    rdm = load_reg(s, a->rdm);
+    gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm));
+    store_reg(s, a->rdm, rdm);
+    tcg_temp_free_ptr(qd);
+    mve_update_eci(s);
+    return true;
+}
--
2.20.1
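A standalone C sketch of the VSHLC data flow in the patch above, with predication left out (the function name and fixed 4-lane layout are ours; the real helper also honours the beat mask and the shift == 0 "shift by 32" case):

    #include <stdint.h>
    #include <stdio.h>

    /* Unpredicated VSHLC model for shift counts 1..31: each 32-bit lane
     * is shifted left, the previous carry fills its low bits, and the
     * bits shifted off its top become the carry for the next lane. */
    static uint32_t vshlc_model(uint32_t q[4], uint32_t rdm, unsigned shift)
    {
        for (unsigned e = 0; e < 4; e++) {
            uint32_t carry_out = q[e] >> (32 - shift);
            q[e] = (q[e] << shift) | (rdm & ((1u << shift) - 1));
            rdm = carry_out;
        }
        return rdm;   /* final carry-out, written back to Rdm */
    }

    int main(void)
    {
        uint32_t q[4] = { 0x80000001, 0, 0, 0 };
        uint32_t rdm = vshlc_model(q, 1, 1);   /* carry-in 1 at the bottom */
        printf("q0=%08x q1=%08x rdm=%u\n", q[0], q[1], rdm);
        return 0;
    }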
Coverity (CID1390573) spots that we forgot to free the
gpioname strings in a loop in the iotkit realize function.
Correct the error.

This isn't a significant leak, because this function
only ever runs once.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Xu <peterx@redhat.com>
Message-id: 20180427110137.19304-1-peter.maydell@linaro.org
---
 hw/arm/iotkit.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/arm/iotkit.c b/hw/arm/iotkit.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/iotkit.c
+++ b/hw/arm/iotkit.c
@@ -XXX,XX +XXX,XX @@ static void iotkit_realize(DeviceState *dev, Error **errp)
                                qdev_get_gpio_in(DEVICE(&s->ppc_irq_orgate), i));
         qdev_connect_gpio_out_named(DEVICE(ppc), "irq", 0,
                                     qdev_get_gpio_in(devs, 0));
+        g_free(gpioname);
     }
 
     iotkit_forward_sec_resp_cfg(s);
--
2.17.0

Implement the MVE VADDLV insn; this is similar to VADDV, except
that it accumulates 32-bit elements into a 64-bit accumulator
stored in a pair of general-purpose registers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-15-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  3 ++
 target/arm/mve.decode      |  6 +++-
 target/arm/mve_helper.c    | 19 ++++++++++++
 target/arm/translate-mve.c | 63 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
 DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
 
+DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64)
+
 DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
 DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
 DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
 VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
 
 # Vector add across vector
-VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
+{
+  VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
+  VADDLV 111 u:1 1110 1 ... 1001 ... 0 1111 00 a:1 0 qm:3 0 \
+         rdahi=%rdahi rdalo=%rdalo
+}
 
 # Predicate operations
 %mask_22_13 22:1 13:3
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VADDV(vaddvub, 1, uint8_t)
 DO_VADDV(vaddvuh, 2, uint16_t)
 DO_VADDV(vaddvuw, 4, uint32_t)
 
+#define DO_VADDLV(OP, TYPE, LTYPE) \
+    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
+                                    uint64_t ra) \
+    { \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        TYPE *m = vm; \
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
+            if (mask & 1) { \
+                ra += (LTYPE)m[H4(e)]; \
+            } \
+        } \
+        mve_advance_vpt(env); \
+        return ra; \
+    } \
+
+DO_VADDLV(vaddlv_s, int32_t, int64_t)
+DO_VADDLV(vaddlv_u, uint32_t, uint64_t)
+
 /* Shifts by immediate */
 #define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
     return true;
 }
 
+static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a)
+{
+    /*
+     * Vector Add Long Across Vector: accumulate the 32-bit
+     * elements of the vector into a 64-bit result stored in
+     * a pair of general-purpose registers.
+     * No need to check Qm's bank: it is only 3 bits in decode.
+     */
+    TCGv_ptr qm;
+    TCGv_i64 rda;
+    TCGv_i32 rdalo, rdahi;
+
+    if (!dc_isar_feature(aa32_mve, s)) {
+        return false;
+    }
+    /*
+     * rdahi == 13 is UNPREDICTABLE; rdahi == 15 is a related
+     * encoding; rdalo always has bit 0 clear so cannot be 13 or 15.
+     */
+    if (a->rdahi == 13 || a->rdahi == 15) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    /*
+     * This insn is subject to beat-wise execution. Partial execution
+     * of an A=0 (no-accumulate) insn which does not execute the first
+     * beat must start with the current value of RdaHi:RdaLo, not zero.
+     */
+    if (a->a || mve_skip_first_beat(s)) {
+        /* Accumulate input from RdaHi:RdaLo */
+        rda = tcg_temp_new_i64();
+        rdalo = load_reg(s, a->rdalo);
+        rdahi = load_reg(s, a->rdahi);
+        tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+        tcg_temp_free_i32(rdalo);
+        tcg_temp_free_i32(rdahi);
+    } else {
+        /* Accumulate starting at zero */
+        rda = tcg_const_i64(0);
+    }
+
+    qm = mve_qreg_ptr(a->qm);
+    if (a->u) {
+        gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda);
+    } else {
+        gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda);
+    }
+    tcg_temp_free_ptr(qm);
+
+    rdalo = tcg_temp_new_i32();
+    rdahi = tcg_temp_new_i32();
+    tcg_gen_extrl_i64_i32(rdalo, rda);
+    tcg_gen_extrh_i64_i32(rdahi, rda);
+    store_reg(s, a->rdalo, rdalo);
+    store_reg(s, a->rdahi, rdahi);
+    tcg_temp_free_i64(rda);
+    mve_update_eci(s);
+    return true;
+}
+
 static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
 {
     TCGv_ptr qd;
--
2.20.1
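The VADDLV arithmetic in the patch above reduces to widening each lane before accumulating into the 64-bit value that lives in RdaHi:RdaLo. A minimal C sketch (unpredicated; the function name is ours):

    #include <stdint.h>
    #include <stdio.h>

    /* Unpredicated model of VADDLV.S32: sum four 32-bit lanes into a
     * 64-bit accumulator, widening each lane before the add. */
    static int64_t vaddlv_s32(const int32_t q[4], int64_t ra)
    {
        for (int e = 0; e < 4; e++) {
            ra += q[e];
        }
        return ra;
    }

    int main(void)
    {
        int32_t q[4] = { INT32_MAX, INT32_MAX, 1, 0 };
        /* The 64-bit accumulator does not wrap where 32-bit VADDV would. */
        printf("%lld\n", (long long)vaddlv_s32(q, 0));
        return 0;
    }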
The MVE extension to v8.1M includes some new shift instructions which
sit entirely within the non-coprocessor part of the encoding space
and which operate only on general-purpose registers. They take up
the space which was previously UNPREDICTABLE MOVS and ORRS encodings
with Rm == 13 or 15.

Implement the long shifts by immediate, which perform shifts on a
pair of general-purpose registers treated as a 64-bit quantity, with
an immediate shift count between 1 and 32.

Awkwardly, because the MOVS and ORRS trans functions do not UNDEF for
the Rm==13,15 case, we need to explicitly emit code to UNDEF for the
cases where v8.1M now requires that. (Trying to change MOVS and ORRS
is too difficult, because the functions that generate the code are
shared between a dozen different kinds of arithmetic or logical
instruction for all A32, T16 and T32 encodings, and for some insns
and some encodings Rm==13,15 are valid.)

We make the helper functions we need for UQSHLL and SQSHLL take
a 32-bit value which the helper casts to int8_t because we'll need
these helpers also for the shift-by-register insns, where the shift
count might be < 0 or > 32.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-16-peter.maydell@linaro.org
---
 target/arm/helper-mve.h |  3 ++
 target/arm/translate.h  |  1 +
 target/arm/t32.decode   | 28 +++++++++++++
 target/arm/mve_helper.c | 10 +++++
 target/arm/translate.c  | 90 +++++++++++++++++++++++++++++++++++++++++
 5 files changed, 132 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
 typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
+typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
 
 /**
  * arm_tbflags_from_tb:
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -XXX,XX +XXX,XX @@
 &mcr !extern cp opc1 crn crm opc2 rt
 &mcrr !extern cp opc1 crm rt rt2
 
+&mve_shl_ri rdalo rdahi shim
+
+# rdahi: bits [3:1] from insn, bit 0 is 1
+# rdalo: bits [3:1] from insn, bit 0 is 0
+%rdahi_9 9:3 !function=times_2_plus_1
+%rdalo_17 17:3 !function=times_2
+
 # Data-processing (register)
 
 %imm5_12_6 12:3 6:2
@@ -XXX,XX +XXX,XX @@
 @S_xrr_shi ....... .... . rn:4 .... .... .. shty:2 rm:4 \
            &s_rrr_shi shim=%imm5_12_6 s=1 rd=0
 
+@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
+            &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
+
 {
   TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
   AND_rrri 1110101 0000 . .... 0 ... .... .... .... @s_rrr_shi
 }
 BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
 {
+  # The v8.1M MVE shift insns overlap in encoding with MOVS/ORRS
+  # and are distinguished by having Rm==13 or 15. Those are UNPREDICTABLE
+  # cases for MOVS/ORRS. We decode the MVE cases first, ensuring that
+  # they explicitly call unallocated_encoding() for cases that must UNDEF
+  # (eg "using a new shift insn on a v8.1M CPU without MVE"), and letting
+  # the rest fall through (where ORR_rrri and MOV_rxri will end up
+  # handling them as r13 and r15 accesses with the same semantics as A32).
+  [
+    LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
+    LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
+    ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
+
+    UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
+    URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
+    SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
+    SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+  ]
+
   MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
   ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
 }
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
     mve_advance_vpt(env);
     return rdm;
 }
+
+uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
+}
+
+uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
     return true;
 }
 
+/*
+ * v8.1M MVE wide-shifts
+ */
+static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
+                          WideShiftImmFn *fn)
+{
+    TCGv_i64 rda;
+    TCGv_i32 rdalo, rdahi;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (a->rdahi == 15) {
+        /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rdahi == 13) {
+        /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    if (a->shim == 0) {
+        a->shim = 32;
+    }
+
+    rda = tcg_temp_new_i64();
+    rdalo = load_reg(s, a->rdalo);
+    rdahi = load_reg(s, a->rdahi);
+    tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+
+    fn(rda, rda, a->shim);
+
+    tcg_gen_extrl_i64_i32(rdalo, rda);
+    tcg_gen_extrh_i64_i32(rdahi, rda);
+    store_reg(s, a->rdalo, rdalo);
+    store_reg(s, a->rdahi, rdahi);
+    tcg_temp_free_i64(rda);
+
+    return true;
+}
+
+static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
+}
+
+static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
+}
+
+static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
+}
+
+static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
+{
+    gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_mve_sqshll);
+}
+
+static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
+{
+    gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_mve_uqshll);
+}
+
+static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_srshr64_i64);
+}
+
+static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+    return do_mve_shl_ri(s, a, gen_urshr64_i64);
+}
+
 /*
  * Multiply and multiply accumulate
  */
--
2.20.1
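A sketch of what UQSHLL computes on the RdaHi:RdaLo pair (the overflow test and function name are ours; the real code funnels through do_uqrshl_d() and env->QF):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of UQSHLL: treat RdaHi:RdaLo as one uint64_t, shift left
     * by 1..32, saturating to UINT64_MAX and setting Q on overflow. */
    static uint64_t uqshll(uint32_t *rdahi, uint32_t *rdalo,
                           unsigned shift, bool *q)
    {
        uint64_t rda = ((uint64_t)*rdahi << 32) | *rdalo;
        if (shift && (rda >> (64 - shift))) {  /* bits would be lost */
            *q = true;
            rda = UINT64_MAX;
        } else {
            rda <<= shift;
        }
        *rdahi = rda >> 32;
        *rdalo = (uint32_t)rda;
        return rda;
    }

    int main(void)
    {
        uint32_t hi = 0x1, lo = 0x80000000;
        bool q = false;
        uqshll(&hi, &lo, 32, &q);   /* shifting by 32 loses the top bit */
        printf("hi=%08x lo=%08x Q=%d\n", hi, lo, q);
        return 0;
    }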
Implement the MVE long shifts by register, which perform shifts on a
pair of general-purpose registers treated as a 64-bit quantity, with
the shift count in another general-purpose register, which might be
either positive or negative.

Like the long-shifts-by-immediate, these encodings sit in the space
that was previously the UNPREDICTABLE MOVS/ORRS with Rm==13,15.
Because LSLL_rr and ASRL_rr overlap with both MOV_rxri/ORR_rrri and
also with CSEL (as one of the previously-UNPREDICTABLE Rm==13 cases),
we have to move the CSEL pattern into the same decodetree group.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-17-peter.maydell@linaro.org
---
 target/arm/helper-mve.h |  6 +++
 target/arm/translate.h  |  1 +
 target/arm/t32.decode   | 16 +++++--
 target/arm/mve_helper.c | 93 +++++++++++++++++++++++++++++++++++++++++
 target/arm/translate.c  | 69 ++++++++++++++++++++++++++++++
 5 files changed, 182 insertions(+), 3 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 
 DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
 
+DEF_HELPER_FLAGS_3(mve_sshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_ushll, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
 typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
+typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
 
 /**
  * arm_tbflags_from_tb:
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -XXX,XX +XXX,XX @@
 &mcrr !extern cp opc1 crm rt rt2
 
 &mve_shl_ri rdalo rdahi shim
+&mve_shl_rr rdalo rdahi rm
 
 # rdahi: bits [3:1] from insn, bit 0 is 1
 # rdalo: bits [3:1] from insn, bit 0 is 0
@@ -XXX,XX +XXX,XX @@
 
 @mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
             &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
+@mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
+            &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
 
 {
   TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
     URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
     SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
     SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+
+    LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
+    ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
+    UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
+    SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
+    UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
+    SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
   ]
 
   MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
   ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
+
+  # v8.1M CSEL and friends
+  CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
 }
 {
   MVN_rxri 1110101 0011 . 1111 0 ... .... .... .... @s_rxr_shi
@@ -XXX,XX +XXX,XX @@ SBC_rrri 1110101 1011 . .... 0 ... .... .... .... @s_rrr_shi
 }
 RSB_rrri 1110101 1110 . .... 0 ... .... .... .... @s_rrr_shi
 
-# v8.1M CSEL and friends
-CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
-
 # Data-processing (register-shifted register)
 
 MOV_rxrr 1111 1010 0 shty:2 s:1 rm:4 1111 rd:4 0000 rs:4 \
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
     return rdm;
 }
 
+uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
+}
+
+uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_uqrshl_d(n, (int8_t)shift, false, NULL);
+}
+
 uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
 {
     return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
 {
     return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
 }
+
+uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
+}
+
+uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
+}
+
+/* Operate on 64-bit values, but saturate at 48 bits */
+static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
+                                    bool round, uint32_t *sat)
+{
+    if (shift <= -48) {
+        /* Rounding the sign bit always produces 0. */
+        if (round) {
+            return 0;
+        }
+        return src >> 63;
+    } else if (shift < 0) {
+        if (round) {
+            src >>= -shift - 1;
+            return (src >> 1) + (src & 1);
+        }
+        return src >> -shift;
+    } else if (shift < 48) {
+        int64_t val = src << shift;
+        int64_t extval = sextract64(val, 0, 48);
+        if (!sat || val == extval) {
+            return extval;
+        }
+    } else if (!sat || src == 0) {
+        return 0;
+    }
+
+    *sat = 1;
+    return (1ULL << 47) - (src >= 0);
+}
+
+/* Operate on 64-bit values, but saturate at 48 bits */
+static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
+                                     bool round, uint32_t *sat)
+{
+    uint64_t val, extval;
+
+    if (shift <= -(48 + round)) {
+        return 0;
+    } else if (shift < 0) {
+        if (round) {
+            val = src >> (-shift - 1);
+            val = (val >> 1) + (val & 1);
+        } else {
+            val = src >> -shift;
+        }
+        extval = extract64(val, 0, 48);
+        if (!sat || val == extval) {
+            return extval;
+        }
+    } else if (shift < 48) {
+        uint64_t val = src << shift;
+        uint64_t extval = extract64(val, 0, 48);
+        if (!sat || val == extval) {
+            return extval;
+        }
+    } else if (!sat || src == 0) {
+        return 0;
+    }
+
+    *sat = 1;
+    return MAKE_64BIT_MASK(0, 48);
+}
+
+uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
+}
+
+uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+    return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
     return do_mve_shl_ri(s, a, gen_urshr64_i64);
 }
 
+static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
+{
+    TCGv_i64 rda;
+    TCGv_i32 rdalo, rdahi;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (a->rdahi == 15) {
+        /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
+        a->rm == a->rdahi || a->rm == a->rdalo) {
+        /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    rda = tcg_temp_new_i64();
+    rdalo = load_reg(s, a->rdalo);
+    rdahi = load_reg(s, a->rdahi);
+    tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+
+    /* The helper takes care of the sign-extension of the low 8 bits of Rm */
+    fn(rda, cpu_env, rda, cpu_R[a->rm]);
+
+    tcg_gen_extrl_i64_i32(rdalo, rda);
+    tcg_gen_extrh_i64_i32(rdahi, rda);
+    store_reg(s, a->rdalo, rdalo);
+    store_reg(s, a->rdahi, rdahi);
+    tcg_temp_free_i64(rda);
+
+    return true;
+}
+
+static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
+}
+
+static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
+}
+
+static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
+}
+
+static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
+}
+
+static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
+}
+
+static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+    return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
+}
+
 /*
  * Multiply and multiply accumulate
  */
--
2.20.1
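The register-count variants above reuse the left-shift primitives with a negated count, which is why the helpers cast Rm to int8_t. A C sketch of plain ASRL by register (no rounding or saturating variants; the function name is ours):

    #include <stdint.h>
    #include <stdio.h>

    /* ASRL by register: only Rm[7:0] participate, as a signed count.
     * A positive count shifts the 64-bit Rda pair right; a negative
     * one shifts it left (here without the insn's saturation). */
    static int64_t asrl_rr_model(int64_t rda, uint32_t rm)
    {
        int sh = (int8_t)rm;
        if (sh >= 0) {
            return sh > 63 ? rda >> 63 : rda >> sh;
        }
        sh = -sh;
        return sh > 63 ? 0 : (int64_t)((uint64_t)rda << sh);
    }

    int main(void)
    {
        printf("%lld\n", (long long)asrl_rr_model(-8, 2));            /* -2  */
        printf("%lld\n", (long long)asrl_rr_model(-8, (uint32_t)-2)); /* -32 */
        return 0;
    }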
New patch
1
1
Implement the MVE shifts by immediate, which perform shifts
2
on a single general-purpose register.
3
4
These patterns overlap with the long-shift-by-immediates,
5
so we have to rearrange the grouping a little here.
6
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210628135835.6690-18-peter.maydell@linaro.org
10
---
11
target/arm/helper-mve.h | 3 ++
12
target/arm/translate.h | 1 +
13
target/arm/t32.decode | 31 ++++++++++++++-----
14
target/arm/mve_helper.c | 10 ++++++
15
target/arm/translate.c | 68 +++++++++++++++++++++++++++++++++++++++--
16
5 files changed, 104 insertions(+), 9 deletions(-)
17
18
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper-mve.h
21
+++ b/target/arm/helper-mve.h
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
23
DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
24
DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
25
DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
26
+
27
+DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
28
+DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
29
diff --git a/target/arm/translate.h b/target/arm/translate.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/translate.h
32
+++ b/target/arm/translate.h
33
@@ -XXX,XX +XXX,XX @@ typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
34
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
35
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
36
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
37
+typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
38
39
/**
40
* arm_tbflags_from_tb:
41
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -XXX,XX +XXX,XX @@

 &mve_shl_ri      rdalo rdahi shim
 &mve_shl_rr      rdalo rdahi rm
+&mve_sh_ri       rda shim

 # rdahi: bits [3:1] from insn, bit 0 is 1
 # rdalo: bits [3:1] from insn, bit 0 is 0
@@ -XXX,XX +XXX,XX @@
                  &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
 @mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
                  &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
+@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
+                 &mve_sh_ri shim=%imm5_12_6

 {
   TST_xrri       1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
@@ -XXX,XX +XXX,XX @@ BIC_rrri         1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
 # the rest fall through (where ORR_rrri and MOV_rxri will end up
 # handling them as r13 and r15 accesses with the same semantics as A32).
 [
-  LSLL_ri        1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
-  LSRL_ri        1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
-  ASRL_ri        1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
+  {
+    UQSHL_ri     1110101 0010 1 .... 0 ... 1111 .. 00 1111 @mve_sh_ri
+    LSLL_ri      1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
+    UQSHLL_ri    1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
+  }

-  UQSHLL_ri      1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
-  URSHRL_ri      1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
-  SRSHRL_ri      1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
-  SQSHLL_ri      1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+  {
+    URSHR_ri     1110101 0010 1 .... 0 ... 1111 .. 01 1111 @mve_sh_ri
+    LSRL_ri      1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
+    URSHRL_ri    1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
+  }
+
+  {
+    SRSHR_ri     1110101 0010 1 .... 0 ... 1111 .. 10 1111 @mve_sh_ri
+    ASRL_ri      1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
+    SRSHRL_ri    1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
+  }
+
+  {
+    SQSHL_ri     1110101 0010 1 .... 0 ... 1111 .. 11 1111 @mve_sh_ri
+    SQSHLL_ri    1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+  }

   LSLL_rr        1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
   ASRL_rr        1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
 {
     return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
 }
+
+uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
+
+uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
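
Note: a rough standalone model of the UQSHL helper added above
(do_uqrshl_bhs() is the real implementation; the function name and the
bare Q-flag pointer below are illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t model_uqshl(uint32_t n, int shift, bool *qf)
    {
        /* The immediate encoding gives a shift of 1..32. */
        if (shift == 32 ? n != 0 : (n >> (32 - shift)) != 0) {
            *qf = true;            /* saturate, setting FPSCR.QC */
            return UINT32_MAX;
        }
        return shift == 32 ? 0 : n << shift;
    }

SQSHL is analogous but saturates to INT32_MIN/INT32_MAX.
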
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)

 static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t;

+    /* Handle shift by the input size for the benefit of trans_SRSHR_ri */
+    if (sh == 32) {
+        tcg_gen_movi_i32(d, 0);
+        return;
+    }
+    t = tcg_temp_new_i32();
     tcg_gen_extract_i32(t, a, sh - 1, 1);
     tcg_gen_sari_i32(d, a, sh);
     tcg_gen_add_i32(d, d, t);
@@ -XXX,XX +XXX,XX @@ static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)

 static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t;

+    /* Handle shift by the input size for the benefit of trans_URSHR_ri */
+    if (sh == 32) {
+        tcg_gen_extract_i32(d, a, sh - 1, 1);
+        return;
+    }
+    t = tcg_temp_new_i32();
     tcg_gen_extract_i32(t, a, sh - 1, 1);
     tcg_gen_shri_i32(d, a, sh);
     tcg_gen_add_i32(d, d, t);
@@ -XXX,XX +XXX,XX @@ static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
     return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
 }

+static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
+{
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rda == 13 || a->rda == 15) {
+        /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    if (a->shim == 0) {
+        a->shim = 32;
+    }
+    fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
+
+    return true;
+}
+
+static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_urshr32_i32);
+}
+
+static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_srshr32_i32);
+}
+
+static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+    gen_helper_mve_sqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_mve_sqshl);
+}
+
+static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+    gen_helper_mve_uqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_mve_uqshl);
+}
+
 /*
  * Multiply and multiply accumulate
  */
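
Note on the sh == 32 special cases above: a rounding right shift by the
full register width reduces to a constant or to a single bit, which is
why trans_URSHR_ri and trans_SRSHR_ri can reuse these generators. A
plain C sanity check (not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* URSHR #32: (a >> 32) is 0, plus the rounding bit (bit 31) */
        uint32_t a = 0x80000000u;
        assert(((uint64_t)a + (1ULL << 31)) >> 32 == 1);
        a = 0x7fffffffu;
        assert(((uint64_t)a + (1ULL << 31)) >> 32 == 0);
        /* SRSHR #32: the arithmetic shift gives 0 or -1, and the
         * rounding bit is 1 exactly when it gives -1, so the sum is
         * always 0 - hence the tcg_gen_movi_i32(d, 0) above. */
        int32_t s = -1;
        assert((((int64_t)s + (1LL << 31)) >> 32) == 0);
        return 0;
    }
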
--
2.20.1

Implement the MVE shifts by register, which perform
shifts on a single general-purpose register.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-19-peter.maydell@linaro.org
---
 target/arm/helper-mve.h |  2 ++
 target/arm/translate.h  |  1 +
 target/arm/t32.decode   | 18 ++++++++++++++----
 target/arm/mve_helper.c | 10 ++++++++++
 target/arm/translate.c  | 30 ++++++++++++++++++++++++++++++
 5 files changed, 57 insertions(+), 4 deletions(-)
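
Note: for these by-register forms only the low byte of Rm supplies the
shift count, interpreted as a signed quantity (a sketch; the helpers in
the diffs below do this via their (int8_t) casts):

    #include <stdint.h>

    static int effective_shift(uint32_t rm)
    {
        return (int8_t)rm;  /* sign-extend bits [7:0], ignore the rest */
    }

So an Rm value of 0xffffffff behaves as a shift of -1, and 0x120 as a
shift of 0x20.
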
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)

 DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
 DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_uqrshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_sqrshr, TCG_CALL_NO_RWG, i32, env, i32, i32)
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/translate.h
28
+++ b/target/arm/translate.h
29
@@ -XXX,XX +XXX,XX @@ typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
30
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
31
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
32
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
33
+typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
34
35
/**
36
* arm_tbflags_from_tb:
37
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/t32.decode
40
+++ b/target/arm/t32.decode
45
@@ -XXX,XX +XXX,XX @@
41
@@ -XXX,XX +XXX,XX @@
46
#define PPCE500_H
42
&mve_shl_ri rdalo rdahi shim
47
43
&mve_shl_rr rdalo rdahi rm
48
#include "hw/boards.h"
44
&mve_sh_ri rda shim
49
+#include "hw/platform-bus.h"
45
+&mve_sh_rr rda rm
50
46
51
typedef struct PPCE500MachineState {
47
# rdahi: bits [3:1] from insn, bit 0 is 1
52
/*< private >*/
48
# rdalo: bits [3:1] from insn, bit 0 is 0
53
MachineState parent_obj;
49
@@ -XXX,XX +XXX,XX @@
54
50
&mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
55
+ /* points to instance of TYPE_PLATFORM_BUS_DEVICE if
51
@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
56
+ * board supports dynamic sysbus devices
52
&mve_sh_ri shim=%imm5_12_6
57
+ */
53
+@mve_sh_rr ....... .... . rda:4 rm:4 .... .... .... &mve_sh_rr
58
+ PlatformBusDevice *pbus_dev;
54
59
} PPCE500MachineState;
55
{
60
56
TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
61
typedef struct PPCE500MachineClass {
57
@@ -XXX,XX +XXX,XX @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
62
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
58
SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
59
}
60
61
- LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
62
- ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
63
- UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
64
- SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
65
+ {
66
+ UQRSHL_rr 1110101 0010 1 .... .... 1111 0000 1101 @mve_sh_rr
67
+ LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
68
+ UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
69
+ }
70
+
71
+ {
72
+ SQRSHR_rr 1110101 0010 1 .... .... 1111 0010 1101 @mve_sh_rr
73
+ ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
74
+ SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
75
+ }
76
+
77
UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
78
SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
79
]
80
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
 {
     return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
 }
+
+uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
+}
+
+uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
+}
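
Note the trick in mve_sqrshr above: a rounding, saturating right shift
by C is the same operation as the corresponding left shift by -C, so
the existing do_sqrshl_bhs() is reused with a negated count rather than
adding a separate right-shift helper. A minimal sketch of the idea
(hypothetical names, not QEMU API):

    #include <stdint.h>

    static int32_t model_sqrshr_via_shl(int32_t n, uint32_t rm,
                                        int32_t (*sqrshl)(int32_t n,
                                                          int shift))
    {
        /* negate the sign-extended low byte, exactly as the helper does */
        return sqrshl(n, -(int8_t)rm);
    }
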
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
     return do_mve_sh_ri(s, a, gen_mve_uqshl);
 }

+static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
+{
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
+        a->rm == a->rda) {
+        /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    /* The helper takes care of the sign-extension of the low 8 bits of Rm */
+    fn(cpu_R[a->rda], cpu_env, cpu_R[a->rda], cpu_R[a->rm]);
+    return true;
+}
+
+static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
+{
+    return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
+}
+
+static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
+{
+    return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
+}
+
 /*
  * Multiply and multiply accumulate
  */
--
2.20.1
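
Note: as a closing worked example, the arithmetic SQRSHR performs for a
positive shift count, modelled in plain C (saturation, which only comes
into play for negative counts that turn the operation into a left
shift, is omitted; the model assumes arithmetic >> on signed values, as
on all mainstream compilers):

    #include <assert.h>
    #include <stdint.h>

    static int32_t model_sqrshr(int32_t n, int sh)
    {
        /* add half of the last bit shifted out, then shift */
        assert(sh >= 1 && sh <= 31);
        return (int32_t)(((int64_t)n + (1LL << (sh - 1))) >> sh);
    }

    int main(void)
    {
        assert(model_sqrshr(5, 1) == 3);    /* 2.5 rounds up to 3 */
        assert(model_sqrshr(-5, 1) == -2);  /* -2.5 rounds up to -2 */
        return 0;
    }
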