The following changes since commit 53f306f316549d20c76886903181413d20842423:

  Merge remote-tracking branch 'remotes/ehabkost-gl/tags/x86-next-pull-request' into staging (2021-06-21 11:26:04 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210621

for you to fetch changes up to a83f1d9263d281f938a3984cda7104d55affd43a:

  docs/system: arm: Add nRF boards description (2021-06-21 17:24:33 +0100)

----------------------------------------------------------------
target-arm queue:
 * Don't require 'virt' board to be compiled in for ACPI GHES code
 * docs: Document which architecture extensions we emulate
 * Fix bugs in M-profile FPCXT_NS accesses
 * First slice of MVE patches
 * Implement MTE3
 * docs/system: arm: Add nRF boards description

----------------------------------------------------------------
Alexandre Iooss (1):
      docs/system: arm: Add nRF boards description

Peter Collingbourne (1):
      target/arm: Implement MTE3

Peter Maydell (55):
      hw/acpi: Provide stub version of acpi_ghes_record_errors()
      hw/acpi: Provide function acpi_ghes_present()
      target/arm: Use acpi_ghes_present() to see if we report ACPI memory errors
      docs/system/arm: Document which architecture extensions we emulate
      target/arm/translate-vfp.c: Whitespace fixes
      target/arm: Handle FPU being disabled in FPCXT_NS accesses
      target/arm: Don't NOCP fault for FPCXT_NS accesses
      target/arm: Handle writeback in VLDR/VSTR sysreg with no memory access
      target/arm: Factor FP context update code out into helper function
      target/arm: Split vfp_access_check() into A and M versions
      target/arm: Handle FPU check for FPCXT_NS insns via vfp_access_check_m()
      target/arm: Implement MVE VLDR/VSTR (non-widening forms)
      target/arm: Implement widening/narrowing MVE VLDR/VSTR insns
      target/arm: Implement MVE VCLZ
      target/arm: Implement MVE VCLS
      target/arm: Implement MVE VREV16, VREV32, VREV64
      target/arm: Implement MVE VMVN (register)
      target/arm: Implement MVE VABS
      target/arm: Implement MVE VNEG
      tcg: Make gen_dup_i32/i64() public as tcg_gen_dup_i32/i64
      target/arm: Implement MVE VDUP
      target/arm: Implement MVE VAND, VBIC, VORR, VORN, VEOR
      target/arm: Implement MVE VADD, VSUB, VMUL
      target/arm: Implement MVE VMULH
      target/arm: Implement MVE VRMULH
      target/arm: Implement MVE VMAX, VMIN
      target/arm: Implement MVE VABD
      target/arm: Implement MVE VHADD, VHSUB
      target/arm: Implement MVE VMULL
      target/arm: Implement MVE VMLALDAV
      target/arm: Implement MVE VMLSLDAV
      target/arm: Implement MVE VRMLALDAVH, VRMLSLDAVH
      target/arm: Implement MVE VADD (scalar)
      target/arm: Implement MVE VSUB, VMUL (scalar)
      target/arm: Implement MVE VHADD, VHSUB (scalar)
      target/arm: Implement MVE VBRSR
      target/arm: Implement MVE VPST
      target/arm: Implement MVE VQADD and VQSUB
      target/arm: Implement MVE VQDMULH and VQRDMULH (scalar)
      target/arm: Implement MVE VQDMULL scalar
      target/arm: Implement MVE VQDMULH, VQRDMULH (vector)
      target/arm: Implement MVE VQADD, VQSUB (vector)
      target/arm: Implement MVE VQSHL (vector)
      target/arm: Implement MVE VQRSHL
      target/arm: Implement MVE VSHL insn
      target/arm: Implement MVE VRSHL
      target/arm: Implement MVE VQDMLADH and VQRDMLADH
      target/arm: Implement MVE VQDMLSDH and VQRDMLSDH
      target/arm: Implement MVE VQDMULL (vector)
      target/arm: Implement MVE VRHADD
      target/arm: Implement MVE VADC, VSBC
      target/arm: Implement MVE VCADD
      target/arm: Implement MVE VHCADD
      target/arm: Implement MVE VADDV
      target/arm: Make VMOV scalar <-> gpreg beatwise for MVE

 docs/system/arm/emulation.rst | 103 ++++
 docs/system/arm/nrf.rst | 51 ++
 docs/system/target-arm.rst | 7 +
 include/hw/acpi/ghes.h | 9 +
 include/tcg/tcg-op.h | 8 +
 include/tcg/tcg.h | 1 -
 target/arm/helper-mve.h | 357 +++++++++++++
 target/arm/helper.h | 2 +
 target/arm/internals.h | 11 +
 target/arm/translate-a32.h | 3 +
 target/arm/translate.h | 10 +
 target/arm/m-nocp.decode | 24 +
 target/arm/mve.decode | 240 +++++++++
 target/arm/vfp.decode | 14 -
 hw/acpi/ghes-stub.c | 22 +
 hw/acpi/ghes.c | 17 +
 target/arm/cpu64.c | 2 +-
 target/arm/kvm64.c | 6 +-
 target/arm/mte_helper.c | 82 +--
 target/arm/mve_helper.c | 1160 +++++++++++++++++++++++++++++++++++++++++
 target/arm/translate-m-nocp.c | 550 +++++++++++++++++++
 target/arm/translate-mve.c | 759 +++++++++++++++++++++++++++
 target/arm/translate-vfp.c | 741 +++++++-------------------
 tcg/tcg-op-gvec.c | 20 +-
 MAINTAINERS | 1 +
 hw/acpi/meson.build | 6 +-
 target/arm/meson.build | 1 +
 27 files changed, 3578 insertions(+), 629 deletions(-)
 create mode 100644 docs/system/arm/emulation.rst
 create mode 100644 docs/system/arm/nrf.rst
 create mode 100644 target/arm/helper-mve.h
 create mode 100644 hw/acpi/ghes-stub.c
 create mode 100644 target/arm/mve_helper.c

Two small bugfixes, plus most of RTH's refactoring of cpregs
handling.

-- PMM

The following changes since commit 1fba9dc71a170b3a05b9d3272dd8ecfe7f26e215:

  Merge tag 'pull-request-2022-05-04' of https://gitlab.com/thuth/qemu into staging (2022-05-04 08:07:02 -0700)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220505

for you to fetch changes up to 99a50d1a67c602126fc2b3a4812d3000eba9bf34:

  target/arm: read access to performance counters from EL0 (2022-05-05 09:36:22 +0100)

----------------------------------------------------------------
target-arm queue:
 * Enable read access to performance counters from EL0
 * Enable SCTLR_EL1.BT0 for aarch64-linux-user
 * Refactoring of cpreg handling

----------------------------------------------------------------
Alex Zuepke (1):
      target/arm: read access to performance counters from EL0

Richard Henderson (22):
      target/arm: Enable SCTLR_EL1.BT0 for aarch64-linux-user
      target/arm: Split out cpregs.h
      target/arm: Reorg CPAccessResult and access_check_cp_reg
      target/arm: Replace sentinels with ARRAY_SIZE in cpregs.h
      target/arm: Make some more cpreg data static const
      target/arm: Reorg ARMCPRegInfo type field bits
      target/arm: Avoid bare abort() or assert(0)
      target/arm: Change cpreg access permissions to enum
      target/arm: Name CPState type
      target/arm: Name CPSecureState type
      target/arm: Drop always-true test in define_arm_vh_e2h_redirects_aliases
      target/arm: Store cpregs key in the hash table directly
      target/arm: Merge allocation of the cpreg and its name
      target/arm: Hoist computation of key in add_cpreg_to_hashtable
      target/arm: Consolidate cpreg updates in add_cpreg_to_hashtable
      target/arm: Use bool for is64 and ns in add_cpreg_to_hashtable
      target/arm: Hoist isbanked computation in add_cpreg_to_hashtable
      target/arm: Perform override check early in add_cpreg_to_hashtable
      target/arm: Reformat comments in add_cpreg_to_hashtable
      target/arm: Remove HOST_BIG_ENDIAN ifdef in add_cpreg_to_hashtable
      target/arm: Add isar predicates for FEAT_Debugv8p2
      target/arm: Add isar_feature_{aa64,any}_ras

 target/arm/cpregs.h | 453 ++++++++++++++++++++++++++++++++++++++
 target/arm/cpu.h | 393 +++------------------------------
 hw/arm/pxa2xx.c | 2 +-
 hw/arm/pxa2xx_pic.c | 2 +-
 hw/intc/arm_gicv3_cpuif.c | 6 +-
 hw/intc/arm_gicv3_kvm.c | 3 +-
 target/arm/cpu.c | 25 +--
 target/arm/cpu64.c | 2 +-
 target/arm/cpu_tcg.c | 5 +-
 target/arm/gdbstub.c | 5 +-
 target/arm/helper.c | 358 +++++++++-----------------
 target/arm/hvf/hvf.c | 2 +-
 target/arm/kvm-stub.c | 4 +-
 target/arm/kvm.c | 4 +-
 target/arm/machine.c | 4 +-
 target/arm/op_helper.c | 57 ++---
 target/arm/translate-a64.c | 14 +-
 target/arm/translate-neon.c | 2 +-
 target/arm/translate.c | 13 +-
 tests/tcg/aarch64/bti-3.c | 42 ++++
 tests/tcg/aarch64/Makefile.target | 6 +-
 21 files changed, 738 insertions(+), 664 deletions(-)
 create mode 100644 target/arm/cpregs.h
 create mode 100644 tests/tcg/aarch64/bti-3.c
Deleted patch
Generic code in target/arm wants to call acpi_ghes_record_errors();
provide a stub version so that we don't fail to link when
CONFIG_ACPI_APEI is not set. This requires us to add a new
ghes-stub.c file to contain it and the meson.build mechanics
to use it when appropriate.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Dongjiu Geng <gengdongjiu1@gmail.com>
Message-id: 20210603171259.27962-2-peter.maydell@linaro.org
---
 hw/acpi/ghes-stub.c | 17 +++++++++++++++++
 hw/acpi/meson.build | 6 +++---
 2 files changed, 20 insertions(+), 3 deletions(-)
 create mode 100644 hw/acpi/ghes-stub.c

diff --git a/hw/acpi/ghes-stub.c b/hw/acpi/ghes-stub.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/acpi/ghes-stub.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * Support for generating APEI tables and recording CPER for Guests:
+ * stub functions.
+ *
+ * Copyright (c) 2021 Linaro, Ltd
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/acpi/ghes.h"
+
+int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address)
+{
+    return -1;
+}
diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/hw/acpi/meson.build
+++ b/hw/acpi/meson.build
@@ -XXX,XX +XXX,XX @@ acpi_ss.add(when: 'CONFIG_ACPI_PCI', if_true: files('pci.c'))
 acpi_ss.add(when: 'CONFIG_ACPI_VMGENID', if_true: files('vmgenid.c'))
 acpi_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device.c'))
 acpi_ss.add(when: 'CONFIG_ACPI_HMAT', if_true: files('hmat.c'))
-acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'))
+acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files('ghes-stub.c'))
 acpi_ss.add(when: 'CONFIG_ACPI_X86', if_true: files('core.c', 'piix4.c', 'pcihp.c'), if_false: files('acpi-stub.c'))
 acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c'))
 acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c'))
 acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c'))
 acpi_ss.add(when: 'CONFIG_TPM', if_true: files('tpm.c'))
-softmmu_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c'))
+softmmu_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c'))
 softmmu_ss.add_all(when: 'CONFIG_ACPI', if_true: acpi_ss)
 softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-stub.c', 'aml-build-stub.c',
-                                                  'acpi-x86-stub.c', 'ipmi-stub.c'))
+                                                  'acpi-x86-stub.c', 'ipmi-stub.c', 'ghes-stub.c'))
--
2.20.1
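With the stub in place, generic code can call the recording function
unconditionally; a minimal hypothetical caller (the wrapper function and
source-id value here are invented purely for illustration) might look like:

    /*
     * Hypothetical caller sketch: this compiles and links whether or not
     * CONFIG_ACPI_APEI is set; without APEI it just sees the stub's -1.
     */
    #include "qemu/osdep.h"
    #include "hw/acpi/ghes.h"

    static int try_record_memory_error(uint8_t source_id, uint64_t paddr)
    {
        int ret = acpi_ghes_record_errors(source_id, paddr);
        if (ret < 0) {
            /* No APEI support built in, or recording failed. */
        }
        return ret;
    }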
Deleted patch
Allow code elsewhere in the system to check whether the ACPI GHES
table is present, so it can determine whether it is OK to try to
record an error by calling acpi_ghes_record_errors().

(We don't need to migrate the new 'present' field in AcpiGhesState,
because it is set once at system initialization and doesn't change.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Dongjiu Geng <gengdongjiu1@gmail.com>
Message-id: 20210603171259.27962-3-peter.maydell@linaro.org
---
 include/hw/acpi/ghes.h | 9 +++++++++
 hw/acpi/ghes-stub.c | 5 +++++
 hw/acpi/ghes.c | 17 +++++++++++++++++
 3 files changed, 31 insertions(+)

diff --git a/include/hw/acpi/ghes.h b/include/hw/acpi/ghes.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/acpi/ghes.h
+++ b/include/hw/acpi/ghes.h
@@ -XXX,XX +XXX,XX @@ enum {

 typedef struct AcpiGhesState {
     uint64_t ghes_addr_le;
+    bool present; /* True if GHES is present at all on this board */
 } AcpiGhesState;

 void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker);
@@ -XXX,XX +XXX,XX @@ void acpi_build_hest(GArray *table_data, BIOSLinker *linker,
 void acpi_ghes_add_fw_cfg(AcpiGhesState *vms, FWCfgState *s,
                           GArray *hardware_errors);
 int acpi_ghes_record_errors(uint8_t notify, uint64_t error_physical_addr);
+
+/**
+ * acpi_ghes_present: Report whether ACPI GHES table is present
+ *
+ * Returns: true if the system has an ACPI GHES table and it is
+ * safe to call acpi_ghes_record_errors() to record a memory error.
+ */
+bool acpi_ghes_present(void);
 #endif
diff --git a/hw/acpi/ghes-stub.c b/hw/acpi/ghes-stub.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/acpi/ghes-stub.c
+++ b/hw/acpi/ghes-stub.c
@@ -XXX,XX +XXX,XX @@ int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address)
 {
     return -1;
 }
+
+bool acpi_ghes_present(void)
+{
+    return false;
+}
diff --git a/hw/acpi/ghes.c b/hw/acpi/ghes.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/acpi/ghes.c
+++ b/hw/acpi/ghes.c
@@ -XXX,XX +XXX,XX @@ void acpi_ghes_add_fw_cfg(AcpiGhesState *ags, FWCfgState *s,
     /* Create a read-write fw_cfg file for Address */
     fw_cfg_add_file_callback(s, ACPI_GHES_DATA_ADDR_FW_CFG_FILE, NULL, NULL,
                              NULL, &(ags->ghes_addr_le), sizeof(ags->ghes_addr_le), false);
+
+    ags->present = true;
 }

 int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address)
@@ -XXX,XX +XXX,XX @@ int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address)

     return ret;
 }
+
+bool acpi_ghes_present(void)
+{
+    AcpiGedState *acpi_ged_state;
+    AcpiGhesState *ags;
+
+    acpi_ged_state = ACPI_GED(object_resolve_path_type("", TYPE_ACPI_GED,
+                                                       NULL));
+
+    if (!acpi_ged_state) {
+        return false;
+    }
+    ags = &acpi_ged_state->ghes_state;
+    return ags->present;
+}
--
2.20.1
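Taken together with the stub from the previous patch, the intended call
pattern is a guard before recording; a minimal sketch using only the
declarations above (source_id and paddr stand in for the caller's values):

    /* Sketch: attempt recording only when this board set up GHES. */
    if (acpi_ghes_present()) {
        acpi_ghes_record_errors(source_id, paddr);
    }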
Deleted patch
The virt_is_acpi_enabled() function is specific to the virt board, as
is the check for its 'ras' property. Use the new acpi_ghes_present()
function to check whether we should report memory errors via
acpi_ghes_record_errors().

This avoids a link error if QEMU was built without support for the
virt board, and provides a mechanism that can be used by any future
board models that want to add ACPI memory error reporting support
(they only need to call acpi_ghes_add_fw_cfg()).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Dongjiu Geng <gengdongjiu1@gmail.com>
Message-id: 20210603171259.27962-4-peter.maydell@linaro.org
---
 target/arm/kvm64.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
 {
     ram_addr_t ram_addr;
     hwaddr paddr;
-    Object *obj = qdev_get_machine();
-    VirtMachineState *vms = VIRT_MACHINE(obj);
-    bool acpi_enabled = virt_is_acpi_enabled(vms);

     assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

-    if (acpi_enabled && addr &&
-        object_property_get_bool(obj, "ras", NULL)) {
+    if (acpi_ghes_present() && addr) {
         ram_addr = qemu_ram_addr_from_host(addr);
         if (ram_addr != RAM_ADDR_INVALID &&
             kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
--
2.20.1
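A future board model wanting ACPI memory error reporting therefore only
needs the fw_cfg registration; a rough sketch (variable names assumed,
and assuming the board keeps its GHES state inside an AcpiGedState as
the GED-based boards do):

    /* Sketch: board-side wiring during ACPI table setup; registering
     * the error blobs also marks GHES present for acpi_ghes_present(). */
    acpi_ghes_add_fw_cfg(&ged->ghes_state, fw_cfg, hardware_errors);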
Implement the forms of the MVE VLDR and VSTR insns which perform
non-widening loads of bytes, halfwords or words from memory into
vector elements of the same width (encodings T5, T6, T7).

(At the moment we know for MVE and M-profile in general that
vfp_access_check() can never return false, but we include the
conventional return-true-on-failure check for consistency
with non-M-profile translation code.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-2-peter.maydell@linaro.org
---
 target/arm/{translate-mve.c => helper-mve.h} | 19 +-
 target/arm/helper.h | 2 +
 target/arm/internals.h | 11 ++
 target/arm/mve.decode | 22 +++
 target/arm/mve_helper.c | 172 +++++++++++++++++++
 target/arm/translate-mve.c | 119 +++++++++++++
 target/arm/meson.build | 1 +
 7 files changed, 334 insertions(+), 12 deletions(-)
 copy target/arm/{translate-mve.c => helper-mve.h} (61%)
 create mode 100644 target/arm/mve_helper.c

diff --git a/target/arm/translate-mve.c b/target/arm/helper-mve.h
similarity index 61%
copy from target/arm/translate-mve.c
copy to target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@
 /*
- * ARM translation: M-profile MVE instructions
+ * M-profile MVE specific helper definitions
  *
  * Copyright (c) 2021 Linaro, Ltd.
  *
@@ -XXX,XX +XXX,XX @@
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
-
-#include "qemu/osdep.h"
-#include "tcg/tcg-op.h"
-#include "tcg/tcg-op-gvec.h"
-#include "exec/exec-all.h"
-#include "exec/gen-icount.h"
-#include "translate.h"
-#include "translate-a32.h"
-
-/* Include the generated decoder */
-#include "decode-mve.c.inc"
+DEF_HELPER_FLAGS_3(mve_vldrb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrw, TCG_CALL_NO_WG, void, env, ptr, i32)
diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
 #include "helper-a64.h"
 #include "helper-sve.h"
 #endif
+
+#include "helper-mve.h"
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
     return ptr;
 }

+/* Values for M-profile PSR.ECI for MVE insns */
+enum MVEECIState {
+    ECI_NONE = 0, /* No completed beats */
+    ECI_A0 = 1, /* Completed: A0 */
+    ECI_A0A1 = 2, /* Completed: A0, A1 */
+    /* 3 is reserved */
+    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
+    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
+    /* All other values reserved */
+};
+
 #endif
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 #
 # This file is processed by scripts/decodetree.py
 #
+
+%qd 22:1 13:3
+
+&vldr_vstr rn qd imm p a w size l
+
+@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd
+
+# Vector loads and stores
+
+# Non-widening loads/stores (P=0 W=0 is 'related encoding')
+VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111100 ....... @vldr_vstr \
+    size=0 p=0 w=1
+VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111101 ....... @vldr_vstr \
+    size=1 p=0 w=1
+VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111110 ....... @vldr_vstr \
+    size=2 p=0 w=1
+VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111100 ....... @vldr_vstr \
+    size=0 p=1
+VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111101 ....... @vldr_vstr \
+    size=1 p=1
+VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
+    size=2 p=1
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * M-profile MVE Operations
+ *
+ * Copyright (c) 2021 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "vec_internal.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+
+static uint16_t mve_element_mask(CPUARMState *env)
+{
+    /*
+     * Return the mask of which elements in the MVE vector should be
+     * updated. This is a combination of multiple things:
+     * (1) by default, we update every lane in the vector
+     * (2) VPT predication stores its state in the VPR register;
+     * (3) low-overhead-branch tail predication will mask out part
+     *     the vector on the final iteration of the loop
+     * (4) if EPSR.ECI is set then we must execute only some beats
+     *     of the insn
+     * We combine all these into a 16-bit result with the same semantics
+     * as VPR.P0: 0 to mask the lane, 1 if it is active.
+     * 8-bit vector ops will look at all bits of the result;
+     * 16-bit ops will look at bits 0, 2, 4, ...;
+     * 32-bit ops will look at bits 0, 4, 8 and 12.
+     * Compare pseudocode GetCurInstrBeat(), though that only returns
+     * the 4-bit slice of the mask corresponding to a single beat.
+     */
+    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
+
+    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
+        mask |= 0xff;
+    }
+    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
+        mask |= 0xff00;
+    }
+
+    if (env->v7m.ltpsize < 4 &&
+        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
+        /*
+         * Tail predication active, and this is the last loop iteration.
+         * The element size is (1 << ltpsize), and we only want to process
+         * loopcount elements, so we want to retain the least significant
+         * (loopcount * esize) predicate bits and zero out bits above that.
+         */
+        int masklen = env->regs[14] << env->v7m.ltpsize;
+        assert(masklen <= 16);
+        mask &= MAKE_64BIT_MASK(0, masklen);
+    }
+
+    if ((env->condexec_bits & 0xf) == 0) {
+        /*
+         * ECI bits indicate which beats are already executed;
+         * we handle this by effectively predicating them out.
+         */
+        int eci = env->condexec_bits >> 4;
+        switch (eci) {
+        case ECI_NONE:
+            break;
+        case ECI_A0:
+            mask &= 0xfff0;
+            break;
+        case ECI_A0A1:
+            mask &= 0xff00;
+            break;
+        case ECI_A0A1A2:
+        case ECI_A0A1A2B0:
+            mask &= 0xf000;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    }
+
+    return mask;
+}
+
+static void mve_advance_vpt(CPUARMState *env)
+{
+    /* Advance the VPT and ECI state if necessary */
+    uint32_t vpr = env->v7m.vpr;
+    unsigned mask01, mask23;
+
+    if ((env->condexec_bits & 0xf) == 0) {
+        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
+            (ECI_A0 << 4) : (ECI_NONE << 4);
+    }
+
+    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
+        /* VPT not enabled, nothing to do */
+        return;
+    }
+
+    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
+    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
+    if (mask01 > 8) {
+        /* high bit set, but not 0b1000: invert the relevant half of P0 */
+        vpr ^= 0xff;
+    }
+    if (mask23 > 8) {
+        /* high bit set, but not 0b1000: invert the relevant half of P0 */
+        vpr ^= 0xff00;
+    }
+    vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
+    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
+    env->v7m.vpr = vpr;
+}
+
+#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \
+    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
+    { \
+        TYPE *d = vd; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned b, e; \
+        /* \
+         * R_SXTM allows the dest reg to become UNKNOWN for abandoned \
+         * beats so we don't care if we update part of the dest and \
+         * then take an exception. \
+         */ \
+        for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
+            if (mask & (1 << b)) { \
+                d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \
+            } \
+            addr += MSIZE; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \
+    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
+    { \
+        TYPE *d = vd; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned b, e; \
+        for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
+            if (mask & (1 << b)) { \
+                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
+            } \
+            addr += MSIZE; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
+DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
+DO_VLDR(vldrw, 4, ldl, 4, uint32_t)
+
+DO_VSTR(vstrb, 1, stb, 1, uint8_t)
+DO_VSTR(vstrh, 2, stw, 2, uint16_t)
+DO_VSTR(vstrw, 4, stl, 4, uint32_t)
+
+#undef DO_VLDR
+#undef DO_VSTR
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@

 /* Include the generated decoder */
 #include "decode-mve.c.inc"
+
+typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
+
+/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
+static inline long mve_qreg_offset(unsigned reg)
+{
+    return offsetof(CPUARMState, vfp.zregs[reg].d[0]);
+}
+
+static TCGv_ptr mve_qreg_ptr(unsigned reg)
+{
+    TCGv_ptr ret = tcg_temp_new_ptr();
+    tcg_gen_addi_ptr(ret, cpu_env, mve_qreg_offset(reg));
+    return ret;
+}
+
+static bool mve_check_qreg_bank(DisasContext *s, int qmask)
+{
+    /*
+     * Check whether Qregs are in range. For v8.1M only Q0..Q7
+     * are supported, see VFPSmallRegisterBank().
+     */
+    return qmask < 8;
+}
+
+static bool mve_eci_check(DisasContext *s)
+{
+    /*
+     * This is a beatwise insn: check that ECI is valid (not a
+     * reserved value) and note that we are handling it.
+     * Return true if OK, false if we generated an exception.
+     */
+    s->eci_handled = true;
+    switch (s->eci) {
+    case ECI_NONE:
+    case ECI_A0:
+    case ECI_A0A1:
+    case ECI_A0A1A2:
+    case ECI_A0A1A2B0:
+        return true;
+    default:
+        /* Reserved value: INVSTATE UsageFault */
+        gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
+                           default_exception_el(s));
+        return false;
+    }
+}
+
+static void mve_update_eci(DisasContext *s)
+{
+    /*
+     * The helper function will always update the CPUState field,
+     * so we only need to update the DisasContext field.
+     */
+    if (s->eci) {
+        s->eci = (s->eci == ECI_A0A1A2B0) ? ECI_A0 : ECI_NONE;
+    }
+}
+
+static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
+{
+    TCGv_i32 addr;
+    uint32_t offset;
+    TCGv_ptr qreg;
+
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !mve_check_qreg_bank(s, a->qd) ||
+        !fn) {
+        return false;
+    }
+
+    /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
+    if (a->rn == 15 || (a->rn == 13 && a->w)) {
+        return false;
+    }
+
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    offset = a->imm << a->size;
+    if (!a->a) {
+        offset = -offset;
+    }
+    addr = load_reg(s, a->rn);
+    if (a->p) {
+        tcg_gen_addi_i32(addr, addr, offset);
+    }
+
+    qreg = mve_qreg_ptr(a->qd);
+    fn(cpu_env, qreg, addr);
+    tcg_temp_free_ptr(qreg);
+
+    /*
+     * Writeback always happens after the last beat of the insn,
+     * regardless of predication
+     */
+    if (a->w) {
+        if (!a->p) {
+            tcg_gen_addi_i32(addr, addr, offset);
+        }
+        store_reg(s, a->rn, addr);
+    } else {
+        tcg_temp_free_i32(addr);
+    }
+    mve_update_eci(s);
+    return true;
+}
+
+static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
+{
+    static MVEGenLdStFn * const ldstfns[4][2] = {
+        { gen_helper_mve_vstrb, gen_helper_mve_vldrb },
+        { gen_helper_mve_vstrh, gen_helper_mve_vldrh },
+        { gen_helper_mve_vstrw, gen_helper_mve_vldrw },
+        { NULL, NULL }
+    };
+    return do_ldst(s, a, ldstfns[a->size][a->l]);
+}
diff --git a/target/arm/meson.build b/target/arm/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -XXX,XX +XXX,XX @@ arm_ss.add(files(
   'helper.c',
   'iwmmxt_helper.c',
   'm_helper.c',
+  'mve_helper.c',
   'neon_helper.c',
   'op_helper.c',
   'tlb_helper.c',
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

This controls whether the PACI{A,B}SP instructions trap with BTYPE=3
(indirect branch from register other than x16/x17). The linux kernel
sets this in bti_enable().

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/998
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220427042312.294300-1-richard.henderson@linaro.org
[PMM: remove stray change to makefile comment]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 2 ++
 tests/tcg/aarch64/bti-3.c | 42 +++++++++++++++++++++++++++
 tests/tcg/aarch64/Makefile.target | 6 ++---
 3 files changed, 47 insertions(+), 3 deletions(-)
 create mode 100644 tests/tcg/aarch64/bti-3.c

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
         /* Enable all PAC keys. */
         env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                   SCTLR_EnDA | SCTLR_EnDB);
+        /* Trap on btype=3 for PACIxSP. */
+        env->cp15.sctlr_el[1] |= SCTLR_BT0;
         /* and to the FP/Neon instructions */
         env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
         /* and to the SVE instructions */
diff --git a/tests/tcg/aarch64/bti-3.c b/tests/tcg/aarch64/bti-3.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/aarch64/bti-3.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * BTI vs PACIASP
+ */
+
+#include "bti-crt.inc.c"
+
+static void skip2_sigill(int sig, siginfo_t *info, ucontext_t *uc)
+{
+    uc->uc_mcontext.pc += 8;
+    uc->uc_mcontext.pstate = 1;
+}
+
+#define BTYPE_1() \
+    asm("mov %0,#1; adr x16, 1f; br x16; 1: hint #25; mov %0,#0" \
+        : "=r"(skipped) : : "x16", "x30")
+
+#define BTYPE_2() \
+    asm("mov %0,#1; adr x16, 1f; blr x16; 1: hint #25; mov %0,#0" \
+        : "=r"(skipped) : : "x16", "x30")
+
+#define BTYPE_3() \
+    asm("mov %0,#1; adr x15, 1f; br x15; 1: hint #25; mov %0,#0" \
+        : "=r"(skipped) : : "x15", "x30")
+
+#define TEST(WHICH, EXPECT) \
+    do { WHICH(); fail += skipped ^ EXPECT; } while (0)
+
+int main()
+{
+    int fail = 0;
+    int skipped;
+
+    /* Signal-like with SA_SIGINFO. */
+    signal_info(SIGILL, skip2_sigill);
+
+    /* With SCTLR_EL1.BT0 set, PACIASP is not compatible with type=3. */
+    TEST(BTYPE_1, 0);
+    TEST(BTYPE_2, 0);
+    TEST(BTYPE_3, 1);
+
+    return fail;
+}
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -XXX,XX +XXX,XX @@ endif
 # BTI Tests
 # bti-1 tests the elf notes, so we require special compiler support.
 ifneq ($(CROSS_CC_HAS_ARMV8_BTI),)
-AARCH64_TESTS += bti-1
-bti-1: CFLAGS += -mbranch-protection=standard
-bti-1: LDFLAGS += -nostdlib
+AARCH64_TESTS += bti-1 bti-3
+bti-1 bti-3: CFLAGS += -mbranch-protection=standard
+bti-1 bti-3: LDFLAGS += -nostdlib
 endif
 # bti-2 tests PROT_BTI, so no special compiler support required.
 AARCH64_TESTS += bti-2
--
2.25.1
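The beat/lane predication computed by mve_element_mask() in the VLDR/VSTR
patch above can be illustrated standalone. In this sketch (not part of the
series; names invented), each of the 16 mask bits covers one byte of the
128-bit vector, so a 32-bit lane e is tested at bit 4*e, exactly as DO_VLDR
steps its 'b' index by ESIZE:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Standalone illustration: a 32-bit lane e is active when mask bit
     * 4*e is set, matching how DO_VLDR tests "mask & (1 << b)" with b
     * stepping by the element size in bytes.
     */
    static void masked_store_u32(uint32_t *d, const uint32_t *src,
                                 uint16_t mask)
    {
        for (unsigned e = 0; e < 4; e++) {
            if (mask & (1u << (4 * e))) {
                d[e] = src[e];
            }
        }
    }

    int main(void)
    {
        uint32_t d[4] = {0, 0, 0, 0};
        uint32_t s[4] = {1, 2, 3, 4};
        /* 0xff00: only the top two 32-bit lanes active (cf. ECI_A0A1) */
        masked_store_u32(d, s, 0xff00);
        printf("%u %u %u %u\n", d[0], d[1], d[2], d[3]); /* 0 0 3 4 */
        return 0;
    }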
1
Implement the MVE VADC and VSBC insns. These perform an
1
From: Richard Henderson <richard.henderson@linaro.org>
2
add-with-carry or subtract-with-carry of the 32-bit elements in each
3
lane of the input vectors, where the carry-out of each add is the
4
carry-in of the next. The initial carry input is either 1 or is from
5
FPSCR.C; the carry out at the end is written back to FPSCR.C.
6
2
3
Move ARMCPRegInfo and all related declarations to a new
4
internal header, out of the public cpu.h.
5
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220501055028.646596-2-richard.henderson@linaro.org
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210617121628.20116-41-peter.maydell@linaro.org
10
---
11
---
11
target/arm/helper-mve.h | 5 ++++
12
target/arm/cpregs.h | 413 +++++++++++++++++++++++++++++++++++++
12
target/arm/mve.decode | 5 ++++
13
target/arm/cpu.h | 368 ---------------------------------
13
target/arm/mve_helper.c | 52 ++++++++++++++++++++++++++++++++++++++
14
hw/arm/pxa2xx.c | 1 +
14
target/arm/translate-mve.c | 37 +++++++++++++++++++++++++++
15
hw/arm/pxa2xx_pic.c | 1 +
15
4 files changed, 99 insertions(+)
16
hw/intc/arm_gicv3_cpuif.c | 1 +
17
hw/intc/arm_gicv3_kvm.c | 2 +
18
target/arm/cpu.c | 1 +
19
target/arm/cpu64.c | 1 +
20
target/arm/cpu_tcg.c | 1 +
21
target/arm/gdbstub.c | 3 +-
22
target/arm/helper.c | 1 +
23
target/arm/op_helper.c | 1 +
24
target/arm/translate-a64.c | 4 +-
25
target/arm/translate.c | 3 +-
26
14 files changed, 427 insertions(+), 374 deletions(-)
27
create mode 100644 target/arm/cpregs.h
16
28
17
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
29
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
18
index XXXXXXX..XXXXXXX 100644
30
new file mode 100644
19
--- a/target/arm/helper-mve.h
31
index XXXXXXX..XXXXXXX
20
+++ b/target/arm/helper-mve.h
32
--- /dev/null
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrhaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
33
+++ b/target/arm/cpregs.h
22
DEF_HELPER_FLAGS_4(mve_vrhadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
34
@@ -XXX,XX +XXX,XX @@
23
DEF_HELPER_FLAGS_4(mve_vrhadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
35
+/*
24
36
+ * QEMU ARM CP Register access and descriptions
25
+DEF_HELPER_FLAGS_4(mve_vadc, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
37
+ *
26
+DEF_HELPER_FLAGS_4(mve_vadci, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
38
+ * Copyright (c) 2022 Linaro Ltd
27
+DEF_HELPER_FLAGS_4(mve_vsbc, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
39
+ *
28
+DEF_HELPER_FLAGS_4(mve_vsbci, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
40
+ * This program is free software; you can redistribute it and/or
29
+
41
+ * modify it under the terms of the GNU General Public License
30
DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
42
+ * as published by the Free Software Foundation; either version 2
31
DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
43
+ * of the License, or (at your option) any later version.
32
DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
44
+ *
33
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
45
+ * This program is distributed in the hope that it will be useful,
34
index XXXXXXX..XXXXXXX 100644
46
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
35
--- a/target/arm/mve.decode
47
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
36
+++ b/target/arm/mve.decode
48
+ * GNU General Public License for more details.
37
@@ -XXX,XX +XXX,XX @@ VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28
49
+ *
38
VRHADD_S 111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
50
+ * You should have received a copy of the GNU General Public License
39
VRHADD_U 111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
51
+ * along with this program; if not, see
40
52
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
41
+VADC 1110 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
53
+ */
42
+VSBC 1111 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
54
+
43
+VADCI 1110 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
55
+#ifndef TARGET_ARM_CPREGS_H
44
+VSBCI 1111 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
56
+#define TARGET_ARM_CPREGS_H
45
+
57
+
46
# Vector miscellaneous
58
+/*
47
59
+ * ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
48
VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
60
+ * special-behaviour cp reg and bits [11..8] indicate what behaviour
49
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
61
+ * it has. Otherwise it is a simple cp reg, where CONST indicates that
50
index XXXXXXX..XXXXXXX 100644
62
+ * TCG can assume the value to be constant (ie load at translate time)
51
--- a/target/arm/mve_helper.c
63
+ * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
52
+++ b/target/arm/mve_helper.c
64
+ * indicates that the TB should not be ended after a write to this register
53
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vrshlu, DO_VRSHLU)
65
+ * (the default is that the TB ends after cp writes). OVERRIDE permits
54
DO_2OP_S(vrhadds, DO_RHADD_S)
66
+ * a register definition to override a previous definition for the
55
DO_2OP_U(vrhaddu, DO_RHADD_U)
67
+ * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
56
68
+ * old must have the OVERRIDE bit set.
57
+static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
69
+ * ALIAS indicates that this register is an alias view of some underlying
58
+ uint32_t inv, uint32_t carry_in, bool update_flags)
70
+ * state which is also visible via another register, and that the other
71
+ * register is handling migration and reset; registers marked ALIAS will not be
72
+ * migrated but may have their state set by syncing of register state from KVM.
73
+ * NO_RAW indicates that this register has no underlying state and does not
74
+ * support raw access for state saving/loading; it will not be used for either
75
+ * migration or KVM state synchronization. (Typically this is for "registers"
76
+ * which are actually used as instructions for cache maintenance and so on.)
77
+ * IO indicates that this register does I/O and therefore its accesses
78
+ * need to be marked with gen_io_start() and also end the TB. In particular,
79
+ * registers which implement clocks or timers require this.
80
+ * RAISES_EXC is for when the read or write hook might raise an exception;
81
+ * the generated code will synchronize the CPU state before calling the hook
82
+ * so that it is safe for the hook to call raise_exception().
83
+ * NEWEL is for writes to registers that might change the exception
84
+ * level - typically on older ARM chips. For those cases we need to
85
+ * re-read the new el when recomputing the translation flags.
86
+ */
87
+#define ARM_CP_SPECIAL 0x0001
88
+#define ARM_CP_CONST 0x0002
89
+#define ARM_CP_64BIT 0x0004
90
+#define ARM_CP_SUPPRESS_TB_END 0x0008
91
+#define ARM_CP_OVERRIDE 0x0010
92
+#define ARM_CP_ALIAS 0x0020
93
+#define ARM_CP_IO 0x0040
94
+#define ARM_CP_NO_RAW 0x0080
95
+#define ARM_CP_NOP (ARM_CP_SPECIAL | 0x0100)
96
+#define ARM_CP_WFI (ARM_CP_SPECIAL | 0x0200)
97
+#define ARM_CP_NZCV (ARM_CP_SPECIAL | 0x0300)
98
+#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | 0x0400)
99
+#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | 0x0500)
100
+#define ARM_CP_DC_GVA (ARM_CP_SPECIAL | 0x0600)
101
+#define ARM_CP_DC_GZVA (ARM_CP_SPECIAL | 0x0700)
102
+#define ARM_LAST_SPECIAL ARM_CP_DC_GZVA
103
+#define ARM_CP_FPU 0x1000
104
+#define ARM_CP_SVE 0x2000
105
+#define ARM_CP_NO_GDB 0x4000
106
+#define ARM_CP_RAISES_EXC 0x8000
107
+#define ARM_CP_NEWEL 0x10000
108
+/* Used only as a terminator for ARMCPRegInfo lists */
109
+#define ARM_CP_SENTINEL 0xfffff
110
+/* Mask of only the flag bits in a type field */
111
+#define ARM_CP_FLAG_MASK 0x1f0ff
112
+
113
+/*
114
+ * Valid values for ARMCPRegInfo state field, indicating which of
115
+ * the AArch32 and AArch64 execution states this register is visible in.
116
+ * If the reginfo doesn't explicitly specify then it is AArch32 only.
117
+ * If the reginfo is declared to be visible in both states then a second
118
+ * reginfo is synthesised for the AArch32 view of the AArch64 register,
119
+ * such that the AArch32 view is the lower 32 bits of the AArch64 one.
120
+ * Note that we rely on the values of these enums as we iterate through
121
+ * the various states in some places.
122
+ */
123
+enum {
124
+ ARM_CP_STATE_AA32 = 0,
125
+ ARM_CP_STATE_AA64 = 1,
126
+ ARM_CP_STATE_BOTH = 2,
127
+};
128
+
129
+/*
130
+ * ARM CP register secure state flags. These flags identify security state
131
+ * attributes for a given CP register entry.
132
+ * The existence of both or neither secure and non-secure flags indicates that
133
+ * the register has both a secure and non-secure hash entry. A single one of
134
+ * these flags causes the register to only be hashed for the specified
135
+ * security state.
136
+ * Although definitions may have any combination of the S/NS bits, each
137
+ * registered entry will only have one to identify whether the entry is secure
138
+ * or non-secure.
139
+ */
140
+enum {
141
+ ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */
142
+ ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
143
+};
144
+
145
+/*
146
+ * Return true if cptype is a valid type field. This is used to try to
147
+ * catch errors where the sentinel has been accidentally left off the end
148
+ * of a list of registers.
149
+ */
150
+static inline bool cptype_valid(int cptype)
59
+{
151
+{
60
+ uint16_t mask = mve_element_mask(env);
152
+ return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
61
+ unsigned e;
153
+ || ((cptype & ARM_CP_SPECIAL) &&
62
+
154
+ ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
63
+ /* If any additions trigger, we will update flags. */
64
+ if (mask & 0x1111) {
65
+ update_flags = true;
66
+ }
67
+
68
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
69
+ uint64_t r = carry_in;
70
+ r += n[H4(e)];
71
+ r += m[H4(e)] ^ inv;
72
+ if (mask & 1) {
73
+ carry_in = r >> 32;
74
+ }
75
+ mergemask(&d[H4(e)], r, mask);
76
+ }
77
+
78
+ if (update_flags) {
79
+ /* Store C, clear NZV. */
80
+ env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
81
+ env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
82
+ }
83
+ mve_advance_vpt(env);
84
+}
155
+}
85
+
156
+
86
+void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
157
+/*
158
+ * Access rights:
159
+ * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
160
+ * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
161
+ * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
162
+ * (ie any of the privileged modes in Secure state, or Monitor mode).
163
+ * If a register is accessible in one privilege level it's always accessible
164
+ * in higher privilege levels too. Since "Secure PL1" also follows this rule
165
+ * (ie anything visible in PL2 is visible in S-PL1, some things are only
166
+ * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
167
+ * terminology a little and call this PL3.
168
+ * In AArch64 things are somewhat simpler as the PLx bits line up exactly
169
+ * with the ELx exception levels.
170
+ *
171
+ * If access permissions for a register are more complex than can be
172
+ * described with these bits, then use a laxer set of restrictions, and
173
+ * do the more restrictive/complex check inside a helper function.
174
+ */
175
+#define PL3_R 0x80
176
+#define PL3_W 0x40
177
+#define PL2_R (0x20 | PL3_R)
178
+#define PL2_W (0x10 | PL3_W)
179
+#define PL1_R (0x08 | PL2_R)
180
+#define PL1_W (0x04 | PL2_W)
181
+#define PL0_R (0x02 | PL1_R)
182
+#define PL0_W (0x01 | PL1_W)
183
+
184
+/*
185
+ * For user-mode some registers are accessible to EL0 via a kernel
186
+ * trap-and-emulate ABI. In this case we define the read permissions
187
+ * as actually being PL0_R. However some bits of any given register
188
+ * may still be masked.
189
+ */
190
+#ifdef CONFIG_USER_ONLY
191
+#define PL0U_R PL0_R
192
+#else
193
+#define PL0U_R PL1_R
194
+#endif
195
+
196
+#define PL3_RW (PL3_R | PL3_W)
197
+#define PL2_RW (PL2_R | PL2_W)
198
+#define PL1_RW (PL1_R | PL1_W)
199
+#define PL0_RW (PL0_R | PL0_W)
200
+
201
+typedef enum CPAccessResult {
202
+ /* Access is permitted */
203
+ CP_ACCESS_OK = 0,
204
+ /*
205
+ * Access fails due to a configurable trap or enable which would
206
+ * result in a categorized exception syndrome giving information about
207
+ * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
208
+ * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
209
+ * PL1 if in EL0, otherwise to the current EL).
210
+ */
211
+ CP_ACCESS_TRAP = 1,
212
+ /*
213
+ * Access fails and results in an exception syndrome 0x0 ("uncategorized").
214
+ * Note that this is not a catch-all case -- the set of cases which may
215
+ * result in this failure is specifically defined by the architecture.
216
+ */
217
+ CP_ACCESS_TRAP_UNCATEGORIZED = 2,
218
+ /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
219
+ CP_ACCESS_TRAP_EL2 = 3,
220
+ CP_ACCESS_TRAP_EL3 = 4,
221
+ /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
222
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
223
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
224
+} CPAccessResult;
225
+
226
+typedef struct ARMCPRegInfo ARMCPRegInfo;
227
+
228
+/*
229
+ * Access functions for coprocessor registers. These cannot fail and
230
+ * may not raise exceptions.
231
+ */
232
+typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
233
+typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
234
+ uint64_t value);
235
+/* Access permission check functions for coprocessor registers. */
236
+typedef CPAccessResult CPAccessFn(CPUARMState *env,
237
+ const ARMCPRegInfo *opaque,
238
+ bool isread);
239
+/* Hook function for register reset */
240
+typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);
241
+
242
+#define CP_ANY 0xff
243
+
244
+/* Definition of an ARM coprocessor register */
245
+struct ARMCPRegInfo {
246
+ /* Name of register (useful mainly for debugging, need not be unique) */
247
+ const char *name;
248
+ /*
249
+ * Location of register: coprocessor number and (crn,crm,opc1,opc2)
250
+ * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
251
+ * 'wildcard' field -- any value of that field in the MRC/MCR insn
252
+ * will be decoded to this register. The register read and write
253
+ * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
254
+ * used by the program, so it is possible to register a wildcard and
255
+ * then behave differently on read/write if necessary.
256
+ * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
257
+ * must both be zero.
258
+ * For AArch64-visible registers, opc0 is also used.
259
+ * Since there are no "coprocessors" in AArch64, cp is purely used as a
260
+ * way to distinguish (for KVM's benefit) guest-visible system registers
261
+ * from demuxed ones provided to preserve the "no side effects on
262
+ * KVM register read/write from QEMU" semantics. cp==0x13 is guest
263
+ * visible (to match KVM's encoding); cp==0 will be converted to
264
+ * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
265
+ */
266
+ uint8_t cp;
267
+ uint8_t crn;
268
+ uint8_t crm;
269
+ uint8_t opc0;
270
+ uint8_t opc1;
271
+ uint8_t opc2;
272
+ /* Execution state in which this register is visible: ARM_CP_STATE_* */
273
+ int state;
274
+ /* Register type: ARM_CP_* bits/values */
275
+ int type;
276
+ /* Access rights: PL*_[RW] */
277
+ int access;
278
+ /* Security state: ARM_CP_SECSTATE_* bits/values */
279
+ int secure;
280
+ /*
281
+ * The opaque pointer passed to define_arm_cp_regs_with_opaque() when
282
+ * this register was defined: can be used to hand data through to the
283
+ * register read/write functions, since they are passed the ARMCPRegInfo*.
284
+ */
285
+ void *opaque;
286
+ /*
287
+ * Value of this register, if it is ARM_CP_CONST. Otherwise, if
288
+ * fieldoffset is non-zero, the reset value of the register.
289
+ */
290
+ uint64_t resetvalue;
291
+ /*
292
+ * Offset of the field in CPUARMState for this register.
293
+ * This is not needed if either:
294
+ * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
295
+ * 2. both readfn and writefn are specified
296
+ */
297
+ ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */
298
+
299
+ /*
300
+ * Offsets of the secure and non-secure fields in CPUARMState for the
301
+ * register if it is banked. These fields are only used during the static
302
+ * registration of a register. During hashing the bank associated
303
+ * with a given security state is copied to fieldoffset which is used from
304
+ * there on out.
305
+ *
306
+ * It is expected that register definitions use either fieldoffset or
307
+ * bank_fieldoffsets in the definition but not both. It is also expected
308
+ * that both bank offsets are set when defining a banked register. This
309
+ * use indicates that a register is banked.
310
+ */
311
+ ptrdiff_t bank_fieldoffsets[2];
312
+
313
+ /*
314
+ * Function for making any access checks for this register in addition to
315
+ * those specified by the 'access' permissions bits. If NULL, no extra
316
+ * checks required. The access check is performed at runtime, not at
317
+ * translate time.
318
+ */
319
+ CPAccessFn *accessfn;
320
+ /*
321
+ * Function for handling reads of this register. If NULL, then reads
322
+ * will be done by loading from the offset into CPUARMState specified
323
+ * by fieldoffset.
324
+ */
325
+ CPReadFn *readfn;
326
+ /*
327
+ * Function for handling writes of this register. If NULL, then writes
328
+ * will be done by writing to the offset into CPUARMState specified
329
+ * by fieldoffset.
330
+ */
331
+ CPWriteFn *writefn;
332
+ /*
333
+ * Function for doing a "raw" read; used when we need to copy
334
+ * coprocessor state to the kernel for KVM or out for
335
+ * migration. This only needs to be provided if there is also a
336
+ * readfn and it has side effects (for instance clear-on-read bits).
337
+ */
338
+ CPReadFn *raw_readfn;
339
+ /*
340
+ * Function for doing a "raw" write; used when we need to copy KVM
341
+ * kernel coprocessor state into userspace, or for inbound
342
+ * migration. This only needs to be provided if there is also a
343
+ * writefn and it masks out "unwritable" bits or has write-one-to-clear
344
+ * or similar behaviour.
345
+ */
346
+ CPWriteFn *raw_writefn;
347
+ /*
348
+ * Function for resetting the register. If NULL, then reset will be done
349
+ * by writing resetvalue to the field specified in fieldoffset. If
350
+ * fieldoffset is 0 then no reset will be done.
351
+ */
352
+ CPResetFn *resetfn;
353
+
354
+ /*
355
+ * "Original" writefn and readfn.
356
+ * For ARMv8.1-VHE register aliases, we overwrite the read/write
357
+ * accessor functions of various EL1/EL0 to perform the runtime
358
+ * check for which sysreg should actually be modified, and then
359
+ * forwards the operation. Before overwriting the accessors,
360
+ * the original function is copied here, so that accesses that
361
+ * really do go to the EL1/EL0 version proceed normally.
362
+ * (The corresponding EL2 register is linked via opaque.)
+ */
+    CPReadFn *orig_readfn;
+    CPWriteFn *orig_writefn;
+};
+
+/*
+ * Macros which are lvalues for the field in CPUARMState for the
+ * ARMCPRegInfo *ri.
+ */
+#define CPREG_FIELD32(env, ri) \
+    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
+#define CPREG_FIELD64(env, ri) \
+    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
+
+#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }
+
+void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
+                                    const ARMCPRegInfo *regs, void *opaque);
+void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
+                                       const ARMCPRegInfo *regs, void *opaque);
+static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
+{
+    define_arm_cp_regs_with_opaque(cpu, regs, 0);
+}
+static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
+{
+    define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
+}
+const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
+
+/*
+ * Definition of an ARM co-processor register as viewed from
+ * userspace. This is used for presenting sanitised versions of
+ * registers to userspace when emulating the Linux AArch64 CPU
+ * ID/feature ABI (advertised as HWCAP_CPUID).
+ */
+typedef struct ARMCPRegUserSpaceInfo {
+    /* Name of register */
+    const char *name;
+
+    /* Is the name actually a glob pattern */
+    bool is_glob;
+
+    /* Only some bits are exported to user space */
+    uint64_t exported_bits;
+
+    /* Fixed bits are applied after the mask */
+    uint64_t fixed_bits;
+} ARMCPRegUserSpaceInfo;
+
+#define REGUSERINFO_SENTINEL { .name = NULL }
+
+void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods);
+
+/* CPWriteFn that can be used to implement writes-ignored behaviour */
+void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
+                         uint64_t value);
+/* CPReadFn that can be used for read-as-zero behaviour */
+uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);
+
+/*
+ * CPResetFn that does nothing, for use if no reset is required even
+ * if fieldoffset is non zero.
+ */
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);
+
+/*
+ * Return true if this reginfo struct's field in the cpu state struct
+ * is 64 bits wide.
+ */
+static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
+{
+    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
+}
+
+static inline bool cp_access_ok(int current_el,
+                                const ARMCPRegInfo *ri, int isread)
+{
+    return (ri->access >> ((current_el * 2) + isread)) & 1;
+}
+
+/* Raw read of a coprocessor register (as needed for migration, etc) */
+uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);
+
+#endif /* TARGET_ARM_CPREGS_H */
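(A note on the access encoding that cp_access_ok() above relies on: each
privilege level gets one read bit and one write bit, laid out so that
(current_el * 2) + isread indexes straight into the mask. A minimal
standalone sketch -- illustration only, not part of the patch -- using the
PL*_R/W values from the block being moved out of cpu.h below:

    /* Standalone illustration of the PL*_R/W bit packing that
     * cp_access_ok() tests; values copied from the cpu.h block. */
    #include <assert.h>

    #define PL3_R 0x80
    #define PL3_W 0x40
    #define PL2_R (0x20 | PL3_R)
    #define PL2_W (0x10 | PL3_W)
    #define PL1_R (0x08 | PL2_R)
    #define PL1_W (0x04 | PL2_W)
    #define PL0_R (0x02 | PL1_R)
    #define PL0_W (0x01 | PL1_W)
    #define PL1_RW (PL1_R | PL1_W)

    static int access_ok(int access, int current_el, int isread)
    {
        return (access >> ((current_el * 2) + isread)) & 1;
    }

    int main(void)
    {
        /* A PL1_RW register (mask 0xfc) is readable at EL1, not EL0. */
        assert(access_ok(PL1_RW, 1, 1));   /* EL1 read: bit 3 of 0xfc set */
        assert(!access_ok(PL1_RW, 0, 1));  /* EL0 read: bit 1 of 0xfc clear */
        return 0;
    }

So a register accessible at some level is automatically accessible at all
higher levels purely by bit position, with no extra logic.)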
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
     return kvmid;
 }
 
-/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
- * special-behaviour cp reg and bits [11..8] indicate what behaviour
- * it has. Otherwise it is a simple cp reg, where CONST indicates that
- * TCG can assume the value to be constant (ie load at translate time)
- * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
- * indicates that the TB should not be ended after a write to this register
- * (the default is that the TB ends after cp writes). OVERRIDE permits
- * a register definition to override a previous definition for the
- * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
- * old must have the OVERRIDE bit set.
- * ALIAS indicates that this register is an alias view of some underlying
- * state which is also visible via another register, and that the other
- * register is handling migration and reset; registers marked ALIAS will not be
- * migrated but may have their state set by syncing of register state from KVM.
- * NO_RAW indicates that this register has no underlying state and does not
- * support raw access for state saving/loading; it will not be used for either
- * migration or KVM state synchronization. (Typically this is for "registers"
- * which are actually used as instructions for cache maintenance and so on.)
- * IO indicates that this register does I/O and therefore its accesses
- * need to be marked with gen_io_start() and also end the TB. In particular,
- * registers which implement clocks or timers require this.
- * RAISES_EXC is for when the read or write hook might raise an exception;
- * the generated code will synchronize the CPU state before calling the hook
- * so that it is safe for the hook to call raise_exception().
- * NEWEL is for writes to registers that might change the exception
- * level - typically on older ARM chips. For those cases we need to
- * re-read the new el when recomputing the translation flags.
- */
-#define ARM_CP_SPECIAL 0x0001
-#define ARM_CP_CONST 0x0002
-#define ARM_CP_64BIT 0x0004
-#define ARM_CP_SUPPRESS_TB_END 0x0008
-#define ARM_CP_OVERRIDE 0x0010
-#define ARM_CP_ALIAS 0x0020
-#define ARM_CP_IO 0x0040
-#define ARM_CP_NO_RAW 0x0080
-#define ARM_CP_NOP (ARM_CP_SPECIAL | 0x0100)
-#define ARM_CP_WFI (ARM_CP_SPECIAL | 0x0200)
-#define ARM_CP_NZCV (ARM_CP_SPECIAL | 0x0300)
-#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | 0x0400)
-#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | 0x0500)
-#define ARM_CP_DC_GVA (ARM_CP_SPECIAL | 0x0600)
-#define ARM_CP_DC_GZVA (ARM_CP_SPECIAL | 0x0700)
-#define ARM_LAST_SPECIAL ARM_CP_DC_GZVA
-#define ARM_CP_FPU 0x1000
-#define ARM_CP_SVE 0x2000
-#define ARM_CP_NO_GDB 0x4000
-#define ARM_CP_RAISES_EXC 0x8000
-#define ARM_CP_NEWEL 0x10000
-/* Used only as a terminator for ARMCPRegInfo lists */
-#define ARM_CP_SENTINEL 0xfffff
-/* Mask of only the flag bits in a type field */
-#define ARM_CP_FLAG_MASK 0x1f0ff
-
-/* Valid values for ARMCPRegInfo state field, indicating which of
- * the AArch32 and AArch64 execution states this register is visible in.
- * If the reginfo doesn't explicitly specify then it is AArch32 only.
- * If the reginfo is declared to be visible in both states then a second
- * reginfo is synthesised for the AArch32 view of the AArch64 register,
- * such that the AArch32 view is the lower 32 bits of the AArch64 one.
- * Note that we rely on the values of these enums as we iterate through
- * the various states in some places.
- */
-enum {
-    ARM_CP_STATE_AA32 = 0,
-    ARM_CP_STATE_AA64 = 1,
-    ARM_CP_STATE_BOTH = 2,
-};
-
-/* ARM CP register secure state flags. These flags identify security state
- * attributes for a given CP register entry.
- * The existence of both or neither secure and non-secure flags indicates that
- * the register has both a secure and non-secure hash entry. A single one of
- * these flags causes the register to only be hashed for the specified
- * security state.
- * Although definitions may have any combination of the S/NS bits, each
- * registered entry will only have one to identify whether the entry is secure
- * or non-secure.
- */
-enum {
-    ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */
-    ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
-};
-
-/* Return true if cptype is a valid type field. This is used to try to
- * catch errors where the sentinel has been accidentally left off the end
- * of a list of registers.
- */
-static inline bool cptype_valid(int cptype)
-{
-    return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
-        || ((cptype & ARM_CP_SPECIAL) &&
-            ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
-}
-
-/* Access rights:
- * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
- * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
- * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
- * (ie any of the privileged modes in Secure state, or Monitor mode).
- * If a register is accessible in one privilege level it's always accessible
- * in higher privilege levels too. Since "Secure PL1" also follows this rule
- * (ie anything visible in PL2 is visible in S-PL1, some things are only
- * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
- * terminology a little and call this PL3.
- * In AArch64 things are somewhat simpler as the PLx bits line up exactly
- * with the ELx exception levels.
- *
- * If access permissions for a register are more complex than can be
- * described with these bits, then use a laxer set of restrictions, and
- * do the more restrictive/complex check inside a helper function.
- */
-#define PL3_R 0x80
-#define PL3_W 0x40
-#define PL2_R (0x20 | PL3_R)
-#define PL2_W (0x10 | PL3_W)
-#define PL1_R (0x08 | PL2_R)
-#define PL1_W (0x04 | PL2_W)
-#define PL0_R (0x02 | PL1_R)
-#define PL0_W (0x01 | PL1_W)
-
-/*
- * For user-mode some registers are accessible to EL0 via a kernel
- * trap-and-emulate ABI. In this case we define the read permissions
- * as actually being PL0_R. However some bits of any given register
- * may still be masked.
- */
-#ifdef CONFIG_USER_ONLY
-#define PL0U_R PL0_R
-#else
-#define PL0U_R PL1_R
-#endif
-
-#define PL3_RW (PL3_R | PL3_W)
-#define PL2_RW (PL2_R | PL2_W)
-#define PL1_RW (PL1_R | PL1_W)
-#define PL0_RW (PL0_R | PL0_W)
-
 /* Return the highest implemented Exception Level */
 static inline int arm_highest_el(CPUARMState *env)
 {
@@ -XXX,XX +XXX,XX @@ static inline int arm_current_el(CPUARMState *env)
     }
 }
 
-typedef struct ARMCPRegInfo ARMCPRegInfo;
-
-typedef enum CPAccessResult {
-    /* Access is permitted */
-    CP_ACCESS_OK = 0,
-    /* Access fails due to a configurable trap or enable which would
-     * result in a categorized exception syndrome giving information about
-     * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
-     * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
-     * PL1 if in EL0, otherwise to the current EL).
-     */
-    CP_ACCESS_TRAP = 1,
-    /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
-     * Note that this is not a catch-all case -- the set of cases which may
-     * result in this failure is specifically defined by the architecture.
-     */
-    CP_ACCESS_TRAP_UNCATEGORIZED = 2,
-    /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
-    CP_ACCESS_TRAP_EL2 = 3,
-    CP_ACCESS_TRAP_EL3 = 4,
-    /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
-    CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
-    CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
-} CPAccessResult;
-
-/* Access functions for coprocessor registers. These cannot fail and
- * may not raise exceptions.
- */
-typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
-typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
-                       uint64_t value);
-/* Access permission check functions for coprocessor registers. */
-typedef CPAccessResult CPAccessFn(CPUARMState *env,
-                                  const ARMCPRegInfo *opaque,
-                                  bool isread);
-/* Hook function for register reset */
-typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);
-
-#define CP_ANY 0xff
-
-/* Definition of an ARM coprocessor register */
-struct ARMCPRegInfo {
-    /* Name of register (useful mainly for debugging, need not be unique) */
-    const char *name;
-    /* Location of register: coprocessor number and (crn,crm,opc1,opc2)
-     * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
-     * 'wildcard' field -- any value of that field in the MRC/MCR insn
-     * will be decoded to this register. The register read and write
-     * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
-     * used by the program, so it is possible to register a wildcard and
-     * then behave differently on read/write if necessary.
-     * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
-     * must both be zero.
-     * For AArch64-visible registers, opc0 is also used.
-     * Since there are no "coprocessors" in AArch64, cp is purely used as a
-     * way to distinguish (for KVM's benefit) guest-visible system registers
-     * from demuxed ones provided to preserve the "no side effects on
-     * KVM register read/write from QEMU" semantics. cp==0x13 is guest
-     * visible (to match KVM's encoding); cp==0 will be converted to
-     * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
-     */
-    uint8_t cp;
-    uint8_t crn;
-    uint8_t crm;
-    uint8_t opc0;
-    uint8_t opc1;
-    uint8_t opc2;
-    /* Execution state in which this register is visible: ARM_CP_STATE_* */
-    int state;
-    /* Register type: ARM_CP_* bits/values */
-    int type;
-    /* Access rights: PL*_[RW] */
-    int access;
-    /* Security state: ARM_CP_SECSTATE_* bits/values */
-    int secure;
-    /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
-     * this register was defined: can be used to hand data through to the
-     * register read/write functions, since they are passed the ARMCPRegInfo*.
-     */
-    void *opaque;
-    /* Value of this register, if it is ARM_CP_CONST. Otherwise, if
-     * fieldoffset is non-zero, the reset value of the register.
-     */
-    uint64_t resetvalue;
-    /* Offset of the field in CPUARMState for this register.
-     *
-     * This is not needed if either:
-     *  1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
-     *  2. both readfn and writefn are specified
-     */
-    ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */
-
-    /* Offsets of the secure and non-secure fields in CPUARMState for the
-     * register if it is banked. These fields are only used during the static
-     * registration of a register. During hashing the bank associated
-     * with a given security state is copied to fieldoffset which is used from
-     * there on out.
-     *
-     * It is expected that register definitions use either fieldoffset or
-     * bank_fieldoffsets in the definition but not both. It is also expected
-     * that both bank offsets are set when defining a banked register. This
-     * use indicates that a register is banked.
-     */
-    ptrdiff_t bank_fieldoffsets[2];
-
-    /* Function for making any access checks for this register in addition to
-     * those specified by the 'access' permissions bits. If NULL, no extra
-     * checks required. The access check is performed at runtime, not at
-     * translate time.
-     */
-    CPAccessFn *accessfn;
-    /* Function for handling reads of this register. If NULL, then reads
-     * will be done by loading from the offset into CPUARMState specified
-     * by fieldoffset.
-     */
-    CPReadFn *readfn;
-    /* Function for handling writes of this register. If NULL, then writes
-     * will be done by writing to the offset into CPUARMState specified
-     * by fieldoffset.
-     */
-    CPWriteFn *writefn;
-    /* Function for doing a "raw" read; used when we need to copy
-     * coprocessor state to the kernel for KVM or out for
-     * migration. This only needs to be provided if there is also a
-     * readfn and it has side effects (for instance clear-on-read bits).
-     */
-    CPReadFn *raw_readfn;
-    /* Function for doing a "raw" write; used when we need to copy KVM
-     * kernel coprocessor state into userspace, or for inbound
-     * migration. This only needs to be provided if there is also a
-     * writefn and it masks out "unwritable" bits or has write-one-to-clear
-     * or similar behaviour.
-     */
-    CPWriteFn *raw_writefn;
-    /* Function for resetting the register. If NULL, then reset will be done
-     * by writing resetvalue to the field specified in fieldoffset. If
-     * fieldoffset is 0 then no reset will be done.
-     */
-    CPResetFn *resetfn;
-
-    /*
-     * "Original" writefn and readfn.
-     * For ARMv8.1-VHE register aliases, we overwrite the read/write
-     * accessor functions of various EL1/EL0 to perform the runtime
-     * check for which sysreg should actually be modified, and then
-     * forwards the operation. Before overwriting the accessors,
-     * the original function is copied here, so that accesses that
-     * really do go to the EL1/EL0 version proceed normally.
-     * (The corresponding EL2 register is linked via opaque.)
-     */
-    CPReadFn *orig_readfn;
-    CPWriteFn *orig_writefn;
-};
-
-/* Macros which are lvalues for the field in CPUARMState for the
- * ARMCPRegInfo *ri.
- */
-#define CPREG_FIELD32(env, ri) \
-    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
-#define CPREG_FIELD64(env, ri) \
-    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
-
-#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }
-
-void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
-                                    const ARMCPRegInfo *regs, void *opaque);
-void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
-                                       const ARMCPRegInfo *regs, void *opaque);
-static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
-{
-    define_arm_cp_regs_with_opaque(cpu, regs, 0);
-}
-static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
-{
-    define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
-}
-const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
-
-/*
- * Definition of an ARM co-processor register as viewed from
- * userspace. This is used for presenting sanitised versions of
- * registers to userspace when emulating the Linux AArch64 CPU
- * ID/feature ABI (advertised as HWCAP_CPUID).
- */
-typedef struct ARMCPRegUserSpaceInfo {
-    /* Name of register */
-    const char *name;
-
-    /* Is the name actually a glob pattern */
-    bool is_glob;
-
-    /* Only some bits are exported to user space */
-    uint64_t exported_bits;
-
-    /* Fixed bits are applied after the mask */
-    uint64_t fixed_bits;
-} ARMCPRegUserSpaceInfo;
-
-#define REGUSERINFO_SENTINEL { .name = NULL }
-
-void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods);
-
-/* CPWriteFn that can be used to implement writes-ignored behaviour */
-void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
-                         uint64_t value);
-/* CPReadFn that can be used for read-as-zero behaviour */
-uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);
-
-/* CPResetFn that does nothing, for use if no reset is required even
- * if fieldoffset is non zero.
- */
-void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);
-
-/* Return true if this reginfo struct's field in the cpu state struct
- * is 64 bits wide.
- */
-static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
-{
-    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
-}
-
-static inline bool cp_access_ok(int current_el,
-                                const ARMCPRegInfo *ri, int isread)
-{
-    return (ri->access >> ((current_el * 2) + isread)) & 1;
-}
-
-/* Raw read of a coprocessor register (as needed for migration, etc) */
-uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);
-
 /**
  * write_list_to_cpustate
  * @cpu: ARMCPU
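(Before the mechanical include changes below, a sketch of the consuming side
may help. After this patch a device model pulls the reginfo machinery from
cpregs.h instead of cpu.h, but the usage pattern itself is unchanged. This is
a hypothetical fragment, not from the patch -- DEMOREG is made up; the shape
mirrors the pxa2xx list further down, and at this point in the series lists
are still terminated by REGINFO_SENTINEL:

    /* In some QEMU device/CPU file, after #include "target/arm/cpregs.h":
     * a constant-value cp15 register visible read/write at PL1 and up. */
    static const ARMCPRegInfo demo_cp_reginfo[] = {
        { .name = "DEMOREG", .cp = 15, .crn = 15, .crm = 0,
          .opc1 = 0, .opc2 = 0,
          .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
        REGINFO_SENTINEL
    };

    /* ... and in the init function: define_arm_cp_regs(cpu, demo_cp_reginfo); */

A later patch in this series replaces the sentinel with an ARRAY_SIZE-based
macro, so only the terminator line changes for callers like this.)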
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/cutils.h"
 #include "qemu/log.h"
 #include "qom/object.h"
+#include "target/arm/cpregs.h"
 
 static struct {
     hwaddr io_base;
diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/pxa2xx_pic.c
+++ b/hw/arm/pxa2xx_pic.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/sysbus.h"
 #include "migration/vmstate.h"
 #include "qom/object.h"
+#include "target/arm/cpregs.h"
 
 #define ICIP    0x00    /* Interrupt Controller IRQ Pending register */
 #define ICMR    0x04    /* Interrupt Controller Mask register */
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -XXX,XX +XXX,XX @@
 #include "gicv3_internal.h"
 #include "hw/irq.h"
 #include "cpu.h"
+#include "target/arm/cpregs.h"
 
 /*
  * Special case return value from hppvi_index(); must be larger than
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -XXX,XX +XXX,XX @@
 #include "vgic_common.h"
 #include "migration/blocker.h"
 #include "qom/object.h"
+#include "target/arm/cpregs.h"
+
 
 #ifdef DEBUG_GICV3_KVM
 #define DPRINTF(fmt, ...) \
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@
 #include "kvm_arm.h"
 #include "disas/capstone.h"
 #include "fpu/softfloat.h"
+#include "cpregs.h"
 
 static void arm_cpu_set_pc(CPUState *cs, vaddr value)
 {
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@
 #include "hvf_arm.h"
 #include "qapi/visitor.h"
 #include "hw/qdev-properties.h"
+#include "cpregs.h"
 
 
 #ifndef CONFIG_USER_ONLY
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -XXX,XX +XXX,XX @@
 #if !defined(CONFIG_USER_ONLY)
 #include "hw/boards.h"
 #endif
+#include "cpregs.h"
 
 /* CPU models. These are not needed for the AArch64 linux-user build. */
 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -XXX,XX +XXX,XX @@
  */
 #include "qemu/osdep.h"
 #include "cpu.h"
-#include "internals.h"
 #include "exec/gdbstub.h"
+#include "internals.h"
+#include "cpregs.h"
 
 typedef struct RegisterSysregXmlParam {
     CPUState *cs;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/cpu_ldst.h"
 #include "semihosting/common-semi.h"
 #endif
+#include "cpregs.h"
 
 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
 #define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "internals.h"
 #include "exec/exec-all.h"
 #include "exec/cpu_ldst.h"
+#include "cpregs.h"
 
 #define SIGNBIT (uint32_t)0x80000000
 #define SIGNBIT64 ((uint64_t)1 << 63)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@
 #include "translate.h"
 #include "internals.h"
 #include "qemu/host-utils.h"
-
 #include "semihosting/semihost.h"
 #include "exec/gen-icount.h"
-
 #include "exec/helper-proto.h"
 #include "exec/helper-gen.h"
 #include "exec/log.h"
-
+#include "cpregs.h"
 #include "translate-a64.h"
 #include "qemu/atomic128.h"
 
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/bitops.h"
 #include "arm_ldst.h"
 #include "semihosting/semihost.h"
-
 #include "exec/helper-proto.h"
 #include "exec/helper-gen.h"
-
 #include "exec/log.h"
+#include "cpregs.h"
 
 
 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Rearrange the values of the enumerators of CPAccessResult
so that we may directly extract the target el. For the two
special cases in access_check_cp_reg, use CPAccessResult.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220501055028.646596-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpregs.h    | 26 ++++++++++++--------
 target/arm/op_helper.c | 56 +++++++++++++++++++++---------------------
 2 files changed, 44 insertions(+), 38 deletions(-)
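To make the new scheme concrete, here is a minimal standalone sketch
(compilable on its own; illustration only, not QEMU code) of how a failure
kind and its target EL are packed into and unpacked from one value. The
enumerator values mirror the hunk below:

    /* Bits [1:0] of a CPAccessResult carry the target EL; the
     * remaining bits identify the kind of access failure. */
    #include <assert.h>

    enum {
        CP_ACCESS_OK = 0,
        CP_ACCESS_EL_MASK = 3,
        CP_ACCESS_TRAP = (1 << 2),
        CP_ACCESS_TRAP_EL2 = CP_ACCESS_TRAP | 2,
        CP_ACCESS_TRAP_EL3 = CP_ACCESS_TRAP | 3,
        CP_ACCESS_TRAP_UNCATEGORIZED = (2 << 2),
        CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = CP_ACCESS_TRAP_UNCATEGORIZED | 2,
    };

    int main(void)
    {
        int res = CP_ACCESS_TRAP_UNCATEGORIZED_EL2;  /* == 0xa */
        /* Kind and target EL come out with two mask operations: */
        assert((res & ~CP_ACCESS_EL_MASK) == CP_ACCESS_TRAP_UNCATEGORIZED);
        assert((res & CP_ACCESS_EL_MASK) == 2);      /* trap goes to EL2 */
        return 0;
    }

A target-EL field of 0 means "the usual target EL", which is why the helper
below maps 0 to exception_target_el(env) rather than treating it as EL0.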
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -XXX,XX +XXX,XX @@ static inline bool cptype_valid(int cptype)
 typedef enum CPAccessResult {
     /* Access is permitted */
     CP_ACCESS_OK = 0,
+
+    /*
+     * Combined with one of the following, the low 2 bits indicate the
+     * target exception level. If 0, the exception is taken to the usual
+     * target EL (EL1 or PL1 if in EL0, otherwise to the current EL).
+     */
+    CP_ACCESS_EL_MASK = 3,
+
     /*
      * Access fails due to a configurable trap or enable which would
      * result in a categorized exception syndrome giving information about
      * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
-     * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
-     * PL1 if in EL0, otherwise to the current EL).
+     * 0xc or 0x18).
      */
-    CP_ACCESS_TRAP = 1,
+    CP_ACCESS_TRAP = (1 << 2),
+    CP_ACCESS_TRAP_EL2 = CP_ACCESS_TRAP | 2,
+    CP_ACCESS_TRAP_EL3 = CP_ACCESS_TRAP | 3,
+
     /*
      * Access fails and results in an exception syndrome 0x0 ("uncategorized").
      * Note that this is not a catch-all case -- the set of cases which may
      * result in this failure is specifically defined by the architecture.
      */
-    CP_ACCESS_TRAP_UNCATEGORIZED = 2,
-    /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
-    CP_ACCESS_TRAP_EL2 = 3,
-    CP_ACCESS_TRAP_EL3 = 4,
-    /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
-    CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
-    CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
+    CP_ACCESS_TRAP_UNCATEGORIZED = (2 << 2),
+    CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = CP_ACCESS_TRAP_UNCATEGORIZED | 2,
+    CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = CP_ACCESS_TRAP_UNCATEGORIZED | 3,
 } CPAccessResult;
 
 typedef struct ARMCPRegInfo ARMCPRegInfo;
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                  uint32_t isread)
 {
     const ARMCPRegInfo *ri = rip;
+    CPAccessResult res = CP_ACCESS_OK;
     int target_el;
 
     if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
         && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
-        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
+        res = CP_ACCESS_TRAP;
+        goto fail;
     }
 
     /*
@@ -XXX,XX +XXX,XX @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
         mask &= ~((1 << 4) | (1 << 14));
 
         if (env->cp15.hstr_el2 & mask) {
-            target_el = 2;
-            goto exept;
+            res = CP_ACCESS_TRAP_EL2;
+            goto fail;
         }
     }
 
-    if (!ri->accessfn) {
+    if (ri->accessfn) {
+        res = ri->accessfn(env, ri, isread);
+    }
+    if (likely(res == CP_ACCESS_OK)) {
         return;
     }
 
-    switch (ri->accessfn(env, ri, isread)) {
-    case CP_ACCESS_OK:
-        return;
+ fail:
+    switch (res & ~CP_ACCESS_EL_MASK) {
     case CP_ACCESS_TRAP:
-        target_el = exception_target_el(env);
-        break;
-    case CP_ACCESS_TRAP_EL2:
-        /* Requesting a trap to EL2 when we're in EL3 is
-         * a bug in the access function.
-         */
-        assert(arm_current_el(env) != 3);
-        target_el = 2;
-        break;
-    case CP_ACCESS_TRAP_EL3:
-        target_el = 3;
         break;
     case CP_ACCESS_TRAP_UNCATEGORIZED:
-        target_el = exception_target_el(env);
-        syndrome = syn_uncategorized();
-        break;
-    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
-        target_el = 2;
-        syndrome = syn_uncategorized();
-        break;
-    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
-        target_el = 3;
         syndrome = syn_uncategorized();
         break;
     default:
         g_assert_not_reached();
     }
 
-exept:
+    target_el = res & CP_ACCESS_EL_MASK;
+    switch (target_el) {
+    case 0:
+        target_el = exception_target_el(env);
+        break;
+    case 2:
+        assert(arm_current_el(env) != 3);
+        assert(arm_is_el2_enabled(env));
+        break;
+    case 3:
+        assert(arm_feature(env, ARM_FEATURE_EL3));
+        break;
+    default:
+        /* No "direct" traps to EL1 */
+        g_assert_not_reached();
+    }
+
     raise_exception(env, EXCP_UDEF, syndrome, target_el);
 }
--
2.25.1

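As a usage note on the patch above (hypothetical hook, not from this series):
under the new layout an access-check function reports both the failure kind
and the destination EL in a single return value, so the helper no longer needs
one switch case per (kind, EL) pair. A sketch, assuming QEMU's existing
arm_current_el() and arm_is_secure_below_el3() helpers:

    /* Hypothetical CPAccessFn: trap non-secure EL1 accesses
     * directly to EL2 by returning a combined value. */
    static CPAccessResult demo_access(CPUARMState *env,
                                      const ARMCPRegInfo *ri, bool isread)
    {
        if (arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
            return CP_ACCESS_TRAP_EL2;   /* == CP_ACCESS_TRAP | 2 */
        }
        return CP_ACCESS_OK;
    }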
From: Richard Henderson <richard.henderson@linaro.org>

Remove a possible source of error by removing REGINFO_SENTINEL
and using ARRAY_SIZE (conveniently hidden inside a macro) to
find the end of the set of regs being registered or modified.

The space saved by not having the extra array element reduces
the executable's .data.rel.ro section by about 9k.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220501055028.646596-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpregs.h       |  53 +++++++++---------
 hw/arm/pxa2xx.c           |   1 -
 hw/arm/pxa2xx_pic.c       |   1 -
 hw/intc/arm_gicv3_cpuif.c |   5 --
 hw/intc/arm_gicv3_kvm.c   |   1 -
 target/arm/cpu64.c        |   1 -
 target/arm/cpu_tcg.c      |   4 --
 target/arm/helper.c       | 111 ++++++------------------------
 8 files changed, 48 insertions(+), 129 deletions(-)

diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -XXX,XX +XXX,XX @@
 #define ARM_CP_NO_GDB 0x4000
 #define ARM_CP_RAISES_EXC 0x8000
 #define ARM_CP_NEWEL 0x10000
-/* Used only as a terminator for ARMCPRegInfo lists */
-#define ARM_CP_SENTINEL 0xfffff
 /* Mask of only the flag bits in a type field */
 #define ARM_CP_FLAG_MASK 0x1f0ff
 
@@ -XXX,XX +XXX,XX @@ enum {
     ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
 };
 
-/*
- * Return true if cptype is a valid type field. This is used to try to
- * catch errors where the sentinel has been accidentally left off the end
- * of a list of registers.
- */
-static inline bool cptype_valid(int cptype)
-{
-    return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
-        || ((cptype & ARM_CP_SPECIAL) &&
-            ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
-}
-
 /*
  * Access rights:
  * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
@@ -XXX,XX +XXX,XX @@ struct ARMCPRegInfo {
 #define CPREG_FIELD64(env, ri) \
     (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
 
-#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }
+void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, const ARMCPRegInfo *reg,
+                                       void *opaque);
 
-void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
-                                    const ARMCPRegInfo *regs, void *opaque);
-void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
-                                       const ARMCPRegInfo *regs, void *opaque);
-static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
-{
-    define_arm_cp_regs_with_opaque(cpu, regs, 0);
-}
 static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
 {
-    define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
+    define_one_arm_cp_reg_with_opaque(cpu, regs, NULL);
 }
+
+void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
+                                        void *opaque, size_t len);
+
+#define define_arm_cp_regs_with_opaque(CPU, REGS, OPAQUE)               \
+    do {                                                                \
+        QEMU_BUILD_BUG_ON(ARRAY_SIZE(REGS) == 0);                       \
+        define_arm_cp_regs_with_opaque_len(CPU, REGS, OPAQUE,           \
+                                           ARRAY_SIZE(REGS));           \
+    } while (0)
+
+#define define_arm_cp_regs(CPU, REGS) \
+    define_arm_cp_regs_with_opaque(CPU, REGS, NULL)
+
 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
 
 /*
@@ -XXX,XX +XXX,XX @@ typedef struct ARMCPRegUserSpaceInfo {
     uint64_t fixed_bits;
 } ARMCPRegUserSpaceInfo;
 
-#define REGUSERINFO_SENTINEL { .name = NULL }
+void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
+                                 const ARMCPRegUserSpaceInfo *mods,
+                                 size_t mods_len);
 
-void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods);
+#define modify_arm_cp_regs(REGS, MODS)                                  \
+    do {                                                                \
+        QEMU_BUILD_BUG_ON(ARRAY_SIZE(REGS) == 0);                       \
+        QEMU_BUILD_BUG_ON(ARRAY_SIZE(MODS) == 0);                       \
+        modify_arm_cp_regs_with_len(REGS, ARRAY_SIZE(REGS),             \
+                                    MODS, ARRAY_SIZE(MODS));            \
+    } while (0)
 
 /* CPWriteFn that can be used to implement writes-ignored behaviour */
 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo pxa_cp_reginfo[] = {
     { .name = "PWRMODE", .cp = 14, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 0,
       .access = PL1_RW, .type = ARM_CP_IO,
       .readfn = arm_cp_read_zero, .writefn = pxa2xx_pwrmode_write },
-    REGINFO_SENTINEL
 };
 
 static void pxa2xx_setup_cp14(PXA2xxState *s)
diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/pxa2xx_pic.c
+++ b/hw/arm/pxa2xx_pic.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo pxa_pic_cp_reginfo[] = {
     REGINFO_FOR_PIC_CP("ICLR2", 8),
     REGINFO_FOR_PIC_CP("ICFP2", 9),
     REGINFO_FOR_PIC_CP("ICPR2", 0xa),
-    REGINFO_SENTINEL
 };
 
 static const MemoryRegionOps pxa2xx_pic_ops = {
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
       .readfn = icc_igrpen1_el3_read,
       .writefn = icc_igrpen1_el3_write,
     },
-    REGINFO_SENTINEL
 };
 
 static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
       .readfn = ich_vmcr_read,
       .writefn = ich_vmcr_write,
     },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
       .readfn = ich_ap_read,
       .writefn = ich_ap_write,
     },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
       .readfn = ich_ap_read,
       .writefn = ich_ap_write,
     },
-    REGINFO_SENTINEL
 };
 
 static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
@@ -XXX,XX +XXX,XX @@ void gicv3_init_cpuif(GICv3State *s)
               .readfn = ich_lr_read,
               .writefn = ich_lr_write,
             },
-            REGINFO_SENTINEL
         };
         define_arm_cp_regs(cpu, lr_regset);
     }
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
      */
      .resetfn = arm_gicv3_icc_reset,
     },
-    REGINFO_SENTINEL
 };
 
 /**
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
     { .name = "L2MERRSR",
       .cp = 15, .opc1 = 3, .crm = 15,
       .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
-    REGINFO_SENTINEL
 };
 
 static void aarch64_a57_initfn(Object *obj)
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
     { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
-    REGINFO_SENTINEL
 };
 
 static void cortex_a8_initfn(Object *obj)
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
       .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
     { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
       .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
-    REGINFO_SENTINEL
 };
 
 static void cortex_a9_initfn(Object *obj)
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
 #endif
     { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
-    REGINFO_SENTINEL
 };
 
 static void cortex_a7_initfn(Object *obj)
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
       .access = PL1_RW, .type = ARM_CP_CONST },
     { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
       .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
-    REGINFO_SENTINEL
 };
 
 static void cortex_r5_initfn(Object *obj)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cp_reginfo[] = {
       .secure = ARM_CP_SECSTATE_S,
       .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
       .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo not_v8_cp_reginfo[] = {
     { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
       .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo not_v6_cp_reginfo[] = {
      */
     { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
       .access = PL1_W, .type = ARM_CP_WFI },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
       .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
     { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
       .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
-    REGINFO_SENTINEL
 };
 
 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
       .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
       .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
-    REGINFO_SENTINEL
 };
 
 typedef struct pm_event {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
     { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
       .writefn = tlbimvaa_write },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v7mp_cp_reginfo[] = {
     { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
       .writefn = tlbimvaa_is_write },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
       .writefn = pmovsset_write,
       .raw_writefn = raw_write },
-    REGINFO_SENTINEL
 };
 
 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo t2ee_cp_reginfo[] = {
     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
       .accessfn = teehbr_access, .resetvalue = 0 },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo v6k_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6k_cp_reginfo[] = {
       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
       .resetvalue = 0 },
-    REGINFO_SENTINEL
 };
 
 #ifndef CONFIG_USER_ONLY
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
     },
-    REGINFO_SENTINEL
 };
 
 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
       .readfn = gt_virt_cnt_read,
     },
-    REGINFO_SENTINEL
 };
 
 #endif
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vapa_cp_reginfo[] = {
       .access = PL1_W, .accessfn = ats_access,
       .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
 #endif
-    REGINFO_SENTINEL
 };
 
 /* Return basic MPU access permission bits. */
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
       .writefn = pmsav7_rgnr_write,
       .resetfn = arm_cp_reset_ignore },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
-    REGINFO_SENTINEL
 };
 
 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
       .access = PL1_RW, .accessfn = access_tvm_trvm,
       .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
       .resetvalue = 0, },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
       /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
                              offsetof(CPUARMState, cp15.tcr_el[1])} },
-    REGINFO_SENTINEL
 };
 
 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo omap_cp_reginfo[] = {
     { .name = "C9", .cp = 15, .crn = 9,
       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
-    REGINFO_SENTINEL
 };
 
 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo xscale_cp_reginfo[] = {
     { .name = "XSCALE_UNLOCK_DCACHE",
       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
       .access = PL1_W, .type = ARM_CP_NOP },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
       .access = PL1_RW,
       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
       .resetvalue = 0 },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
       .resetvalue = 0 },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
       .resetvalue = (1 << 30) },
-    REGINFO_SENTINEL
 };
 
 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo strongarm_cp_reginfo[] = {
       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
       .access = PL1_RW, .resetvalue = 0,
       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
-    REGINFO_SENTINEL
 };
 
 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo lpae_cp_reginfo[] = {
       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                              offsetof(CPUARMState, cp15.ttbr1_ns) },
       .writefn = vmsa_ttbr_write, },
-    REGINFO_SENTINEL
 };
 
 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
       .writefn = sdcr_write,
       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
-    REGINFO_SENTINEL
 };
 
 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
       .type = ARM_CP_CONST,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
457
.type = ARM_CP_CONST,
458
.cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
459
.access = PL2_RW, .resetvalue = 0 },
460
- REGINFO_SENTINEL
461
};
462
463
/* Ditto, but for registers which exist in ARMv8 but not v7 */
464
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
465
.cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
466
.access = PL2_RW,
467
.type = ARM_CP_CONST, .resetvalue = 0 },
468
- REGINFO_SENTINEL
469
};
470
471
static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
472
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
473
.cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
474
.access = PL2_RW,
475
.fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
476
- REGINFO_SENTINEL
477
};
478
479
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
480
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
481
.access = PL2_RW,
482
.fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
483
.writefn = hcr_writehigh },
484
- REGINFO_SENTINEL
485
};
486
487
static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
488
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
489
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
490
.access = PL2_RW, .accessfn = sel2_access,
491
.fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
492
- REGINFO_SENTINEL
493
};
494
495
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
496
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
497
.opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
498
.access = PL3_W, .type = ARM_CP_NO_RAW,
499
.writefn = tlbi_aa64_vae3_write },
500
- REGINFO_SENTINEL
501
};
502
503
#ifndef CONFIG_USER_ONLY
504
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
505
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
506
.access = PL1_RW, .accessfn = access_tda,
507
.type = ARM_CP_NOP },
508
- REGINFO_SENTINEL
509
};
510
511
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
512
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
513
.access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
514
{ .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
515
.access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
516
- REGINFO_SENTINEL
517
};
518
519
/* Return the exception level to which exceptions should be taken
520
@@ -XXX,XX +XXX,XX @@ static void define_debug_regs(ARMCPU *cpu)
521
.fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
522
.writefn = dbgbcr_write, .raw_writefn = raw_write
523
},
524
- REGINFO_SENTINEL
525
};
526
define_arm_cp_regs(cpu, dbgregs);
527
}
528
@@ -XXX,XX +XXX,XX @@ static void define_debug_regs(ARMCPU *cpu)
529
.fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
530
.writefn = dbgwcr_write, .raw_writefn = raw_write
531
},
532
- REGINFO_SENTINEL
533
};
534
define_arm_cp_regs(cpu, dbgregs);
535
}
536
@@ -XXX,XX +XXX,XX @@ static void define_pmu_regs(ARMCPU *cpu)
537
.type = ARM_CP_IO,
538
.readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
539
.raw_writefn = pmevtyper_rawwrite },
540
- REGINFO_SENTINEL
541
};
542
define_arm_cp_regs(cpu, pmev_regs);
543
g_free(pmevcntr_name);
544
@@ -XXX,XX +XXX,XX @@ static void define_pmu_regs(ARMCPU *cpu)
545
.cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
546
.access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
547
.resetvalue = extract64(cpu->pmceid1, 32, 32) },
548
- REGINFO_SENTINEL
549
};
550
define_arm_cp_regs(cpu, v81_pmu_regs);
551
}
552
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo lor_reginfo[] = {
553
.opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
554
.access = PL1_R, .accessfn = access_lor_ns,
555
.type = ARM_CP_CONST, .resetvalue = 0 },
556
- REGINFO_SENTINEL
557
};
558
559
#ifdef TARGET_AARCH64
560
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo pauth_reginfo[] = {
561
.opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
562
.access = PL1_RW, .accessfn = access_pauth,
563
.fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
564
- REGINFO_SENTINEL
565
};
566
567
static const ARMCPRegInfo tlbirange_reginfo[] = {
568
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
569
.opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
570
.access = PL3_W, .type = ARM_CP_NO_RAW,
571
.writefn = tlbi_aa64_rvae3_write },
572
- REGINFO_SENTINEL
573
};
574
575
static const ARMCPRegInfo tlbios_reginfo[] = {
576
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbios_reginfo[] = {
577
.opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
578
.access = PL3_W, .type = ARM_CP_NO_RAW,
579
.writefn = tlbi_aa64_vae3is_write },
580
- REGINFO_SENTINEL
581
};
582
583
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
584
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo rndr_reginfo[] = {
585
.type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
586
.opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
587
.access = PL0_R, .readfn = rndr_readfn },
588
- REGINFO_SENTINEL
589
};
590
591
#ifndef CONFIG_USER_ONLY
592
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo dcpop_reg[] = {
593
.opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
594
.access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
595
.accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
596
- REGINFO_SENTINEL
597
};
598
599
static const ARMCPRegInfo dcpodp_reg[] = {
600
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo dcpodp_reg[] = {
601
.opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
602
.access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
603
.accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
604
- REGINFO_SENTINEL
605
};
606
#endif /*CONFIG_USER_ONLY*/
607
608
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo mte_reginfo[] = {
609
{ .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
610
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
611
.type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
612
- REGINFO_SENTINEL
613
};
614
615
static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
616
{ .name = "TCO", .state = ARM_CP_STATE_AA64,
617
.opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
618
.type = ARM_CP_CONST, .access = PL0_RW, },
619
- REGINFO_SENTINEL
620
};
621
622
static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
623
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
624
.accessfn = aa64_zva_access,
625
#endif
626
},
627
- REGINFO_SENTINEL
628
};
629
630
#endif
631
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo predinv_reginfo[] = {
632
{ .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
633
.cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
634
.type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
635
- REGINFO_SENTINEL
636
};
637
638
static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
639
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo ccsidr2_reginfo[] = {
640
.access = PL1_R,
641
.accessfn = access_aa64_tid2,
642
.readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
643
- REGINFO_SENTINEL
644
};
645
646
static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
647
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo jazelle_regs[] = {
648
.cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
649
.accessfn = access_joscr_jmcr,
650
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
651
- REGINFO_SENTINEL
652
};
653
654
static const ARMCPRegInfo vhe_reginfo[] = {
655
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vhe_reginfo[] = {
656
.access = PL2_RW, .accessfn = e2h_access,
657
.writefn = gt_virt_cval_write, .raw_writefn = raw_write },
658
#endif
659
- REGINFO_SENTINEL
660
};
661
662
#ifndef CONFIG_USER_ONLY
663
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo ats1e1_reginfo[] = {
664
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
665
.access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
666
.writefn = ats_write64 },
667
- REGINFO_SENTINEL
668
};
669
670
static const ARMCPRegInfo ats1cp_reginfo[] = {
671
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo ats1cp_reginfo[] = {
672
.cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
673
.access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
674
.writefn = ats_write },
675
- REGINFO_SENTINEL
676
};
677
#endif
678
679
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
680
.cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
681
.access = PL2_RW, .type = ARM_CP_CONST,
682
.resetvalue = 0 },
683
- REGINFO_SENTINEL
684
};
685
686
void register_cp_regs_for_features(ARMCPU *cpu)
687
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
688
.access = PL1_R, .type = ARM_CP_CONST,
689
.accessfn = access_aa32_tid3,
690
.resetvalue = cpu->isar.id_isar6 },
691
- REGINFO_SENTINEL
692
};
693
define_arm_cp_regs(cpu, v6_idregs);
694
define_arm_cp_regs(cpu, v6_cp_reginfo);
695
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
696
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
697
.access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
698
.resetvalue = cpu->pmceid1 },
699
- REGINFO_SENTINEL
700
};
701
#ifdef CONFIG_USER_ONLY
702
ARMCPRegUserSpaceInfo v8_user_idregs[] = {
703
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
704
.exported_bits = 0x000000f0ffffffff },
705
{ .name = "ID_AA64ISAR*_EL1_RESERVED",
706
.is_glob = true },
707
- REGUSERINFO_SENTINEL
708
};
709
modify_arm_cp_regs(v8_idregs, v8_user_idregs);
710
#endif
711
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
712
.access = PL2_RW,
713
.resetvalue = vmpidr_def,
714
.fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
715
- REGINFO_SENTINEL
716
};
717
define_arm_cp_regs(cpu, vpidr_regs);
718
define_arm_cp_regs(cpu, el2_cp_reginfo);
719
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
720
.access = PL2_RW, .accessfn = access_el3_aa32ns,
721
.type = ARM_CP_NO_RAW,
722
.writefn = arm_cp_write_ignore, .readfn = mpidr_read },
723
- REGINFO_SENTINEL
724
};
725
define_arm_cp_regs(cpu, vpidr_regs);
726
define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
727
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
728
.raw_writefn = raw_write, .writefn = sctlr_write,
729
.fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
730
.resetvalue = cpu->reset_sctlr },
731
- REGINFO_SENTINEL
732
};
733
734
define_arm_cp_regs(cpu, el3_regs);
735
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
736
{ .name = "DUMMY",
737
.cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
738
.access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
739
- REGINFO_SENTINEL
740
};
741
ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
742
{ .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
743
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
744
.access = PL1_R,
745
.accessfn = access_aa64_tid1,
746
.type = ARM_CP_CONST, .resetvalue = cpu->revidr },
747
- REGINFO_SENTINEL
748
};
749
ARMCPRegInfo id_cp_reginfo[] = {
750
/* These are common to v8 and pre-v8 */
751
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
752
.access = PL1_R,
753
.accessfn = access_aa32_tid1,
754
.type = ARM_CP_CONST, .resetvalue = 0 },
755
- REGINFO_SENTINEL
756
};
757
/* TLBTR is specific to VMSA */
758
ARMCPRegInfo id_tlbtr_reginfo = {
759
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
760
{ .name = "MIDR_EL1",
761
.exported_bits = 0x00000000ffffffff },
762
{ .name = "REVIDR_EL1" },
763
- REGUSERINFO_SENTINEL
764
};
765
modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
766
#endif
767
if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
768
arm_feature(env, ARM_FEATURE_STRONGARM)) {
769
- ARMCPRegInfo *r;
770
+ size_t i;
771
/* Register the blanket "writes ignored" value first to cover the
772
* whole space. Then update the specific ID registers to allow write
773
* access, so that they ignore writes rather than causing them to
774
* UNDEF.
775
*/
776
define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
777
- for (r = id_pre_v8_midr_cp_reginfo;
778
- r->type != ARM_CP_SENTINEL; r++) {
779
- r->access = PL1_RW;
780
+ for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
781
+ id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
782
}
783
- for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
784
- r->access = PL1_RW;
785
+ for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
786
+ id_cp_reginfo[i].access = PL1_RW;
787
}
788
id_mpuir_reginfo.access = PL1_RW;
789
id_tlbtr_reginfo.access = PL1_RW;
790
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
791
{ .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
792
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
793
.access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
794
- REGINFO_SENTINEL
795
};
796
#ifdef CONFIG_USER_ONLY
797
ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
798
{ .name = "MPIDR_EL1",
799
.fixed_bits = 0x0000000080000000 },
800
- REGUSERINFO_SENTINEL
801
};
802
modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
803
#endif
804
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
805
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
806
.access = PL3_RW, .type = ARM_CP_CONST,
807
.resetvalue = 0 },
808
- REGINFO_SENTINEL
809
};
810
define_arm_cp_regs(cpu, auxcr_reginfo);
811
if (cpu_isar_feature(aa32_ac2, cpu)) {
812
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
813
.type = ARM_CP_CONST,
814
.opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
815
.access = PL1_R, .resetvalue = cpu->reset_cbar },
816
- REGINFO_SENTINEL
817
};
818
/* We don't implement a r/w 64 bit CBAR currently */
819
assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
820
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
821
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
822
offsetof(CPUARMState, cp15.vbar_ns) },
823
.resetvalue = 0 },
824
- REGINFO_SENTINEL
825
};
826
define_arm_cp_regs(cpu, vbar_cp_reginfo);
827
}
828
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
829
r->writefn);
830
}
831
}
832
- /* Bad type field probably means missing sentinel at end of reg list */
833
- assert(cptype_valid(r->type));
834
+
835
for (crm = crmmin; crm <= crmmax; crm++) {
836
for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
837
for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
838
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
48
}
839
}
49
}
840
}
50
841
51
+static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
842
-void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
52
+ uint64_t dirty_ptr, uintptr_t ra)
843
- const ARMCPRegInfo *regs, void *opaque)
53
+{
844
+/* Define a whole list of registers */
54
+ int is_write, syn;
845
+void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
55
+
846
+ void *opaque, size_t len)
56
+ env->exception.vaddress = dirty_ptr;
57
+
58
+ is_write = FIELD_EX32(desc, MTEDESC, WRITE);
59
+ syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
60
+ 0x11);
61
+ raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
62
+ g_assert_not_reached();
63
+}
64
+
65
+static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
66
+ uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
67
+{
68
+ int select;
69
+
70
+ if (regime_has_2_ranges(arm_mmu_idx)) {
71
+ select = extract64(dirty_ptr, 55, 1);
72
+ } else {
73
+ select = 0;
74
+ }
75
+ env->cp15.tfsr_el[el] |= 1 << select;
76
+#ifdef CONFIG_USER_ONLY
77
+ /*
78
+ * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
79
+ * which then sends a SIGSEGV when the thread is next scheduled.
80
+ * This cpu will return to the main loop at the end of the TB,
81
+ * which is rather sooner than "normal". But the alternative
82
+ * is waiting until the next syscall.
83
+ */
84
+ qemu_cpu_kick(env_cpu(env));
85
+#endif
86
+}
87
+
88
/* Record a tag check failure. */
89
static void mte_check_fail(CPUARMState *env, uint32_t desc,
90
uint64_t dirty_ptr, uintptr_t ra)
91
{
847
{
92
int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
848
- /* Define a whole list of registers */
93
ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
849
- const ARMCPRegInfo *r;
94
- int el, reg_el, tcf, select, is_write, syn;
850
- for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
95
+ int el, reg_el, tcf;
851
- define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
96
uint64_t sctlr;
852
+ size_t i;
97
853
+ for (i = 0; i < len; ++i) {
98
reg_el = regime_el(env, arm_mmu_idx);
854
+ define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
99
@@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
100
switch (tcf) {
101
case 1:
102
/* Tag check fail causes a synchronous exception. */
103
- env->exception.vaddress = dirty_ptr;
104
-
105
- is_write = FIELD_EX32(desc, MTEDESC, WRITE);
106
- syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0,
107
- is_write, 0x11);
108
- raise_exception_ra(env, EXCP_DATA_ABORT, syn,
109
- exception_target_el(env), ra);
110
- /* noreturn, but fall through to the assert anyway */
111
+ mte_sync_check_fail(env, desc, dirty_ptr, ra);
112
+ break;
113
114
case 0:
115
/*
116
@@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
117
118
case 2:
119
/* Tag check fail causes asynchronous flag set. */
120
- if (regime_has_2_ranges(arm_mmu_idx)) {
121
- select = extract64(dirty_ptr, 55, 1);
122
- } else {
123
- select = 0;
124
- }
125
- env->cp15.tfsr_el[el] |= 1 << select;
126
-#ifdef CONFIG_USER_ONLY
127
- /*
128
- * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
129
- * which then sends a SIGSEGV when the thread is next scheduled.
130
- * This cpu will return to the main loop at the end of the TB,
131
- * which is rather sooner than "normal". But the alternative
132
- * is waiting until the next syscall.
133
- */
134
- qemu_cpu_kick(env_cpu(env));
135
-#endif
136
+ mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
137
break;
138
139
- default:
140
- /* Case 3: Reserved. */
141
- qemu_log_mask(LOG_GUEST_ERROR,
142
- "Tag check failure with SCTLR_EL%d.TCF%s "
143
- "set to reserved value %d\n",
144
- reg_el, el ? "" : "0", tcf);
145
+ case 3:
146
+ /*
147
+ * Tag check fail causes asynchronous flag set for stores, or
148
+ * a synchronous exception for loads.
149
+ */
150
+ if (FIELD_EX32(desc, MTEDESC, WRITE)) {
151
+ mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
152
+ } else {
153
+ mte_sync_check_fail(env, desc, dirty_ptr, ra);
154
+ }
155
break;
156
}
855
}
157
}
856
}
857
858
@@ -XXX,XX +XXX,XX @@ void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
859
* user-space cannot alter any values and dynamic values pertaining to
860
* execution state are hidden from user space view anyway.
861
*/
862
-void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
863
+void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
864
+ const ARMCPRegUserSpaceInfo *mods,
865
+ size_t mods_len)
866
{
867
- const ARMCPRegUserSpaceInfo *m;
868
- ARMCPRegInfo *r;
869
-
870
- for (m = mods; m->name; m++) {
871
+ for (size_t mi = 0; mi < mods_len; ++mi) {
872
+ const ARMCPRegUserSpaceInfo *m = mods + mi;
873
GPatternSpec *pat = NULL;
874
+
875
if (m->is_glob) {
876
pat = g_pattern_spec_new(m->name);
877
}
878
- for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
879
+ for (size_t ri = 0; ri < regs_len; ++ri) {
880
+ ARMCPRegInfo *r = regs + ri;
881
+
882
if (pat && g_pattern_match_string(pat, r->name)) {
883
r->type = ARM_CP_CONST;
884
r->access = PL0U_R;
158
--
885
--
159
2.20.1
886
2.25.1
160
887
161
888
diff view generated by jsdifflib
1
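The conversion above is mechanical but easy to fumble by hand, so here is a
minimal self-contained C sketch of the same sentinel-to-ARRAY_SIZE idiom.
The Reg type, REG_SENTINEL, define_one() and define_list() are invented
stand-ins for the ARMCPRegInfo machinery, not actual QEMU APIs.

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef struct {
    const char *name;
    int type;
} Reg;

static void define_one(const Reg *r)
{
    printf("defining %s\n", r->name);
}

/* Old style: a dummy terminator entry marks the end of the list. */
#define REG_SENTINEL { .name = NULL, .type = -1 }

static const Reg old_style[] = {
    { "FOO", 0 },
    { "BAR", 0 },
    REG_SENTINEL,   /* easy to forget, and costs one element per list */
};

/* New style: no sentinel; the length travels with the pointer. */
static const Reg new_style[] = {
    { "FOO", 0 },
    { "BAR", 0 },
};

static void define_list(const Reg *regs, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        define_one(&regs[i]);
    }
}

int main(void)
{
    /* Old: walk until the sentinel entry. */
    for (const Reg *r = old_style; r->name != NULL; r++) {
        define_one(r);
    }
    /* New: the array length is known at compile time. */
    define_list(new_style, ARRAY_SIZE(new_style));
    return 0;
}

As the _len suffix in the patch suggests, a wrapper macro can pass
ARRAY_SIZE(regs) implicitly so call sites keep their shape; the sketch
spells the length out instead.
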
Implement the vector forms of the MVE VQDMULH and VQRDMULH insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-31-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++++++
 target/arm/mve.decode      |  3 +++
 target/arm/mve_helper.c    | 27 +++++++++++++++++++++
 target/arm/translate-mve.c |  2 ++
 4 files changed, 40 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmulltub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulltuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vqdmulhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmulhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmulhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmulhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmulhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmulhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMULL_BU 111 1 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
 VMULL_TS 111 0 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
 VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op

+VQDMULH  1110 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
+VQRDMULH 1111 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
+
 # Vector miscellaneous

 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
         mve_advance_vpt(env);                                           \
     }

+#define DO_2OP_SAT(OP, ESIZE, TYPE, FN)                                 \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
+    {                                                                   \
+        TYPE *d = vd, *n = vn, *m = vm;                                 \
+        uint16_t mask = mve_element_mask(env);                          \
+        unsigned e;                                                     \
+        bool qc = false;                                                \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
+            bool sat = false;                                           \
+            TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat);          \
+            mergemask(&d[H##ESIZE(e)], r, mask);                        \
+            qc |= sat & mask & 1;                                       \
+        }                                                               \
+        if (qc) {                                                       \
+            env->vfp.qc[0] = qc;                                        \
+        }                                                               \
+        mve_advance_vpt(env);                                           \
+    }
+
 #define DO_AND(N, M)  ((N) & (M))
 #define DO_BIC(N, M)  ((N) & ~(M))
 #define DO_ORR(N, M)  ((N) | (M))
@@ -XXX,XX +XXX,XX @@ static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 #define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
                                          INT32_MIN, INT32_MAX, s)

+DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
+DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
+DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)
+
+DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
+DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
+DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)
+
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN)                              \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                 uint32_t rm)                            \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMULL_BS, vmullbs)
 DO_2OP(VMULL_BU, vmullbu)
 DO_2OP(VMULL_TS, vmullts)
 DO_2OP(VMULL_TU, vmulltu)
+DO_2OP(VQDMULH, vqdmulh)
+DO_2OP(VQRDMULH, vqrdmulh)

 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

These particular data structures are not modified at runtime.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220501055028.646596-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
               .resetvalue = cpu->pmceid1 },
         };
 #ifdef CONFIG_USER_ONLY
-        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
+        static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
             { .name = "ID_AA64PFR0_EL1",
               .exported_bits = 0x000f000f00ff0000,
               .fixed_bits = 0x0000000000000011 },
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
      */
     if (arm_feature(env, ARM_FEATURE_EL3)) {
         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
-            ARMCPRegInfo nsacr = {
+            static const ARMCPRegInfo nsacr = {
                 .name = "NSACR", .type = ARM_CP_CONST,
                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                 .access = PL1_RW, .accessfn = nsacr_access,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             };
             define_one_arm_cp_reg(cpu, &nsacr);
         } else {
-            ARMCPRegInfo nsacr = {
+            static const ARMCPRegInfo nsacr = {
                 .name = "NSACR",
                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                 .access = PL3_RW | PL1_R,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
         }
     } else {
         if (arm_feature(env, ARM_FEATURE_V8)) {
-            ARMCPRegInfo nsacr = {
+            static const ARMCPRegInfo nsacr = {
                 .name = "NSACR", .type = ARM_CP_CONST,
                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                 .access = PL1_R,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             .access = PL1_R, .type = ARM_CP_CONST,
             .resetvalue = cpu->pmsav7_dregion << 8
         };
-        ARMCPRegInfo crn0_wi_reginfo = {
+        static const ARMCPRegInfo crn0_wi_reginfo = {
             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
         };
 #ifdef CONFIG_USER_ONLY
-        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
+        static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
             { .name = "MIDR_EL1",
               .exported_bits = 0x00000000ffffffff },
             { .name = "REVIDR_EL1" },
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
               .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
         };
 #ifdef CONFIG_USER_ONLY
-        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
+        static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
             { .name = "MPIDR_EL1",
               .fixed_bits = 0x0000000080000000 },
         };
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
     }

     if (arm_feature(env, ARM_FEATURE_VBAR)) {
-        ARMCPRegInfo vbar_cp_reginfo[] = {
+        static const ARMCPRegInfo vbar_cp_reginfo[] = {
             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
               .access = PL1_RW, .writefn = vbar_write,
--
2.25.1

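For reference, the lane arithmetic behind the DO_QDMULH_W/DO_QRDMULH_W macros
above can be exercised standalone: multiply, double by shifting one bit less
than the element width, optionally round, then saturate. This sketch covers
only the 32-bit case; sat32(), qdmulh32() and qrdmulh32() are invented names
mirroring those macros.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int32_t sat32(int64_t v, bool *sat)
{
    if (v > INT32_MAX) {
        *sat = true;
        return INT32_MAX;
    }
    if (v < INT32_MIN) {
        *sat = true;
        return INT32_MIN;
    }
    return (int32_t)v;
}

/* (2 * n * m) >> 32, i.e. (n * m) >> 31, with saturation. */
static int32_t qdmulh32(int32_t n, int32_t m, bool *sat)
{
    return sat32(((int64_t)n * m) >> 31, sat);
}

/* Rounding variant: add half the discarded weight before the shift. */
static int32_t qrdmulh32(int32_t n, int32_t m, bool *sat)
{
    return sat32(((int64_t)n * m + (1 << 30)) >> 31, sat);
}

int main(void)
{
    bool sat = false;
    /* The classic overflow: doubling (-2^31 * -2^31) exceeds int32_t. */
    printf("%d (sat=%d)\n", (int)qdmulh32(INT32_MIN, INT32_MIN, &sat), sat);
    sat = false;
    printf("%d (sat=%d)\n", (int)qrdmulh32(0x40000000, 0x40000000, &sat), sat);
    return 0;
}

In the vector helpers above, each lane's sat flag is additionally ANDed with
the predicate mask and folded into FPSCR.QC via env->vfp.qc[0]; this scalar
sketch omits the predication.
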
Implement the scalar variants of the MVE VHADD and VHSUB insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-25-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 16 ++++++++++++++++
 target/arm/mve.decode      |  4 ++++
 target/arm/mve_helper.c    |  8 ++++++++
 target/arm/translate-mve.c |  4 ++++
 4 files changed, 32 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmul_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vmul_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vmul_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(mve_vhadds_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhadds_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhadds_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vhaddu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhaddu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhaddu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vhsubs_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubs_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubs_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vhsubu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_no
 VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
 VSUB_scalar 1110 1110 0 . .. ... 1 ... 1 1111 . 100 .... @2scalar
 VMUL_scalar 1110 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
+VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
+VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
+VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
+VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vhsubu, do_vhsub_u)
     DO_2OP_SCALAR(OP##b, 1, uint8_t, FN)        \
     DO_2OP_SCALAR(OP##h, 2, uint16_t, FN)       \
     DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
+#define DO_2OP_SCALAR_S(OP, FN)                 \
+    DO_2OP_SCALAR(OP##b, 1, int8_t, FN)         \
+    DO_2OP_SCALAR(OP##h, 2, int16_t, FN)        \
+    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

 DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
 DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
 DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
+DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
+DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
+DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
+DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

 /*
  * Multiply add long dual accumulate ops.
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
 DO_2OP_SCALAR(VADD_scalar, vadd_scalar)
 DO_2OP_SCALAR(VSUB_scalar, vsub_scalar)
 DO_2OP_SCALAR(VMUL_scalar, vmul_scalar)
+DO_2OP_SCALAR(VHADD_S_scalar, vhadds_scalar)
+DO_2OP_SCALAR(VHADD_U_scalar, vhaddu_scalar)
+DO_2OP_SCALAR(VHSUB_S_scalar, vhsubs_scalar)
+DO_2OP_SCALAR(VHSUB_U_scalar, vhsubu_scalar)

 static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
                              MVEGenDualAccOpFn *fn)
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Instead of defining ARM_CP_FLAG_MASK to remove flags,
define ARM_CP_SPECIAL_MASK to isolate special cases.
Sort the specials to the low bits. Use an enum.

Split the large comment block so as to document each
value separately.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220501055028.646596-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpregs.h        | 130 +++++++++++++++++++++++--------------
 target/arm/cpu.c           |   4 +-
 target/arm/helper.c        |   4 +-
 target/arm/translate-a64.c |   6 +-
 target/arm/translate.c     |   6 +-
 5 files changed, 92 insertions(+), 58 deletions(-)

diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -XXX,XX +XXX,XX @@
 #define TARGET_ARM_CPREGS_H

 /*
- * ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
- * special-behaviour cp reg and bits [11..8] indicate what behaviour
- * it has. Otherwise it is a simple cp reg, where CONST indicates that
- * TCG can assume the value to be constant (ie load at translate time)
- * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
- * indicates that the TB should not be ended after a write to this register
- * (the default is that the TB ends after cp writes). OVERRIDE permits
- * a register definition to override a previous definition for the
- * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
- * old must have the OVERRIDE bit set.
- * ALIAS indicates that this register is an alias view of some underlying
- * state which is also visible via another register, and that the other
- * register is handling migration and reset; registers marked ALIAS will not be
- * migrated but may have their state set by syncing of register state from KVM.
- * NO_RAW indicates that this register has no underlying state and does not
- * support raw access for state saving/loading; it will not be used for either
- * migration or KVM state synchronization. (Typically this is for "registers"
- * which are actually used as instructions for cache maintenance and so on.)
- * IO indicates that this register does I/O and therefore its accesses
- * need to be marked with gen_io_start() and also end the TB. In particular,
- * registers which implement clocks or timers require this.
- * RAISES_EXC is for when the read or write hook might raise an exception;
- * the generated code will synchronize the CPU state before calling the hook
- * so that it is safe for the hook to call raise_exception().
- * NEWEL is for writes to registers that might change the exception
- * level - typically on older ARM chips. For those cases we need to
- * re-read the new el when recomputing the translation flags.
+ * ARMCPRegInfo type field bits:
  */
-#define ARM_CP_SPECIAL           0x0001
-#define ARM_CP_CONST             0x0002
-#define ARM_CP_64BIT             0x0004
-#define ARM_CP_SUPPRESS_TB_END   0x0008
-#define ARM_CP_OVERRIDE          0x0010
-#define ARM_CP_ALIAS             0x0020
-#define ARM_CP_IO                0x0040
-#define ARM_CP_NO_RAW            0x0080
-#define ARM_CP_NOP               (ARM_CP_SPECIAL | 0x0100)
-#define ARM_CP_WFI               (ARM_CP_SPECIAL | 0x0200)
-#define ARM_CP_NZCV              (ARM_CP_SPECIAL | 0x0300)
-#define ARM_CP_CURRENTEL         (ARM_CP_SPECIAL | 0x0400)
-#define ARM_CP_DC_ZVA            (ARM_CP_SPECIAL | 0x0500)
-#define ARM_CP_DC_GVA            (ARM_CP_SPECIAL | 0x0600)
-#define ARM_CP_DC_GZVA           (ARM_CP_SPECIAL | 0x0700)
-#define ARM_LAST_SPECIAL         ARM_CP_DC_GZVA
-#define ARM_CP_FPU               0x1000
-#define ARM_CP_SVE               0x2000
-#define ARM_CP_NO_GDB            0x4000
-#define ARM_CP_RAISES_EXC        0x8000
-#define ARM_CP_NEWEL             0x10000
-/* Mask of only the flag bits in a type field */
-#define ARM_CP_FLAG_MASK         0x1f0ff
+enum {
+    /*
+     * Register must be handled specially during translation.
+     * The method is one of the values below:
+     */
+    ARM_CP_SPECIAL_MASK          = 0x000f,
+    /* Special: no change to PE state: writes ignored, reads ignored. */
+    ARM_CP_NOP                   = 0x0001,
+    /* Special: sysreg is WFI, for v5 and v6. */
+    ARM_CP_WFI                   = 0x0002,
+    /* Special: sysreg is NZCV. */
+    ARM_CP_NZCV                  = 0x0003,
+    /* Special: sysreg is CURRENTEL. */
+    ARM_CP_CURRENTEL             = 0x0004,
+    /* Special: sysreg is DC ZVA or similar. */
+    ARM_CP_DC_ZVA                = 0x0005,
+    ARM_CP_DC_GVA                = 0x0006,
+    ARM_CP_DC_GZVA               = 0x0007,
+
+    /* Flag: reads produce resetvalue; writes ignored. */
+    ARM_CP_CONST                 = 1 << 4,
+    /* Flag: For ARM_CP_STATE_AA32, sysreg is 64-bit. */
+    ARM_CP_64BIT                 = 1 << 5,
+    /*
+     * Flag: TB should not be ended after a write to this register
+     * (the default is that the TB ends after cp writes).
+     */
+    ARM_CP_SUPPRESS_TB_END       = 1 << 6,
+    /*
+     * Flag: Permit a register definition to override a previous definition
+     * for the same (cp, is64, crn, crm, opc1, opc2) tuple: either the new
+     * or the old must have the ARM_CP_OVERRIDE bit set.
+     */
+    ARM_CP_OVERRIDE              = 1 << 7,
+    /*
+     * Flag: Register is an alias view of some underlying state which is also
+     * visible via another register, and that the other register is handling
+     * migration and reset; registers marked ARM_CP_ALIAS will not be migrated
+     * but may have their state set by syncing of register state from KVM.
+     */
+    ARM_CP_ALIAS                 = 1 << 8,
+    /*
+     * Flag: Register does I/O and therefore its accesses need to be marked
+     * with gen_io_start() and also end the TB. In particular, registers which
+     * implement clocks or timers require this.
+     */
+    ARM_CP_IO                    = 1 << 9,
+    /*
+     * Flag: Register has no underlying state and does not support raw access
+     * for state saving/loading; it will not be used for either migration or
+     * KVM state synchronization. Typically this is for "registers" which are
+     * actually used as instructions for cache maintenance and so on.
+     */
+    ARM_CP_NO_RAW                = 1 << 10,
+    /*
+     * Flag: The read or write hook might raise an exception; the generated
+     * code will synchronize the CPU state before calling the hook so that it
+     * is safe for the hook to call raise_exception().
+     */
+    ARM_CP_RAISES_EXC            = 1 << 11,
+    /*
+     * Flag: Writes to the sysreg might change the exception level - typically
+     * on older ARM chips. For those cases we need to re-read the new el when
+     * recomputing the translation flags.
+     */
+    ARM_CP_NEWEL                 = 1 << 12,
+    /*
+     * Flag: Access check for this sysreg is identical to accessing FPU state
+     * from an instruction: use translation fp_access_check().
+     */
+    ARM_CP_FPU                   = 1 << 13,
+    /*
+     * Flag: Access check for this sysreg is identical to accessing SVE state
+     * from an instruction: use translation sve_access_check().
+     */
+    ARM_CP_SVE                   = 1 << 14,
+    /* Flag: Do not expose in gdb sysreg xml. */
+    ARM_CP_NO_GDB                = 1 << 15,
+};

 /*
  * Valid values for ARMCPRegInfo state field, indicating which of
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
     ARMCPRegInfo *ri = value;
     ARMCPU *cpu = opaque;

-    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) {
+    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS)) {
         return;
     }

@@ -XXX,XX +XXX,XX @@ static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
     ARMCPU *cpu = opaque;
     uint64_t oldvalue, newvalue;

-    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
+    if (ri->type & (ARM_CP_SPECIAL_MASK | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
         return;
     }

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
      * multiple times. Special registers (ie NOP/WFI) are
      * never migratable and not even raw-accessible.
      */
-    if ((r->type & ARM_CP_SPECIAL)) {
+    if (r->type & ARM_CP_SPECIAL_MASK) {
         r2->type |= ARM_CP_NO_RAW;
     }
     if (((r->crm == CP_ANY) && crm != 0) ||
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
     /* Check that the register definition has enough info to handle
      * reads and writes if they are permitted.
      */
-    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
+    if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
         if (r->access & PL3_R) {
             assert((r->fieldoffset ||
                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
     }

     /* Handle special cases first */
-    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
+    switch (ri->type & ARM_CP_SPECIAL_MASK) {
+    case 0:
+        break;
     case ARM_CP_NOP:
         return;
     case ARM_CP_NZCV:
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         }
         return;
     default:
-        break;
+        g_assert_not_reached();
     }
     if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
         return;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
     }

     /* Handle special cases first */
-    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
+    switch (ri->type & ARM_CP_SPECIAL_MASK) {
+    case 0:
+        break;
     case ARM_CP_NOP:
         return;
     case ARM_CP_WFI:
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
         s->base.is_jmp = DISAS_WFI;
         return;
     default:
-        break;
+        g_assert_not_reached();
     }

     if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
--
2.25.1

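The layout introduced by ARM_CP_SPECIAL_MASK above, an enumerated code in the
low nibble plus independent flag bits higher up, can be shown with a small
standalone sketch; the CP_* names and handle() function here are illustrative
and are not the QEMU definitions.

#include <stdio.h>

enum {
    CP_SPECIAL_MASK = 0x000f,   /* low 4 bits: one of the codes below */
    CP_NOP          = 0x0001,
    CP_WFI          = 0x0002,

    CP_CONST        = 1 << 4,   /* independent flag bits from here up */
    CP_IO           = 1 << 9,
};

static void handle(int type)
{
    /* Decode the mutually exclusive special code with a single switch. */
    switch (type & CP_SPECIAL_MASK) {
    case 0:
        printf("ordinary register\n");
        break;
    case CP_NOP:
        printf("NOP register\n");
        break;
    case CP_WFI:
        printf("WFI register\n");
        break;
    default:
        printf("unknown special code\n");
        break;
    }
    /* Flags still test independently, as before the reorganisation. */
    if (type & CP_IO) {
        printf("  ...does I/O\n");
    }
}

int main(void)
{
    handle(CP_NOP | CP_IO);
    handle(CP_CONST);
    return 0;
}

The payoff visible in the translate-a64.c and translate.c hunks is that the
old mask expression ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL) collapses to a
plain AND with the mask, and an unexpected code can trap in the default arm.
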
vfp_access_check and its helper routine full_vfp_access_check() have
gradually grown and are now an awkward mix of A-profile only and
M-profile only pieces. Refactor it into an A-profile only and an
M-profile only version, taking advantage of the fact that now the
only direct call to full_vfp_access_check() is in A-profile-only
code.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210618141019.10671-7-peter.maydell@linaro.org
---
 target/arm/translate-vfp.c | 79 +++++++++++++++++++++++---------------
 1 file changed, 48 insertions(+), 31 deletions(-)

diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static void gen_update_fp_context(DisasContext *s)
 }

 /*
- * Check that VFP access is enabled. If it is, do the necessary
- * M-profile lazy-FP handling and then return true.
- * If not, emit code to generate an appropriate exception and
- * return false.
+ * Check that VFP access is enabled, A-profile specific version.
+ *
+ * If VFP is enabled, return true. If not, emit code to generate an
+ * appropriate exception and return false.
  * The ignore_vfp_enabled argument specifies that we should ignore
- * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
+ * whether VFP is enabled via FPEXC.EN: this should be true for FMXR/FMRX
  * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
  */
-static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
+static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
 {
     if (s->fp_excp_el) {
-        if (arm_dc_feature(s, ARM_FEATURE_M)) {
-            /*
-             * M-profile mostly catches the "FPU disabled" case early, in
-             * disas_m_nocp(), but a few insns (eg LCTP, WLSTP, DLSTP)
-             * which do coprocessor-checks are outside the large ranges of
-             * the encoding space handled by the patterns in m-nocp.decode,
-             * and for them we may need to raise NOCP here.
-             */
-            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
-                               syn_uncategorized(), s->fp_excp_el);
-        } else {
-            gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
-                               syn_fp_access_trap(1, 0xe, false),
-                               s->fp_excp_el);
-        }
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
         return false;
     }

@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
         unallocated_encoding(s);
         return false;
     }
+    return true;
+}

-    if (arm_dc_feature(s, ARM_FEATURE_M)) {
-        /* Handle M-profile lazy FP state mechanics */
-
-        /* Trigger lazy-state preservation if necessary */
-        gen_preserve_fp_state(s);
-
-        /* Update ownership of FP context and create new FP context if needed */
-        gen_update_fp_context(s);
+/*
+ * Check that VFP access is enabled, M-profile specific version.
+ *
+ * If VFP is enabled, do the necessary M-profile lazy-FP handling and then
+ * return true. If not, emit code to generate an appropriate exception and
+ * return false.
+ */
+static bool vfp_access_check_m(DisasContext *s)
+{
+    if (s->fp_excp_el) {
+        /*
+         * M-profile mostly catches the "FPU disabled" case early, in
+         * disas_m_nocp(), but a few insns (eg LCTP, WLSTP, DLSTP)
+         * which do coprocessor-checks are outside the large ranges of
+         * the encoding space handled by the patterns in m-nocp.decode,
+         * and for them we may need to raise NOCP here.
+         */
+        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+                           syn_uncategorized(), s->fp_excp_el);
+        return false;
     }

+    /* Handle M-profile lazy FP state mechanics */
+
+    /* Trigger lazy-state preservation if necessary */
+    gen_preserve_fp_state(s);
+
+    /* Update ownership of FP context and create new FP context if needed */
+    gen_update_fp_context(s);
+
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
  */
 bool vfp_access_check(DisasContext *s)
 {
-    return full_vfp_access_check(s, false);
+    if (arm_dc_feature(s, ARM_FEATURE_M)) {
+        return vfp_access_check_m(s);
+    } else {
+        return vfp_access_check_a(s, false);
+    }
 }

 static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
         return false;
     }

-    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
+    /*
+     * Call vfp_access_check_a() directly, because we need to tell
+     * it to ignore FPEXC.EN for some register accesses.
+     */
+    if (!vfp_access_check_a(s, ignore_vfp_enabled)) {
         return true;
     }
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Standardize on g_assert_not_reached() for "should not happen".
Retain abort() when preceded by fprintf or error_report.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220501055028.646596-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c         | 7 +++----
 target/arm/hvf/hvf.c        | 2 +-
 target/arm/kvm-stub.c       | 4 ++--
 target/arm/kvm.c            | 4 ++--
 target/arm/machine.c        | 4 ++--
 target/arm/translate-a64.c  | 4 ++--
 target/arm/translate-neon.c | 2 +-
 target/arm/translate.c      | 4 ++--
 8 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
         break;
     default:
         /* broken reginfo with out-of-range opc1 */
-        assert(false);
-        break;
+        g_assert_not_reached();
     }
     /* assert our permissions are not too lax (stricter is fine) */
     assert((r->access & ~mask) == 0);
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
             break;
         default:
             /* Never happens, but compiler isn't smart enough to tell. */
-            abort();
+            g_assert_not_reached();
         }
     }
     *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
             break;
         default:
             /* Never happens, but compiler isn't smart enough to tell. */
-            abort();
+            g_assert_not_reached();
         }
     }
     if (domain_prot == 3) {
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
         /* we got kicked, no exit to process */
         return 0;
     default:
-        assert(0);
+        g_assert_not_reached();
     }

     hvf_sync_vtimer(cpu);
diff --git a/target/arm/kvm-stub.c b/target/arm/kvm-stub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm-stub.c
+++ b/target/arm/kvm-stub.c
@@ -XXX,XX +XXX,XX @@

 bool write_kvmstate_to_list(ARMCPU *cpu)
 {
-    abort();
+    g_assert_not_reached();
 }

 bool write_list_to_kvmstate(ARMCPU *cpu, int level)
 {
-    abort();
+    g_assert_not_reached();
 }
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -XXX,XX +XXX,XX @@ bool write_kvmstate_to_list(ARMCPU *cpu)
         ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
         break;
     default:
-        abort();
+        g_assert_not_reached();
     }
     if (ret) {
         ok = false;
@@ -XXX,XX +XXX,XX @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level)
         r.addr = (uintptr_t)(cpu->cpreg_values + i);
         break;
     default:
-        abort();
+        g_assert_not_reached();
     }
     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
     if (ret) {
diff --git a/target/arm/machine.c b/target/arm/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -XXX,XX +XXX,XX @@ static int cpu_pre_save(void *opaque)
     if (kvm_enabled()) {
         if (!write_kvmstate_to_list(cpu)) {
             /* This should never fail */
-            abort();
+            g_assert_not_reached();
         }

         /*
@@ -XXX,XX +XXX,XX @@ static int cpu_pre_save(void *opaque)
     } else {
         if (!write_cpustate_to_list(cpu, false)) {
             /* This should never fail. */
-            abort();
+            g_assert_not_reached();
         }
     }

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
         gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
         break;
     default:
-        abort();
+        g_assert_not_reached();
     }

     write_fp_sreg(s, rd, tcg_res);
@@ -XXX,XX +XXX,XX @@ static void handle_fp_fcvt(DisasContext *s, int opcode,
         break;
     }
     default:
-        abort();
+        g_assert_not_reached();
     }
 }
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/translate-neon.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
         }
         break;
     default:
-        abort();
+        g_assert_not_reached();
     }
     if ((vd + a->stride * (nregs - 1)) > 31) {
         /*
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
         offset = 4;
         break;
     default:
-        abort();
+        g_assert_not_reached();
     }
     tcg_gen_addi_i32(addr, addr, offset);
     tmp = load_reg(s, 14);
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
         offset = 0;
         break;
     default:
-        abort();
+        g_assert_not_reached();
     }
     tcg_gen_addi_i32(addr, addr, offset);
     gen_helper_set_r13_banked(cpu_env, tcg_constant_i32(mode), addr);
--
2.25.1

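The convention adopted above can be shown in isolation: a "cannot happen"
switch arm should trap loudly rather than fall through. Since
g_assert_not_reached() is a GLib macro, this sketch substitutes a hand-rolled
equivalent; my_assert_not_reached() and mode_name() are invented for
illustration (the real GLib macro also logs file and line before aborting).

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for g_assert_not_reached(): report location, then abort. */
#define my_assert_not_reached() \
    do { \
        fprintf(stderr, "%s:%d: should not be reached\n", \
                __FILE__, __LINE__); \
        abort(); \
    } while (0)

static const char *mode_name(int mode)
{
    switch (mode) {
    case 0:
        return "user";
    case 1:
        return "system";
    default:
        /* Callers validate mode; anything else is a program bug. */
        my_assert_not_reached();
    }
}

int main(void)
{
    printf("%s\n", mode_name(1));
    return 0;
}

Because abort() is declared noreturn, the compiler knows the default arm
cannot fall out of the switch, which is also why the patch can delete the
old "break;" statements after the bare assert(false) calls.
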
Implement the MVE VADDV insn, which performs an addition
1
From: Richard Henderson <richard.henderson@linaro.org>
2
across vector lanes.
3
2
3
Create a typedef as well, and use it in ARMCPRegInfo.
4
This won't be perfect for debugging, but it'll nicely
5
display the most common cases.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220501055028.646596-8-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210617121628.20116-44-peter.maydell@linaro.org
7
---
11
---
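For reference, the lane arithmetic in isolation: a minimal scalar model of
the unsigned-byte VADDV case, mirroring the DO_VADDV helper in the patch
below. The function name and standalone framing are illustrative, and the
host byte-order swizzling done by the H macros is omitted.

#include <stdint.h>

/* Sum the active byte lanes of a 16-byte vector into the accumulator.
 * 'mask' is the per-byte predication mask; a clear bit skips that lane. */
static uint32_t vaddv_ub(const uint8_t q[16], uint16_t mask, uint32_t ra)
{
    for (unsigned e = 0; e < 16; e++, mask >>= 1) {
        if (mask & 1) {
            ra += q[e];
        }
    }
    return ra;
}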
8
target/arm/helper-mve.h | 7 +++++++
12
target/arm/cpregs.h | 44 +++++++++++++++++++++++---------------------
9
target/arm/mve.decode | 2 ++
13
target/arm/helper.c | 2 +-
10
target/arm/mve_helper.c | 24 +++++++++++++++++++++
14
2 files changed, 24 insertions(+), 22 deletions(-)
11
target/arm/translate-mve.c | 43 ++++++++++++++++++++++++++++++++++++++
12
4 files changed, 76 insertions(+)
13
15
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
16
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
18
--- a/target/arm/cpregs.h
17
+++ b/target/arm/helper-mve.h
19
+++ b/target/arm/cpregs.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
20
@@ -XXX,XX +XXX,XX @@ enum {
19
21
* described with these bits, then use a laxer set of restrictions, and
20
DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
22
* do the more restrictive/complex check inside a helper function.
21
DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
23
*/
22
+
24
-#define PL3_R 0x80
23
+DEF_HELPER_FLAGS_3(mve_vaddvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
25
-#define PL3_W 0x40
24
+DEF_HELPER_FLAGS_3(mve_vaddvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
26
-#define PL2_R (0x20 | PL3_R)
25
+DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
27
-#define PL2_W (0x10 | PL3_W)
26
+DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
28
-#define PL1_R (0x08 | PL2_R)
27
+DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
29
-#define PL1_W (0x04 | PL2_W)
28
+DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
30
-#define PL0_R (0x02 | PL1_R)
29
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
31
-#define PL0_W (0x01 | PL1_W)
32
+typedef enum {
33
+ PL3_R = 0x80,
34
+ PL3_W = 0x40,
35
+ PL2_R = 0x20 | PL3_R,
36
+ PL2_W = 0x10 | PL3_W,
37
+ PL1_R = 0x08 | PL2_R,
38
+ PL1_W = 0x04 | PL2_W,
39
+ PL0_R = 0x02 | PL1_R,
40
+ PL0_W = 0x01 | PL1_W,
41
42
-/*
43
- * For user-mode some registers are accessible to EL0 via a kernel
44
- * trap-and-emulate ABI. In this case we define the read permissions
45
- * as actually being PL0_R. However some bits of any given register
46
- * may still be masked.
47
- */
48
+ /*
49
+ * For user-mode some registers are accessible to EL0 via a kernel
50
+ * trap-and-emulate ABI. In this case we define the read permissions
51
+ * as actually being PL0_R. However some bits of any given register
52
+ * may still be masked.
53
+ */
54
#ifdef CONFIG_USER_ONLY
55
-#define PL0U_R PL0_R
56
+ PL0U_R = PL0_R,
57
#else
58
-#define PL0U_R PL1_R
59
+ PL0U_R = PL1_R,
60
#endif
61
62
-#define PL3_RW (PL3_R | PL3_W)
63
-#define PL2_RW (PL2_R | PL2_W)
64
-#define PL1_RW (PL1_R | PL1_W)
65
-#define PL0_RW (PL0_R | PL0_W)
66
+ PL3_RW = PL3_R | PL3_W,
67
+ PL2_RW = PL2_R | PL2_W,
68
+ PL1_RW = PL1_R | PL1_W,
69
+ PL0_RW = PL0_R | PL0_W,
70
+} CPAccessRights;
71
72
typedef enum CPAccessResult {
73
/* Access is permitted */
74
@@ -XXX,XX +XXX,XX @@ struct ARMCPRegInfo {
75
/* Register type: ARM_CP_* bits/values */
76
int type;
77
/* Access rights: PL*_[RW] */
78
- int access;
79
+ CPAccessRights access;
80
/* Security state: ARM_CP_SECSTATE_* bits/values */
81
int secure;
82
/*
83
diff --git a/target/arm/helper.c b/target/arm/helper.c
30
index XXXXXXX..XXXXXXX 100644
84
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/mve.decode
85
--- a/target/arm/helper.c
32
+++ b/target/arm/mve.decode
86
+++ b/target/arm/helper.c
33
@@ -XXX,XX +XXX,XX @@ VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
87
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
34
VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
88
* to encompass the generic architectural permission check.
35
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
89
*/
36
90
if (r->state != ARM_CP_STATE_AA32) {
37
+# Vector add across vector
91
- int mask = 0;
38
+VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
92
+ CPAccessRights mask;
39
93
switch (r->opc1) {
40
# Predicate operations
94
case 0:
41
%mask_22_13 22:1 13:3
95
/* min_EL EL1, but some accessible to EL0 via kernel ABI */
42
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/mve_helper.c
45
+++ b/target/arm/mve_helper.c
46
@@ -XXX,XX +XXX,XX @@ DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64
47
48
DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
49
DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
50
+
51
+/* Vector add across vector */
52
+#define DO_VADDV(OP, ESIZE, TYPE) \
53
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
54
+ uint32_t ra) \
55
+ { \
56
+ uint16_t mask = mve_element_mask(env); \
57
+ unsigned e; \
58
+ TYPE *m = vm; \
59
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
60
+ if (mask & 1) { \
61
+ ra += m[H##ESIZE(e)]; \
62
+ } \
63
+ } \
64
+ mve_advance_vpt(env); \
65
+ return ra; \
66
+ } \
67
+
68
+DO_VADDV(vaddvsb, 1, uint8_t)
69
+DO_VADDV(vaddvsh, 2, uint16_t)
70
+DO_VADDV(vaddvsw, 4, uint32_t)
71
+DO_VADDV(vaddvub, 1, uint8_t)
72
+DO_VADDV(vaddvuh, 2, uint16_t)
73
+DO_VADDV(vaddvuw, 4, uint32_t)
74
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/translate-mve.c
77
+++ b/target/arm/translate-mve.c
78
@@ -XXX,XX +XXX,XX @@ typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
79
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
80
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
81
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
82
+typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
83
84
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
85
static inline long mve_qreg_offset(unsigned reg)
86
@@ -XXX,XX +XXX,XX @@ static bool trans_VPST(DisasContext *s, arg_VPST *a)
87
mve_update_and_store_eci(s);
88
return true;
89
}
90
+
91
+static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
92
+{
93
+ /* VADDV: vector add across vector */
94
+ static MVEGenVADDVFn * const fns[4][2] = {
95
+ { gen_helper_mve_vaddvsb, gen_helper_mve_vaddvub },
96
+ { gen_helper_mve_vaddvsh, gen_helper_mve_vaddvuh },
97
+ { gen_helper_mve_vaddvsw, gen_helper_mve_vaddvuw },
98
+ { NULL, NULL }
99
+ };
100
+ TCGv_ptr qm;
101
+ TCGv_i32 rda;
102
+
103
+ if (!dc_isar_feature(aa32_mve, s) ||
104
+ a->size == 3) {
105
+ return false;
106
+ }
107
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
108
+ return true;
109
+ }
110
+
111
+ /*
112
+ * This insn is subject to beat-wise execution. Partial execution
113
+ * of an A=0 (no-accumulate) insn which does not execute the first
114
+ * beat must start with the current value of Rda, not zero.
115
+ */
116
+ if (a->a || mve_skip_first_beat(s)) {
117
+ /* Accumulate input from Rda */
118
+ rda = load_reg(s, a->rda);
119
+ } else {
120
+ /* Accumulate starting at zero */
121
+ rda = tcg_const_i32(0);
122
+ }
123
+
124
+ qm = mve_qreg_ptr(a->qm);
125
+ fns[a->size][a->u](rda, cpu_env, qm, rda);
126
+ store_reg(s, a->rda, rda);
127
+ tcg_temp_free_ptr(qm);
128
+
129
+ mve_update_eci(s);
130
+ return true;
131
+}
132
--
96
--
133
2.20.1
97
2.25.1
134
135
1
Implement the MVE VHCADD insn, which is similar to VCADD
1
From: Richard Henderson <richard.henderson@linaro.org>
2
but performs a halving step. This one overlaps with VADC.
3
2
3
Give this enum a name and use it in ARMCPRegInfo,
4
add_cpreg_to_hashtable and define_one_arm_cp_reg_with_opaque.
5
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220501055028.646596-9-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210617121628.20116-43-peter.maydell@linaro.org
7
---
11
---
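For reference, "halving" means the intermediate result is computed at full
precision and then shifted right by one, so it can never overflow or
saturate. A standalone sketch of the signed helpers VHCADD builds on (names
are illustrative; the real do_vhadd_s/do_vhsub_s live in mve_helper.c, and
arithmetic right shift of negative values is assumed, as on the compilers
QEMU supports):

#include <stdint.h>

static int32_t halving_add_s32(int32_t a, int32_t b)
{
    return ((int64_t)a + b) >> 1;
}

static int32_t halving_sub_s32(int32_t a, int32_t b)
{
    return ((int64_t)a - b) >> 1;
}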
8
target/arm/helper-mve.h | 8 ++++++++
12
target/arm/cpregs.h | 6 +++---
9
target/arm/mve.decode | 8 ++++++--
13
target/arm/helper.c | 6 ++++--
10
target/arm/mve_helper.c | 2 ++
14
2 files changed, 7 insertions(+), 5 deletions(-)
11
target/arm/translate-mve.c | 4 +++-
12
4 files changed, 19 insertions(+), 3 deletions(-)
13
15
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
16
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
18
--- a/target/arm/cpregs.h
17
+++ b/target/arm/helper-mve.h
19
+++ b/target/arm/cpregs.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vcadd270b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
20
@@ -XXX,XX +XXX,XX @@ enum {
19
DEF_HELPER_FLAGS_4(mve_vcadd270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
21
* Note that we rely on the values of these enums as we iterate through
20
DEF_HELPER_FLAGS_4(mve_vcadd270w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
22
* the various states in some places.
21
23
*/
22
+DEF_HELPER_FLAGS_4(mve_vhcadd90b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
24
-enum {
23
+DEF_HELPER_FLAGS_4(mve_vhcadd90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
25
+typedef enum {
24
+DEF_HELPER_FLAGS_4(mve_vhcadd90w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
26
ARM_CP_STATE_AA32 = 0,
27
ARM_CP_STATE_AA64 = 1,
28
ARM_CP_STATE_BOTH = 2,
29
-};
30
+} CPState;
31
32
/*
33
* ARM CP register secure state flags. These flags identify security state
34
@@ -XXX,XX +XXX,XX @@ struct ARMCPRegInfo {
35
uint8_t opc1;
36
uint8_t opc2;
37
/* Execution state in which this register is visible: ARM_CP_STATE_* */
38
- int state;
39
+ CPState state;
40
/* Register type: ARM_CP_* bits/values */
41
int type;
42
/* Access rights: PL*_[RW] */
43
diff --git a/target/arm/helper.c b/target/arm/helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/helper.c
46
+++ b/target/arm/helper.c
47
@@ -XXX,XX +XXX,XX @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
48
}
49
50
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
51
- void *opaque, int state, int secstate,
52
+ void *opaque, CPState state, int secstate,
53
int crm, int opc1, int opc2,
54
const char *name)
55
{
56
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
57
* bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
58
* the register, if any.
59
*/
60
- int crm, opc1, opc2, state;
61
+ int crm, opc1, opc2;
62
int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
63
int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
64
int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
65
int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
66
int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
67
int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
68
+ CPState state;
25
+
69
+
26
+DEF_HELPER_FLAGS_4(mve_vhcadd270b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
70
/* 64 bit registers have only CRm and Opc1 fields */
27
+DEF_HELPER_FLAGS_4(mve_vhcadd270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
71
assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
28
+DEF_HELPER_FLAGS_4(mve_vhcadd270w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
72
/* op0 only exists in the AArch64 encodings */
29
+
30
DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
31
DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/mve.decode
36
+++ b/target/arm/mve.decode
37
@@ -XXX,XX +XXX,XX @@ VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28
38
VRHADD_S 111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
39
VRHADD_U 111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
40
41
-VADC 1110 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
42
-VADCI 1110 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
43
+{
44
+ VADC 1110 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
45
+ VADCI 1110 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
46
+ VHCADD90 1110 1110 0 . .. ... 0 ... 0 1111 . 0 . 0 ... 0 @2op
47
+ VHCADD270 1110 1110 0 . .. ... 0 ... 1 1111 . 0 . 0 ... 0 @2op
48
+}
49
50
{
51
VSBC 1111 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
52
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/arm/mve_helper.c
55
+++ b/target/arm/mve_helper.c
56
@@ -XXX,XX +XXX,XX @@ void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
57
58
DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
59
DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
60
+DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s)
61
+DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s)
62
63
static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
64
{
65
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
66
index XXXXXXX..XXXXXXX 100644
67
--- a/target/arm/translate-mve.c
68
+++ b/target/arm/translate-mve.c
69
@@ -XXX,XX +XXX,XX @@ DO_2OP(VRHADD_U, vrhaddu)
70
/*
71
* VCADD Qd == Qm at size MO_32 is UNPREDICTABLE; we choose not to diagnose
72
* so we can reuse the DO_2OP macro. (Our implementation calculates the
73
- * "expected" results in this case.)
74
+ * "expected" results in this case.) Similarly for VHCADD.
75
*/
76
DO_2OP(VCADD90, vcadd90)
77
DO_2OP(VCADD270, vcadd270)
78
+DO_2OP(VHCADD90, vhcadd90)
79
+DO_2OP(VHCADD270, vhcadd270)
80
81
static bool trans_VQDMULLB(DisasContext *s, arg_2op *a)
82
{
83
--
73
--
84
2.20.1
74
2.25.1
85
75
86
76
1
Implement the MVE VCADD insn, which performs a complex add with
1
From: Richard Henderson <richard.henderson@linaro.org>
2
rotate. Note that the size=0b11 encoding is VSBC.
3
2
4
The architecture grants some leeway for the "destination and Vm
3
Give this enum a name and use it in ARMCPRegInfo and add_cpreg_to_hashtable.
5
source overlap" case for the size MO_32 case, but we choose not to
4
Add the enumerator ARM_CP_SECSTATE_BOTH to clarify how 0
6
make use of it, instead always calculating all 16 bytes worth of
5
is handled in define_one_arm_cp_reg_with_opaque.
7
results before setting the destination register.
8
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220501055028.646596-10-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20210617121628.20116-42-peter.maydell@linaro.org
12
---
11
---
13
target/arm/helper-mve.h | 8 ++++++++
12
target/arm/cpregs.h | 7 ++++---
14
target/arm/mve.decode | 9 +++++++--
13
target/arm/helper.c | 7 +++++--
15
target/arm/mve_helper.c | 29 +++++++++++++++++++++++++++++
14
2 files changed, 9 insertions(+), 5 deletions(-)
16
target/arm/translate-mve.c | 7 +++++++
17
4 files changed, 51 insertions(+), 2 deletions(-)
18
15
19
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
16
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
20
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/helper-mve.h
18
--- a/target/arm/cpregs.h
22
+++ b/target/arm/helper-mve.h
19
+++ b/target/arm/cpregs.h
23
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vadci, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
20
@@ -XXX,XX +XXX,XX @@ typedef enum {
24
DEF_HELPER_FLAGS_4(mve_vsbc, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
21
* registered entry will only have one to identify whether the entry is secure
25
DEF_HELPER_FLAGS_4(mve_vsbci, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
22
* or non-secure.
26
23
*/
27
+DEF_HELPER_FLAGS_4(mve_vcadd90b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
24
-enum {
28
+DEF_HELPER_FLAGS_4(mve_vcadd90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
25
+typedef enum {
29
+DEF_HELPER_FLAGS_4(mve_vcadd90w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
26
+ ARM_CP_SECSTATE_BOTH = 0, /* define one cpreg for each secstate */
30
+
27
ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */
31
+DEF_HELPER_FLAGS_4(mve_vcadd270b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
28
ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
32
+DEF_HELPER_FLAGS_4(mve_vcadd270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
29
-};
33
+DEF_HELPER_FLAGS_4(mve_vcadd270w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
30
+} CPSecureState;
34
+
31
35
DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
32
/*
36
DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
33
* Access rights:
37
DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
34
@@ -XXX,XX +XXX,XX @@ struct ARMCPRegInfo {
38
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
35
/* Access rights: PL*_[RW] */
36
CPAccessRights access;
37
/* Security state: ARM_CP_SECSTATE_* bits/values */
38
- int secure;
39
+ CPSecureState secure;
40
/*
41
* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
42
* this register was defined: can be used to hand data through to the
43
diff --git a/target/arm/helper.c b/target/arm/helper.c
39
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
40
--- a/target/arm/mve.decode
45
--- a/target/arm/helper.c
41
+++ b/target/arm/mve.decode
46
+++ b/target/arm/helper.c
42
@@ -XXX,XX +XXX,XX @@ VRHADD_S 111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
47
@@ -XXX,XX +XXX,XX @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
43
VRHADD_U 111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
44
45
VADC 1110 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
46
-VSBC 1111 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
47
VADCI 1110 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
48
-VSBCI 1111 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
49
+
50
+{
51
+ VSBC 1111 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
52
+ VSBCI 1111 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
53
+ VCADD90 1111 1110 0 . .. ... 0 ... 0 1111 . 0 . 0 ... 0 @2op
54
+ VCADD270 1111 1110 0 . .. ... 0 ... 1 1111 . 0 . 0 ... 0 @2op
55
+}
56
57
# Vector miscellaneous
58
59
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/arm/mve_helper.c
62
+++ b/target/arm/mve_helper.c
63
@@ -XXX,XX +XXX,XX @@ void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
64
do_vadc(env, vd, vn, vm, -1, 1, true);
65
}
48
}
66
49
67
+#define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \
50
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
68
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
51
- void *opaque, CPState state, int secstate,
69
+ { \
52
+ void *opaque, CPState state,
70
+ TYPE *d = vd, *n = vn, *m = vm; \
53
+ CPSecureState secstate,
71
+ uint16_t mask = mve_element_mask(env); \
54
int crm, int opc1, int opc2,
72
+ unsigned e; \
55
const char *name)
73
+ TYPE r[16 / ESIZE]; \
74
+ /* Calculate all results first to avoid overwriting inputs */ \
75
+ for (e = 0; e < 16 / ESIZE; e++) { \
76
+ if (!(e & 1)) { \
77
+ r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \
78
+ } else { \
79
+ r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \
80
+ } \
81
+ } \
82
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
83
+ mergemask(&d[H##ESIZE(e)], r[e], mask); \
84
+ } \
85
+ mve_advance_vpt(env); \
86
+ }
87
+
88
+#define DO_VCADD_ALL(OP, FN0, FN1) \
89
+ DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \
90
+ DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \
91
+ DO_VCADD(OP##w, 4, int32_t, FN0, FN1)
92
+
93
+DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
94
+DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
95
+
96
static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
97
{
56
{
98
if (val > max) {
57
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
99
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
58
r->secure, crm, opc1, opc2,
100
index XXXXXXX..XXXXXXX 100644
59
r->name);
101
--- a/target/arm/translate-mve.c
60
break;
102
+++ b/target/arm/translate-mve.c
61
- default:
103
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQRDMLSDH, vqrdmlsdh)
62
+ case ARM_CP_SECSTATE_BOTH:
104
DO_2OP(VQRDMLSDHX, vqrdmlsdhx)
63
name = g_strdup_printf("%s_S", r->name);
105
DO_2OP(VRHADD_S, vrhadds)
64
add_cpreg_to_hashtable(cpu, r, opaque, state,
106
DO_2OP(VRHADD_U, vrhaddu)
65
ARM_CP_SECSTATE_S,
107
+/*
66
@@ -XXX,XX +XXX,XX @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
108
+ * VCADD Qd == Qm at size MO_32 is UNPREDICTABLE; we choose not to diagnose
67
ARM_CP_SECSTATE_NS,
109
+ * so we can reuse the DO_2OP macro. (Our implementation calculates the
68
crm, opc1, opc2, r->name);
110
+ * "expected" results in this case.)
69
break;
111
+ */
70
+ default:
112
+DO_2OP(VCADD90, vcadd90)
71
+ g_assert_not_reached();
113
+DO_2OP(VCADD270, vcadd270)
72
}
114
73
} else {
115
static bool trans_VQDMULLB(DisasContext *s, arg_2op *a)
74
/* AArch64 registers get mapped to non-secure instance
116
{
117
--
75
--
118
2.20.1
76
2.25.1
119
120
1
Implement the MVE VQDMLSDH and VQRDMLSDH insns, which are
1
From: Richard Henderson <richard.henderson@linaro.org>
2
like VQDMLADH and VQRDMLADH except that products are subtracted
3
rather than added.
4
2
3
The new_key field is always non-zero -- drop the if.
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Message-id: 20220501055028.646596-11-richard.henderson@linaro.org
8
[PMM: reinstated dropped PL3_RW mask]
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210617121628.20116-38-peter.maydell@linaro.org
8
---
10
---
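For reference, the per-lane arithmetic in isolation: the byte case of
do_vqdmlsdh_b from the patch below written out standalone, with a
simplified stand-in for do_sat_bhw (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

static int64_t sat_s16(int64_t r, bool *sat)
{
    if (r > INT16_MAX) { *sat = true; return INT16_MAX; }
    if (r < INT16_MIN) { *sat = true; return INT16_MIN; }
    return r;
}

/* Doubling multiply-subtract: 2 * (a*b - c*d), optionally rounded,
 * saturated to 16 bits; the result lane is the high byte. */
static int8_t vqdmlsdh_byte(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
    return sat_s16(r, sat) >> 8;
}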
9
target/arm/helper-mve.h | 16 ++++++++++++++
11
target/arm/helper.c | 23 +++++++++++------------
10
target/arm/mve.decode | 5 +++++
12
1 file changed, 11 insertions(+), 12 deletions(-)
11
target/arm/mve_helper.c | 44 ++++++++++++++++++++++++++++++++++++++
12
target/arm/translate-mve.c | 4 ++++
13
4 files changed, 69 insertions(+)
14
13
15
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-mve.h
16
--- a/target/arm/helper.c
18
+++ b/target/arm/helper-mve.h
17
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrdmladhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
18
@@ -XXX,XX +XXX,XX @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
20
DEF_HELPER_FLAGS_4(mve_vqrdmladhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
19
21
DEF_HELPER_FLAGS_4(mve_vqrdmladhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
20
for (i = 0; i < ARRAY_SIZE(aliases); i++) {
22
21
const struct E2HAlias *a = &aliases[i];
23
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
22
- ARMCPRegInfo *src_reg, *dst_reg;
24
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
23
+ ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
25
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
24
+ uint32_t *new_key;
26
+
25
+ bool ok;
27
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
26
28
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
27
if (a->feature && !a->feature(&cpu->isar)) {
29
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
28
continue;
30
+
29
@@ -XXX,XX +XXX,XX @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
31
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
30
g_assert(src_reg->opaque == NULL);
32
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
31
33
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
32
/* Create alias before redirection so we dup the right data. */
34
+
33
- if (a->new_key) {
35
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
34
- ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
36
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
35
- uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
37
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
36
- bool ok;
38
+
37
+ new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
39
DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
38
+ new_key = g_memdup(&a->new_key, sizeof(uint32_t));
40
DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
39
41
DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
40
- new_reg->name = a->new_name;
42
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
41
- new_reg->type |= ARM_CP_ALIAS;
43
index XXXXXXX..XXXXXXX 100644
42
- /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
44
--- a/target/arm/mve.decode
43
- new_reg->access &= PL2_RW | PL3_RW;
45
+++ b/target/arm/mve.decode
44
+ new_reg->name = a->new_name;
46
@@ -XXX,XX +XXX,XX @@ VQDMLADHX 1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
45
+ new_reg->type |= ARM_CP_ALIAS;
47
VQRDMLADH 1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
46
+ /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
48
VQRDMLADHX 1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
47
+ new_reg->access &= PL2_RW | PL3_RW;
49
48
50
+VQDMLSDH 1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
49
- ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
51
+VQDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
50
- g_assert(ok);
52
+VQRDMLSDH 1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
51
- }
53
+VQRDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
52
+ ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
54
+
53
+ g_assert(ok);
55
# Vector miscellaneous
54
56
55
src_reg->opaque = dst_reg;
57
VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
56
src_reg->orig_readfn = src_reg->readfn ?: raw_read;
58
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/mve_helper.c
61
+++ b/target/arm/mve_helper.c
62
@@ -XXX,XX +XXX,XX @@ static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
63
return r >> 32;
64
}
65
66
+static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
67
+ int round, bool *sat)
68
+{
69
+ int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
70
+ return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
71
+}
72
+
73
+static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
74
+ int round, bool *sat)
75
+{
76
+ int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
77
+ return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
78
+}
79
+
80
+static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
81
+ int round, bool *sat)
82
+{
83
+ int64_t m1 = (int64_t)a * b;
84
+ int64_t m2 = (int64_t)c * d;
85
+ int64_t r;
86
+ /* The same ordering issue as in do_vqdmladh_w applies here too */
87
+ if (ssub64_overflow(m1, m2, &r) ||
88
+ sadd64_overflow(r, (round << 30), &r) ||
89
+ sadd64_overflow(r, r, &r)) {
90
+ *sat = true;
91
+ return r < 0 ? INT32_MAX : INT32_MIN;
92
+ }
93
+ return r >> 32;
94
+}
95
+
96
DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
97
DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
98
DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
99
@@ -XXX,XX +XXX,XX @@ DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
100
DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
101
DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)
102
103
+DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
104
+DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
105
+DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
106
+DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
107
+DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
108
+DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)
109
+
110
+DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
111
+DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
112
+DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
113
+DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
114
+DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
115
+DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
116
+
117
#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
118
void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
119
uint32_t rm) \
120
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
121
index XXXXXXX..XXXXXXX 100644
122
--- a/target/arm/translate-mve.c
123
+++ b/target/arm/translate-mve.c
124
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQDMLADH, vqdmladh)
125
DO_2OP(VQDMLADHX, vqdmladhx)
126
DO_2OP(VQRDMLADH, vqrdmladh)
127
DO_2OP(VQRDMLADHX, vqrdmladhx)
128
+DO_2OP(VQDMLSDH, vqdmlsdh)
129
+DO_2OP(VQDMLSDHX, vqdmlsdhx)
130
+DO_2OP(VQRDMLSDH, vqrdmlsdh)
131
+DO_2OP(VQRDMLSDHX, vqrdmlsdhx)
132
133
static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
134
MVEGenTwoOpScalarFn fn)
135
--
57
--
136
2.20.1
58
2.25.1
137
138
1
The Arm MVE VDUP implementation would like to be able to emit code to
1
From: Richard Henderson <richard.henderson@linaro.org>
2
duplicate a byte or halfword value into an i32. We have code to do
3
this already in tcg-op-gvec.c, so all we need to do is make the
4
functions global.
5
2
6
For consistency with other functions made available to the frontends:
3
Cast the uint32_t key into a gpointer directly, which
7
* we rename to tcg_gen_dup_*
4
allows us to avoid allocating storage for each key.
8
* we expose both the _i32 and _i64 forms
9
* we provide the #define for a _tl form
10
5
11
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
6
Use g_hash_table_lookup when we already have a gpointer
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
(e.g. for callbacks like count_cpreg), or when using
8
get_arm_cp_reginfo would require casting away const.
9
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Message-id: 20220501055028.646596-12-richard.henderson@linaro.org
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Message-id: 20210617121628.20116-10-peter.maydell@linaro.org
15
---
14
---
16
include/tcg/tcg-op.h | 8 ++++++++
15
target/arm/cpu.c | 4 ++--
17
include/tcg/tcg.h | 1 -
16
target/arm/gdbstub.c | 2 +-
18
tcg/tcg-op-gvec.c | 20 ++++++++++----------
17
target/arm/helper.c | 41 ++++++++++++++++++-----------------------
19
3 files changed, 18 insertions(+), 11 deletions(-)
18
3 files changed, 21 insertions(+), 26 deletions(-)
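For reference, what the newly exported "dup" operation computes: it
replicates a narrow value across all lanes of a wider one. Below is a
host-side scalar equivalent of the MO_8-into-i32 case (tcg_gen_dup_i32
emits TCG ops to do this at translate time rather than computing it
directly):

#include <stdint.h>

/* Replicate the low byte of 'x' into every byte of a 32-bit value,
 * e.g. 0x000000ab -> 0xabababab. */
static uint32_t dup8_i32(uint32_t x)
{
    x = (uint8_t)x;
    x |= x << 8;
    x |= x << 16;
    return x;
}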
20
19
21
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
20
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
22
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
23
--- a/include/tcg/tcg-op.h
22
--- a/target/arm/cpu.c
24
+++ b/include/tcg/tcg-op.h
23
+++ b/target/arm/cpu.c
25
@@ -XXX,XX +XXX,XX @@ void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
24
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_initfn(Object *obj)
26
void tcg_gen_umax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
25
ARMCPU *cpu = ARM_CPU(obj);
27
void tcg_gen_abs_i32(TCGv_i32, TCGv_i32);
26
28
27
cpu_set_cpustate_pointers(cpu);
29
+/* Replicate a value of size @vece from @in to all the lanes in @out */
28
- cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
30
+void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in);
29
- g_free, cpreg_hashtable_data_destroy);
31
+
30
+ cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
32
static inline void tcg_gen_discard_i32(TCGv_i32 arg)
31
+ NULL, cpreg_hashtable_data_destroy);
32
33
QLIST_INIT(&cpu->pre_el_change_hooks);
34
QLIST_INIT(&cpu->el_change_hooks);
35
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/gdbstub.c
38
+++ b/target/arm/gdbstub.c
39
@@ -XXX,XX +XXX,XX @@ static void arm_gen_one_xml_sysreg_tag(GString *s, DynamicGDBXMLInfo *dyn_xml,
40
static void arm_register_sysreg_for_xml(gpointer key, gpointer value,
41
gpointer p)
33
{
42
{
34
tcg_gen_op1_i32(INDEX_op_discard, arg);
43
- uint32_t ri_key = *(uint32_t *)key;
35
@@ -XXX,XX +XXX,XX @@ void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
44
+ uint32_t ri_key = (uintptr_t)key;
36
void tcg_gen_umax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
45
ARMCPRegInfo *ri = value;
37
void tcg_gen_abs_i64(TCGv_i64, TCGv_i64);
46
RegisterSysregXmlParam *param = (RegisterSysregXmlParam *)p;
38
47
GString *s = param->s;
39
+/* Replicate a value of size @vece from @in to all the lanes in @out */
48
diff --git a/target/arm/helper.c b/target/arm/helper.c
40
+void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in);
49
index XXXXXXX..XXXXXXX 100644
41
+
50
--- a/target/arm/helper.c
42
#if TCG_TARGET_REG_BITS == 64
51
+++ b/target/arm/helper.c
43
static inline void tcg_gen_discard_i64(TCGv_i64 arg)
52
@@ -XXX,XX +XXX,XX @@ bool write_list_to_cpustate(ARMCPU *cpu)
53
static void add_cpreg_to_list(gpointer key, gpointer opaque)
44
{
54
{
45
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
55
ARMCPU *cpu = opaque;
46
#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i64
56
- uint64_t regidx;
47
#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i64
57
- const ARMCPRegInfo *ri;
48
#define tcg_gen_dup_tl_vec tcg_gen_dup_i64_vec
49
+#define tcg_gen_dup_tl tcg_gen_dup_i64
50
#else
51
#define tcg_gen_movi_tl tcg_gen_movi_i32
52
#define tcg_gen_mov_tl tcg_gen_mov_i32
53
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
54
#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i32
55
#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i32
56
#define tcg_gen_dup_tl_vec tcg_gen_dup_i32_vec
57
+#define tcg_gen_dup_tl tcg_gen_dup_i32
58
#endif
59
60
#if UINTPTR_MAX == UINT32_MAX
61
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
62
index XXXXXXX..XXXXXXX 100644
63
--- a/include/tcg/tcg.h
64
+++ b/include/tcg/tcg.h
65
@@ -XXX,XX +XXX,XX @@ uint64_t dup_const(unsigned vece, uint64_t c);
66
: (qemu_build_not_reached_always(), 0)) \
67
: dup_const(VECE, C))
68
69
-
58
-
70
/*
59
- regidx = *(uint32_t *)key;
71
* Memory helpers that will be used by TCG generated code.
60
- ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
72
*/
61
+ uint32_t regidx = (uintptr_t)key;
73
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
62
+ const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
74
index XXXXXXX..XXXXXXX 100644
63
75
--- a/tcg/tcg-op-gvec.c
64
if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
76
+++ b/tcg/tcg-op-gvec.c
65
cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
77
@@ -XXX,XX +XXX,XX @@ uint64_t (dup_const)(unsigned vece, uint64_t c)
66
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_list(gpointer key, gpointer opaque)
67
static void count_cpreg(gpointer key, gpointer opaque)
68
{
69
ARMCPU *cpu = opaque;
70
- uint64_t regidx;
71
const ARMCPRegInfo *ri;
72
73
- regidx = *(uint32_t *)key;
74
- ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
75
+ ri = g_hash_table_lookup(cpu->cp_regs, key);
76
77
if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
78
cpu->cpreg_array_len++;
79
@@ -XXX,XX +XXX,XX @@ static void count_cpreg(gpointer key, gpointer opaque)
80
81
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
82
{
83
- uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
84
- uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
85
+ uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
86
+ uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);
87
88
if (aidx > bidx) {
89
return 1;
90
@@ -XXX,XX +XXX,XX @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
91
for (i = 0; i < ARRAY_SIZE(aliases); i++) {
92
const struct E2HAlias *a = &aliases[i];
93
ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
94
- uint32_t *new_key;
95
bool ok;
96
97
if (a->feature && !a->feature(&cpu->isar)) {
98
continue;
99
}
100
101
- src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
102
- dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
103
+ src_reg = g_hash_table_lookup(cpu->cp_regs,
104
+ (gpointer)(uintptr_t)a->src_key);
105
+ dst_reg = g_hash_table_lookup(cpu->cp_regs,
106
+ (gpointer)(uintptr_t)a->dst_key);
107
g_assert(src_reg != NULL);
108
g_assert(dst_reg != NULL);
109
110
@@ -XXX,XX +XXX,XX @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
111
112
/* Create alias before redirection so we dup the right data. */
113
new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
114
- new_key = g_memdup(&a->new_key, sizeof(uint32_t));
115
116
new_reg->name = a->new_name;
117
new_reg->type |= ARM_CP_ALIAS;
118
/* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
119
new_reg->access &= PL2_RW | PL3_RW;
120
121
- ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
122
+ ok = g_hash_table_insert(cpu->cp_regs,
123
+ (gpointer)(uintptr_t)a->new_key, new_reg);
124
g_assert(ok);
125
126
src_reg->opaque = dst_reg;
127
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
128
/* Private utility function for define_one_arm_cp_reg_with_opaque():
129
* add a single reginfo struct to the hash table.
130
*/
131
- uint32_t *key = g_new(uint32_t, 1);
132
+ uint32_t key;
133
ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
134
int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
135
int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
136
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
137
if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
138
r2->cp = CP_REG_ARM64_SYSREG_CP;
139
}
140
- *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
141
- r2->opc0, opc1, opc2);
142
+ key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
143
+ r2->opc0, opc1, opc2);
144
} else {
145
- *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
146
+ key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
147
}
148
if (opaque) {
149
r2->opaque = opaque;
150
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
151
* requested.
152
*/
153
if (!(r->type & ARM_CP_OVERRIDE)) {
154
- ARMCPRegInfo *oldreg;
155
- oldreg = g_hash_table_lookup(cpu->cp_regs, key);
156
+ const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
157
if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
158
fprintf(stderr, "Register redefined: cp=%d %d bit "
159
"crn=%d crm=%d opc1=%d opc2=%d, "
160
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
161
g_assert_not_reached();
162
}
163
}
164
- g_hash_table_insert(cpu->cp_regs, key, r2);
165
+ g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
78
}
166
}
79
167
80
/* Duplicate IN into OUT as per VECE. */
168
81
-static void gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
169
@@ -XXX,XX +XXX,XX @@ void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
82
+void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
170
171
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
83
{
172
{
84
switch (vece) {
173
- return g_hash_table_lookup(cpregs, &encoded_cp);
85
case MO_8:
174
+ return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
86
@@ -XXX,XX +XXX,XX @@ static void gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
87
}
88
}
175
}
89
176
90
-static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
177
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
91
+void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
92
{
93
switch (vece) {
94
case MO_8:
95
@@ -XXX,XX +XXX,XX @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
96
&& (vece != MO_32 || !check_size_impl(oprsz, 4))) {
97
t_64 = tcg_temp_new_i64();
98
tcg_gen_extu_i32_i64(t_64, in_32);
99
- gen_dup_i64(vece, t_64, t_64);
100
+ tcg_gen_dup_i64(vece, t_64, t_64);
101
} else {
102
t_32 = tcg_temp_new_i32();
103
- gen_dup_i32(vece, t_32, in_32);
104
+ tcg_gen_dup_i32(vece, t_32, in_32);
105
}
106
} else if (in_64) {
107
/* We are given a 64-bit variable input. */
108
t_64 = tcg_temp_new_i64();
109
- gen_dup_i64(vece, t_64, in_64);
110
+ tcg_gen_dup_i64(vece, t_64, in_64);
111
} else {
112
/* We are given a constant input. */
113
/* For 64-bit hosts, use 64-bit constants for "simple" constants
114
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
115
} else if (g->fni8 && check_size_impl(oprsz, 8)) {
116
TCGv_i64 t64 = tcg_temp_new_i64();
117
118
- gen_dup_i64(g->vece, t64, c);
119
+ tcg_gen_dup_i64(g->vece, t64, c);
120
expand_2s_i64(dofs, aofs, oprsz, t64, g->scalar_first, g->fni8);
121
tcg_temp_free_i64(t64);
122
} else if (g->fni4 && check_size_impl(oprsz, 4)) {
123
TCGv_i32 t32 = tcg_temp_new_i32();
124
125
tcg_gen_extrl_i64_i32(t32, c);
126
- gen_dup_i32(g->vece, t32, t32);
127
+ tcg_gen_dup_i32(g->vece, t32, t32);
128
expand_2s_i32(dofs, aofs, oprsz, t32, g->scalar_first, g->fni4);
129
tcg_temp_free_i32(t32);
130
} else {
131
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
132
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
133
{
134
TCGv_i64 tmp = tcg_temp_new_i64();
135
- gen_dup_i64(vece, tmp, c);
136
+ tcg_gen_dup_i64(vece, tmp, c);
137
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
138
tcg_temp_free_i64(tmp);
139
}
140
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
141
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
142
{
143
TCGv_i64 tmp = tcg_temp_new_i64();
144
- gen_dup_i64(vece, tmp, c);
145
+ tcg_gen_dup_i64(vece, tmp, c);
146
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
147
tcg_temp_free_i64(tmp);
148
}
149
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
150
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
151
{
152
TCGv_i64 tmp = tcg_temp_new_i64();
153
- gen_dup_i64(vece, tmp, c);
154
+ tcg_gen_dup_i64(vece, tmp, c);
155
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
156
tcg_temp_free_i64(tmp);
157
}
158
--
178
--
159
2.20.1
179
2.25.1
160
161
1
Factor the code in full_vfp_access_check() which updates the
1
From: Richard Henderson <richard.henderson@linaro.org>
2
ownership of the FP context and creates a new FP context
3
out into its own function.
4
2
3
Simplify freeing cp_regs hash table entries by using a single
4
allocation for the entire value.
5
6
This fixes a theoretical bug if we were to ever free the entire
7
hash table, because we've been installing string literal constants
8
into the cpreg structure in define_arm_vh_e2h_redirects_aliases.
9
However, at present we only free entries created for AArch32
10
wildcard cpregs which get overwritten by more specific cpregs,
11
so this bug is never exposed.
12
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
Message-id: 20220501055028.646596-13-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210618141019.10671-6-peter.maydell@linaro.org
8
---
17
---
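For reference, the single-allocation pattern in isolation: the struct and
its name string share one g_malloc block, so the hash table's single
g_free value-destroy function releases both. Type and function names here
are illustrative:

#include <glib.h>
#include <string.h>

typedef struct {
    const char *name;
    int type;
} Reg;

static Reg *reg_dup_with_name(const Reg *r, const char *name)
{
    size_t name_len = strlen(name) + 1;
    /* One block: the Reg itself, with the name bytes right after it. */
    Reg *r2 = g_malloc(sizeof(*r2) + name_len);

    *r2 = *r;
    r2->name = memcpy(r2 + 1, name, name_len);
    return r2;    /* released later by a single g_free() */
}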
9
target/arm/translate-vfp.c | 104 +++++++++++++++++++++----------------
18
target/arm/cpu.c | 16 +---------------
10
1 file changed, 58 insertions(+), 46 deletions(-)
19
target/arm/helper.c | 10 ++++++++--
20
2 files changed, 9 insertions(+), 17 deletions(-)
11
21
12
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
22
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
13
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/translate-vfp.c
24
--- a/target/arm/cpu.c
15
+++ b/target/arm/translate-vfp.c
25
+++ b/target/arm/cpu.c
16
@@ -XXX,XX +XXX,XX @@ void gen_preserve_fp_state(DisasContext *s)
26
@@ -XXX,XX +XXX,XX @@ uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
17
}
27
return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
18
}
28
}
19
29
20
+/*
30
-static void cpreg_hashtable_data_destroy(gpointer data)
21
+ * Generate code for M-profile FP context handling: update the
31
-{
22
+ * ownership of the FP context, and create a new context if
32
- /*
23
+ * necessary. This corresponds to the parts of the pseudocode
33
- * Destroy function for cpu->cp_regs hashtable data entries.
24
+ * ExecuteFPCheck() after the initial PreserveFPState() call.
34
- * We must free the name string because it was g_strdup()ed in
25
+ */
35
- * add_cpreg_to_hashtable(). It's OK to cast away the 'const'
26
+static void gen_update_fp_context(DisasContext *s)
36
- * from r->name because we know we definitely allocated it.
27
+{
37
- */
28
+ /* Update ownership of FP context: set FPCCR.S to match current state */
38
- ARMCPRegInfo *r = data;
29
+ if (s->v8m_fpccr_s_wrong) {
39
-
30
+ TCGv_i32 tmp;
40
- g_free((void *)r->name);
41
- g_free(r);
42
-}
43
-
44
static void arm_cpu_initfn(Object *obj)
45
{
46
ARMCPU *cpu = ARM_CPU(obj);
47
48
cpu_set_cpustate_pointers(cpu);
49
cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
50
- NULL, cpreg_hashtable_data_destroy);
51
+ NULL, g_free);
52
53
QLIST_INIT(&cpu->pre_el_change_hooks);
54
QLIST_INIT(&cpu->el_change_hooks);
55
diff --git a/target/arm/helper.c b/target/arm/helper.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/helper.c
58
+++ b/target/arm/helper.c
59
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
60
* add a single reginfo struct to the hash table.
61
*/
62
uint32_t key;
63
- ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
64
+ ARMCPRegInfo *r2;
65
int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
66
int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
67
+ size_t name_len;
31
+
68
+
32
+ tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
69
+ /* Combine cpreg and name into one allocation. */
33
+ if (s->v8m_secure) {
70
+ name_len = strlen(name) + 1;
34
+ tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
71
+ r2 = g_malloc(sizeof(*r2) + name_len);
35
+ } else {
72
+ *r2 = *r;
36
+ tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
73
+ r2->name = memcpy(r2 + 1, name, name_len);
37
+ }
74
38
+ store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
75
- r2->name = g_strdup(name);
39
+ /* Don't need to do this for any further FP insns in this TB */
76
/* Reset the secure state to the specific incoming state. This is
40
+ s->v8m_fpccr_s_wrong = false;
77
* necessary as the register may have been defined with both states.
41
+ }
78
*/
42
+
43
+ if (s->v7m_new_fp_ctxt_needed) {
44
+ /*
45
+ * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA,
46
+ * the FPSCR, and VPR.
47
+ */
48
+ TCGv_i32 control, fpscr;
49
+ uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
50
+
51
+ fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
52
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
53
+ tcg_temp_free_i32(fpscr);
54
+ if (dc_isar_feature(aa32_mve, s)) {
55
+ TCGv_i32 z32 = tcg_const_i32(0);
56
+ store_cpu_field(z32, v7m.vpr);
57
+ }
58
+
59
+ /*
60
+ * We don't need to arrange to end the TB, because the only
61
+ * parts of FPSCR which we cache in the TB flags are the VECLEN
62
+ * and VECSTRIDE, and those don't exist for M-profile.
63
+ */
64
+
65
+ if (s->v8m_secure) {
66
+ bits |= R_V7M_CONTROL_SFPA_MASK;
67
+ }
68
+ control = load_cpu_field(v7m.control[M_REG_S]);
69
+ tcg_gen_ori_i32(control, control, bits);
70
+ store_cpu_field(control, v7m.control[M_REG_S]);
71
+ /* Don't need to do this for any further FP insns in this TB */
72
+ s->v7m_new_fp_ctxt_needed = false;
73
+ }
74
+}
75
+
76
/*
77
* Check that VFP access is enabled. If it is, do the necessary
78
* M-profile lazy-FP handling and then return true.
79
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
80
/* Trigger lazy-state preservation if necessary */
81
gen_preserve_fp_state(s);
82
83
- /* Update ownership of FP context: set FPCCR.S to match current state */
84
- if (s->v8m_fpccr_s_wrong) {
85
- TCGv_i32 tmp;
86
-
87
- tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
88
- if (s->v8m_secure) {
89
- tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
90
- } else {
91
- tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
92
- }
93
- store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
94
- /* Don't need to do this for any further FP insns in this TB */
95
- s->v8m_fpccr_s_wrong = false;
96
- }
97
-
98
- if (s->v7m_new_fp_ctxt_needed) {
99
- /*
100
- * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA,
101
- * the FPSCR, and VPR.
102
- */
103
- TCGv_i32 control, fpscr;
104
- uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
105
-
106
- fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
107
- gen_helper_vfp_set_fpscr(cpu_env, fpscr);
108
- tcg_temp_free_i32(fpscr);
109
- if (dc_isar_feature(aa32_mve, s)) {
110
- TCGv_i32 z32 = tcg_const_i32(0);
111
- store_cpu_field(z32, v7m.vpr);
112
- }
113
-
114
- /*
115
- * We don't need to arrange to end the TB, because the only
116
- * parts of FPSCR which we cache in the TB flags are the VECLEN
117
- * and VECSTRIDE, and those don't exist for M-profile.
118
- */
119
-
120
- if (s->v8m_secure) {
121
- bits |= R_V7M_CONTROL_SFPA_MASK;
122
- }
123
- control = load_cpu_field(v7m.control[M_REG_S]);
124
- tcg_gen_ori_i32(control, control, bits);
125
- store_cpu_field(control, v7m.control[M_REG_S]);
126
- /* Don't need to do this for any further FP insns in this TB */
127
- s->v7m_new_fp_ctxt_needed = false;
128
- }
129
+ /* Update ownership of FP context and create new FP context if needed */
130
+ gen_update_fp_context(s);
131
}
132
133
return true;
134
--
79
--
135
2.20.1
80
2.25.1
136
137
1
The M-profile architecture requires that accesses to FPCXT_NS when
1
From: Richard Henderson <richard.henderson@linaro.org>
2
there is no active FP state must not take a NOCP fault even if the
3
FPU is disabled. We were not implementing this correctly, because
4
in our decode we catch the NOCP faults early in m-nocp.decode.
5
2
6
Fix this bug by moving all the handling of M-profile FP system
3
Move the computation of key to the top of the function.
7
register accesses from vfp.decode into m-nocp.decode and putting
4
Hoist the resolution of cp as well, as an input to the
8
it above the NOCP blocks. This provides the correct behaviour:
5
computation of key.
9
* for accesses other than FPCXT_NS the trans functions call
10
vfp_access_check(), which will check for FPU disabled and
11
raise a NOCP exception if necessary
12
* for FPCXT_NS we have the special case code that doesn't
13
call vfp_access_check()
14
* when these trans functions want to raise an UNDEF they return
15
false, so the decoder will fall through into the NOCP blocks.
16
This means that NOCP correctly takes precedence over UNDEF
17
for these insns. (This is a difference from the other insns
18
handled by m-nocp.decode, where UNDEF takes precedence and
19
which we implement by having those trans functions call
20
unallocated_encoding() in the appropriate places.)
21
6
22
[Note for backport to stable: this commit has a semantic dependency
7
This will be required by a subsequent patch.
23
on commit 9a486856e9173af, which was not marked as cc-stable because
24
we didn't know we'd need it for a for-stable bugfix.]
25
8
26
Cc: qemu-stable@nongnu.org
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Message-id: 20220501055028.646596-14-richard.henderson@linaro.org
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
29
Message-id: 20210618141019.10671-4-peter.maydell@linaro.org
30
---
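For reference, the shape of the hoisted computation: cp is resolved first
(a .cp left as 0 on an ARM_CP_STATE_BOTH register means cp15 for its
AArch32 encoding) and then feeds the key. The bit layout in this
standalone sketch is illustrative, not the real ENCODE_CP_REG:

#include <stdbool.h>
#include <stdint.h>

/* Toy key packing; field positions are illustrative only. */
static uint32_t encode_cp_reg(int cp, int is64, int ns,
                              int crn, int crm, int opc1, int opc2)
{
    return ((uint32_t)ns << 30) | ((uint32_t)is64 << 29) | (cp << 16) |
           (crn << 12) | (crm << 8) | (opc1 << 4) | opc2;
}

static uint32_t key_for_aa32(int cp, bool state_is_both, int is64, int ns,
                             int crn, int crm, int opc1, int opc2)
{
    if (cp == 0 && state_is_both) {
        cp = 15;    /* unset .cp on a STATE_BOTH reg defaults to cp15 */
    }
    return encode_cp_reg(cp, is64, ns, crn, crm, opc1, opc2);
}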
13
---
31
target/arm/translate-a32.h | 1 +
14
target/arm/helper.c | 49 +++++++++++++++++++++++++--------------------
32
target/arm/m-nocp.decode | 24 ++
15
1 file changed, 27 insertions(+), 22 deletions(-)
33
target/arm/vfp.decode | 14 -
34
target/arm/translate-m-nocp.c | 514 +++++++++++++++++++++++++++++++++
35
target/arm/translate-vfp.c | 517 +---------------------------------
36
5 files changed, 542 insertions(+), 528 deletions(-)
37
16
38
diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
17
diff --git a/target/arm/helper.c b/target/arm/helper.c
39
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
40
--- a/target/arm/translate-a32.h
19
--- a/target/arm/helper.c
41
+++ b/target/arm/translate-a32.h
20
+++ b/target/arm/helper.c
42
@@ -XXX,XX +XXX,XX @@ bool disas_neon_shared(DisasContext *s, uint32_t insn);
21
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
43
void load_reg_var(DisasContext *s, TCGv_i32 var, int reg);
22
ARMCPRegInfo *r2;
44
void arm_gen_condlabel(DisasContext *s);
23
int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
45
bool vfp_access_check(DisasContext *s);
24
int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
46
+void gen_preserve_fp_state(DisasContext *s);
25
+ int cp = r->cp;
47
void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop);
26
size_t name_len;
48
void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop);
27
49
void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop);
28
+ switch (state) {
50
diff --git a/target/arm/m-nocp.decode b/target/arm/m-nocp.decode
29
+ case ARM_CP_STATE_AA32:
51
index XXXXXXX..XXXXXXX 100644
30
+ /* We assume it is a cp15 register if the .cp field is left unset. */
52
--- a/target/arm/m-nocp.decode
31
+ if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
53
+++ b/target/arm/m-nocp.decode
32
+ cp = 15;
54
@@ -XXX,XX +XXX,XX @@
33
+ }
55
34
+ key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
56
&nocp cp
57
58
+# M-profile VLDR/VSTR to sysreg
59
+%vldr_sysreg 22:1 13:3
60
+%imm7_0x4 0:7 !function=times_4
61
+
62
+&vldr_sysreg rn reg imm a w p
63
+@vldr_sysreg .... ... . a:1 . . . rn:4 ... . ... .. ....... \
64
+ reg=%vldr_sysreg imm=%imm7_0x4 &vldr_sysreg
65
+
66
{
67
# Special cases which do not take an early NOCP: VLLDM and VLSTM
68
VLLDM_VLSTM 1110 1100 001 l:1 rn:4 0000 1010 op:1 000 0000
69
@@ -XXX,XX +XXX,XX @@
70
VSCCLRM 1110 1100 1.01 1111 .... 1011 imm:7 0 vd=%vd_dp size=3
71
VSCCLRM 1110 1100 1.01 1111 .... 1010 imm:8 vd=%vd_sp size=2
72
73
+ # FP system register accesses: these are a special case because accesses
74
+ # to FPCXT_NS succeed even if the FPU is disabled. We therefore need
75
+ # to handle them before the big NOCP blocks. Note that within these
76
+ # insns NOCP still has higher priority than UNDEFs; this is implemented
77
+ # by their returning 'false' for UNDEF so as to fall through into the
78
+ # NOCP check (in contrast to VLLDM etc, which call unallocated_encoding()
79
+ # for the UNDEFs there that must take precedence over NOCP.)
80
+
81
+ VMSR_VMRS ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
82
+
83
+ # P=0 W=0 is SEE "Related encodings", so split into two patterns
84
+ VLDR_sysreg ---- 110 1 . . w:1 1 .... ... 0 111 11 ....... @vldr_sysreg p=1
85
+ VLDR_sysreg ---- 110 0 . . 1 1 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
86
+ VSTR_sysreg ---- 110 1 . . w:1 0 .... ... 0 111 11 ....... @vldr_sysreg p=1
87
+ VSTR_sysreg ---- 110 0 . . 1 0 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
88
+
89
NOCP 111- 1110 ---- ---- ---- cp:4 ---- ---- &nocp
90
NOCP 111- 110- ---- ---- ---- cp:4 ---- ---- &nocp
91
# From v8.1M onwards this range will also NOCP:
92
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
93
index XXXXXXX..XXXXXXX 100644
94
--- a/target/arm/vfp.decode
95
+++ b/target/arm/vfp.decode
96
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR_hp ---- 1101 u:1 .0 l:1 rn:4 .... 1001 imm:8 vd=%vd_sp
97
VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 vd=%vd_sp
98
VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 vd=%vd_dp
99
100
-# M-profile VLDR/VSTR to sysreg
101
-%vldr_sysreg 22:1 13:3
102
-%imm7_0x4 0:7 !function=times_4
103
-
104
-&vldr_sysreg rn reg imm a w p
105
-@vldr_sysreg .... ... . a:1 . . . rn:4 ... . ... .. ....... \
106
- reg=%vldr_sysreg imm=%imm7_0x4 &vldr_sysreg
107
-
108
-# P=0 W=0 is SEE "Related encodings", so split into two patterns
109
-VLDR_sysreg ---- 110 1 . . w:1 1 .... ... 0 111 11 ....... @vldr_sysreg p=1
110
-VLDR_sysreg ---- 110 0 . . 1 1 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
111
-VSTR_sysreg ---- 110 1 . . w:1 0 .... ... 0 111 11 ....... @vldr_sysreg p=1
112
-VSTR_sysreg ---- 110 0 . . 1 0 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
113
-
114
# We split the load/store multiple up into two patterns to avoid
115
# overlap with other insns in the "Advanced SIMD load/store and 64-bit move"
116
# grouping:
117
diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c
118
index XXXXXXX..XXXXXXX 100644
119
--- a/target/arm/translate-m-nocp.c
120
+++ b/target/arm/translate-m-nocp.c
121
@@ -XXX,XX +XXX,XX @@
122
123
#include "qemu/osdep.h"
124
#include "tcg/tcg-op.h"
125
+#include "tcg/tcg-op-gvec.h"
126
#include "translate.h"
127
#include "translate-a32.h"
128
129
@@ -XXX,XX +XXX,XX @@ static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
130
return true;
131
}
132
133
+/*
134
+ * M-profile provides two different sets of instructions that can
135
+ * access floating point system registers: VMSR/VMRS (which move
136
+ * to/from a general purpose register) and VLDR/VSTR sysreg (which
137
+ * move directly to/from memory). In some cases there are also side
138
+ * effects which must happen after any write to memory (which could
139
+ * cause an exception). So we implement the common logic for the
140
+ * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
141
+ * which take pointers to callback functions which will perform the
142
+ * actual "read/write general purpose register" and "read/write
143
+ * memory" operations.
144
+ */
145
+
146
+/*
147
+ * Emit code to store the sysreg to its final destination; frees the
148
+ * TCG temp 'value' it is passed.
149
+ */
150
+typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
151
+/*
152
+ * Emit code to load the value to be copied to the sysreg; returns
153
+ * a new TCG temporary
154
+ */
155
+typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);
156
+
157
+/* Common decode/access checks for fp sysreg read/write */
158
+typedef enum FPSysRegCheckResult {
159
+ FPSysRegCheckFailed, /* caller should return false */
160
+ FPSysRegCheckDone, /* caller should return true */
161
+ FPSysRegCheckContinue, /* caller should continue generating code */
162
+} FPSysRegCheckResult;
163
+
164
+static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
165
+{
166
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
167
+ return FPSysRegCheckFailed;
168
+ }
169
+
170
+ switch (regno) {
171
+ case ARM_VFP_FPSCR:
172
+ case QEMU_VFP_FPSCR_NZCV:
173
+ break;
+ case ARM_VFP_FPSCR_NZCVQC:
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return FPSysRegCheckFailed;
+ }
+ break;
179
+ case ARM_VFP_FPCXT_S:
180
+ case ARM_VFP_FPCXT_NS:
181
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
182
+ return FPSysRegCheckFailed;
183
+ }
184
+ if (!s->v8m_secure) {
185
+ return FPSysRegCheckFailed;
186
+ }
187
+ break;
188
+ case ARM_VFP_VPR:
189
+ case ARM_VFP_P0:
190
+ if (!dc_isar_feature(aa32_mve, s)) {
191
+ return FPSysRegCheckFailed;
192
+ }
193
+ break;
194
+ default:
195
+ return FPSysRegCheckFailed;
196
+ }
197
+
198
+ /*
199
+ * FPCXT_NS is a special case: it has specific handling for
200
+ * "current FP state is inactive", and must do the PreserveFPState()
201
+ * but not the usual full set of actions done by ExecuteFPCheck().
202
+ * So we don't call vfp_access_check() and the callers must handle this.
203
+ */
204
+ if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
205
+ return FPSysRegCheckDone;
206
+ }
207
+ return FPSysRegCheckContinue;
208
+}
209
+
210
+static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
211
+ TCGLabel *label)
212
+{
213
+ /*
214
+ * FPCXT_NS is a special case: it has specific handling for
215
+ * "current FP state is inactive", and must do the PreserveFPState()
216
+ * but not the usual full set of actions done by ExecuteFPCheck().
217
+ * We don't have a TB flag that matches the fpInactive check, so we
218
+ * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
219
+ *
220
+ * Emit code that checks fpInactive and does a conditional
221
+ * branch to label based on it:
222
+ * if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
223
+ * if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
224
+ */
225
+ assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);
226
+
227
+ /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
228
+ TCGv_i32 aspen, fpca;
229
+ aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
230
+ fpca = load_cpu_field(v7m.control[M_REG_S]);
231
+ tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
232
+ tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
233
+ tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
234
+ tcg_gen_or_i32(fpca, fpca, aspen);
235
+ tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
236
+ tcg_temp_free_i32(aspen);
237
+ tcg_temp_free_i32(fpca);
238
+}
239
+
240
+static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
241
+ fp_sysreg_loadfn *loadfn,
242
+ void *opaque)
243
+{
244
+ /* Do a write to an M-profile floating point system register */
245
+ TCGv_i32 tmp;
246
+ TCGLabel *lab_end = NULL;
247
+
248
+ switch (fp_sysreg_checks(s, regno)) {
249
+ case FPSysRegCheckFailed:
250
+ return false;
251
+ case FPSysRegCheckDone:
252
+ return true;
253
+ case FPSysRegCheckContinue:
254
+ break;
255
+ }
256
+
257
+ switch (regno) {
258
+ case ARM_VFP_FPSCR:
259
+ tmp = loadfn(s, opaque);
260
+ gen_helper_vfp_set_fpscr(cpu_env, tmp);
261
+ tcg_temp_free_i32(tmp);
262
+ gen_lookup_tb(s);
263
+ break;
264
+ case ARM_VFP_FPSCR_NZCVQC:
265
+ {
266
+ TCGv_i32 fpscr;
267
+ tmp = loadfn(s, opaque);
268
+ if (dc_isar_feature(aa32_mve, s)) {
269
+ /* QC is only present for MVE; otherwise RES0 */
270
+ TCGv_i32 qc = tcg_temp_new_i32();
271
+ tcg_gen_andi_i32(qc, tmp, FPCR_QC);
272
+ /*
273
+ * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
274
+ * here writing the same value into all elements is simplest.
275
+ */
276
+ tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
277
+ 16, 16, qc);
278
+ }
279
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
280
+ fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
281
+ tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
282
+ tcg_gen_or_i32(fpscr, fpscr, tmp);
283
+ store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
284
+ tcg_temp_free_i32(tmp);
285
+ break;
286
+ }
287
+ case ARM_VFP_FPCXT_NS:
288
+ lab_end = gen_new_label();
289
+ /* fpInactive case: write is a NOP, so branch to end */
290
+ gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
291
+ /*
292
+ * !fpInactive: if FPU disabled, take NOCP exception;
293
+ * otherwise PreserveFPState(), and then FPCXT_NS writes
294
+ * behave the same as FPCXT_S writes.
295
+ */
296
+ if (s->fp_excp_el) {
297
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
298
+ syn_uncategorized(), s->fp_excp_el);
299
+ /*
300
+ * This was only a conditional exception, so override
301
+ * gen_exception_insn()'s default to DISAS_NORETURN
302
+ */
303
+ s->base.is_jmp = DISAS_NEXT;
304
+ break;
305
+ }
306
+ gen_preserve_fp_state(s);
307
+ /* fall through */
308
+ case ARM_VFP_FPCXT_S:
309
+ {
310
+ TCGv_i32 sfpa, control;
311
+ /*
312
+ * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
313
+ * bits [27:0] from value and zeroes bits [31:28].
314
+ */
315
+ tmp = loadfn(s, opaque);
316
+ sfpa = tcg_temp_new_i32();
317
+ tcg_gen_shri_i32(sfpa, tmp, 31);
318
+ control = load_cpu_field(v7m.control[M_REG_S]);
319
+ tcg_gen_deposit_i32(control, control, sfpa,
320
+ R_V7M_CONTROL_SFPA_SHIFT, 1);
321
+ store_cpu_field(control, v7m.control[M_REG_S]);
322
+ tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
323
+ gen_helper_vfp_set_fpscr(cpu_env, tmp);
324
+ tcg_temp_free_i32(tmp);
325
+ tcg_temp_free_i32(sfpa);
326
+ break;
327
+ }
328
+ case ARM_VFP_VPR:
329
+ /* Behaves as NOP if not privileged */
330
+ if (IS_USER(s)) {
331
+ break;
332
+ }
333
+ tmp = loadfn(s, opaque);
334
+ store_cpu_field(tmp, v7m.vpr);
335
+ break;
336
+ case ARM_VFP_P0:
337
+ {
338
+ TCGv_i32 vpr;
339
+ tmp = loadfn(s, opaque);
340
+ vpr = load_cpu_field(v7m.vpr);
341
+ tcg_gen_deposit_i32(vpr, vpr, tmp,
342
+ R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
343
+ store_cpu_field(vpr, v7m.vpr);
344
+ tcg_temp_free_i32(tmp);
345
+ break;
346
+ }
347
+ default:
348
+ g_assert_not_reached();
349
+ }
350
+ if (lab_end) {
351
+ gen_set_label(lab_end);
352
+ }
353
+ return true;
354
+}
355
+
356
+static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
357
+ fp_sysreg_storefn *storefn,
358
+ void *opaque)
359
+{
360
+ /* Do a read from an M-profile floating point system register */
361
+ TCGv_i32 tmp;
362
+ TCGLabel *lab_end = NULL;
363
+ bool lookup_tb = false;
364
+
365
+ switch (fp_sysreg_checks(s, regno)) {
366
+ case FPSysRegCheckFailed:
367
+ return false;
368
+ case FPSysRegCheckDone:
369
+ return true;
370
+ case FPSysRegCheckContinue:
371
+ break;
372
+ }
373
+
374
+ if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
375
+ /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
376
+ regno = QEMU_VFP_FPSCR_NZCV;
377
+ }
378
+
379
+ switch (regno) {
380
+ case ARM_VFP_FPSCR:
381
+ tmp = tcg_temp_new_i32();
382
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
383
+ storefn(s, opaque, tmp);
384
+ break;
385
+ case ARM_VFP_FPSCR_NZCVQC:
386
+ tmp = tcg_temp_new_i32();
387
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
388
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
389
+ storefn(s, opaque, tmp);
390
+ break;
391
+ case QEMU_VFP_FPSCR_NZCV:
392
+ /*
393
+ * Read just NZCV; this is a special case to avoid the
394
+ * helper call for the "VMRS to CPSR.NZCV" insn.
395
+ */
396
+ tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
397
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
398
+ storefn(s, opaque, tmp);
399
+ break;
400
+ case ARM_VFP_FPCXT_S:
401
+ {
402
+ TCGv_i32 control, sfpa, fpscr;
403
+ /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
404
+ tmp = tcg_temp_new_i32();
405
+ sfpa = tcg_temp_new_i32();
406
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
407
+ tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
408
+ control = load_cpu_field(v7m.control[M_REG_S]);
409
+ tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
410
+ tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
411
+ tcg_gen_or_i32(tmp, tmp, sfpa);
412
+ tcg_temp_free_i32(sfpa);
413
+ /*
414
+ * Store result before updating FPSCR etc, in case
415
+ * it is a memory write which causes an exception.
416
+ */
417
+ storefn(s, opaque, tmp);
418
+ /*
419
+ * Now we must reset FPSCR from FPDSCR_NS, and clear
420
+ * CONTROL.SFPA; so we'll end the TB here.
421
+ */
422
+ tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
423
+ store_cpu_field(control, v7m.control[M_REG_S]);
424
+ fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
425
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
426
+ tcg_temp_free_i32(fpscr);
427
+ lookup_tb = true;
428
+ break;
429
+ }
430
+ case ARM_VFP_FPCXT_NS:
431
+ {
432
+ TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
433
+ TCGLabel *lab_active = gen_new_label();
434
+
435
+ lookup_tb = true;
436
+
437
+ gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
438
+ /* fpInactive case: reads as FPDSCR_NS */
439
+ TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
440
+ storefn(s, opaque, tmp);
441
+ lab_end = gen_new_label();
442
+ tcg_gen_br(lab_end);
443
+
444
+ gen_set_label(lab_active);
445
+ /*
446
+ * !fpInactive: if FPU disabled, take NOCP exception;
447
+ * otherwise PreserveFPState(), and then FPCXT_NS
448
+ * reads the same as FPCXT_S.
449
+ */
450
+ if (s->fp_excp_el) {
451
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
452
+ syn_uncategorized(), s->fp_excp_el);
453
+ /*
454
+ * This was only a conditional exception, so override
455
+ * gen_exception_insn()'s default to DISAS_NORETURN
456
+ */
457
+ s->base.is_jmp = DISAS_NEXT;
458
+ break;
459
+ }
460
+ gen_preserve_fp_state(s);
461
+ tmp = tcg_temp_new_i32();
462
+ sfpa = tcg_temp_new_i32();
463
+ fpscr = tcg_temp_new_i32();
464
+ gen_helper_vfp_get_fpscr(fpscr, cpu_env);
465
+ tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
466
+ control = load_cpu_field(v7m.control[M_REG_S]);
467
+ tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
468
+ tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
469
+ tcg_gen_or_i32(tmp, tmp, sfpa);
470
+ tcg_temp_free_i32(control);
471
+ /* Store result before updating FPSCR, in case it faults */
472
+ storefn(s, opaque, tmp);
473
+ /* If SFPA is zero then set FPSCR from FPDSCR_NS */
474
+ fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
475
+ zero = tcg_const_i32(0);
476
+ tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
477
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
478
+ tcg_temp_free_i32(zero);
479
+ tcg_temp_free_i32(sfpa);
480
+ tcg_temp_free_i32(fpdscr);
481
+ tcg_temp_free_i32(fpscr);
482
+ break;
483
+ }
484
+ case ARM_VFP_VPR:
485
+ /* Behaves as NOP if not privileged */
486
+ if (IS_USER(s)) {
487
+ break;
488
+ }
489
+ tmp = load_cpu_field(v7m.vpr);
490
+ storefn(s, opaque, tmp);
491
+ break;
492
+ case ARM_VFP_P0:
493
+ tmp = load_cpu_field(v7m.vpr);
494
+ tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
495
+ storefn(s, opaque, tmp);
496
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (lab_end) {
+ gen_set_label(lab_end);
+ }
+ if (lookup_tb) {
+ gen_lookup_tb(s);
+ }
+ return true;
+}
+
+static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
+{
+ arg_VMSR_VMRS *a = opaque;
+
+ if (a->rt == 15) {
515
+ /* Set the 4 flag bits in the CPSR */
516
+ gen_set_nzcv(value);
517
+ tcg_temp_free_i32(value);
518
+ } else {
519
+ store_reg(s, a->rt, value);
520
+ }
521
+}
522
+
523
+static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
524
+{
525
+ arg_VMSR_VMRS *a = opaque;
526
+
527
+ return load_reg(s, a->rt);
528
+}
529
+
530
+static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
531
+{
532
+ /*
533
+ * Accesses to R15 are UNPREDICTABLE; we choose to undef.
534
+ * FPSCR -> r15 is a special case which writes to the PSR flags;
535
+ * set a->reg to a special value to tell gen_M_fp_sysreg_read()
536
+ * we only care about the top 4 bits of FPSCR there.
537
+ */
538
+ if (a->rt == 15) {
539
+ if (a->l && a->reg == ARM_VFP_FPSCR) {
540
+ a->reg = QEMU_VFP_FPSCR_NZCV;
541
+ } else {
542
+ return false;
543
+ }
544
+ }
545
+
546
+ if (a->l) {
547
+ /* VMRS, move FP system register to gp register */
548
+ return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
549
+ } else {
550
+ /* VMSR, move gp register to FP system register */
551
+ return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
552
+ }
553
+}
554
+
555
+static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
556
+{
557
+ arg_vldr_sysreg *a = opaque;
558
+ uint32_t offset = a->imm;
559
+ TCGv_i32 addr;
560
+
561
+ if (!a->a) {
562
+ offset = -offset;
563
+ }
564
+
565
+ addr = load_reg(s, a->rn);
566
+ if (a->p) {
567
+ tcg_gen_addi_i32(addr, addr, offset);
568
+ }
569
+
570
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
571
+ gen_helper_v8m_stackcheck(cpu_env, addr);
572
+ }
573
+
574
+ gen_aa32_st_i32(s, value, addr, get_mem_index(s),
575
+ MO_UL | MO_ALIGN | s->be_data);
576
+ tcg_temp_free_i32(value);
577
+
578
+ if (a->w) {
579
+ /* writeback */
580
+ if (!a->p) {
581
+ tcg_gen_addi_i32(addr, addr, offset);
582
+ }
583
+ store_reg(s, a->rn, addr);
584
+ } else {
585
+ tcg_temp_free_i32(addr);
586
+ }
587
+}
588
+
589
+static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
590
+{
591
+ arg_vldr_sysreg *a = opaque;
592
+ uint32_t offset = a->imm;
593
+ TCGv_i32 addr;
594
+ TCGv_i32 value = tcg_temp_new_i32();
595
+
596
+ if (!a->a) {
597
+ offset = -offset;
598
+ }
599
+
600
+ addr = load_reg(s, a->rn);
601
+ if (a->p) {
602
+ tcg_gen_addi_i32(addr, addr, offset);
603
+ }
604
+
605
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
606
+ gen_helper_v8m_stackcheck(cpu_env, addr);
607
+ }
608
+
609
+ gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
610
+ MO_UL | MO_ALIGN | s->be_data);
611
+
612
+ if (a->w) {
613
+ /* writeback */
614
+ if (!a->p) {
615
+ tcg_gen_addi_i32(addr, addr, offset);
616
+ }
617
+ store_reg(s, a->rn, addr);
618
+ } else {
619
+ tcg_temp_free_i32(addr);
620
+ }
621
+ return value;
622
+}
623
+
624
+static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
625
+{
626
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
627
+ return false;
628
+ }
629
+ if (a->rn == 15) {
630
+ return false;
631
+ }
632
+ return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
633
+}
634
+
635
+static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
636
+{
637
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
638
+ return false;
639
+ }
640
+ if (a->rn == 15) {
641
+ return false;
642
+ }
643
+ return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
644
+}
645
+
646
static bool trans_NOCP(DisasContext *s, arg_nocp *a)
647
{
648
/*
649
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
650
index XXXXXXX..XXXXXXX 100644
651
--- a/target/arm/translate-vfp.c
652
+++ b/target/arm/translate-vfp.c
653
@@ -XXX,XX +XXX,XX @@ static inline long vfp_f16_offset(unsigned reg, bool top)
654
* Generate code for M-profile lazy FP state preservation if needed;
655
* this corresponds to the pseudocode PreserveFPState() function.
656
*/
657
-static void gen_preserve_fp_state(DisasContext *s)
658
+void gen_preserve_fp_state(DisasContext *s)
659
{
660
if (s->v7m_lspact) {
661
/*
662
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
663
return true;
664
}
665
666
-/*
667
- * M-profile provides two different sets of instructions that can
668
- * access floating point system registers: VMSR/VMRS (which move
669
- * to/from a general purpose register) and VLDR/VSTR sysreg (which
670
- * move directly to/from memory). In some cases there are also side
671
- * effects which must happen after any write to memory (which could
672
- * cause an exception). So we implement the common logic for the
673
- * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
674
- * which take pointers to callback functions which will perform the
675
- * actual "read/write general purpose register" and "read/write
676
- * memory" operations.
677
- */
678
-
-/*
- * Emit code to store the sysreg to its final destination; frees the
- * TCG temp 'value' it is passed.
- */
-typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
-/*
- * Emit code to load the value to be copied to the sysreg; returns
- * a new TCG temporary
- */
-typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);
-
-/* Common decode/access checks for fp sysreg read/write */
-typedef enum FPSysRegCheckResult {
- FPSysRegCheckFailed, /* caller should return false */
- FPSysRegCheckDone, /* caller should return true */
- FPSysRegCheckContinue, /* caller should continue generating code */
-} FPSysRegCheckResult;
-
-static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
-{
- if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
- return FPSysRegCheckFailed;
- }
-
- switch (regno) {
704
- case ARM_VFP_FPSCR:
705
- case QEMU_VFP_FPSCR_NZCV:
706
- break;
707
- case ARM_VFP_FPSCR_NZCVQC:
708
- if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
709
- return FPSysRegCheckFailed;
710
- }
711
- break;
712
- case ARM_VFP_FPCXT_S:
713
- case ARM_VFP_FPCXT_NS:
714
- if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
715
- return FPSysRegCheckFailed;
716
- }
717
- if (!s->v8m_secure) {
718
- return FPSysRegCheckFailed;
719
- }
720
- break;
721
- case ARM_VFP_VPR:
722
- case ARM_VFP_P0:
723
- if (!dc_isar_feature(aa32_mve, s)) {
724
- return FPSysRegCheckFailed;
725
- }
726
- break;
727
- default:
728
- return FPSysRegCheckFailed;
729
- }
730
-
731
- /*
732
- * FPCXT_NS is a special case: it has specific handling for
733
- * "current FP state is inactive", and must do the PreserveFPState()
734
- * but not the usual full set of actions done by ExecuteFPCheck().
735
- * So we don't call vfp_access_check() and the callers must handle this.
736
- */
737
- if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
738
- return FPSysRegCheckDone;
739
- }
740
- return FPSysRegCheckContinue;
741
-}
742
-
743
-static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
744
- TCGLabel *label)
745
-{
746
- /*
747
- * FPCXT_NS is a special case: it has specific handling for
748
- * "current FP state is inactive", and must do the PreserveFPState()
749
- * but not the usual full set of actions done by ExecuteFPCheck().
750
- * We don't have a TB flag that matches the fpInactive check, so we
751
- * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
752
- *
753
- * Emit code that checks fpInactive and does a conditional
754
- * branch to label based on it:
755
- * if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
756
- * if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
757
- */
758
- assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);
759
-
760
- /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
761
- TCGv_i32 aspen, fpca;
762
- aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
763
- fpca = load_cpu_field(v7m.control[M_REG_S]);
764
- tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
765
- tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
766
- tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
767
- tcg_gen_or_i32(fpca, fpca, aspen);
768
- tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
769
- tcg_temp_free_i32(aspen);
770
- tcg_temp_free_i32(fpca);
771
-}
772
-
773
-static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
774
- fp_sysreg_loadfn *loadfn,
775
- void *opaque)
776
-{
777
- /* Do a write to an M-profile floating point system register */
778
- TCGv_i32 tmp;
779
- TCGLabel *lab_end = NULL;
780
-
781
- switch (fp_sysreg_checks(s, regno)) {
782
- case FPSysRegCheckFailed:
783
- return false;
784
- case FPSysRegCheckDone:
785
- return true;
786
- case FPSysRegCheckContinue:
787
- break;
788
- }
789
-
790
- switch (regno) {
791
- case ARM_VFP_FPSCR:
792
- tmp = loadfn(s, opaque);
793
- gen_helper_vfp_set_fpscr(cpu_env, tmp);
794
- tcg_temp_free_i32(tmp);
795
- gen_lookup_tb(s);
796
- break;
797
- case ARM_VFP_FPSCR_NZCVQC:
798
- {
799
- TCGv_i32 fpscr;
800
- tmp = loadfn(s, opaque);
801
- if (dc_isar_feature(aa32_mve, s)) {
802
- /* QC is only present for MVE; otherwise RES0 */
803
- TCGv_i32 qc = tcg_temp_new_i32();
804
- tcg_gen_andi_i32(qc, tmp, FPCR_QC);
805
- /*
806
- * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
807
- * here writing the same value into all elements is simplest.
808
- */
809
- tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
810
- 16, 16, qc);
811
- }
812
- tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
813
- fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
814
- tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
815
- tcg_gen_or_i32(fpscr, fpscr, tmp);
816
- store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
817
- tcg_temp_free_i32(tmp);
818
- break;
819
- }
820
- case ARM_VFP_FPCXT_NS:
821
- lab_end = gen_new_label();
822
- /* fpInactive case: write is a NOP, so branch to end */
823
- gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
824
- /*
825
- * !fpInactive: if FPU disabled, take NOCP exception;
826
- * otherwise PreserveFPState(), and then FPCXT_NS writes
827
- * behave the same as FPCXT_S writes.
828
- */
829
- if (s->fp_excp_el) {
830
- gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
831
- syn_uncategorized(), s->fp_excp_el);
832
- /*
833
- * This was only a conditional exception, so override
834
- * gen_exception_insn()'s default to DISAS_NORETURN
835
- */
836
- s->base.is_jmp = DISAS_NEXT;
837
- break;
838
- }
839
- gen_preserve_fp_state(s);
840
- /* fall through */
841
- case ARM_VFP_FPCXT_S:
842
- {
843
- TCGv_i32 sfpa, control;
844
- /*
845
- * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
846
- * bits [27:0] from value and zeroes bits [31:28].
847
- */
848
- tmp = loadfn(s, opaque);
849
- sfpa = tcg_temp_new_i32();
850
- tcg_gen_shri_i32(sfpa, tmp, 31);
851
- control = load_cpu_field(v7m.control[M_REG_S]);
852
- tcg_gen_deposit_i32(control, control, sfpa,
853
- R_V7M_CONTROL_SFPA_SHIFT, 1);
854
- store_cpu_field(control, v7m.control[M_REG_S]);
855
- tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
856
- gen_helper_vfp_set_fpscr(cpu_env, tmp);
857
- tcg_temp_free_i32(tmp);
858
- tcg_temp_free_i32(sfpa);
859
- break;
860
- }
861
- case ARM_VFP_VPR:
862
- /* Behaves as NOP if not privileged */
863
- if (IS_USER(s)) {
864
- break;
865
- }
866
- tmp = loadfn(s, opaque);
867
- store_cpu_field(tmp, v7m.vpr);
868
- break;
869
- case ARM_VFP_P0:
870
- {
871
- TCGv_i32 vpr;
872
- tmp = loadfn(s, opaque);
873
- vpr = load_cpu_field(v7m.vpr);
874
- tcg_gen_deposit_i32(vpr, vpr, tmp,
875
- R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
876
- store_cpu_field(vpr, v7m.vpr);
877
- tcg_temp_free_i32(tmp);
878
- break;
879
- }
880
- default:
881
- g_assert_not_reached();
882
- }
883
- if (lab_end) {
884
- gen_set_label(lab_end);
885
- }
886
- return true;
887
-}
888
-
889
-static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
890
- fp_sysreg_storefn *storefn,
891
- void *opaque)
892
-{
893
- /* Do a read from an M-profile floating point system register */
894
- TCGv_i32 tmp;
895
- TCGLabel *lab_end = NULL;
896
- bool lookup_tb = false;
897
-
898
- switch (fp_sysreg_checks(s, regno)) {
899
- case FPSysRegCheckFailed:
900
- return false;
901
- case FPSysRegCheckDone:
902
- return true;
903
- case FPSysRegCheckContinue:
904
- break;
905
- }
906
-
907
- if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
908
- /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
909
- regno = QEMU_VFP_FPSCR_NZCV;
910
- }
911
-
912
- switch (regno) {
913
- case ARM_VFP_FPSCR:
914
- tmp = tcg_temp_new_i32();
915
- gen_helper_vfp_get_fpscr(tmp, cpu_env);
916
- storefn(s, opaque, tmp);
917
- break;
918
- case ARM_VFP_FPSCR_NZCVQC:
919
- tmp = tcg_temp_new_i32();
920
- gen_helper_vfp_get_fpscr(tmp, cpu_env);
921
- tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
922
- storefn(s, opaque, tmp);
923
- break;
924
- case QEMU_VFP_FPSCR_NZCV:
925
- /*
926
- * Read just NZCV; this is a special case to avoid the
927
- * helper call for the "VMRS to CPSR.NZCV" insn.
928
- */
929
- tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
930
- tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
931
- storefn(s, opaque, tmp);
932
- break;
933
- case ARM_VFP_FPCXT_S:
934
- {
935
- TCGv_i32 control, sfpa, fpscr;
936
- /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
937
- tmp = tcg_temp_new_i32();
938
- sfpa = tcg_temp_new_i32();
939
- gen_helper_vfp_get_fpscr(tmp, cpu_env);
940
- tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
941
- control = load_cpu_field(v7m.control[M_REG_S]);
942
- tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
943
- tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
944
- tcg_gen_or_i32(tmp, tmp, sfpa);
945
- tcg_temp_free_i32(sfpa);
946
- /*
947
- * Store result before updating FPSCR etc, in case
948
- * it is a memory write which causes an exception.
949
- */
950
- storefn(s, opaque, tmp);
951
- /*
952
- * Now we must reset FPSCR from FPDSCR_NS, and clear
953
- * CONTROL.SFPA; so we'll end the TB here.
954
- */
955
- tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
956
- store_cpu_field(control, v7m.control[M_REG_S]);
957
- fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
958
- gen_helper_vfp_set_fpscr(cpu_env, fpscr);
959
- tcg_temp_free_i32(fpscr);
960
- lookup_tb = true;
961
- break;
962
- }
963
- case ARM_VFP_FPCXT_NS:
964
- {
965
- TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
966
- TCGLabel *lab_active = gen_new_label();
967
-
968
- lookup_tb = true;
969
-
970
- gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
971
- /* fpInactive case: reads as FPDSCR_NS */
972
- TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
973
- storefn(s, opaque, tmp);
974
- lab_end = gen_new_label();
975
- tcg_gen_br(lab_end);
976
-
977
- gen_set_label(lab_active);
978
- /*
979
- * !fpInactive: if FPU disabled, take NOCP exception;
980
- * otherwise PreserveFPState(), and then FPCXT_NS
981
- * reads the same as FPCXT_S.
982
- */
983
- if (s->fp_excp_el) {
984
- gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
985
- syn_uncategorized(), s->fp_excp_el);
986
- /*
987
- * This was only a conditional exception, so override
988
- * gen_exception_insn()'s default to DISAS_NORETURN
989
- */
990
- s->base.is_jmp = DISAS_NEXT;
991
- break;
992
- }
993
- gen_preserve_fp_state(s);
994
- tmp = tcg_temp_new_i32();
995
- sfpa = tcg_temp_new_i32();
996
- fpscr = tcg_temp_new_i32();
997
- gen_helper_vfp_get_fpscr(fpscr, cpu_env);
998
- tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
999
- control = load_cpu_field(v7m.control[M_REG_S]);
1000
- tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
1001
- tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
1002
- tcg_gen_or_i32(tmp, tmp, sfpa);
1003
- tcg_temp_free_i32(control);
1004
- /* Store result before updating FPSCR, in case it faults */
1005
- storefn(s, opaque, tmp);
1006
- /* If SFPA is zero then set FPSCR from FPDSCR_NS */
1007
- fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
1008
- zero = tcg_const_i32(0);
1009
- tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
1010
- gen_helper_vfp_set_fpscr(cpu_env, fpscr);
1011
- tcg_temp_free_i32(zero);
1012
- tcg_temp_free_i32(sfpa);
1013
- tcg_temp_free_i32(fpdscr);
1014
- tcg_temp_free_i32(fpscr);
1015
- break;
1016
- }
1017
- case ARM_VFP_VPR:
1018
- /* Behaves as NOP if not privileged */
1019
- if (IS_USER(s)) {
1020
- break;
1021
- }
1022
- tmp = load_cpu_field(v7m.vpr);
1023
- storefn(s, opaque, tmp);
1024
- break;
1025
- case ARM_VFP_P0:
1026
- tmp = load_cpu_field(v7m.vpr);
1027
- tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
1028
- storefn(s, opaque, tmp);
1029
- break;
1030
- default:
1031
- g_assert_not_reached();
1032
- }
1033
-
1034
- if (lab_end) {
1035
- gen_set_label(lab_end);
1036
- }
1037
- if (lookup_tb) {
1038
- gen_lookup_tb(s);
1039
- }
1040
- return true;
1041
-}
1042
-
1043
-static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
1044
-{
1045
- arg_VMSR_VMRS *a = opaque;
1046
-
1047
- if (a->rt == 15) {
1048
- /* Set the 4 flag bits in the CPSR */
1049
- gen_set_nzcv(value);
1050
- tcg_temp_free_i32(value);
1051
- } else {
1052
- store_reg(s, a->rt, value);
1053
- }
1054
-}
1055
-
1056
-static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
1057
-{
1058
- arg_VMSR_VMRS *a = opaque;
1059
-
1060
- return load_reg(s, a->rt);
1061
-}
1062
-
1063
-static bool gen_M_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
1064
-{
1065
- /*
1066
- * Accesses to R15 are UNPREDICTABLE; we choose to undef.
1067
- * FPSCR -> r15 is a special case which writes to the PSR flags;
1068
- * set a->reg to a special value to tell gen_M_fp_sysreg_read()
1069
- * we only care about the top 4 bits of FPSCR there.
1070
- */
1071
- if (a->rt == 15) {
1072
- if (a->l && a->reg == ARM_VFP_FPSCR) {
1073
- a->reg = QEMU_VFP_FPSCR_NZCV;
1074
- } else {
1075
- return false;
1076
- }
1077
- }
1078
-
1079
- if (a->l) {
1080
- /* VMRS, move FP system register to gp register */
1081
- return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
1082
- } else {
1083
- /* VMSR, move gp register to FP system register */
1084
- return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
1085
- }
1086
-}
1087
-
1088
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
1089
{
1090
TCGv_i32 tmp;
1091
bool ignore_vfp_enabled = false;
1092
1093
if (arm_dc_feature(s, ARM_FEATURE_M)) {
1094
- return gen_M_VMSR_VMRS(s, a);
1095
+ /* M profile version was already handled in m-nocp.decode */
1096
+ return false;
1097
}

if (!dc_isar_feature(aa32_fpsp_v2, s)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
return true;
}

-static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
-{
1106
- arg_vldr_sysreg *a = opaque;
1107
- uint32_t offset = a->imm;
1108
- TCGv_i32 addr;
1109
-
1110
- if (!a->a) {
1111
- offset = -offset;
1112
- }
1113
-
1114
- addr = load_reg(s, a->rn);
1115
- if (a->p) {
1116
- tcg_gen_addi_i32(addr, addr, offset);
1117
- }
1118
-
1119
- if (s->v8m_stackcheck && a->rn == 13 && a->w) {
1120
- gen_helper_v8m_stackcheck(cpu_env, addr);
1121
- }
1122
-
1123
- gen_aa32_st_i32(s, value, addr, get_mem_index(s),
1124
- MO_UL | MO_ALIGN | s->be_data);
1125
- tcg_temp_free_i32(value);
1126
-
1127
- if (a->w) {
1128
- /* writeback */
1129
- if (!a->p) {
1130
- tcg_gen_addi_i32(addr, addr, offset);
1131
- }
1132
- store_reg(s, a->rn, addr);
1133
- } else {
1134
- tcg_temp_free_i32(addr);
1135
- }
1136
-}
1137
-
1138
-static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
1139
-{
1140
- arg_vldr_sysreg *a = opaque;
1141
- uint32_t offset = a->imm;
1142
- TCGv_i32 addr;
1143
- TCGv_i32 value = tcg_temp_new_i32();
1144
-
1145
- if (!a->a) {
1146
- offset = -offset;
1147
- }
1148
-
1149
- addr = load_reg(s, a->rn);
1150
- if (a->p) {
1151
- tcg_gen_addi_i32(addr, addr, offset);
1152
- }
1153
-
1154
- if (s->v8m_stackcheck && a->rn == 13 && a->w) {
1155
- gen_helper_v8m_stackcheck(cpu_env, addr);
1156
- }
1157
-
1158
- gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
1159
- MO_UL | MO_ALIGN | s->be_data);
1160
-
1161
- if (a->w) {
1162
- /* writeback */
1163
- if (!a->p) {
1164
- tcg_gen_addi_i32(addr, addr, offset);
1165
- }
1166
- store_reg(s, a->rn, addr);
1167
- } else {
1168
- tcg_temp_free_i32(addr);
1169
- }
1170
- return value;
1171
-}
1172
-
1173
-static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
1174
-{
1175
- if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
1176
- return false;
1177
- }
1178
- if (a->rn == 15) {
1179
- return false;
1180
- }
1181
- return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
1182
-}
1183
-
1184
-static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
1185
-{
1186
- if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
1187
- return false;
1188
- }
1189
- if (a->rn == 15) {
1190
- return false;
1191
- }
1192
- return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
1193
-}
1194
1195
static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
1196
{
1197
--
2.20.1

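The patch above implements the common sysreg-access logic once and plugs in the data movement through the fp_sysreg_storefn/fp_sysreg_loadfn callbacks. Below is a minimal standalone sketch of that callback pattern; the types and register names here (SysRegs, to_stdout) are hypothetical illustrations, not the QEMU API:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a bank of system registers. */
    typedef struct { uint32_t fpscr, vpr; } SysRegs;

    /* Callback that consumes the value read from a system register. */
    typedef void storefn(void *opaque, uint32_t value);

    /* Common logic is written once; only the destination is pluggable,
     * so the same routine serves "move to GP register" and "store to
     * memory" style consumers. Returns 0 for an unknown register. */
    static int sysreg_read(SysRegs *s, int regno, storefn *store, void *opaque)
    {
        switch (regno) {
        case 0: store(opaque, s->fpscr); return 1;
        case 1: store(opaque, s->vpr);   return 1;
        default: return 0; /* unknown register: caller UNDEFs */
        }
    }

    static void to_stdout(void *opaque, uint32_t value)
    {
        (void)opaque;
        printf("0x%08x\n", (unsigned)value);
    }

    int main(void)
    {
        SysRegs s = { .fpscr = 0xf0000010, .vpr = 0 };
        sysreg_read(&s, 0, to_stdout, NULL); /* one concrete "storefn" */
        return 0;
    }
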
From: Richard Henderson <richard.henderson@linaro.org>

Put most of the value writeback in the same place,
and improve the comment that goes with it.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220501055028.646596-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 28 ++++++++++++----------------
1 file changed, 12 insertions(+), 16 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
*r2 = *r;
r2->name = memcpy(r2 + 1, name, name_len);

- /* Reset the secure state to the specific incoming state. This is
- * necessary as the register may have been defined with both states.
+ /*
+ * Update fields to match the instantiation, overwriting wildcards
+ * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
*/
+ r2->cp = cp;
+ r2->crm = crm;
+ r2->opc1 = opc1;
+ r2->opc2 = opc2;
+ r2->state = state;
r2->secure = secstate;
+ if (opaque) {
+ r2->opaque = opaque;
+ }

if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
/* Register is banked (using both entries in array).
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
#endif
}
}
- if (opaque) {
- r2->opaque = opaque;
- }
- /* reginfo passed to helpers is correct for the actual access,
- * and is never ARM_CP_STATE_BOTH:
- */
- r2->state = state;
- /* Make sure reginfo passed to helpers for wildcarded regs
- * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
- */
- r2->cp = cp;
- r2->crm = crm;
- r2->opc1 = opc1;
- r2->opc2 = opc2;

/* By convention, for wildcarded registers only the first
* entry is used for migration; the others are marked as
* ALIAS so we don't try to transfer the register
--
2.25.1

Implement the MVE VRHADD insn, which performs a rounded halving
addition.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-40-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 8 ++++++++
target/arm/mve.decode | 3 +++
target/arm/mve_helper.c | 6 ++++++
target/arm/translate-mve.c | 2 ++
4 files changed, 19 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqdmullbw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vqdmullth, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vqdmulltw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vrhaddsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhaddsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhaddsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrhaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQRDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
VQDMULLB 111 . 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 1 @2op_sz28
VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28

+VRHADD_S 111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
+VRHADD_U 111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
+
# Vector miscellaneous

VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vshlu, DO_VSHLU)
DO_2OP_S(vrshls, DO_VRSHLS)
DO_2OP_U(vrshlu, DO_VRSHLU)

+#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
+#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)
+
+DO_2OP_S(vrhadds, DO_RHADD_S)
+DO_2OP_U(vrhaddu, DO_RHADD_U)
+
static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
if (val > max) {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQDMLSDH, vqdmlsdh)
DO_2OP(VQDMLSDHX, vqdmlsdhx)
DO_2OP(VQRDMLSDH, vqrdmlsdh)
DO_2OP(VQRDMLSDHX, vqrdmlsdhx)
+DO_2OP(VRHADD_S, vrhadds)
+DO_2OP(VRHADD_U, vrhaddu)

static bool trans_VQDMULLB(DisasContext *s, arg_2op *a)
{
--
2.20.1
These days the Arm architecture has a wide range of fine-grained
optional extra architectural features. We implement quite a lot
of these but by no means all of them. Document what we do implement,
so that users can find out without having to dig through back-issues
of our Changelog on the wiki.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20210617140328.28622-1-peter.maydell@linaro.org
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
docs/system/arm/emulation.rst | 102 ++++++++++++++++++++++++++++++++++
docs/system/target-arm.rst | 6 ++
2 files changed, 108 insertions(+)
create mode 100644 docs/system/arm/emulation.rst

diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/docs/system/arm/emulation.rst
@@ -XXX,XX +XXX,XX @@
+A-profile CPU architecture support
+==================================
+
+QEMU's TCG emulation includes support for the Armv5, Armv6, Armv7 and
+Armv8 versions of the A-profile architecture. It also has support for
+the following architecture extensions:
+
+- FEAT_AA32BF16 (AArch32 BFloat16 instructions)
+- FEAT_AA32HPD (AArch32 hierarchical permission disables)
+- FEAT_AA32I8MM (AArch32 Int8 matrix multiplication instructions)
+- FEAT_AES (AESD and AESE instructions)
+- FEAT_BF16 (AArch64 BFloat16 instructions)
+- FEAT_BTI (Branch Target Identification)
+- FEAT_DIT (Data Independent Timing instructions)
+- FEAT_DPB (DC CVAP instruction)
+- FEAT_DotProd (Advanced SIMD dot product instructions)
+- FEAT_FCMA (Floating-point complex number instructions)
+- FEAT_FHM (Floating-point half-precision multiplication instructions)
+- FEAT_FP16 (Half-precision floating-point data processing)
+- FEAT_FRINTTS (Floating-point to integer instructions)
+- FEAT_FlagM (Flag manipulation instructions v2)
+- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
+- FEAT_HPDS (Hierarchical permission disables)
+- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
+- FEAT_JSCVT (JavaScript conversion instructions)
+- FEAT_LOR (Limited ordering regions)
+- FEAT_LRCPC (Load-acquire RCpc instructions)
+- FEAT_LRCPC2 (Load-acquire RCpc instructions v2)
+- FEAT_LSE (Large System Extensions)
+- FEAT_MTE (Memory Tagging Extension)
+- FEAT_MTE2 (Memory Tagging Extension)
+- FEAT_PAN (Privileged access never)
+- FEAT_PAN2 (AT S1E1R and AT S1E1W instruction variants affected by PSTATE.PAN)
+- FEAT_PAuth (Pointer authentication)
+- FEAT_PMULL (PMULL, PMULL2 instructions)
+- FEAT_PMUv3p1 (PMU Extensions v3.1)
+- FEAT_PMUv3p4 (PMU Extensions v3.4)
+- FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions)
+- FEAT_RNG (Random number generator)
+- FEAT_SB (Speculation Barrier)
+- FEAT_SEL2 (Secure EL2)
+- FEAT_SHA1 (SHA1 instructions)
+- FEAT_SHA256 (SHA256 instructions)
+- FEAT_SHA3 (Advanced SIMD SHA3 instructions)
+- FEAT_SHA512 (Advanced SIMD SHA512 instructions)
+- FEAT_SM3 (Advanced SIMD SM3 instructions)
+- FEAT_SM4 (Advanced SIMD SM4 instructions)
+- FEAT_SPECRES (Speculation restriction instructions)
+- FEAT_SSBS (Speculative Store Bypass Safe)
+- FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain)
+- FEAT_TLBIRANGE (TLB invalidate range instructions)
+- FEAT_TTCNP (Translation table Common not private translations)
+- FEAT_TTST (Small translation tables)
+- FEAT_UAO (Unprivileged Access Override control)
+- FEAT_VHE (Virtualization Host Extensions)
+- FEAT_VMID16 (16-bit VMID)
+- FEAT_XNX (Translation table stage 2 Unprivileged Execute-never)
+- SVE (The Scalable Vector Extension)
+- SVE2 (The Scalable Vector Extension v2)
+
+For information on the specifics of these extensions, please refer
+to the `Armv8-A Arm Architecture Reference Manual
+<https://developer.arm.com/documentation/ddi0487/latest>`_.
+
+When a specific named CPU is being emulated, only those features which
+are present in hardware for that CPU are emulated. (If a feature is
+not in the list above then it is not supported, even if the real
+hardware should have it.) The ``max`` CPU enables all features.
+
+R-profile CPU architecture support
+==================================
+
+QEMU's TCG emulation support for R-profile CPUs is currently limited.
+We emulate only the Cortex-R5 and Cortex-R5F CPUs.
+
+M-profile CPU architecture support
+==================================
+
+QEMU's TCG emulation includes support for Armv6-M, Armv7-M, Armv8-M, and
+Armv8.1-M versions of the M-profile architecture. It also has support
+for the following architecture extensions:
+
+- FP (Floating-point Extension)
+- FPCXT (FPCXT access instructions)
+- HP (Half-precision floating-point instructions)
+- LOB (Low Overhead loops and Branch future)
+- M (Main Extension)
+- MPU (Memory Protection Unit Extension)
+- PXN (Privileged Execute Never)
+- RAS (Reliability, Serviceability and Availability): "minimum RAS Extension" only
+- S (Security Extension)
+- ST (System Timer Extension)
+
+For information on the specifics of these extensions, please refer
+to the `Armv8-M Arm Architecture Reference Manual
+<https://developer.arm.com/documentation/ddi0553/latest>`_.
+
+When a specific named CPU is being emulated, only those features which
+are present in hardware for that CPU are emulated. (If a feature is
+not in the list above then it is not supported, even if the real
+hardware should have it.) There is no equivalent of the ``max`` CPU for
+M-profile.
diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/target-arm.rst
+++ b/docs/system/target-arm.rst
@@ -XXX,XX +XXX,XX @@ undocumented; you can get a complete list by running
arm/virt
arm/xlnx-versal-virt

+Emulated CPU architecture support
+=================================
+
+.. toctree::
+ arm/emulation
+
Arm CPU features
================

--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Bool is a more appropriate type for these variables.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220501055028.646596-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
uint32_t key;
ARMCPRegInfo *r2;
- int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
- int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
+ bool is64 = r->type & ARM_CP_64BIT;
+ bool ns = secstate & ARM_CP_SECSTATE_NS;
int cp = r->cp;
size_t name_len;

--
2.25.1
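The int-to-bool change above leans on C's bool conversion rule: assigning any non-zero value to a bool yields 1, so the explicit `? 1 : 0` normalization becomes unnecessary. A small generic demonstration (independent of the QEMU sources; the flag name is made up):

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_64BIT (1u << 9) /* hypothetical flag bit, for illustration */

    int main(void)
    {
        unsigned type = FLAG_64BIT;

        int  as_int  = (type & FLAG_64BIT) ? 1 : 0; /* manual normalization */
        bool as_bool = type & FLAG_64BIT;           /* implicit: non-zero -> true */

        printf("%d %d\n", as_int, (int)as_bool);    /* prints "1 1" */
        return 0;
    }
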
From: Richard Henderson <richard.henderson@linaro.org>

Computing isbanked only once makes the code
a bit easier to read.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220501055028.646596-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
bool is64 = r->type & ARM_CP_64BIT;
bool ns = secstate & ARM_CP_SECSTATE_NS;
int cp = r->cp;
+ bool isbanked;
size_t name_len;

switch (state) {
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
r2->opaque = opaque;
}

- if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
+ isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
+ if (isbanked) {
/* Register is banked (using both entries in array).
* Overwriting fieldoffset as the array is only used to define
* banked registers but later only fieldoffset is used.
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
}

if (state == ARM_CP_STATE_AA32) {
- if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
+ if (isbanked) {
/* If the register is banked then we don't need to migrate or
* reset the 32-bit instance in certain cases:
*
--
2.25.1

Implement the MVE VCLS insn.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-5-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 4 ++++
target/arm/mve.decode | 1 +
target/arm/mve_helper.c | 7 +++++++
target/arm/translate-mve.c | 1 +
4 files changed, 13 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vstrb_h, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrb_w, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32)

+DEF_HELPER_FLAGS_3(mve_vclsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vclsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vclsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
DEF_HELPER_FLAGS_3(mve_vclzb, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vclzh, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vclzw, TCG_CALL_NO_WG, void, env, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \

# Vector miscellaneous

+VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
VCLZ 1111 1111 1 . 11 .. 00 ... 0 0100 11 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
mve_advance_vpt(env); \
}

+#define DO_CLS_B(N) (clrsb32(N) - 24)
+#define DO_CLS_H(N) (clrsb32(N) - 16)
+
+DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
+DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
+DO_1OP(vclsw, 4, int32_t, clrsb32)
+
#define DO_CLZ_B(N) (clz32(N) - 24)
#define DO_CLZ_H(N) (clz32(N) - 16)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
}

DO_1OP(VCLZ, vclz)
+DO_1OP(VCLS, vcls)
--
2.20.1
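The DO_CLS_B/DO_CLS_H macros above compute count-leading-sign-bits for narrow elements by counting on the sign-extended 32-bit value and subtracting the extra sign-bit copies the widening introduced (24 for bytes, 16 for halfwords). A hedged standalone sketch using the GCC/Clang builtin __builtin_clrsb, which behaves like QEMU's clrsb32 (this is a compiler-specific builtin, not portable ISO C):

    #include <stdint.h>
    #include <stdio.h>

    /* Leading sign bits after the sign bit, for an 8-bit element:
     * count on the 32-bit value, then drop the 24 redundant copies
     * of the sign bit that sign extension added. */
    static int cls8(int8_t n)
    {
        return __builtin_clrsb((int32_t)n) - 24;
    }

    int main(void)
    {
        printf("%d\n", cls8(0));   /* 0b00000000 -> 7 */
        printf("%d\n", cls8(-1));  /* 0b11111111 -> 7 */
        printf("%d\n", cls8(1));   /* 0b00000001 -> 6 */
        printf("%d\n", cls8(-64)); /* 0b11000000 -> 1 */
        return 0;
    }
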
In a CPU with MVE, the VMOV (vector lane to general-purpose register)
and VMOV (general-purpose register to vector lane) insns are not
predicated, but they are subject to beatwise execution if they
are not in an IT block.

Since our implementation always executes all 4 beats in one tick,
this means only that we need to handle PSR.ECI:
* we must do the usual check for bad ECI state
* we must advance ECI state if the insn succeeds
* if ECI says we should not be executing the beat corresponding
  to the lane of the vector register being accessed then we
  should skip performing the move

Note that if PSR.ECI is non-zero then we cannot be in an IT block.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-45-peter.maydell@linaro.org
---
target/arm/translate-a32.h | 2 +
target/arm/translate-mve.c | 4 +-
target/arm/translate-vfp.c | 77 +++++++++++++++++++++++++++++++++++---
3 files changed, 75 insertions(+), 8 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Perform the override check early, so that it is still done
even when we decide to discard an unreachable cpreg.

Use assert not printf+abort.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220501055028.646596-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 22 ++++++++--------------
1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
26
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/translate-a32.h
18
--- a/target/arm/helper.c
28
+++ b/target/arm/translate-a32.h
19
+++ b/target/arm/helper.c
29
@@ -XXX,XX +XXX,XX @@ long neon_full_reg_offset(unsigned reg);
20
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
30
long neon_element_offset(int reg, int element, MemOp memop);
21
g_assert_not_reached();
31
void gen_rev16(TCGv_i32 dest, TCGv_i32 var);
32
void clear_eci_state(DisasContext *s);
33
+bool mve_eci_check(DisasContext *s);
34
+void mve_update_and_store_eci(DisasContext *s);
35
36
static inline TCGv_i32 load_cpu_offset(int offset)
37
{
38
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/arm/translate-mve.c
41
+++ b/target/arm/translate-mve.c
42
@@ -XXX,XX +XXX,XX @@ static bool mve_check_qreg_bank(DisasContext *s, int qmask)
43
return qmask < 8;
44
}
45
46
-static bool mve_eci_check(DisasContext *s)
47
+bool mve_eci_check(DisasContext *s)
48
{
49
/*
50
* This is a beatwise insn: check that ECI is valid (not a
51
@@ -XXX,XX +XXX,XX @@ static void mve_update_eci(DisasContext *s)
52
}
22
}
53
}
23
54
24
+ /* Overriding of an existing definition must be explicitly requested. */
55
-static void mve_update_and_store_eci(DisasContext *s)
25
+ if (!(r->type & ARM_CP_OVERRIDE)) {
56
+void mve_update_and_store_eci(DisasContext *s)
26
+ const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
57
{
27
+ if (oldreg) {
58
/*
28
+ assert(oldreg->type & ARM_CP_OVERRIDE);
59
* For insns which don't call a helper function that will call
60
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/arm/translate-vfp.c
63
+++ b/target/arm/translate-vfp.c
64
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
65
return true;
66
}
67
68
+static bool mve_skip_vmov(DisasContext *s, int vn, int index, int size)
69
+{
70
+ /*
71
+ * In a CPU with MVE, the VMOV (vector lane to general-purpose register)
72
+ * and VMOV (general-purpose register to vector lane) insns are not
73
+ * predicated, but they are subject to beatwise execution if they are
74
+ * not in an IT block.
75
+ *
76
+ * Since our implementation always executes all 4 beats in one tick,
77
+ * this means only that if PSR.ECI says we should not be executing
78
+ * the beat corresponding to the lane of the vector register being
79
+ * accessed then we should skip performing the move, and that we need
80
+ * to do the usual check for bad ECI state and advance of ECI state.
81
+ *
82
+ * Note that if PSR.ECI is non-zero then we cannot be in an IT block.
83
+ *
84
+ * Return true if this VMOV scalar <-> gpreg should be skipped because
85
+ * the MVE PSR.ECI state says we skip the beat where the store happens.
86
+ */
87
+
88
+ /* Calculate the byte offset into Qn which we're going to access */
89
+ int ofs = (index << size) + ((vn & 1) * 8);
90
+
91
+ if (!dc_isar_feature(aa32_mve, s)) {
92
+ return false;
93
+ }
94
+
95
+ switch (s->eci) {
96
+ case ECI_NONE:
97
+ return false;
98
+ case ECI_A0:
99
+ return ofs < 4;
100
+ case ECI_A0A1:
101
+ return ofs < 8;
102
+ case ECI_A0A1A2:
103
+ case ECI_A0A1A2B0:
104
+ return ofs < 12;
105
+ default:
106
+ g_assert_not_reached();
107
+ }
108
+}
109
+
110
static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
111
{
112
/* VMOV scalar to general purpose register */
113
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
114
return false;
115
}
116
117
+ if (dc_isar_feature(aa32_mve, s)) {
118
+ if (!mve_eci_check(s)) {
119
+ return true;
120
+ }
29
+ }
121
+ }
30
+ }
122
+
31
+
123
if (!vfp_access_check(s)) {
32
/* Combine cpreg and name into one allocation. */
124
return true;
33
name_len = strlen(name) + 1;
34
r2 = g_malloc(sizeof(*r2) + name_len);
35
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
36
assert(!raw_accessors_invalid(r2));
125
}
37
}
126
38
127
- tmp = tcg_temp_new_i32();
39
- /* Overriding of an existing definition must be explicitly
128
- read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
40
- * requested.
129
- store_reg(s, a->rt, tmp);
41
- */
130
+ if (!mve_skip_vmov(s, a->vn, a->index, a->size)) {
42
- if (!(r->type & ARM_CP_OVERRIDE)) {
131
+ tmp = tcg_temp_new_i32();
43
- const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
132
+ read_neon_element32(tmp, a->vn, a->index,
44
- if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
133
+ a->size | (a->u ? 0 : MO_SIGN));
45
- fprintf(stderr, "Register redefined: cp=%d %d bit "
134
+ store_reg(s, a->rt, tmp);
46
- "crn=%d crm=%d opc1=%d opc2=%d, "
135
+ }
47
- "was %s, now %s\n", r2->cp, 32 + 32 * is64,
136
48
- r2->crn, r2->crm, r2->opc1, r2->opc2,
137
+ if (dc_isar_feature(aa32_mve, s)) {
49
- oldreg->name, r2->name);
138
+ mve_update_and_store_eci(s);
50
- g_assert_not_reached();
139
+ }
51
- }
140
return true;
52
- }
53
g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
141
}
54
}
142
55
143
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
144
return false;
145
}
146
147
+ if (dc_isar_feature(aa32_mve, s)) {
148
+ if (!mve_eci_check(s)) {
149
+ return true;
150
+ }
151
+ }
152
+
153
if (!vfp_access_check(s)) {
154
return true;
155
}
156
157
- tmp = load_reg(s, a->rt);
158
- write_neon_element32(tmp, a->vn, a->index, a->size);
159
- tcg_temp_free_i32(tmp);
160
+ if (!mve_skip_vmov(s, a->vn, a->index, a->size)) {
161
+ tmp = load_reg(s, a->rt);
162
+ write_neon_element32(tmp, a->vn, a->index, a->size);
163
+ tcg_temp_free_i32(tmp);
164
+ }
165
166
+ if (dc_isar_feature(aa32_mve, s)) {
167
+ mve_update_and_store_eci(s);
168
+ }
169
return true;
170
}
171
172
--
56
--
173
2.20.1
57
2.25.1
174
175
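
A standalone C sketch of the beat-skipping rule that mve_skip_vmov()
implements above, not part of the patch: PSR.ECI records which beats of
a beatwise insn already ran, so a continuation must skip the bytes those
beats covered. The enum mirrors the decoder's names; the function is a
local illustration, not QEMU's.

    #include <stdbool.h>
    #include <stdio.h>

    enum eci { ECI_NONE, ECI_A0, ECI_A0A1, ECI_A0A1A2, ECI_A0A1A2B0 };

    /* Return true if the byte at offset 'ofs' into the 16-byte Q
     * register falls in a beat that ECI says has already executed. */
    static bool beat_already_done(enum eci eci, int ofs)
    {
        switch (eci) {
        case ECI_NONE:     return false;    /* fresh insn: run all beats */
        case ECI_A0:       return ofs < 4;  /* beat 0 (bytes 0..3) done */
        case ECI_A0A1:     return ofs < 8;  /* beats 0-1 done */
        case ECI_A0A1A2:
        case ECI_A0A1A2B0: return ofs < 12; /* beats 0-2 done */
        }
        return false;
    }

    int main(void)
    {
        /* Lane 1 of a 32-bit-element VMOV touches bytes 4..7, i.e.
         * beat 1, so it is skipped once beats 0-1 have run. */
        printf("%d\n", beat_already_done(ECI_A0A1, 1 << 2)); /* prints 1 */
        return 0;
    }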
diff view generated by jsdifflib
In the code for handling VFP system register accesses there is some
stray whitespace after a unary '-' operator, and also some incorrect
indent in a couple of function prototypes. We're about to move this
code to another file, so fix the code style issues first so
checkpatch doesn't complain about the code-movement patch.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210618141019.10671-2-peter.maydell@linaro.org
---
target/arm/translate-vfp.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
}

static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
-
fp_sysreg_loadfn *loadfn,
- void *opaque)
+ void *opaque)
{
/* Do a write to an M-profile floating point system register */
TCGv_i32 tmp;
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
}

static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
- fp_sysreg_storefn *storefn,
- void *opaque)
+ fp_sysreg_storefn *storefn,
+ void *opaque)
{
/* Do a read from an M-profile floating point system register */
TCGv_i32 tmp;
@@ -XXX,XX +XXX,XX @@ static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
TCGv_i32 addr;

if (!a->a) {
- offset = - offset;
+ offset = -offset;
}

addr = load_reg(s, a->rn);
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
TCGv_i32 value = tcg_temp_new_i32();

if (!a->a) {
- offset = - offset;
+ offset = -offset;
}

addr = load_reg(s, a->rn);
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Put the block comments into the current coding style.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220501055028.646596-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.c | 24 +++++++++++++---------
1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
return cpu_list;
}

+/*
+ * Private utility function for define_one_arm_cp_reg_with_opaque():
+ * add a single reginfo struct to the hash table.
+ */
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
void *opaque, CPState state,
CPSecureState secstate,
int crm, int opc1, int opc2,
const char *name)
{
- /* Private utility function for define_one_arm_cp_reg_with_opaque():
- * add a single reginfo struct to the hash table.
- */
uint32_t key;
ARMCPRegInfo *r2;
bool is64 = r->type & ARM_CP_64BIT;
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,

isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
if (isbanked) {
- /* Register is banked (using both entries in array).
+ /*
+ * Register is banked (using both entries in array).
* Overwriting fieldoffset as the array is only used to define
* banked registers but later only fieldoffset is used.
*/
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,

if (state == ARM_CP_STATE_AA32) {
if (isbanked) {
- /* If the register is banked then we don't need to migrate or
+ /*
+ * If the register is banked then we don't need to migrate or
* reset the 32-bit instance in certain cases:
*
* 1) If the register has both 32-bit and 64-bit instances then we
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
r2->type |= ARM_CP_ALIAS;
}
} else if ((secstate != r->secure) && !ns) {
- /* The register is not banked so we only want to allow migration of
- * the non-secure instance.
+ /*
+ * The register is not banked so we only want to allow migration
+ * of the non-secure instance.
*/
r2->type |= ARM_CP_ALIAS;
}
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
}
}

- /* By convention, for wildcarded registers only the first
+ /*
+ * By convention, for wildcarded registers only the first
* entry is used for migration; the others are marked as
* ALIAS so we don't try to transfer the register
* multiple times. Special registers (ie NOP/WFI) are
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
}

- /* Check that raw accesses are either forbidden or handled. Note that
+ /*
+ * Check that raw accesses are either forbidden or handled. Note that
* we can't assert this earlier because the setup of fieldoffset for
* banked registers has to be done first.
*/
--
2.25.1
diff view generated by jsdifflib
Deleted patch
If the guest makes an FPCXT_NS access when the FPU is disabled,
one of two things happens:
* if there is no active FP context, then the insn behaves the
  same way as if the FPU was enabled: writes ignored, reads
  same value as FPDSCR_NS
* if there is an active FP context, then we take a NOCP
  exception

Add code to the sysreg read/write functions which emits
code to take the NOCP exception in the latter case.

At the moment this will never be used, because the NOCP checks in
m-nocp.decode happen first, and so the trans functions are never
called when the FPU is disabled. The code will be needed when we
move the sysreg access insns to before the NOCP patterns in the
following commit.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210618141019.10671-3-peter.maydell@linaro.org
---
target/arm/translate-vfp.c | 32 ++++++++++++++++++++++++++++++--
1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
lab_end = gen_new_label();
/* fpInactive case: write is a NOP, so branch to end */
gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
- /* !fpInactive: PreserveFPState(), and reads same as FPCXT_S */
+ /*
+ * !fpInactive: if FPU disabled, take NOCP exception;
+ * otherwise PreserveFPState(), and then FPCXT_NS writes
+ * behave the same as FPCXT_S writes.
+ */
+ if (s->fp_excp_el) {
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+ syn_uncategorized(), s->fp_excp_el);
+ /*
+ * This was only a conditional exception, so override
+ * gen_exception_insn()'s default to DISAS_NORETURN
+ */
+ s->base.is_jmp = DISAS_NEXT;
+ break;
+ }
gen_preserve_fp_state(s);
/* fall through */
case ARM_VFP_FPCXT_S:
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
tcg_gen_br(lab_end);

gen_set_label(lab_active);
- /* !fpInactive: Reads the same as FPCXT_S, but side effects differ */
+ /*
+ * !fpInactive: if FPU disabled, take NOCP exception;
+ * otherwise PreserveFPState(), and then FPCXT_NS
+ * reads the same as FPCXT_S.
+ */
+ if (s->fp_excp_el) {
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+ syn_uncategorized(), s->fp_excp_el);
+ /*
+ * This was only a conditional exception, so override
+ * gen_exception_insn()'s default to DISAS_NORETURN
+ */
+ s->base.is_jmp = DISAS_NEXT;
+ break;
+ }
gen_preserve_fp_state(s);
tmp = tcg_temp_new_i32();
sfpa = tcg_temp_new_i32();
--
2.20.1
diff view generated by jsdifflib
Deleted patch
Instead of open-coding the "take NOCP exception if FPU disabled,
otherwise call gen_preserve_fp_state()" code in the accessors for
FPCXT_NS, add an argument to vfp_access_check_m() which tells it to
skip the gen_update_fp_context() call, so we can use it for the
FPCXT_NS case.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210618141019.10671-8-peter.maydell@linaro.org
---
target/arm/translate-a32.h | 2 +-
target/arm/translate-m-nocp.c | 10 ++--------
target/arm/translate-vfp.c | 13 ++++++++-----
3 files changed, 11 insertions(+), 14 deletions(-)

diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a32.h
+++ b/target/arm/translate-a32.h
@@ -XXX,XX +XXX,XX @@ bool disas_neon_shared(DisasContext *s, uint32_t insn);
void load_reg_var(DisasContext *s, TCGv_i32 var, int reg);
void arm_gen_condlabel(DisasContext *s);
bool vfp_access_check(DisasContext *s);
-void gen_preserve_fp_state(DisasContext *s);
+bool vfp_access_check_m(DisasContext *s, bool skip_context_update);
void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop);
void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop);
void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop);
diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-m-nocp.c
+++ b/target/arm/translate-m-nocp.c
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
* otherwise PreserveFPState(), and then FPCXT_NS writes
* behave the same as FPCXT_S writes.
*/
- if (s->fp_excp_el) {
- gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
- syn_uncategorized(), s->fp_excp_el);
+ if (!vfp_access_check_m(s, true)) {
/*
* This was only a conditional exception, so override
* gen_exception_insn()'s default to DISAS_NORETURN
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
s->base.is_jmp = DISAS_NEXT;
break;
}
- gen_preserve_fp_state(s);
}
/* fall through */
case ARM_VFP_FPCXT_S:
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
* otherwise PreserveFPState(), and then FPCXT_NS
* reads the same as FPCXT_S.
*/
- if (s->fp_excp_el) {
- gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
- syn_uncategorized(), s->fp_excp_el);
+ if (!vfp_access_check_m(s, true)) {
/*
* This was only a conditional exception, so override
* gen_exception_insn()'s default to DISAS_NORETURN
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
s->base.is_jmp = DISAS_NEXT;
break;
}
- gen_preserve_fp_state(s);
tmp = tcg_temp_new_i32();
sfpa = tcg_temp_new_i32();
fpscr = tcg_temp_new_i32();
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static inline long vfp_f16_offset(unsigned reg, bool top)
* Generate code for M-profile lazy FP state preservation if needed;
* this corresponds to the pseudocode PreserveFPState() function.
*/
-void gen_preserve_fp_state(DisasContext *s)
+static void gen_preserve_fp_state(DisasContext *s)
{
if (s->v7m_lspact) {
/*
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
* If VFP is enabled, do the necessary M-profile lazy-FP handling and then
* return true. If not, emit code to generate an appropriate exception and
* return false.
+ * skip_context_update is true to skip the "update FP context" part of this.
*/
-static bool vfp_access_check_m(DisasContext *s)
+bool vfp_access_check_m(DisasContext *s, bool skip_context_update)
{
if (s->fp_excp_el) {
/*
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_m(DisasContext *s)
/* Trigger lazy-state preservation if necessary */
gen_preserve_fp_state(s);

- /* Update ownership of FP context and create new FP context if needed */
- gen_update_fp_context(s);
+ if (!skip_context_update) {
+ /* Update ownership of FP context and create new FP context if needed */
+ gen_update_fp_context(s);
+ }

return true;
}
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_m(DisasContext *s)
bool vfp_access_check(DisasContext *s)
{
if (arm_dc_feature(s, ARM_FEATURE_M)) {
- return vfp_access_check_m(s);
+ return vfp_access_check_m(s, false);
} else {
return vfp_access_check_a(s, false);
}
--
2.20.1
diff view generated by jsdifflib
Deleted patch
Implement the variants of MVE VLDR (encodings T1, T2) which perform
"widening" loads where bytes or halfwords are loaded from memory and
zero or sign-extended into halfword or word length vector elements,
and the narrowing MVE VSTR (encodings T1, T2) where bytes or
halfwords are stored from halfword or word elements.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-3-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 10 ++++++++++
target/arm/mve.decode | 25 +++++++++++++++++++++++--
target/arm/mve_helper.c | 11 +++++++++++
target/arm/translate-mve.c | 14 ++++++++++++++
4 files changed, 58 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vldrw, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrb, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrh, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vldrb_sh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrb_sw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrb_uh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrb_uw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrh_sw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrh_uw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrb_h, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrb_w, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@

%qd 22:1 13:3

-&vldr_vstr rn qd imm p a w size l
+&vldr_vstr rn qd imm p a w size l u

-@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd
+@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
+# Note that both Rn and Qd are 3 bits only (no D bit)
+@vldst_wn ... u:1 ... . . . . l:1 . rn:3 qd:3 . ... .. imm:7 &vldr_vstr

# Vector loads and stores

+# Widening loads and narrowing stores:
+# for these P=0 W=0 is 'related encoding'; sz=11 is 'related encoding'
+# This means we need to expand out to multiple patterns for P, W, SZ.
+# For stores the U bit must be 0 but we catch that in the trans_ function.
+# The naming scheme here is "VLDSTB_H == in-memory byte load/store to/from
+# signed halfword element in register", etc.
+VLDSTB_H 111 . 110 0 a:1 0 1 . 0 ... ... 0 111 01 ....... @vldst_wn \
+ p=0 w=1 size=1
+VLDSTB_H 111 . 110 1 a:1 0 w:1 . 0 ... ... 0 111 01 ....... @vldst_wn \
+ p=1 size=1
+VLDSTB_W 111 . 110 0 a:1 0 1 . 0 ... ... 0 111 10 ....... @vldst_wn \
+ p=0 w=1 size=2
+VLDSTB_W 111 . 110 1 a:1 0 w:1 . 0 ... ... 0 111 10 ....... @vldst_wn \
+ p=1 size=2
+VLDSTH_W 111 . 110 0 a:1 0 1 . 1 ... ... 0 111 10 ....... @vldst_wn \
+ p=0 w=1 size=2
+VLDSTH_W 111 . 110 1 a:1 0 w:1 . 1 ... ... 0 111 10 ....... @vldst_wn \
+ p=1 size=2
+
# Non-widening loads/stores (P=0 W=0 is 'related encoding')
VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111100 ....... @vldr_vstr \
size=0 p=0 w=1
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

+DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
+DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
+DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
+DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
+DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
+DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)
+
+DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
+DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
+DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
+
#undef DO_VLDR
#undef DO_VSTR
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
};
return do_ldst(s, a, ldstfns[a->size][a->l]);
}
+
+#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST) \
+ static bool trans_##OP(DisasContext *s, arg_VLDR_VSTR *a) \
+ { \
+ static MVEGenLdStFn * const ldstfns[2][2] = { \
+ { gen_helper_mve_##ST, gen_helper_mve_##SLD }, \
+ { NULL, gen_helper_mve_##ULD }, \
+ }; \
+ return do_ldst(s, a, ldstfns[a->u][a->l]); \
+ }
+
+DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h)
+DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w)
+DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w)
--
2.20.1
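
A standalone C sketch of what a "widening" byte load does, not part of
the patch: each byte from memory is sign- or zero-extended into a wider
vector element, which is the only difference from the non-widening forms.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t mem[4] = { 0x01, 0x80, 0x7f, 0xff };
        int16_t s[4];
        uint16_t u[4];

        for (int i = 0; i < 4; i++) {
            s[i] = (int8_t)mem[i];  /* VLDRB.S16: sign-extend */
            u[i] = mem[i];          /* VLDRB.U16: zero-extend */
        }
        /* 0x80 widens to -128 signed but 128 unsigned */
        printf("%d %u\n", s[1], u[1]);
        return 0;
    }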
diff view generated by jsdifflib
Deleted patch
Implement the MVE VCLZ insn (and the necessary machinery
for MVE 1-input vector ops).

Note that for non-load instructions predication is always performed
at a byte level granularity regardless of element size (R_ZLSJ),
and so the masking logic here differs from that used in the VLDR
and VSTR helpers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-4-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 4 ++
target/arm/mve.decode | 8 ++++
target/arm/mve_helper.c | 82 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-mve.c | 38 ++++++++++++++++++
4 files changed, 132 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vldrh_uw, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrb_h, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrb_w, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vclzb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vclzh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vclzw, TCG_CALL_NO_WG, void, env, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
#

%qd 22:1 13:3
+%qm 5:1 1:3

&vldr_vstr rn qd imm p a w size l u
+&1op qd qm size

@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@vldst_wn ... u:1 ... . . . . l:1 . rn:3 qd:3 . ... .. imm:7 &vldr_vstr

+@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
+
# Vector loads and stores

# Widening loads and narrowing stores:
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111101 ....... @vldr_vstr \
size=1 p=1
VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
size=2 p=1
+
+# Vector miscellaneous
+
+VCLZ 1111 1111 1 . 11 .. 00 ... 0 0100 11 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR
+
+/*
+ * The mergemask(D, R, M) macro performs the operation "*D = R" but
+ * storing only the bytes which correspond to 1 bits in M,
+ * leaving other bytes in *D unchanged. We use _Generic
+ * to select the correct implementation based on the type of D.
+ */
+
+static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
+{
+ if (mask & 1) {
+ *d = r;
+ }
+}
+
+static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
+{
+ mergemask_ub((uint8_t *)d, r, mask);
+}
+
+static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
+{
+ uint16_t bmask = expand_pred_b_data[mask & 3];
+ *d = (*d & ~bmask) | (r & bmask);
+}
+
+static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
+{
+ mergemask_uh((uint16_t *)d, r, mask);
+}
+
+static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
+{
+ uint32_t bmask = expand_pred_b_data[mask & 0xf];
+ *d = (*d & ~bmask) | (r & bmask);
+}
+
+static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
+{
+ mergemask_uw((uint32_t *)d, r, mask);
+}
+
+static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
+{
+ uint64_t bmask = expand_pred_b_data[mask & 0xff];
+ *d = (*d & ~bmask) | (r & bmask);
+}
+
+static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
+{
+ mergemask_uq((uint64_t *)d, r, mask);
+}
+
+#define mergemask(D, R, M) \
+ _Generic(D, \
+ uint8_t *: mergemask_ub, \
+ int8_t *: mergemask_sb, \
+ uint16_t *: mergemask_uh, \
+ int16_t *: mergemask_sh, \
+ uint32_t *: mergemask_uw, \
+ int32_t *: mergemask_sw, \
+ uint64_t *: mergemask_uq, \
+ int64_t *: mergemask_sq)(D, R, M)
+
+#define DO_1OP(OP, ESIZE, TYPE, FN) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_CLZ_B(N) (clz32(N) - 24)
+#define DO_CLZ_H(N) (clz32(N) - 16)
+
+DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
+DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
+DO_1OP(vclzw, 4, uint32_t, clz32)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@
#include "decode-mve.c.inc"

typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);

/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h)
DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w)
DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w)
+
+static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
+{
+ TCGv_ptr qd, qm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
+ !fn) {
+ return false;
+ }
+
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qm);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qm);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_1OP(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_1op *a) \
+ { \
+ static MVEGenOneOpFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_1op(s, a, fns[a->size]); \
+ }
+
+DO_1OP(VCLZ, vclz)
--
2.20.1
207
diff view generated by jsdifflib
Deleted patch
1
Implement the MVE instructions VREV16, VREV32 and VREV64.
2
1
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210617121628.20116-6-peter.maydell@linaro.org
6
---
7
target/arm/helper-mve.h | 7 +++++++
8
target/arm/mve.decode | 4 ++++
9
target/arm/mve_helper.c | 7 +++++++
10
target/arm/translate-mve.c | 33 +++++++++++++++++++++++++++++++++
11
4 files changed, 51 insertions(+)
12
13
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper-mve.h
16
+++ b/target/arm/helper-mve.h
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vclsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
18
DEF_HELPER_FLAGS_3(mve_vclzb, TCG_CALL_NO_WG, void, env, ptr, ptr)
19
DEF_HELPER_FLAGS_3(mve_vclzh, TCG_CALL_NO_WG, void, env, ptr, ptr)
20
DEF_HELPER_FLAGS_3(mve_vclzw, TCG_CALL_NO_WG, void, env, ptr, ptr)
21
+
22
+DEF_HELPER_FLAGS_3(mve_vrev16b, TCG_CALL_NO_WG, void, env, ptr, ptr)
23
+DEF_HELPER_FLAGS_3(mve_vrev32b, TCG_CALL_NO_WG, void, env, ptr, ptr)
24
+DEF_HELPER_FLAGS_3(mve_vrev32h, TCG_CALL_NO_WG, void, env, ptr, ptr)
25
+DEF_HELPER_FLAGS_3(mve_vrev64b, TCG_CALL_NO_WG, void, env, ptr, ptr)
26
+DEF_HELPER_FLAGS_3(mve_vrev64h, TCG_CALL_NO_WG, void, env, ptr, ptr)
27
+DEF_HELPER_FLAGS_3(mve_vrev64w, TCG_CALL_NO_WG, void, env, ptr, ptr)
28
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/mve.decode
31
+++ b/target/arm/mve.decode
32
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
33
34
VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
35
VCLZ 1111 1111 1 . 11 .. 00 ... 0 0100 11 . 0 ... 0 @1op
36
+
37
+VREV16 1111 1111 1 . 11 .. 00 ... 0 0001 01 . 0 ... 0 @1op
38
+VREV32 1111 1111 1 . 11 .. 00 ... 0 0000 11 . 0 ... 0 @1op
39
+VREV64 1111 1111 1 . 11 .. 00 ... 0 0000 01 . 0 ... 0 @1op
40
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/mve_helper.c
43
+++ b/target/arm/mve_helper.c
44
@@ -XXX,XX +XXX,XX @@ DO_1OP(vclsw, 4, int32_t, clrsb32)
45
DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
46
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
47
DO_1OP(vclzw, 4, uint32_t, clz32)
48
+
49
+DO_1OP(vrev16b, 2, uint16_t, bswap16)
50
+DO_1OP(vrev32b, 4, uint32_t, bswap32)
51
+DO_1OP(vrev32h, 4, uint32_t, hswap32)
52
+DO_1OP(vrev64b, 8, uint64_t, bswap64)
53
+DO_1OP(vrev64h, 8, uint64_t, hswap64)
54
+DO_1OP(vrev64w, 8, uint64_t, wswap64)
55
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/translate-mve.c
58
+++ b/target/arm/translate-mve.c
59
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
60
61
DO_1OP(VCLZ, vclz)
62
DO_1OP(VCLS, vcls)
63
+
64
+static bool trans_VREV16(DisasContext *s, arg_1op *a)
65
+{
66
+ static MVEGenOneOpFn * const fns[] = {
67
+ gen_helper_mve_vrev16b,
68
+ NULL,
69
+ NULL,
70
+ NULL,
71
+ };
72
+ return do_1op(s, a, fns[a->size]);
73
+}
74
+
75
+static bool trans_VREV32(DisasContext *s, arg_1op *a)
76
+{
77
+ static MVEGenOneOpFn * const fns[] = {
78
+ gen_helper_mve_vrev32b,
79
+ gen_helper_mve_vrev32h,
80
+ NULL,
81
+ NULL,
82
+ };
83
+ return do_1op(s, a, fns[a->size]);
84
+}
85
+
86
+static bool trans_VREV64(DisasContext *s, arg_1op *a)
87
+{
88
+ static MVEGenOneOpFn * const fns[] = {
89
+ gen_helper_mve_vrev64b,
90
+ gen_helper_mve_vrev64h,
91
+ gen_helper_mve_vrev64w,
92
+ NULL,
93
+ };
94
+ return do_1op(s, a, fns[a->size]);
95
+}
96
--
97
2.20.1
98
99
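
A standalone C sketch of one of the swap primitives these helpers rely
on, not part of the patch: hswap32 reverses the two halfwords of a
32-bit value, which is how VREV32.16 reorders 16-bit elements. The
function here is a local illustration, not QEMU's.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hswap32(uint32_t x)
    {
        return (x << 16) | (x >> 16);   /* swap the two 16-bit halves */
    }

    int main(void)
    {
        printf("0x%08x\n", hswap32(0x11223344));   /* prints 0x33441122 */
        return 0;
    }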
diff view generated by jsdifflib
Deleted patch
Implement the MVE VMVN(register) operation. Note that for
predication this operation is byte-by-byte.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-7-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 2 ++
target/arm/mve.decode | 3 +++
target/arm/mve_helper.c | 4 ++++
target/arm/translate-mve.c | 5 +++++
4 files changed, 14 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vrev32h, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vrev64b, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vrev64h, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vrev64w, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vmvn, TCG_CALL_NO_WG, void, env, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
@vldst_wn ... u:1 ... . . . . l:1 . rn:3 qd:3 . ... .. imm:7 &vldr_vstr

@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
+@1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0

# Vector loads and stores

@@ -XXX,XX +XXX,XX @@ VCLZ 1111 1111 1 . 11 .. 00 ... 0 0100 11 . 0 ... 0 @1op
VREV16 1111 1111 1 . 11 .. 00 ... 0 0001 01 . 0 ... 0 @1op
VREV32 1111 1111 1 . 11 .. 00 ... 0 0000 11 . 0 ... 0 @1op
VREV64 1111 1111 1 . 11 .. 00 ... 0 0000 01 . 0 ... 0 @1op
+
+VMVN 1111 1111 1 . 11 00 00 ... 0 0101 11 . 0 ... 0 @1op_nosz
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)
+
+#define DO_NOT(N) (~(N))
+
+DO_1OP(vmvn, 8, uint64_t, DO_NOT)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_1op *a)
};
return do_1op(s, a, fns[a->size]);
}
+
+static bool trans_VMVN(DisasContext *s, arg_1op *a)
+{
+ return do_1op(s, a, gen_helper_mve_vmvn);
+}
--
2.20.1
diff view generated by jsdifflib
Deleted patch
Implement the MVE VABS functions (both integer and floating point).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-8-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 6 ++++++
target/arm/mve.decode | 3 +++
target/arm/mve_helper.c | 13 +++++++++++++
target/arm/translate-mve.c | 15 +++++++++++++++
4 files changed, 37 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vrev64h, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vrev64w, TCG_CALL_NO_WG, void, env, ptr, ptr)

DEF_HELPER_FLAGS_3(mve_vmvn, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vabsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vabsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfabss, TCG_CALL_NO_WG, void, env, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VREV32 1111 1111 1 . 11 .. 00 ... 0 0000 11 . 0 ... 0 @1op
VREV64 1111 1111 1 . 11 .. 00 ... 0 0000 01 . 0 ... 0 @1op

VMVN 1111 1111 1 . 11 00 00 ... 0 0101 11 . 0 ... 0 @1op_nosz
+
+VABS 1111 1111 1 . 11 .. 01 ... 0 0011 01 . 0 ... 0 @1op
+VABS_fp 1111 1111 1 . 11 .. 01 ... 0 0111 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
+#include "tcg/tcg.h"

static uint16_t mve_element_mask(CPUARMState *env)
{
@@ -XXX,XX +XXX,XX @@ DO_1OP(vrev64w, 8, uint64_t, wswap64)
#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)
+
+#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
+#define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
+#define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))
+
+DO_1OP(vabsb, 1, int8_t, DO_ABS)
+DO_1OP(vabsh, 2, int16_t, DO_ABS)
+DO_1OP(vabsw, 4, int32_t, DO_ABS)
+
+/* We can do these 64 bits at a time */
+DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
+DO_1OP(vfabss, 8, uint64_t, DO_FABSS)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)

DO_1OP(VCLZ, vclz)
DO_1OP(VCLS, vcls)
+DO_1OP(VABS, vabs)

static bool trans_VREV16(DisasContext *s, arg_1op *a)
{
@@ -XXX,XX +XXX,XX @@ static bool trans_VMVN(DisasContext *s, arg_1op *a)
{
return do_1op(s, a, gen_helper_mve_vmvn);
}
+
+static bool trans_VABS_fp(DisasContext *s, arg_1op *a)
+{
+ static MVEGenOneOpFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vfabsh,
+ gen_helper_mve_vfabss,
+ NULL,
+ };
+ if (!dc_isar_feature(aa32_mve_fp, s)) {
+ return false;
+ }
+ return do_1op(s, a, fns[a->size]);
+}
--
2.20.1
99
diff view generated by jsdifflib
Deleted patch
1
Implement the MVE VNEG insn (both integer and floating point forms).
2
1
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210617121628.20116-9-peter.maydell@linaro.org
6
---
7
target/arm/helper-mve.h | 6 ++++++
8
target/arm/mve.decode | 2 ++
9
target/arm/mve_helper.c | 12 ++++++++++++
10
target/arm/translate-mve.c | 15 +++++++++++++++
11
4 files changed, 35 insertions(+)
12
13
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper-mve.h
16
+++ b/target/arm/helper-mve.h
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
18
DEF_HELPER_FLAGS_3(mve_vabsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
19
DEF_HELPER_FLAGS_3(mve_vfabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
20
DEF_HELPER_FLAGS_3(mve_vfabss, TCG_CALL_NO_WG, void, env, ptr, ptr)
21
+
22
+DEF_HELPER_FLAGS_3(mve_vnegb, TCG_CALL_NO_WG, void, env, ptr, ptr)
23
+DEF_HELPER_FLAGS_3(mve_vnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
24
+DEF_HELPER_FLAGS_3(mve_vnegw, TCG_CALL_NO_WG, void, env, ptr, ptr)
25
+DEF_HELPER_FLAGS_3(mve_vfnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
26
+DEF_HELPER_FLAGS_3(mve_vfnegs, TCG_CALL_NO_WG, void, env, ptr, ptr)
27
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/mve.decode
30
+++ b/target/arm/mve.decode
31
@@ -XXX,XX +XXX,XX @@ VMVN 1111 1111 1 . 11 00 00 ... 0 0101 11 . 0 ... 0 @1op_nosz
32
33
VABS 1111 1111 1 . 11 .. 01 ... 0 0011 01 . 0 ... 0 @1op
34
VABS_fp 1111 1111 1 . 11 .. 01 ... 0 0111 01 . 0 ... 0 @1op
35
+VNEG 1111 1111 1 . 11 .. 01 ... 0 0011 11 . 0 ... 0 @1op
36
+VNEG_fp 1111 1111 1 . 11 .. 01 ... 0 0111 11 . 0 ... 0 @1op
37
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/mve_helper.c
40
+++ b/target/arm/mve_helper.c
41
@@ -XXX,XX +XXX,XX @@ DO_1OP(vabsw, 4, int32_t, DO_ABS)
42
/* We can do these 64 bits at a time */
43
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
44
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)
45
+
46
+#define DO_NEG(N) (-(N))
47
+#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
48
+#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))
49
+
50
+DO_1OP(vnegb, 1, int8_t, DO_NEG)
51
+DO_1OP(vnegh, 2, int16_t, DO_NEG)
52
+DO_1OP(vnegw, 4, int32_t, DO_NEG)
53
+
54
+/* We can do these 64 bits at a time */
55
+DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
56
+DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
57
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/translate-mve.c
60
+++ b/target/arm/translate-mve.c
61
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
62
DO_1OP(VCLZ, vclz)
63
DO_1OP(VCLS, vcls)
64
DO_1OP(VABS, vabs)
65
+DO_1OP(VNEG, vneg)
66
67
static bool trans_VREV16(DisasContext *s, arg_1op *a)
68
{
69
@@ -XXX,XX +XXX,XX @@ static bool trans_VABS_fp(DisasContext *s, arg_1op *a)
70
}
71
return do_1op(s, a, fns[a->size]);
72
}
73
+
74
+static bool trans_VNEG_fp(DisasContext *s, arg_1op *a)
75
+{
76
+ static MVEGenOneOpFn * const fns[] = {
77
+ NULL,
78
+ gen_helper_mve_vfnegh,
79
+ gen_helper_mve_vfnegs,
80
+ NULL,
81
+ };
82
+ if (!dc_isar_feature(aa32_mve_fp, s)) {
83
+ return false;
84
+ }
85
+ return do_1op(s, a, fns[a->size]);
86
+}
87
--
88
2.20.1
89
90
diff view generated by jsdifflib
Deleted patch
Implement the MVE VDUP insn, which duplicates a value from
a general-purpose register into every lane of a vector
register (subject to predication).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-11-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 2 ++
target/arm/mve.decode | 10 ++++++++++
target/arm/mve_helper.c | 16 ++++++++++++++++
target/arm/translate-mve.c | 27 +++++++++++++++++++++++++++
4 files changed, 55 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vstrb_h, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrb_w, TCG_CALL_NO_WG, void, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32)

+DEF_HELPER_FLAGS_3(mve_vdup, TCG_CALL_NO_WG, void, env, ptr, i32)
+
DEF_HELPER_FLAGS_3(mve_vclsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vclsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vclsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@

%qd 22:1 13:3
%qm 5:1 1:3
+%qn 7:1 17:3

&vldr_vstr rn qd imm p a w size l u
&1op qd qm size
@@ -XXX,XX +XXX,XX @@ VABS 1111 1111 1 . 11 .. 01 ... 0 0011 01 . 0 ... 0 @1op
VABS_fp 1111 1111 1 . 11 .. 01 ... 0 0111 01 . 0 ... 0 @1op
VNEG 1111 1111 1 . 11 .. 01 ... 0 0011 11 . 0 ... 0 @1op
VNEG_fp 1111 1111 1 . 11 .. 01 ... 0 0111 11 . 0 ... 0 @1op
+
+&vdup qd rt size
+# Qd is in the fields usually named Qn
+@vdup .... .... . . .. ... . rt:4 .... . . . . .... qd=%qn &vdup
+
+# B and E bits encode size, which we decode here to the usual size values
+VDUP 1110 1110 1 1 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=0
+VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 1 1 0000 @vdup size=1
+VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
uint64_t *: mergemask_uq, \
int64_t *: mergemask_sq)(D, R, M)

+void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
+{
+ /*
+ * The generated code already replicated an 8 or 16 bit constant
+ * into the 32-bit value, so we only need to write the 32-bit
+ * value to all elements of the Qreg, allowing for predication.
+ */
+ uint32_t *d = vd;
+ uint16_t mask = mve_element_mask(env);
+ unsigned e;
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+ mergemask(&d[H4(e)], val, mask);
+ }
+ mve_advance_vpt(env);
+}
+
#define DO_1OP(OP, ESIZE, TYPE, FN) \
void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
{ \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h)
DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w)
DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w)

+static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
+{
+ TCGv_ptr qd;
+ TCGv_i32 rt;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd)) {
+ return false;
+ }
+ if (a->rt == 13 || a->rt == 15) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ rt = load_reg(s, a->rt);
+ tcg_gen_dup_i32(a->size, rt, rt);
+ gen_helper_mve_vdup(cpu_env, qd, rt);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_i32(rt);
+ mve_update_eci(s);
+ return true;
+}
+
static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
{
TCGv_ptr qd, qm;
--
2.20.1
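
A standalone C sketch of the replication step mentioned in the helper
comment above, not part of the patch: an 8- or 16-bit source value is
broadcast across a 32-bit word before being written to every element,
as tcg_gen_dup_i32() arranges in the generated code.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t dup32(int size, uint32_t val)
    {
        switch (size) {
        case 0: return (val & 0xff)   * 0x01010101u;  /* byte lanes */
        case 1: return (val & 0xffff) * 0x00010001u;  /* half lanes */
        default: return val;                          /* word: as-is */
        }
    }

    int main(void)
    {
        printf("0x%08x\n", dup32(0, 0xab));    /* prints 0xabababab */
        printf("0x%08x\n", dup32(1, 0x1234));  /* prints 0x12341234 */
        return 0;
    }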
diff view generated by jsdifflib
Deleted patch
Implement the MVE vector logical operations operating
on two registers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-12-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 6 ++++++
target/arm/mve.decode | 9 +++++++++
target/arm/mve_helper.c | 26 ++++++++++++++++++++++++++
target/arm/translate-mve.c | 37 +++++++++++++++++++++++++++++++++++++
4 files changed, 78 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(mve_vnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vnegw, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vfnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vfnegs, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vand, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vbic, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vorr, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vorn, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_veor, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@

&vldr_vstr rn qd imm p a w size l u
&1op qd qm size
+&2op qd qm qn size

@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@@ -XXX,XX +XXX,XX @@

@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
@1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
+@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0

# Vector loads and stores

@@ -XXX,XX +XXX,XX @@ VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111101 ....... @vldr_vstr \
VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
size=2 p=1

+# Vector 2-op
+VAND 1110 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VBIC 1110 1111 0 . 01 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VORR 1110 1111 0 . 10 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VORN 1110 1111 0 . 11 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VEOR 1111 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+
# Vector miscellaneous

VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vnegw, 4, int32_t, DO_NEG)
/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
+
+#define DO_2OP(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, \
+ void *vd, void *vn, void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], \
+ FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_AND(N, M) ((N) & (M))
+#define DO_BIC(N, M) ((N) & ~(M))
+#define DO_ORR(N, M) ((N) | (M))
+#define DO_ORN(N, M) ((N) | ~(M))
+#define DO_EOR(N, M) ((N) ^ (M))
+
+DO_2OP(vand, 8, uint64_t, DO_AND)
+DO_2OP(vbic, 8, uint64_t, DO_BIC)
+DO_2OP(vorr, 8, uint64_t, DO_ORR)
+DO_2OP(vorn, 8, uint64_t, DO_ORN)
+DO_2OP(veor, 8, uint64_t, DO_EOR)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@

typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
+typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);

/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -XXX,XX +XXX,XX @@ static bool trans_VNEG_fp(DisasContext *s, arg_1op *a)
}
return do_1op(s, a, fns[a->size]);
}
+
+static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn)
+{
+ TCGv_ptr qd, qn, qm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qn | a->qm) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ qn = mve_qreg_ptr(a->qn);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qn, qm);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qn);
+ tcg_temp_free_ptr(qm);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_LOGIC(INSN, HELPER) \
+ static bool trans_##INSN(DisasContext *s, arg_2op *a) \
+ { \
+ return do_2op(s, a, HELPER); \
+ }
+
+DO_LOGIC(VAND, gen_helper_mve_vand)
+DO_LOGIC(VBIC, gen_helper_mve_vbic)
+DO_LOGIC(VORR, gen_helper_mve_vorr)
+DO_LOGIC(VORN, gen_helper_mve_vorn)
+DO_LOGIC(VEOR, gen_helper_mve_veor)
--
2.20.1
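
A standalone C sketch of the two less common logical ops defined above,
not part of the patch: VBIC is "AND with complement" and VORN is "OR
with complement" of the second operand.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t n = 0xff00ff00ff00ff00ULL, m = 0x0f0f0f0f0f0f0f0fULL;

        printf("0x%016llx\n", (unsigned long long)(n & ~m)); /* VBIC */
        printf("0x%016llx\n", (unsigned long long)(n | ~m)); /* VORN */
        return 0;
    }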
diff view generated by jsdifflib
Deleted patch
Implement the MVE VADD, VSUB and VMUL insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-13-peter.maydell@linaro.org
---
target/arm/helper-mve.h | 12 ++++++++++++
target/arm/mve.decode | 5 +++++
target/arm/mve_helper.c | 14 ++++++++++++++
target/arm/translate-mve.c | 16 ++++++++++++++++
4 files changed, 47 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vbic, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vorr, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vorn, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_veor, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vaddb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vaddh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vaddw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vsubb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vsubh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vsubw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmulb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@

@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
@1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
+@2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0

# Vector loads and stores
@@ -XXX,XX +XXX,XX @@ VORR 1110 1111 0 . 10 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
VORN 1110 1111 0 . 11 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
VEOR 1111 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz

+VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
+VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
+VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
+
# Vector miscellaneous

VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
mve_advance_vpt(env); \
}

+/* provide unsigned 2-op helpers for all sizes */
+#define DO_2OP_U(OP, FN) \
+ DO_2OP(OP##b, 1, uint8_t, FN) \
+ DO_2OP(OP##h, 2, uint16_t, FN) \
+ DO_2OP(OP##w, 4, uint32_t, FN)
+
#define DO_AND(N, M) ((N) & (M))
#define DO_BIC(N, M) ((N) & ~(M))
#define DO_ORR(N, M) ((N) | (M))
@@ -XXX,XX +XXX,XX @@ DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)
+
+#define DO_ADD(N, M) ((N) + (M))
+#define DO_SUB(N, M) ((N) - (M))
+#define DO_MUL(N, M) ((N) * (M))
+
+DO_2OP_U(vadd, DO_ADD)
+DO_2OP_U(vsub, DO_SUB)
+DO_2OP_U(vmul, DO_MUL)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_LOGIC(VBIC, gen_helper_mve_vbic)
DO_LOGIC(VORR, gen_helper_mve_vorr)
DO_LOGIC(VORN, gen_helper_mve_vorn)
DO_LOGIC(VEOR, gen_helper_mve_veor)
+
+#define DO_2OP(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2op *a) \
+ { \
+ static MVEGenTwoOpFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_2op(s, a, fns[a->size]); \
+ }
+
+DO_2OP(VADD, vadd)
+DO_2OP(VSUB, vsub)
+DO_2OP(VMUL, vmul)
--
2.20.1
Deleted patch

Implement the MVE VMULH insn, which performs a vector
multiply and returns the high half of the result.
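As a standalone sketch (invented name, not the helper the patch
adds): for 16-bit lanes the operation widens, multiplies and keeps
the top half, so 0x0100 * 0x0100 = 0x00010000 produces lane value
0x0001.

    #include <stdint.h>

    /* 16-bit high-half multiply; because the inputs are widened to
     * 32 bits first, the same routine works for the signed and
     * unsigned variants.
     */
    static uint16_t mulh16_sketch(int32_t n, int32_t m)
    {
        return (uint32_t)(n * m) >> 16;
    }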
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-14-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  7 +++++++
 target/arm/mve.decode      |  3 +++
 target/arm/mve_helper.c    | 26 ++++++++++++++++++++++++++
 target/arm/translate-mve.c |  2 ++
 4 files changed, 38 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vsubw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmulhsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
 VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
 VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
 
+VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(veor, 8, uint64_t, DO_EOR)
 DO_2OP_U(vadd, DO_ADD)
 DO_2OP_U(vsub, DO_SUB)
 DO_2OP_U(vmul, DO_MUL)
+
+/*
+ * Because the computation type is at least twice as large as required,
+ * these work for both signed and unsigned source types.
+ */
+static inline uint8_t do_mulh_b(int32_t n, int32_t m)
+{
+    return (n * m) >> 8;
+}
+
+static inline uint16_t do_mulh_h(int32_t n, int32_t m)
+{
+    return (n * m) >> 16;
+}
+
+static inline uint32_t do_mulh_w(int64_t n, int64_t m)
+{
+    return (n * m) >> 32;
+}
+
+DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
+DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
+DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
+DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
+DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
+DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_LOGIC(VEOR, gen_helper_mve_veor)
 DO_2OP(VADD, vadd)
 DO_2OP(VSUB, vsub)
 DO_2OP(VMUL, vmul)
+DO_2OP(VMULH_S, vmulhs)
+DO_2OP(VMULH_U, vmulhu)
--
2.20.1

Implement the MVE VQSHL insn (encoding T4, which is the
vector-shift-by-vector version).

The DO_SQSHL_OP and DO_UQSHL_OP macros here are derived from
the neon_helper.c code for qshl_u{8,16,32} and qshl_s{8,16,32}.
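As a standalone sketch of the saturating-shift idea (invented names;
the negative-shift-means-shift-right case of the real insn is left
out): shifting 0x40 left by 2 would give 0x100, which does not fit
in 8 bits, so the result saturates to 0xff and the sticky flag is
set.

    #include <stdbool.h>
    #include <stdint.h>

    /* Unsigned 8-bit saturating left shift (shift assumed small
     * enough that the 64-bit intermediate cannot overflow).
     */
    static uint8_t uqshl8_sketch(uint8_t n, unsigned shift, bool *sat)
    {
        uint64_t r = (uint64_t)n << shift;
        if (r > UINT8_MAX) {
            *sat = true;
            return UINT8_MAX;
        }
        return (uint8_t)r;
    }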
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-33-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++++++
 target/arm/mve.decode      | 12 ++++++++++++
 target/arm/mve_helper.c    | 34 ++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  2 ++
 4 files changed, 56 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_4(mve_vqshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 @2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
 @2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
 
+# The _rev suffix indicates that Vn and Vm are reversed. This is
+# the case for shifts. In the Arm ARM these insns are documented
+# with the Vm and Vn fields in their usual places, but in the
+# assembly the operands are listed "backwards", ie in the order
+# Qd, Qm, Qn where other insns use Qd, Qn, Qm. For QEMU we choose
+# to consider Vm and Vn as being in different fields in the insn.
+# This gives us consistency with A64 and Neon.
+@2op_rev .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qn qn=%qm
+
 @2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
 @2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
 
@@ -XXX,XX +XXX,XX @@ VQADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
 VQSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 
+VQSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
+VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
         mve_advance_vpt(env); \
     }
 
+/* provide unsigned 2-op helpers for all sizes */
+#define DO_2OP_SAT_U(OP, FN) \
+    DO_2OP_SAT(OP##b, 1, uint8_t, FN) \
+    DO_2OP_SAT(OP##h, 2, uint16_t, FN) \
+    DO_2OP_SAT(OP##w, 4, uint32_t, FN)
+
+/* provide signed 2-op helpers for all sizes */
+#define DO_2OP_SAT_S(OP, FN) \
+    DO_2OP_SAT(OP##b, 1, int8_t, FN) \
+    DO_2OP_SAT(OP##h, 2, int16_t, FN) \
+    DO_2OP_SAT(OP##w, 4, int32_t, FN)
+
 #define DO_AND(N, M)  ((N) & (M))
 #define DO_BIC(N, M)  ((N) & ~(M))
 #define DO_ORR(N, M)  ((N) | (M))
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
 DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
 DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
 
+/*
+ * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
+ * and friends wanting a uint32_t* sat and our needing a bool*.
+ */
+#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \
+    ({ \
+        uint32_t su32 = 0; \
+        typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
+        if (su32) { \
+            *satp = true; \
+        } \
+        r; \
+    })
+
+#define DO_SQSHL_OP(N, M, satp) \
+    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
+#define DO_UQSHL_OP(N, M, satp) \
+    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
+
+DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
+DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
+
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                 uint32_t rm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQADD_S, vqadds)
 DO_2OP(VQADD_U, vqaddu)
 DO_2OP(VQSUB_S, vqsubs)
 DO_2OP(VQSUB_U, vqsubu)
+DO_2OP(VQSHL_S, vqshls)
+DO_2OP(VQSHL_U, vqshlu)
 
 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Since e03b56863d2bc, our host endian indicator is unconditionally
set, which means that we can use a normal C condition.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220501055028.646596-20-richard.henderson@linaro.org
[PMM: quote correct git hash in commit message]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
         r2->type |= ARM_CP_ALIAS;
     }
 
-    if (r->state == ARM_CP_STATE_BOTH) {
-#if HOST_BIG_ENDIAN
-        if (r2->fieldoffset) {
-            r2->fieldoffset += sizeof(uint32_t);
-        }
-#endif
+    if (HOST_BIG_ENDIAN &&
+        r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
+        r2->fieldoffset += sizeof(uint32_t);
     }
--
2.25.1

Implement the MVE VMLALDAV insn, which multiplies pairs of integer
elements, accumulating them into a 64-bit result in a pair of
general-purpose registers.
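A rough standalone sketch of the semantics (invented name; all lanes
assumed predicated on, and the "exchange" variants ignored):

    #include <stdint.h>

    /* Multiply corresponding 16-bit elements of two 8-lane vectors
     * and accumulate every product into one 64-bit total, which the
     * insn then writes back as RdaHi:RdaLo.
     */
    static int64_t vmlaldav_sketch(const int16_t *n, const int16_t *m,
                                   int64_t acc)
    {
        for (int e = 0; e < 8; e++) {
            acc += (int64_t)n[e] * m[e];
        }
        return acc;
    }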
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-20-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++
 target/arm/translate.h     | 10 ++++
 target/arm/mve.decode      | 15 ++++++
 target/arm/mve_helper.c    | 34 ++++++++++++++
 target/arm/translate-mve.c | 96 ++++++++++++++++++++++++++++++++++++++
 5 files changed, 163 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmulltsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulltub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulltuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlaldavxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vmlaldavuh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlaldavuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline int negate(DisasContext *s, int x)
     return -x;
 }
 
+static inline int plus_1(DisasContext *s, int x)
+{
+    return x + 1;
+}
+
 static inline int plus_2(DisasContext *s, int x)
 {
     return x + 2;
@@ -XXX,XX +XXX,XX @@ static inline int times_4(DisasContext *s, int x)
     return x * 4;
 }
 
+static inline int times_2_plus_1(DisasContext *s, int x)
+{
+    return x * 2 + 1;
+}
+
 static inline int arm_dc_feature(DisasContext *dc, int feature)
 {
     return (dc->features & (1ULL << feature)) != 0;
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VNEG_fp 1111 1111 1 . 11 .. 01 ... 0 0111 11 . 0 ... 0 @1op
 VDUP 1110 1110 1 1 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=0
 VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 1 1 0000 @vdup size=1
 VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
+
+# multiply-add long dual accumulate
+# rdahi: bits [3:1] from insn, bit 0 is 1
+# rdalo: bits [3:1] from insn, bit 0 is 0
+%rdahi 20:3 !function=times_2_plus_1
+%rdalo 13:3 !function=times_2
+# size bit is 0 for 16 bit, 1 for 32 bit
+%size_16 16:1 !function=plus_1
+
+&vmlaldav rdahi rdalo size qn qm x a
+
+@vmlaldav .... .... . ... ... . ... . .... .... qm:3 . \
+          qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
+VMLALDAV_S 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
+VMLALDAV_U 1111 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_S(vhadds, do_vhadd_s)
 DO_2OP_U(vhaddu, do_vhadd_u)
 DO_2OP_S(vhsubs, do_vhsub_s)
 DO_2OP_U(vhsubu, do_vhsub_u)
+
+
+/*
+ * Multiply add long dual accumulate ops.
+ */
+#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
+    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+                                    void *vm, uint64_t a) \
+    { \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        TYPE *n = vn, *m = vm; \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+            if (mask & 1) { \
+                if (e & 1) { \
+                    a ODDACC \
+                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
+                } else { \
+                    a EVENACC \
+                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
+                } \
+            } \
+        } \
+        mve_advance_vpt(env); \
+        return a; \
+    }
+
+DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
+DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
+DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
+DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)
+
+DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
+DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@
 typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
+typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
 
 /* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
 static inline long mve_qreg_offset(unsigned reg)
@@ -XXX,XX +XXX,XX @@ static void mve_update_eci(DisasContext *s)
     }
 }
 
+static bool mve_skip_first_beat(DisasContext *s)
+{
+    /* Return true if PSR.ECI says we must skip the first beat of this insn */
+    switch (s->eci) {
+    case ECI_NONE:
+        return false;
+    case ECI_A0:
+    case ECI_A0A1:
+    case ECI_A0A1A2:
+    case ECI_A0A1A2B0:
+        return true;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn)
 {
     TCGv_i32 addr;
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMULL_BS, vmullbs)
 DO_2OP(VMULL_BU, vmullbu)
 DO_2OP(VMULL_TS, vmullts)
 DO_2OP(VMULL_TU, vmulltu)
+
+static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
+                             MVEGenDualAccOpFn *fn)
+{
+    TCGv_ptr qn, qm;
+    TCGv_i64 rda;
+    TCGv_i32 rdalo, rdahi;
+
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !mve_check_qreg_bank(s, a->qn | a->qm) ||
+        !fn) {
+        return false;
+    }
+    /*
+     * rdahi == 13 is UNPREDICTABLE; rdahi == 15 is a related
+     * encoding; rdalo always has bit 0 clear so cannot be 13 or 15.
+     */
+    if (a->rdahi == 13 || a->rdahi == 15) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    qn = mve_qreg_ptr(a->qn);
+    qm = mve_qreg_ptr(a->qm);
+
+    /*
+     * This insn is subject to beat-wise execution. Partial execution
+     * of an A=0 (no-accumulate) insn which does not execute the first
+     * beat must start with the current rda value, not 0.
+     */
+    if (a->a || mve_skip_first_beat(s)) {
+        rda = tcg_temp_new_i64();
+        rdalo = load_reg(s, a->rdalo);
+        rdahi = load_reg(s, a->rdahi);
+        tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+        tcg_temp_free_i32(rdalo);
+        tcg_temp_free_i32(rdahi);
+    } else {
+        rda = tcg_const_i64(0);
+    }
+
+    fn(rda, cpu_env, qn, qm, rda);
+    tcg_temp_free_ptr(qn);
+    tcg_temp_free_ptr(qm);
+
+    rdalo = tcg_temp_new_i32();
+    rdahi = tcg_temp_new_i32();
+    tcg_gen_extrl_i64_i32(rdalo, rda);
+    tcg_gen_extrh_i64_i32(rdahi, rda);
+    store_reg(s, a->rdalo, rdalo);
+    store_reg(s, a->rdahi, rdahi);
+    tcg_temp_free_i64(rda);
+    mve_update_eci(s);
+    return true;
+}
+
+static bool trans_VMLALDAV_S(DisasContext *s, arg_vmlaldav *a)
+{
+    static MVEGenDualAccOpFn * const fns[4][2] = {
+        { NULL, NULL },
+        { gen_helper_mve_vmlaldavsh, gen_helper_mve_vmlaldavxsh },
+        { gen_helper_mve_vmlaldavsw, gen_helper_mve_vmlaldavxsw },
+        { NULL, NULL },
+    };
+    return do_long_dual_acc(s, a, fns[a->size][a->x]);
+}
+
+static bool trans_VMLALDAV_U(DisasContext *s, arg_vmlaldav *a)
+{
+    static MVEGenDualAccOpFn * const fns[4][2] = {
+        { NULL, NULL },
+        { gen_helper_mve_vmlaldavuh, NULL },
+        { gen_helper_mve_vmlaldavuw, NULL },
+        { NULL, NULL },
+    };
+    return do_long_dual_acc(s, a, fns[a->size][a->x]);
+}
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220501055028.646596-24-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id)
     return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0;
 }
 
+static inline bool isar_feature_aa32_debugv8p2(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 8;
+}
+
 /*
  * 64-bit feature tests via id registers.
  */
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0;
 }
 
+static inline bool isar_feature_aa64_debugv8p2(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, DEBUGVER) >= 8;
+}
+
 static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0;
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_any_tts2uxn(const ARMISARegisters *id)
     return isar_feature_aa64_tts2uxn(id) || isar_feature_aa32_tts2uxn(id);
 }
 
+static inline bool isar_feature_any_debugv8p2(const ARMISARegisters *id)
+{
+    return isar_feature_aa64_debugv8p2(id) || isar_feature_aa32_debugv8p2(id);
+}
+
 /*
  * Forward to the above feature tests given an ARMCPU pointer.
  */
--
2.25.1

Implement the MVE VRMULH insn, which performs a rounding multiply
and then returns the high half.
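As a standalone sketch (invented name): "rounding" means adding half
of the weight of the discarded low half before truncating, giving
round-to-nearest instead of round-towards-zero.

    #include <stdint.h>

    /* 16-bit rounding high-half multiply, mirroring the shape of the
     * do_rmulh_h() helper the patch adds.
     */
    static uint16_t rmulh16_sketch(int32_t n, int32_t m)
    {
        return (uint32_t)(n * m + (1U << 15)) >> 16;
    }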
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-15-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  7 +++++++
 target/arm/mve.decode      |  3 +++
 target/arm/mve_helper.c    | 22 ++++++++++++++++++++++
 target/arm/translate-mve.c |  2 ++
 4 files changed, 34 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmulhsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulhub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulhuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vmulhuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrmulhsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
 VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
 VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
 
+VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static inline uint32_t do_mulh_w(int64_t n, int64_t m)
     return (n * m) >> 32;
 }
 
+static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
+{
+    return (n * m + (1U << 7)) >> 8;
+}
+
+static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
+{
+    return (n * m + (1U << 15)) >> 16;
+}
+
+static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
+{
+    return (n * m + (1U << 31)) >> 32;
+}
+
 DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
 DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
 DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
 DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
 DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
 DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)
+
+DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
+DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
+DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
+DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
+DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
+DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VSUB, vsub)
 DO_2OP(VMUL, vmul)
 DO_2OP(VMULH_S, vmulhs)
 DO_2OP(VMULH_U, vmulhu)
+DO_2OP(VRMULH_S, vrmulhs)
+DO_2OP(VRMULH_U, vrmulhu)
--
2.20.1

From: Richard Henderson <richard.henderson@linaro.org>

Add the aa64 predicate for detecting RAS support from id registers.
We already have the aa32 version from the M-profile work.
Add the 'any' predicate for testing both aa64 and aa32.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220501055028.646596-34-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2;
 }
 
+static inline bool isar_feature_aa64_ras(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) != 0;
+}
+
 static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_any_debugv8p2(const ARMISARegisters *id)
     return isar_feature_aa64_debugv8p2(id) || isar_feature_aa32_debugv8p2(id);
 }
 
+static inline bool isar_feature_any_ras(const ARMISARegisters *id)
+{
+    return isar_feature_aa64_ras(id) || isar_feature_aa32_ras(id);
+}
+
 /*
  * Forward to the above feature tests given an ARMCPU pointer.
  */
--
2.25.1

Deleted patch

Implement the MVE VMAX and VMIN insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-16-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 14 ++++++++++++++
 target/arm/mve.decode      |  5 +++++
 target/arm/mve_helper.c    | 14 ++++++++++++++
 target/arm/translate-mve.c |  4 ++++
 4 files changed, 37 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vrmulhsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vrmulhub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vrmulhuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vrmulhuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmaxsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vminsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
 VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
 VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
 
+VMAX_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
+VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
+VMIN_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
+VMIN_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
     DO_2OP(OP##h, 2, uint16_t, FN) \
     DO_2OP(OP##w, 4, uint32_t, FN)
 
+/* provide signed 2-op helpers for all sizes */
+#define DO_2OP_S(OP, FN) \
+    DO_2OP(OP##b, 1, int8_t, FN) \
+    DO_2OP(OP##h, 2, int16_t, FN) \
+    DO_2OP(OP##w, 4, int32_t, FN)
+
 #define DO_AND(N, M)  ((N) & (M))
 #define DO_BIC(N, M)  ((N) & ~(M))
 #define DO_ORR(N, M)  ((N) | (M))
@@ -XXX,XX +XXX,XX @@ DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
 DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
 DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
 DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)
+
+#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
+#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))
+
+DO_2OP_S(vmaxs, DO_MAX)
+DO_2OP_U(vmaxu, DO_MAX)
+DO_2OP_S(vmins, DO_MIN)
+DO_2OP_U(vminu, DO_MIN)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMULH_S, vmulhs)
 DO_2OP(VMULH_U, vmulhu)
 DO_2OP(VRMULH_S, vrmulhs)
 DO_2OP(VRMULH_U, vrmulhu)
+DO_2OP(VMAX_S, vmaxs)
+DO_2OP(VMAX_U, vmaxu)
+DO_2OP(VMIN_S, vmins)
+DO_2OP(VMIN_U, vminu)
--
2.20.1

Deleted patch

Implement the MVE VABD insn.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-17-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 7 +++++++
 target/arm/mve.decode      | 3 +++
 target/arm/mve_helper.c    | 5 +++++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 17 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vminsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vminub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vminuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vminuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vabdsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabdsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabdsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabdub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
 VMIN_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
 VMIN_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
 
+VABD_S 111 0 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
+VABD_U 111 1 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_S(vmaxs, DO_MAX)
 DO_2OP_U(vmaxu, DO_MAX)
 DO_2OP_S(vmins, DO_MIN)
 DO_2OP_U(vminu, DO_MIN)
+
+#define DO_ABD(N, M)  ((N) >= (M) ? (N) - (M) : (M) - (N))
+
+DO_2OP_S(vabds, DO_ABD)
+DO_2OP_U(vabdu, DO_ABD)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMAX_S, vmaxs)
 DO_2OP(VMAX_U, vmaxu)
 DO_2OP(VMIN_S, vmins)
 DO_2OP(VMIN_U, vminu)
+DO_2OP(VABD_S, vabds)
+DO_2OP(VABD_U, vabdu)
--
2.20.1

Deleted patch

Implement MVE VHADD and VHSUB insns, which perform an addition
or subtraction and then halve the result.
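As a standalone sketch (invented name): the point of widening before
the add is that the halved result is still correct when the plain
sum would have wrapped.

    #include <stdint.h>

    /* Unsigned 32-bit halving add: (0xffffffff + 0xffffffff) >> 1
     * correctly yields 0xffffffff instead of a wrapped value.
     */
    static uint32_t vhadd32_sketch(uint32_t n, uint32_t m)
    {
        return ((uint64_t)n + m) >> 1;
    }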
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-18-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 14 ++++++++++++++
 target/arm/mve.decode      |  5 +++++
 target/arm/mve_helper.c    | 25 +++++++++++++++++++++++++
 target/arm/translate-mve.c |  4 ++++
 4 files changed, 48 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vabdsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vabdub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vabduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vabduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vhaddsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhaddsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhaddsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vhsubsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMIN_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
 VABD_S 111 0 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
 VABD_U 111 1 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
 
+VHADD_S 111 0 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
+VHADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
+VHSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
+VHSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vminu, DO_MIN)
 
 DO_2OP_S(vabds, DO_ABD)
 DO_2OP_U(vabdu, DO_ABD)
+
+static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
+{
+    return ((uint64_t)n + m) >> 1;
+}
+
+static inline int32_t do_vhadd_s(int32_t n, int32_t m)
+{
+    return ((int64_t)n + m) >> 1;
+}
+
+static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
+{
+    return ((uint64_t)n - m) >> 1;
+}
+
+static inline int32_t do_vhsub_s(int32_t n, int32_t m)
+{
+    return ((int64_t)n - m) >> 1;
+}
+
+DO_2OP_S(vhadds, do_vhadd_s)
+DO_2OP_U(vhaddu, do_vhadd_u)
+DO_2OP_S(vhsubs, do_vhsub_s)
+DO_2OP_U(vhsubu, do_vhsub_u)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMIN_S, vmins)
 DO_2OP(VMIN_U, vminu)
 DO_2OP(VABD_S, vabds)
 DO_2OP(VABD_U, vabdu)
+DO_2OP(VHADD_S, vhadds)
+DO_2OP(VHADD_U, vhaddu)
+DO_2OP(VHSUB_S, vhsubs)
+DO_2OP(VHSUB_U, vhsubu)
--
2.20.1

Deleted patch

Implement the MVE VMULL insn, which multiplies two single
width integer elements to produce a double width result.
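As a standalone sketch (invented name; all lanes assumed predicated
on): the "bottom" form reads the even-numbered input lanes and the
"top" form the odd-numbered ones, producing half as many
double-width result lanes.

    #include <stdint.h>

    /* Signed 16x16->32 VMULL, bottom form, on an 8-lane vector. */
    static void vmullb16_sketch(int32_t d[4], const int16_t *n,
                                const int16_t *m)
    {
        for (int le = 0; le < 4; le++) {
            d[le] = (int32_t)n[le * 2] * m[le * 2];
        }
    }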
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-19-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 14 ++++++++++++++
 target/arm/mve.decode      |  5 +++++
 target/arm/mve_helper.c    | 34 ++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  4 ++++
 4 files changed, 57 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vhsubsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vhsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vhsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vhsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmullbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmulltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VHADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
 VHSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
 VHSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
 
+VMULL_BS 111 0 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
+VMULL_BU 111 1 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
+VMULL_TS 111 0 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
+VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
+
 # Vector miscellaneous
 
 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
     DO_2OP(OP##h, 2, int16_t, FN) \
     DO_2OP(OP##w, 4, int32_t, FN)
 
+/*
+ * "Long" operations where two half-sized inputs (taken from either the
+ * top or the bottom of the input vector) produce a double-width result.
+ * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
+ */
+#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
+    { \
+        LTYPE *d = vd; \
+        TYPE *n = vn, *m = vm; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned le; \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], \
+                         m[H##ESIZE(le * 2 + TOP)]); \
+            mergemask(&d[H##LESIZE(le)], r, mask); \
+        } \
+        mve_advance_vpt(env); \
+    }
+
 #define DO_AND(N, M)  ((N) & (M))
 #define DO_BIC(N, M)  ((N) & ~(M))
 #define DO_ORR(N, M)  ((N) | (M))
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vadd, DO_ADD)
 DO_2OP_U(vsub, DO_SUB)
 DO_2OP_U(vmul, DO_MUL)
 
+DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
+DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
+DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
+DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
+DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
+DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)
+
+DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
+DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
+DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
+DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
+DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
+DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)
+
 /*
  * Because the computation type is at least twice as large as required,
  * these work for both signed and unsigned source types.
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VHADD_S, vhadds)
 DO_2OP(VHADD_U, vhaddu)
 DO_2OP(VHSUB_S, vhsubs)
 DO_2OP(VHSUB_U, vhsubu)
+DO_2OP(VMULL_BS, vmullbs)
+DO_2OP(VMULL_BU, vmullbu)
+DO_2OP(VMULL_TS, vmullts)
+DO_2OP(VMULL_TU, vmulltu)
--
2.20.1

Deleted patch

Implement the MVE insn VMLSLDAV, which multiplies source elements,
alternately adding and subtracting them, and accumulates into a
64-bit result in a pair of general purpose registers.
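A rough standalone sketch (invented name; all lanes assumed
predicated on): products from even-numbered element pairs are added
and products from odd-numbered pairs are subtracted.

    #include <stdint.h>

    static int64_t vmlsldav_sketch(const int16_t *n, const int16_t *m,
                                   int64_t acc)
    {
        for (int e = 0; e < 8; e++) {
            int64_t p = (int64_t)n[e] * m[e];
            acc += (e & 1) ? -p : p;
        }
        return acc;
    }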
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-21-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  5 +++++
 target/arm/mve.decode      |  2 ++
 target/arm/mve_helper.c    |  5 +++++
 target/arm/translate-mve.c | 11 +++++++++++
 4 files changed, 23 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmlaldavxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 
 DEF_HELPER_FLAGS_4(mve_vmlaldavuh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vmlsldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlsldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlsldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlsldavxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
           qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
 VMLALDAV_S 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
 VMLALDAV_U 1111 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
+
+VMLSLDAV 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)
 
 DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
 DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)
+
+DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
+DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
+DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
+DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLALDAV_U(DisasContext *s, arg_vmlaldav *a)
     };
     return do_long_dual_acc(s, a, fns[a->size][a->x]);
 }
+
+static bool trans_VMLSLDAV(DisasContext *s, arg_vmlaldav *a)
+{
+    static MVEGenDualAccOpFn * const fns[4][2] = {
+        { NULL, NULL },
+        { gen_helper_mve_vmlsldavsh, gen_helper_mve_vmlsldavxsh },
+        { gen_helper_mve_vmlsldavsw, gen_helper_mve_vmlsldavxsw },
+        { NULL, NULL },
+    };
+    return do_long_dual_acc(s, a, fns[a->size][a->x]);
+}
--
2.20.1

Deleted patch

Implement the MVE VRMLALDAVH and VRMLSLDAVH insns, which accumulate
the results of a rounded multiply of pairs of elements into a 72-bit
accumulator, returning the top 64 bits in a pair of general purpose
registers.
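A rough standalone sketch of the 72-bit trick (invented name; all
lanes assumed predicated on, and GCC/Clang __int128 standing in for
QEMU's Int128), mirroring what the DO_LDAVH macro below does:

    #include <stdint.h>

    /* Keep the accumulator pre-shifted left by 8 bits so there is
     * room for the per-element rounding term, then hand back the
     * top 64 bits.
     */
    static int64_t vrmlaldavh_sketch(const int32_t *n, const int32_t *m,
                                     int64_t a)
    {
        __int128 acc = (__int128)a << 8;
        for (int e = 0; e < 4; e++) {
            acc += (int64_t)n[e] * m[e];
            acc += 1 << 7;              /* rounding */
        }
        return (int64_t)(acc >> 8);
    }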
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-22-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++++++
 target/arm/mve.decode      |  7 +++++++
 target/arm/mve_helper.c    | 37 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 24 ++++++++++++++++++++++++
 4 files changed, 76 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmlsldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlsldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlsldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlsldavxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vrmlaldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vrmlaldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
 
 @vmlaldav .... .... . ... ... . ... . .... .... qm:3 . \
           qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
+@vmlaldav_nosz .... .... . ... ... . ... . .... .... qm:3 . \
+          qn=%qn rdahi=%rdahi rdalo=%rdalo size=0 &vmlaldav
 VMLALDAV_S 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
 VMLALDAV_U 1111 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 0 @vmlaldav
 
 VMLSLDAV 1110 1110 1 ... ... . ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav
+
+VRMLALDAVH_S 1110 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_nosz
+VRMLALDAVH_U 1111 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_nosz
+
+VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_nosz
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/int128.h"
 #include "cpu.h"
 #include "internals.h"
 #include "vec_internal.h"
@@ -XXX,XX +XXX,XX @@ DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
 DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
 DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
 DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
+
+/*
+ * Rounding multiply add long dual accumulate high: we must keep
+ * a 72-bit internal accumulator value and return the top 64 bits.
+ */
+#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128) \
+    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+                                    void *vm, uint64_t a) \
+    { \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        TYPE *n = vn, *m = vm; \
+        Int128 acc = int128_lshift(TO128(a), 8); \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+            if (mask & 1) { \
+                if (e & 1) { \
+                    acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] * \
+                                            m[H##ESIZE(e)])); \
+                } else { \
+                    acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \
+                                             m[H##ESIZE(e)])); \
+                } \
+                acc = int128_add(acc, 1 << 7); \
+            } \
+        } \
+        mve_advance_vpt(env); \
+        return int128_getlo(int128_rshift(acc, 8)); \
+    }
+
+DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64)
+DO_LDAVH(vrmlaldavhxsw, 4, int32_t, true, int128_add, int128_add, int128_makes64)
+
+DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64)
+
+DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
+DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLSLDAV(DisasContext *s, arg_vmlaldav *a)
     };
     return do_long_dual_acc(s, a, fns[a->size][a->x]);
 }
+
+static bool trans_VRMLALDAVH_S(DisasContext *s, arg_vmlaldav *a)
+{
+    static MVEGenDualAccOpFn * const fns[] = {
+        gen_helper_mve_vrmlaldavhsw, gen_helper_mve_vrmlaldavhxsw,
+    };
+    return do_long_dual_acc(s, a, fns[a->x]);
+}
+
+static bool trans_VRMLALDAVH_U(DisasContext *s, arg_vmlaldav *a)
+{
+    static MVEGenDualAccOpFn * const fns[] = {
+        gen_helper_mve_vrmlaldavhuw, NULL,
+    };
+    return do_long_dual_acc(s, a, fns[a->x]);
+}
+
+static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
+{
+    static MVEGenDualAccOpFn * const fns[] = {
+        gen_helper_mve_vrmlsldavhsw, gen_helper_mve_vrmlsldavhxsw,
+    };
+    return do_long_dual_acc(s, a, fns[a->x]);
+}
--
2.20.1

diff view generated by jsdifflib
Deleted patch
1
Implement the scalar form of the MVE VADD insn. This takes the
2
scalar operand from a general purpose register.
3
1
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210617121628.20116-23-peter.maydell@linaro.org
7
---
8
target/arm/helper-mve.h | 4 ++++
9
target/arm/mve.decode | 7 ++++++
10
target/arm/mve_helper.c | 22 +++++++++++++++++++
11
target/arm/translate-mve.c | 45 ++++++++++++++++++++++++++++++++++++++
12
4 files changed, 78 insertions(+)
13
14
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-mve.h
17
+++ b/target/arm/helper-mve.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vmulltub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
19
DEF_HELPER_FLAGS_4(mve_vmulltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
20
DEF_HELPER_FLAGS_4(mve_vmulltuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
21
22
+DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
23
+DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
24
+DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
25
+
26
DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
27
DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
28
DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
29
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/mve.decode
32
+++ b/target/arm/mve.decode
33
@@ -XXX,XX +XXX,XX @@
34
&vldr_vstr rn qd imm p a w size l u
35
&1op qd qm size
36
&2op qd qm qn size
37
+&2scalar qd qn rm size
38
39
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
40
# Note that both Rn and Qd are 3 bits only (no D bit)
41
@@ -XXX,XX +XXX,XX @@
42
@2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
43
@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
44
45
+@2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
46
+
47
# Vector loads and stores
48
49
# Widening loads and narrowing stores:
50
@@ -XXX,XX +XXX,XX @@ VRMLALDAVH_S 1110 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_no
51
VRMLALDAVH_U 1111 1110 1 ... ... 0 ... x:1 1111 . 0 a:1 0 ... 0 @vmlaldav_nosz
52
53
VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_nosz
54
+
55
+# Scalar operations
56
+
57
+VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
58
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/mve_helper.c
61
+++ b/target/arm/mve_helper.c
62
@@ -XXX,XX +XXX,XX @@ DO_2OP_S(vhsubs, do_vhsub_s)
63
DO_2OP_U(vhsubu, do_vhsub_u)
64
65
66
+#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
67
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
68
+ uint32_t rm) \
69
+ { \
70
+ TYPE *d = vd, *n = vn; \
71
+ TYPE m = rm; \
72
+ uint16_t mask = mve_element_mask(env); \
73
+ unsigned e; \
74
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
75
+ mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \
76
+ } \
77
+ mve_advance_vpt(env); \
78
+ }
79
+
80
+/* provide unsigned 2-op scalar helpers for all sizes */
81
+#define DO_2OP_SCALAR_U(OP, FN) \
82
+ DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
83
+ DO_2OP_SCALAR(OP##h, 2, uint16_t, FN) \
84
+ DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
85
+
86
+DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
87
+
88
/*
89
* Multiply add long dual accumulate ops.
90
*/
91
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
92
index XXXXXXX..XXXXXXX 100644
93
--- a/target/arm/translate-mve.c
94
+++ b/target/arm/translate-mve.c
95
@@ -XXX,XX +XXX,XX @@
96
typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
97
typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
98
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
99
+typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
100
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
101
102
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
103
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMULL_BU, vmullbu)
104
DO_2OP(VMULL_TS, vmullts)
105
DO_2OP(VMULL_TU, vmulltu)
106
107
+static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
108
+ MVEGenTwoOpScalarFn fn)
109
+{
110
+ TCGv_ptr qd, qn;
111
+ TCGv_i32 rm;
112
+
113
+ if (!dc_isar_feature(aa32_mve, s) ||
114
+ !mve_check_qreg_bank(s, a->qd | a->qn) ||
115
+ !fn) {
116
+ return false;
117
+ }
118
+ if (a->rm == 13 || a->rm == 15) {
119
+ /* UNPREDICTABLE */
120
+ return false;
121
+ }
122
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
123
+ return true;
124
+ }
125
+
126
+ qd = mve_qreg_ptr(a->qd);
127
+ qn = mve_qreg_ptr(a->qn);
128
+ rm = load_reg(s, a->rm);
129
+ fn(cpu_env, qd, qn, rm);
130
+ tcg_temp_free_i32(rm);
131
+ tcg_temp_free_ptr(qd);
132
+ tcg_temp_free_ptr(qn);
133
+ mve_update_eci(s);
134
+ return true;
135
+}
136
+
137
+#define DO_2OP_SCALAR(INSN, FN) \
138
+ static bool trans_##INSN(DisasContext *s, arg_2scalar *a) \
139
+ { \
140
+ static MVEGenTwoOpScalarFn * const fns[] = { \
141
+ gen_helper_mve_##FN##b, \
142
+ gen_helper_mve_##FN##h, \
143
+ gen_helper_mve_##FN##w, \
144
+ NULL, \
145
+ }; \
146
+ return do_2op_scalar(s, a, fns[a->size]); \
147
+ }
148
+
149
+DO_2OP_SCALAR(VADD_scalar, vadd_scalar)
150
+
151
static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
152
MVEGenDualAccOpFn *fn)
153
{
154
--
155
2.20.1
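A side note on the predication plumbing used by DO_2OP_SCALAR above: mergemask() updates only those bytes of each destination element whose predicate mask bits are set. The following is a minimal standalone C sketch of that byte-lane merge for one 4-byte element (illustrative only: the name merge32 and the values are mine, and QEMU's real mergemask() additionally handles host byte order via the H macros):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative model of mergemask() for one uint32_t element:
 * each of the low 4 bits of 'mask' enables one byte lane of the
 * result; disabled lanes keep the old destination value.
 */
static uint32_t merge32(uint32_t old, uint32_t val, unsigned mask)
{
    uint32_t r = old;
    unsigned i;

    for (i = 0; i < 4; i++) {
        if (mask & (1u << i)) {
            r &= ~(0xffu << (i * 8));
            r |= val & (0xffu << (i * 8));
        }
    }
    return r;
}

int main(void)
{
    /* predicate enables only the low two byte lanes of the element */
    printf("%#x\n", merge32(0xaabbccdd, 0x11223344, 0x3)); /* 0xaabb3344 */
    return 0;
}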
Implement the scalar forms of the MVE VSUB and VMUL insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-24-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 8 ++++++++
 target/arm/mve.decode      | 2 ++
 target/arm/mve_helper.c    | 2 ++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 14 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(mve_vsub_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsub_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsub_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmul_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmul_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmul_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VRMLSLDAVH 1111 1110 1 ... ... 0 ... x:1 1110 . 0 a:1 0 ... 1 @vmlaldav_no
 # Scalar operations

 VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
+VSUB_scalar 1110 1110 0 . .. ... 1 ... 1 1111 . 100 .... @2scalar
+VMUL_scalar 1110 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vhsubu, do_vhsub_u)
     DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)

 DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
+DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
+DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)

 /*
  * Multiply add long dual accumulate ops.
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
 }

 DO_2OP_SCALAR(VADD_scalar, vadd_scalar)
+DO_2OP_SCALAR(VSUB_scalar, vsub_scalar)
+DO_2OP_SCALAR(VMUL_scalar, vmul_scalar)

 static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
                              MVEGenDualAccOpFn *fn)
--
2.20.1
Implement the MVE VBRSR insn, which reverses a specified
number of bits in each element, setting the rest to zero.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-26-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  4 ++++
 target/arm/mve.decode      |  1 +
 target/arm/mve_helper.c    | 43 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  1 +
 4 files changed, 49 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vhsubu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vhsubu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vhsubu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(mve_vbrsrb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vbrsrh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vbrsrw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
 VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
 VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
 VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
+VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
 DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
 DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

+static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
+{
+    m &= 0xff;
+    if (m == 0) {
+        return 0;
+    }
+    n = revbit8(n);
+    if (m < 8) {
+        n >>= 8 - m;
+    }
+    return n;
+}
+
+static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
+{
+    m &= 0xff;
+    if (m == 0) {
+        return 0;
+    }
+    n = revbit16(n);
+    if (m < 16) {
+        n >>= 16 - m;
+    }
+    return n;
+}
+
+static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
+{
+    m &= 0xff;
+    if (m == 0) {
+        return 0;
+    }
+    n = revbit32(n);
+    if (m < 32) {
+        n >>= 32 - m;
+    }
+    return n;
+}
+
+DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
+DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
+DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
+
 /*
  * Multiply add long dual accumulate ops.
  */
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR(VHADD_S_scalar, vhadds_scalar)
 DO_2OP_SCALAR(VHADD_U_scalar, vhaddu_scalar)
 DO_2OP_SCALAR(VHSUB_S_scalar, vhsubs_scalar)
 DO_2OP_SCALAR(VHSUB_U_scalar, vhsubu_scalar)
+DO_2OP_SCALAR(VBRSR, vbrsr)

 static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
                              MVEGenDualAccOpFn *fn)
--
2.20.1
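To make the VBRSR helpers above concrete: a standalone C sketch of the per-element operation, written as a plain loop rather than revbit8() plus shift (the function brsr8 and the test value are illustrative, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only: VBRSR on one byte element. Reverse the
 * least significant m bits of n; bits at or above position m become 0.
 * Counts larger than the element width behave like the full width,
 * matching the revbit8() + shift formulation in the patch.
 */
static uint8_t brsr8(uint8_t n, uint32_t m)
{
    uint8_t r = 0;
    unsigned i;

    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    if (m > 8) {
        m = 8;
    }
    for (i = 0; i < m; i++) {
        r = (r << 1) | ((n >> i) & 1);
    }
    return r;
}

int main(void)
{
    /* 0x0b = 0b00001011; reversing its low 4 bits gives 0b1101 = 0xd */
    printf("%#x\n", (unsigned)brsr8(0x0b, 4));
    return 0;
}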
Implement the MVE VPST insn, which sets the predicate mask
fields in the VPR to the immediate value encoded in the insn.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-27-peter.maydell@linaro.org
---
 target/arm/mve.decode      |  4 +++
 target/arm/translate-mve.c | 59 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+)

diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
 VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
 VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
 VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
+
+# Predicate operations
+%mask_22_13 22:1 13:3
+VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static void mve_update_eci(DisasContext *s)
     }
 }

+static void mve_update_and_store_eci(DisasContext *s)
+{
+    /*
+     * For insns which don't call a helper function that will call
+     * mve_advance_vpt(), this version updates s->eci and also stores
+     * it out to the CPUState field.
+     */
+    if (s->eci) {
+        mve_update_eci(s);
+        store_cpu_field(tcg_constant_i32(s->eci << 4), condexec_bits);
+    }
+}
+
 static bool mve_skip_first_beat(DisasContext *s)
 {
     /* Return true if PSR.ECI says we must skip the first beat of this insn */
@@ -XXX,XX +XXX,XX @@ static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
     };
     return do_long_dual_acc(s, a, fns[a->x]);
 }
+
+static bool trans_VPST(DisasContext *s, arg_VPST *a)
+{
+    TCGv_i32 vpr;
+
+    /* mask == 0 is a "related encoding" */
+    if (!dc_isar_feature(aa32_mve, s) || !a->mask) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+    /*
+     * Set the VPR mask fields. We take advantage of MASK01 and MASK23
+     * being adjacent fields in the register.
+     *
+     * This insn is not predicated, but it is subject to beat-wise
+     * execution, and the mask is updated on the odd-numbered beats.
+     * So if PSR.ECI says we should skip beat 1, we mustn't update the
+     * 01 mask field.
+     */
+    vpr = load_cpu_field(v7m.vpr);
+    switch (s->eci) {
+    case ECI_NONE:
+    case ECI_A0:
+        /* Update both 01 and 23 fields */
+        tcg_gen_deposit_i32(vpr, vpr,
+                            tcg_constant_i32(a->mask | (a->mask << 4)),
+                            R_V7M_VPR_MASK01_SHIFT,
+                            R_V7M_VPR_MASK01_LENGTH + R_V7M_VPR_MASK23_LENGTH);
+        break;
+    case ECI_A0A1:
+    case ECI_A0A1A2:
+    case ECI_A0A1A2B0:
+        /* Update only the 23 mask field */
+        tcg_gen_deposit_i32(vpr, vpr,
+                            tcg_constant_i32(a->mask),
+                            R_V7M_VPR_MASK23_SHIFT, R_V7M_VPR_MASK23_LENGTH);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    store_cpu_field(vpr, v7m.vpr);
+    mve_update_and_store_eci(s);
+    return true;
+}
--
2.20.1
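For readers tracing the two tcg_gen_deposit_i32() calls in trans_VPST above, here is the same bit manipulation in plain C. This is an illustrative sketch under the assumption (from the v8.1-M VPR layout) that P0 occupies bits [15:0], MASK01 bits [19:16] and MASK23 bits [23:20]; the macro and function names are mine:

#include <stdint.h>
#include <stdio.h>

/* Assumed VPR layout: P0 [15:0], MASK01 [19:16], MASK23 [23:20] */
#define MASK01_SHIFT 16
#define MASK23_SHIFT 20

/* ECI_NONE/ECI_A0: deposit mask into MASK01 and MASK23 in one go */
static uint32_t vpst_both(uint32_t vpr, uint32_t mask)
{
    vpr &= ~(0xffu << MASK01_SHIFT);
    vpr |= (mask | (mask << 4)) << MASK01_SHIFT;
    return vpr;
}

/* later ECI states: beat 1 already executed, so leave MASK01 alone */
static uint32_t vpst_23_only(uint32_t vpr, uint32_t mask)
{
    vpr &= ~(0xfu << MASK23_SHIFT);
    vpr |= mask << MASK23_SHIFT;
    return vpr;
}

int main(void)
{
    printf("%#x\n", vpst_both(0, 0x8));     /* 0x880000 */
    printf("%#x\n", vpst_23_only(0, 0x8));  /* 0x800000 */
    return 0;
}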
Implement the MVE VQADD and VQSUB insns, which perform saturating
addition or subtraction of a scalar to each element. Note that
individual bytes of each result element are used or discarded
according to the predicate mask, but FPSCR.QC is only set if the
predicate mask for the lowest byte of the element is set.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-28-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 16 ++++++++++
 target/arm/mve.decode      |  5 +++
 target/arm/mve_helper.c    | 62 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  4 +++
 4 files changed, 87 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vhsubu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vhsubu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vhsubu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(mve_vqadds_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqadds_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqadds_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqaddu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqaddu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqaddu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqsubs_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqsubs_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqsubs_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqsubu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqsubu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqsubu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vbrsrb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vbrsrh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vbrsrw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
 VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
 VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
 VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
+
+VQADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
+VQADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
+VQSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+VQSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
 VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar

 # Predicate operations
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vhaddu, do_vhadd_u)
 DO_2OP_S(vhsubs, do_vhsub_s)
 DO_2OP_U(vhsubu, do_vhsub_u)

+static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
+{
+    if (val > max) {
+        *s = true;
+        return max;
+    } else if (val < min) {
+        *s = true;
+        return min;
+    }
+    return val;
+}
+
+#define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s)
+#define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s)
+#define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s)
+
+#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s)
+#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s)
+#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s)
+
+#define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s)
+#define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s)
+#define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s)
+
+#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s)
+#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
+#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)

 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vhsubu, do_vhsub_u)
         mve_advance_vpt(env); \
     }

+#define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+                                uint32_t rm) \
+    { \
+        TYPE *d = vd, *n = vn; \
+        TYPE m = rm; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        bool qc = false; \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+            bool sat = false; \
+            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \
+                      mask); \
+            qc |= sat & mask & 1; \
+        } \
+        if (qc) { \
+            env->vfp.qc[0] = qc; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
 /* provide unsigned 2-op scalar helpers for all sizes */
 #define DO_2OP_SCALAR_U(OP, FN) \
     DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
 DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
 DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

+DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
+DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
+DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
+DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
+DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
+DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)
+
+DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
+DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
+DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
+DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
+DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
+DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)
+
 static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
 {
     m &= 0xff;
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR(VHADD_S_scalar, vhadds_scalar)
 DO_2OP_SCALAR(VHADD_U_scalar, vhaddu_scalar)
 DO_2OP_SCALAR(VHSUB_S_scalar, vhsubs_scalar)
 DO_2OP_SCALAR(VHSUB_U_scalar, vhsubu_scalar)
+DO_2OP_SCALAR(VQADD_S_scalar, vqadds_scalar)
+DO_2OP_SCALAR(VQADD_U_scalar, vqaddu_scalar)
+DO_2OP_SCALAR(VQSUB_S_scalar, vqsubs_scalar)
+DO_2OP_SCALAR(VQSUB_U_scalar, vqsubu_scalar)
 DO_2OP_SCALAR(VBRSR, vbrsr)

 static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
                              MVEGenDualAccOpFn *fn)
--
2.20.1
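The do_sat_bhw() pattern above is easy to check in isolation: widen to 64 bits, where the true result is always representable, then clamp. A minimal sketch for the int8_t DO_SQADD_B case (function name and values are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: saturating signed byte add. The 64-bit
 * intermediate guarantees the exact sum exists before clamping,
 * which is the same trick the DO_SQADD_*/DO_UQADD_* macros use.
 */
static int8_t sqadd8(int8_t n, int8_t m, bool *sat)
{
    int64_t r = (int64_t)n + m;

    if (r > INT8_MAX) {
        *sat = true;
        return INT8_MAX;
    }
    if (r < INT8_MIN) {
        *sat = true;
        return INT8_MIN;
    }
    return r;
}

int main(void)
{
    bool sat = false;
    /* 100 + 100 saturates to 127; in MVE this would set FPSCR.QC */
    printf("%d (sat=%d)\n", sqadd8(100, 100, &sat), sat);
    return 0;
}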
Implement the MVE VQDMULH and VQRDMULH scalar insns, which multiply
elements by the scalar, double, possibly round, take the high half
and saturate.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-29-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++++++
 target/arm/mve.decode      |  3 +++
 target/arm/mve_helper.c    | 25 +++++++++++++++++++++++++
 target/arm/translate-mve.c |  2 ++
 4 files changed, 38 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqsubu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqsubu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vqsubu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(mve_vqdmulh_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmulh_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmulh_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmulh_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmulh_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmulh_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vbrsrb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vbrsrh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vbrsrw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
 VQSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
 VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar

+VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
+VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
+
 # Predicate operations
 %mask_22_13 22:1 13:3
 VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 #define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
 #define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)

+/*
+ * For QDMULH and QRDMULH we simplify "double and shift by esize" into
+ * "shift by esize-1", adjusting the QRDMULH rounding constant to match.
+ */
+#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \
+                                        INT8_MIN, INT8_MAX, s)
+#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \
+                                        INT16_MIN, INT16_MAX, s)
+#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \
+                                        INT32_MIN, INT32_MAX, s)
+
+#define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \
+                                         INT8_MIN, INT8_MAX, s)
+#define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \
+                                         INT16_MIN, INT16_MAX, s)
+#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
+                                         INT32_MIN, INT32_MAX, s)
+
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                 uint32_t rm) \
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
 DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
 DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

+DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
+DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
+DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
+DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
+DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
+DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
+
 static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
 {
     m &= 0xff;
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR(VQADD_S_scalar, vqadds_scalar)
 DO_2OP_SCALAR(VQADD_U_scalar, vqaddu_scalar)
 DO_2OP_SCALAR(VQSUB_S_scalar, vqsubs_scalar)
 DO_2OP_SCALAR(VQSUB_U_scalar, vqsubu_scalar)
+DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar)
+DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
 DO_2OP_SCALAR(VBRSR, vbrsr)

 static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
--
2.20.1
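The helper comment about folding "double and shift by esize" into "shift by esize-1" is the identity (2*x) >> k == x >> (k-1), with the rounding constant scaled down to match. A quick standalone check for halfword elements (illustrative values; this assumes the usual arithmetic right shift of signed values, as QEMU does):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int16_t n = 12345, m = -23456;
    int64_t p = (int64_t)n * m;

    /* double-then-shift-by-16 equals shift-by-15 */
    printf("%lld %lld\n",
           (long long)((2 * p) >> 16),
           (long long)(p >> 15));
    /* the QRDMULH rounding constant scales the same way:
     * +(1 << 15) before >> 16 becomes +(1 << 14) before >> 15 */
    printf("%lld %lld\n",
           (long long)((2 * p + (1 << 15)) >> 16),
           (long long)((p + (1 << 14)) >> 15));
    return 0;
}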
Implement the MVE VQDMULL scalar insn. This multiplies the top or
bottom half of each element by the scalar, doubles and saturates
to a double-width result.

Note that this encoding overlaps with VQADD and VQSUB; it uses
what in VQADD and VQSUB would be the 'size=0b11' encoding.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-30-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  5 +++
 target/arm/mve.decode      | 23 +++++++++++---
 target/arm/mve_helper.c    | 65 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 30 ++++++++++++++++++
 4 files changed, 119 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vbrsrb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vbrsrh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vbrsrw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
 DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 %qm 5:1 1:3
 %qn 7:1 17:3

+# VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
+%size_28 28:1 !function=plus_1
+
 &vldr_vstr rn qd imm p a w size l u
 &1op qd qm size
 &2op qd qm qn size
@@ -XXX,XX +XXX,XX @@
 @2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0

 @2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
+@2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn

 # Vector loads and stores

@@ -XXX,XX +XXX,XX @@ VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
 VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
 VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar

-VQADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
-VQADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
-VQSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
-VQSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+{
+  VQADD_S_scalar  1110 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
+  VQADD_U_scalar  1111 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
+  VQDMULLB_scalar 111 . 1110 0 . 11 ... 0 ... 0 1111 . 110 .... @2scalar_nosz \
+                  size=%size_28
+}
+
+{
+  VQSUB_S_scalar  1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+  VQSUB_U_scalar  1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+  VQDMULLT_scalar 111 . 1110 0 . 11 ... 0 ... 1 1111 . 110 .... @2scalar_nosz \
+                  size=%size_28
+}
+
 VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar

 VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
 VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar

+
 # Predicate operations
 %mask_22_13 22:1 13:3
 VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
 DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
 DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

+/*
+ * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
+ * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
+ * SATMASK specifies which bits of the predicate mask matter for determining
+ * whether to propagate a saturation indication into FPSCR.QC -- for
+ * the 16x16->32 case we must check only the bit corresponding to the T or B
+ * half that we used, but for the 32x32->64 case we propagate if the mask
+ * bit is set for either half.
+ */
+#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+                                uint32_t rm) \
+    { \
+        LTYPE *d = vd; \
+        TYPE *n = vn; \
+        TYPE m = rm; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned le; \
+        bool qc = false; \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            bool sat = false; \
+            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \
+            mergemask(&d[H##LESIZE(le)], r, mask); \
+            qc |= sat && (mask & SATMASK); \
+        } \
+        if (qc) { \
+            env->vfp.qc[0] = qc; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
+{
+    int64_t r = ((int64_t)n * m) * 2;
+    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
+}
+
+static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
+{
+    /* The multiply can't overflow, but the doubling might */
+    int64_t r = (int64_t)n * m;
+    if (r > INT64_MAX / 2) {
+        *sat = true;
+        return INT64_MAX;
+    } else if (r < INT64_MIN / 2) {
+        *sat = true;
+        return INT64_MIN;
+    } else {
+        return r * 2;
+    }
+}
+
+#define SATMASK16B 1
+#define SATMASK16T (1 << 2)
+#define SATMASK32 ((1 << 4) | 1)
+
+DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
+                    do_qdmullh, SATMASK16B)
+DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
+                    do_qdmullw, SATMASK32)
+DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
+                    do_qdmullh, SATMASK16T)
+DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
+                    do_qdmullw, SATMASK32)
+
 static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
 {
     m &= 0xff;
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar)
 DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
 DO_2OP_SCALAR(VBRSR, vbrsr)

+static bool trans_VQDMULLB_scalar(DisasContext *s, arg_2scalar *a)
+{
+    static MVEGenTwoOpScalarFn * const fns[] = {
+        NULL,
+        gen_helper_mve_vqdmullb_scalarh,
+        gen_helper_mve_vqdmullb_scalarw,
+        NULL,
+    };
+    if (a->qd == a->qn && a->size == MO_32) {
+        /* UNPREDICTABLE; we choose to undef */
+        return false;
+    }
+    return do_2op_scalar(s, a, fns[a->size]);
+}
+
+static bool trans_VQDMULLT_scalar(DisasContext *s, arg_2scalar *a)
+{
+    static MVEGenTwoOpScalarFn * const fns[] = {
+        NULL,
+        gen_helper_mve_vqdmullt_scalarh,
+        gen_helper_mve_vqdmullt_scalarw,
+        NULL,
+    };
+    if (a->qd == a->qn && a->size == MO_32) {
+        /* UNPREDICTABLE; we choose to undef */
+        return false;
+    }
+    return do_2op_scalar(s, a, fns[a->size]);
+}
+
 static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
                              MVEGenDualAccOpFn *fn)
 {
--
2.20.1
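The "multiply can't overflow, but the doubling might" case in do_qdmullw above has exactly one interesting corner: INT32_MIN * INT32_MIN is 2^62, whose double exceeds INT64_MAX. An illustrative standalone check (not QEMU code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t n = INT32_MIN, m = INT32_MIN;
    int64_t r = (int64_t)n * m;   /* 2^62: still representable */

    /* this is the guard do_qdmullw applies before doubling */
    if (r > INT64_MAX / 2) {
        puts("doubling would overflow: saturate to INT64_MAX");
    } else {
        printf("%lld\n", (long long)(r * 2));
    }
    return 0;
}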
Implement the vector forms of the MVE VQADD and VQSUB insns.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-32-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 16 ++++++++++++++++
 target/arm/mve.decode      |  5 +++++
 target/arm/mve_helper.c    | 14 ++++++++++++++
 target/arm/translate-mve.c |  4 ++++
 4 files changed, 39 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrdmulhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqrdmulhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqrdmulhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vqaddsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqaddsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqaddsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqsubsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
 VQDMULH 1110 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
 VQRDMULH 1111 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op

+VQADD_S 111 0 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
+VQADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
+VQSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
+VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
+
 # Vector miscellaneous

 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
 DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
 DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)

+DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
+DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
+DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
+DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
+DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
+DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)
+
+DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
+DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
+DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
+DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
+DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
+DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
+
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                 uint32_t rm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VMULL_TS, vmullts)
 DO_2OP(VMULL_TU, vmulltu)
 DO_2OP(VQDMULH, vqdmulh)
 DO_2OP(VQRDMULH, vqrdmulh)
+DO_2OP(VQADD_S, vqadds)
+DO_2OP(VQADD_U, vqaddu)
+DO_2OP(VQSUB_S, vqsubs)
+DO_2OP(VQSUB_U, vqsubu)

 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
--
2.20.1
Implement the MVE VQRSHL (vector) insn. Again, the code to perform
the actual shifts is borrowed from neon_helper.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-34-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 8 ++++++++
 target/arm/mve.decode      | 3 +++
 target/arm/mve_helper.c    | 6 ++++++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 19 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vqrshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 VQSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev

+VQRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
+VQRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
+
 # Vector miscellaneous

 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
     WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
 #define DO_UQSHL_OP(N, M, satp) \
     WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
+#define DO_SQRSHL_OP(N, M, satp) \
+    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
+#define DO_UQRSHL_OP(N, M, satp) \
+    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)

 DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
 DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
+DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
+DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)

 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQSUB_S, vqsubs)
 DO_2OP(VQSUB_U, vqsubu)
 DO_2OP(VQSHL_S, vqshls)
 DO_2OP(VQSHL_U, vqshlu)
+DO_2OP(VQRSHL_S, vqrshls)
+DO_2OP(VQRSHL_U, vqrshlu)

 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
--
2.20.1
Implement the MVE VSHL insn (vector form).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-35-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 8 ++++++++
 target/arm/mve.decode      | 3 +++
 target/arm/mve_helper.c    | 6 ++++++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 19 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vqshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
 VQSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op

+VSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev
+VSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev
+
 VQSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev

diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vhaddu, do_vhadd_u)
 DO_2OP_S(vhsubs, do_vhsub_s)
 DO_2OP_U(vhsubu, do_vhsub_u)

+#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
+#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
+
+DO_2OP_S(vshls, DO_VSHLS)
+DO_2OP_U(vshlu, DO_VSHLU)
+
 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 {
     if (val > max) {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQADD_S, vqadds)
 DO_2OP(VQADD_U, vqaddu)
 DO_2OP(VQSUB_S, vqsubs)
 DO_2OP(VQSUB_U, vqsubu)
+DO_2OP(VSHL_S, vshls)
+DO_2OP(VSHL_U, vshlu)
 DO_2OP(VQSHL_S, vqshls)
 DO_2OP(VQSHL_U, vqshlu)
 DO_2OP(VQRSHL_S, vqrshls)
--
2.20.1
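What makes VSHL (vector) different from an ordinary shift is visible in the (int8_t)(M) cast in DO_VSHLS/DO_VSHLU above: only the bottom byte of each Vm element supplies the shift count, and a negative count shifts the other way. A sketch of that convention for one uint32_t element, without the saturation and rounding plumbing of do_uqrshl_bhs (function name and values are mine):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: MVE-style variable shift of one uint32_t
 * element. The count is the signed low byte of m: positive shifts
 * left, negative shifts right, and counts at or beyond the element
 * width produce 0. The int8_t cast assumes the usual two's
 * complement conversion, as the QEMU macros do.
 */
static uint32_t vshlu32(uint32_t n, uint32_t m)
{
    int8_t sh = (int8_t)m;

    if (sh >= 32 || sh <= -32) {
        return 0;
    }
    return sh >= 0 ? n << sh : n >> -sh;
}

int main(void)
{
    printf("%#x\n", vshlu32(0x80, 4));             /* 0x800 */
    printf("%#x\n", vshlu32(0x80, (uint32_t)-4));  /* 0x8 */
    return 0;
}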
Implement the MVE VRSHL insn (vector form).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-36-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 8 ++++++++
 target/arm/mve.decode      | 3 +++
 target/arm/mve_helper.c    | 4 ++++
 target/arm/translate-mve.c | 2 ++
 4 files changed, 17 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vrshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vqshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
 VSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev
 VSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev

+VRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 0 ... 0 @2op_rev
+VRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 0 ... 0 @2op_rev
+
 VQSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev

diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_U(vhsubu, do_vhsub_u)

 #define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
 #define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
+#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
+#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)

 DO_2OP_S(vshls, DO_VSHLS)
 DO_2OP_U(vshlu, DO_VSHLU)
+DO_2OP_S(vrshls, DO_VRSHLS)
+DO_2OP_U(vrshlu, DO_VRSHLU)

 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQSUB_S, vqsubs)
 DO_2OP(VQSUB_U, vqsubu)
 DO_2OP(VSHL_S, vshls)
 DO_2OP(VSHL_U, vshlu)
+DO_2OP(VRSHL_S, vrshls)
+DO_2OP(VRSHL_U, vrshlu)
 DO_2OP(VQSHL_S, vqshls)
 DO_2OP(VQSHL_U, vqshlu)
 DO_2OP(VQRSHL_S, vqrshls)
--
2.20.1
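VRSHL only flips the 'round' argument of do_sqrshl_bhs/do_uqrshl_bhs from false to true; for a right shift that means adding half the weight of the final LSB before shifting. Illustrative arithmetic (my own helper and values; sh must be at least 1):

#include <stdint.h>
#include <stdio.h>

/* rounding right shift: add half the LSB weight, then shift */
static uint32_t urshr32(uint32_t n, unsigned sh)
{
    return (uint32_t)(((uint64_t)n + (1u << (sh - 1))) >> sh);
}

int main(void)
{
    /* 22 / 4 = 5.5: truncation gives 5, rounding gives 6 */
    printf("%u %u\n", 22u >> 2, urshr32(22, 2));
    return 0;
}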
Implement the MVE VQDMLADH and VQRDMLADH insns. These multiply
elements, and then add pairs of products, double, possibly round,
saturate and return the high half of the result.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-37-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    | 16 +++++++
 target/arm/mve.decode      |  5 +++
 target/arm/mve_helper.c    | 89 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  4 ++
 4 files changed, 114 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqrshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqrshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vqdmladhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqdmladhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmladhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmladhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@ VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 VQRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
 VQRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev

+VQDMLADH 1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
+VQDMLADHX 1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
+VQRDMLADH 1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
+VQRDMLADHX 1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
+
 # Vector miscellaneous

 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
 DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
 DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)

+/*
+ * Multiply add dual returning high half
+ * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of
+ * whether to add the rounding constant, and the pointer to the
+ * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant",
+ * saturate to twice the input size and return the high half; or
+ * (A * B - C * D) etc for VQDMLSDH.
+ */
+#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+                                void *vm) \
+    { \
+        TYPE *d = vd, *n = vn, *m = vm; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned e; \
+        bool qc = false; \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+            bool sat = false; \
+            if ((e & 1) == XCHG) { \
+                TYPE r = FN(n[H##ESIZE(e)], \
+                            m[H##ESIZE(e - XCHG)], \
+                            n[H##ESIZE(e + (1 - 2 * XCHG))], \
+                            m[H##ESIZE(e + (1 - XCHG))], \
+                            ROUND, &sat); \
+                mergemask(&d[H##ESIZE(e)], r, mask); \
+                qc |= sat & mask & 1; \
+            } \
+        } \
+        if (qc) { \
+            env->vfp.qc[0] = qc; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
+                            int round, bool *sat)
+{
+    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
+    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
+}
+
+static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
+                             int round, bool *sat)
+{
+    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
+    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
+}
+
+static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
+                             int round, bool *sat)
+{
+    int64_t m1 = (int64_t)a * b;
+    int64_t m2 = (int64_t)c * d;
+    int64_t r;
+    /*
+     * Architecturally we should do the entire add, double, round
+     * and then check for saturation. We do three saturating adds,
+     * but we need to be careful about the order. If the first
+     * m1 + m2 saturates then it's impossible for the *2+rc to
+     * bring it back into the non-saturated range. However, if
+     * m1 + m2 is negative then it's possible that doing the doubling
+     * would take the intermediate result below INT64_MAX and the
+     * addition of the rounding constant then brings it back in range.
+     * So we add half the rounding constant before doubling rather
+     * than adding the rounding constant after the doubling.
+     */
+    if (sadd64_overflow(m1, m2, &r) ||
+        sadd64_overflow(r, (round << 30), &r) ||
+        sadd64_overflow(r, r, &r)) {
+        *sat = true;
+        return r < 0 ? INT32_MAX : INT32_MIN;
+    }
+    return r >> 32;
+}
+
+DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
+DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
+DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
+DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
+DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
+DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)
+
+DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
+DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
+DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
+DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
+DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
+DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)
+
 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
     void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                 uint32_t rm) \
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQSHL_S, vqshls)
 DO_2OP(VQSHL_U, vqshlu)
 DO_2OP(VQRSHL_S, vqrshls)
 DO_2OP(VQRSHL_U, vqrshlu)
+DO_2OP(VQDMLADH, vqdmladh)
+DO_2OP(VQDMLADHX, vqdmladhx)
+DO_2OP(VQRDMLADH, vqrdmladh)
+DO_2OP(VQRDMLADHX, vqrdmladhx)

 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
--
2.20.1
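The XCHG indexing in DO_VQDMLADH_OP above is compact but dense; the following illustrative program (not QEMU code) just prints which operand elements feed each result element, for VQDMLADH (XCHG=0) and the exchanging VQDMLADHX (XCHG=1):

#include <stdio.h>

/*
 * For each element e where (e & 1) == XCHG, the macro computes
 * n[e] * m[e - XCHG] + n[e + (1 - 2*XCHG)] * m[e + (1 - XCHG)].
 * Print the index pattern for the first four elements.
 */
static void show(int xchg)
{
    int e;

    for (e = 0; e < 4; e++) {
        if ((e & 1) == xchg) {
            printf("d[%d] = n[%d]*m[%d] + n[%d]*m[%d]\n",
                   e, e, e - xchg,
                   e + (1 - 2 * xchg), e + (1 - xchg));
        }
    }
}

int main(void)
{
    puts("VQDMLADH:");    /* d[0] = n[0]*m[0] + n[1]*m[1], ... */
    show(0);
    puts("VQDMLADHX:");   /* d[1] = n[1]*m[0] + n[0]*m[1], ... */
    show(1);
    return 0;
}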
Implement the vector form of the MVE VQDMULL insn.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-39-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  5 +++++
 target/arm/mve.decode      |  5 +++++
 target/arm/mve_helper.c    | 30 ++++++++++++++++++++++++++++++
 target/arm/translate-mve.c | 30 ++++++++++++++++++++++++++++++
 4 files changed, 70 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)

+DEF_HELPER_FLAGS_4(mve_vqdmullbh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmullbw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmullth, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmulltw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -XXX,XX +XXX,XX @@
 @1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
 @2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
 @2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
+@2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
+          size=%size_28

 # The _rev suffix indicates that Vn and Vm are reversed. This is
 # the case for shifts. In the Arm ARM these insns are documented
@@ -XXX,XX +XXX,XX @@ VQDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
 VQRDMLSDH 1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
 VQRDMLSDHX 1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op

+VQDMULLB 111 . 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 1 @2op_sz28
+VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28
+
 # Vector miscellaneous

 VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
 DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
                     do_qdmullw, SATMASK32)

+/*
+ * Long saturating ops
+ */
+#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+                                void *vm) \
+    { \
+        LTYPE *d = vd; \
+        TYPE *n = vn, *m = vm; \
+        uint16_t mask = mve_element_mask(env); \
+        unsigned le; \
+        bool qc = false; \
+        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+            bool sat = false; \
+            LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \
+            LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \
+            mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \
+            qc |= sat && (mask & SATMASK); \
+        } \
+        if (qc) { \
+            env->vfp.qc[0] = qc; \
+        } \
+        mve_advance_vpt(env); \
+    }
+
+DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
+DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
+DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
+DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
89
+
90
static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
91
{
92
m &= 0xff;
93
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/translate-mve.c
96
+++ b/target/arm/translate-mve.c
97
@@ -XXX,XX +XXX,XX @@ DO_2OP(VQDMLSDHX, vqdmlsdhx)
98
DO_2OP(VQRDMLSDH, vqrdmlsdh)
99
DO_2OP(VQRDMLSDHX, vqrdmlsdhx)
100
101
+static bool trans_VQDMULLB(DisasContext *s, arg_2op *a)
102
+{
103
+ static MVEGenTwoOpFn * const fns[] = {
104
+ NULL,
105
+ gen_helper_mve_vqdmullbh,
106
+ gen_helper_mve_vqdmullbw,
107
+ NULL,
108
+ };
109
+ if (a->size == MO_32 && (a->qd == a->qm || a->qd == a->qn)) {
110
+ /* UNPREDICTABLE; we choose to undef */
111
+ return false;
112
+ }
113
+ return do_2op(s, a, fns[a->size]);
114
+}
115
+
116
+static bool trans_VQDMULLT(DisasContext *s, arg_2op *a)
117
+{
118
+ static MVEGenTwoOpFn * const fns[] = {
119
+ NULL,
120
+ gen_helper_mve_vqdmullth,
121
+ gen_helper_mve_vqdmulltw,
122
+ NULL,
123
+ };
124
+ if (a->size == MO_32 && (a->qd == a->qm || a->qd == a->qn)) {
125
+ /* UNPREDICTABLE; we choose to undef */
126
+ return false;
127
+ }
128
+ return do_2op(s, a, fns[a->size]);
129
+}
130
+
131
static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
132
MVEGenTwoOpScalarFn fn)
133
{
134
--
135
2.20.1
136
137
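Taken together, VQDMULLB/T compute sat(2 * Vn * Vm) at double the element width: B takes the even-numbered (bottom) elements of each result lane and T (TOP = 1) the odd-numbered ones, matching the le * 2 + TOP indexing in the macro above. The following standalone sketch, not part of the patch, re-creates the assumed behaviour of do_qdmullh (defined earlier in mve_helper.c and not visible in this excerpt) for the 16-to-32-bit case; predication via mve_element_mask/mergemask and the SATMASK plumbing are left out.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Assumed per-element operation: saturating doubling multiply,
 * 16x16 -> 32 bits. The only case that can overflow int32_t is
 * a == b == INT16_MIN (2 * 2^30 = 2^31). */
static int32_t qdmullh(int16_t a, int16_t b, bool *sat)
{
    int64_t p = 2 * (int64_t)a * b;
    if (p > INT32_MAX) {
        *sat = true;
        return INT32_MAX;
    }
    return (int32_t)p;
}

int main(void)
{
    /* Eight 16-bit elements: one 128-bit MVE vector per operand */
    int16_t n[8] = { 100, 7, INT16_MIN, 9, -3, 11, 30000, 13 };
    int16_t m[8] = { 200, 7, INT16_MIN, 9,  5, 11, 30000, 13 };
    bool qc = false;

    /* VQDMULLB.S16: results come from elements 0, 2, 4, 6 */
    for (int le = 0; le < 4; le++) {
        bool sat = false;
        int32_t d = qdmullh(n[le * 2], m[le * 2], &sat);
        qc |= sat;
        printf("lane %d: %d%s\n", le, (int)d, sat ? "  (saturated)" : "");
    }
    printf("QC: %d\n", qc);   /* 1: lane 1 saturated */
    return 0;
}

For the 32-bit form the decode above additionally rejects Qd == Qm or Qd == Qn, which the architecture leaves UNPREDICTABLE.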
From: Alexandre Iooss <erdnaxe@crans.org>

This adds the target guide for BBC Micro:bit.

Information is taken from https://wiki.qemu.org/Features/MicroBit
and from hw/arm/nrf51_soc.c.

Signed-off-by: Alexandre Iooss <erdnaxe@crans.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Joel Stanley <joel@jms.id.au>
Message-id: 20210621075625.540471-1-erdnaxe@crans.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/nrf.rst    | 51 ++++++++++++++++++++++++++++++++++++++
 docs/system/target-arm.rst |  1 +
 MAINTAINERS                |  1 +
 3 files changed, 53 insertions(+)
 create mode 100644 docs/system/arm/nrf.rst

diff --git a/docs/system/arm/nrf.rst b/docs/system/arm/nrf.rst
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/docs/system/arm/nrf.rst
@@ -XXX,XX +XXX,XX @@
+Nordic nRF boards (``microbit``)
+================================
+
+The `Nordic nRF`_ chips are a family of ARM-based System-on-Chips
+designed for low-power and short-range wireless solutions.
+
+.. _Nordic nRF: https://www.nordicsemi.com/Products
+
+The nRF51 series is the first series for short-range wireless applications.
+It is superseded by the nRF52 series.
+The following machines are based on this chip:
+
+- ``microbit`` BBC micro:bit board with nRF51822 SoC
+
+There are other series such as nRF52, nRF53 and nRF91, which are currently
+not supported by QEMU.
+
+Supported devices
+-----------------
+
+ * ARM Cortex-M0 (ARMv6-M)
+ * Serial ports (UART)
+ * Clock controller
+ * Timers
+ * Random Number Generator (RNG)
+ * GPIO controller
+ * NVMC
+ * SWI
+
+Missing devices
+---------------
+
+ * Watchdog
+ * Real-Time Clock (RTC) controller
+ * TWI (i2c)
+ * SPI controller
+ * Analog to Digital Converter (ADC)
+ * Quadrature decoder
+ * Radio
+
+Boot options
+------------
+
+The Micro:bit machine can be started using the ``-device`` option to load a
+firmware in `ihex format`_. Example:
+
+.. _ihex format: https://en.wikipedia.org/wiki/Intel_HEX
+
+.. code-block:: bash
+
+   $ qemu-system-arm -M microbit -device loader,file=test.hex
diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/target-arm.rst
+++ b/docs/system/target-arm.rst
@@ -XXX,XX +XXX,XX @@ undocumented; you can get a complete list by running
    arm/digic
    arm/musicpal
    arm/gumstix
+   arm/nrf
    arm/nseries
    arm/nuvoton
    arm/orangepi
diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: hw/*/microbit*.c
 F: include/hw/*/nrf51*.h
 F: include/hw/*/microbit*.h
 F: tests/qtest/microbit-test.c
+F: docs/system/arm/nrf.rst

 AVR Machines
 -------------
--
2.20.1

From: Alex Zuepke <alex.zuepke@tum.de>

The ARMv8 manual specifies that PMUSERENR_EL0.ER enables read access
to both the PMXEVCNTR_EL0 and PMEVCNTR<n>_EL0 registers; however, we
only honour it for PMXEVCNTR_EL0. Extend the check to cover
PMEVCNTR<n>_EL0 as well.

Signed-off-by: Alex Zuepke <alex.zuepke@tum.de>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220428132717.84190-1-alex.zuepke@tum.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void define_pmu_regs(ARMCPU *cpu)
               .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
-              .accessfn = pmreg_access },
+              .accessfn = pmreg_access_xevcntr },
             { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
-              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
+              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
               .type = ARM_CP_IO,
               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
               .raw_readfn = pmevcntr_rawread,
--
2.25.1
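For completeness, what the fix permits as seen from software: once EL1 has set PMUSERENR_EL0.ER (and enabled an event counter), EL0 code can read PMEVCNTR<n>_EL0 directly instead of selecting a counter via PMSELR_EL0 and reading PMXEVCNTR_EL0. A hypothetical user-space sketch for AArch64, not part of the patch; if EL1 has not granted access, the mrs traps and the process typically receives SIGILL.

#include <stdio.h>
#include <stdint.h>

/* Read PMU event counter 0 directly from EL0. This only succeeds
 * when EL1 software has set PMUSERENR_EL0.ER (or .EN); otherwise
 * the access traps to EL1. */
static inline uint64_t read_pmevcntr0(void)
{
    uint64_t v;
    __asm__ volatile("mrs %0, pmevcntr0_el0" : "=r"(v));
    return v;
}

int main(void)
{
    uint64_t before = read_pmevcntr0();
    /* ... workload under measurement ... */
    uint64_t after = read_pmevcntr0();
    printf("events counted: %llu\n", (unsigned long long)(after - before));
    return 0;
}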