Arm queue; the bulk of this is the VFP decodetree conversion...

thanks
-- PMM

The following changes since commit a98135f727595382e200d04c2996e868b7925a01:

  Merge remote-tracking branch 'remotes/armbru/tags/pull-qapi-2019-06-12' into staging (2019-06-13 11:58:00 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20190613

for you to fetch changes up to 07e4c7f769120c9a5bd6a26c2dc1421f2f838d80:

  target/arm: Fix short-vector increment behaviour (2019-06-13 12:57:37 +0100)

----------------------------------------------------------------
target-arm queue:
 * convert aarch32 VFP decoder to decodetree
   (includes tightening up decode in a few places)
 * fix minor bugs in VFP short-vector handling
 * hw/core/bus.c: Only the main system bus can have no parent
 * smmuv3: Fix decoding of ID register range
 * Implement NSACR gating of floating point
 * Use tcg_gen_gvec_bitsel
 * Vectorize USHL and SSHL

----------------------------------------------------------------
Peter Maydell (44):
      target/arm: Implement NSACR gating of floating point
      hw/arm/smmuv3: Fix decoding of ID register range
      hw/core/bus.c: Only the main system bus can have no parent
      target/arm: Add stubs for AArch32 VFP decodetree
      target/arm: Factor out VFP access checking code
      target/arm: Fix Cortex-R5F MVFR values
      target/arm: Explicitly enable VFP short-vectors for aarch32 -cpu max
      target/arm: Convert the VSEL instructions to decodetree
      target/arm: Convert VMINNM, VMAXNM to decodetree
      target/arm: Convert VRINTA/VRINTN/VRINTP/VRINTM to decodetree
      target/arm: Convert VCVTA/VCVTN/VCVTP/VCVTM to decodetree
      target/arm: Move the VFP trans_* functions to translate-vfp.inc.c
      target/arm: Add helpers for VFP register loads and stores
      target/arm: Convert "double-precision" register moves to decodetree
      target/arm: Convert "single-precision" register moves to decodetree
      target/arm: Convert VFP two-register transfer insns to decodetree
      target/arm: Convert VFP VLDR and VSTR to decodetree
      target/arm: Convert the VFP load/store multiple insns to decodetree
      target/arm: Remove VLDR/VSTR/VLDM/VSTM use of cpu_F0s and cpu_F0d
      target/arm: Convert VFP VMLA to decodetree
      target/arm: Convert VFP VMLS to decodetree
      target/arm: Convert VFP VNMLS to decodetree
      target/arm: Convert VFP VNMLA to decodetree
      target/arm: Convert VMUL to decodetree
      target/arm: Convert VNMUL to decodetree
      target/arm: Convert VADD to decodetree
      target/arm: Convert VSUB to decodetree
      target/arm: Convert VDIV to decodetree
      target/arm: Convert VFP fused multiply-add insns to decodetree
      target/arm: Convert VMOV (imm) to decodetree
      target/arm: Convert VABS to decodetree
      target/arm: Convert VNEG to decodetree
      target/arm: Convert VSQRT to decodetree
      target/arm: Convert VMOV (register) to decodetree
      target/arm: Convert VFP comparison insns to decodetree
      target/arm: Convert the VCVT-from-f16 insns to decodetree
      target/arm: Convert the VCVT-to-f16 insns to decodetree
      target/arm: Convert VFP round insns to decodetree
      target/arm: Convert double-single precision conversion insns to decodetree
      target/arm: Convert integer-to-float insns to decodetree
      target/arm: Convert VJCVT to decodetree
      target/arm: Convert VCVT fp/fixed-point conversion insns to decodetree
      target/arm: Convert float-to-integer VCVT insns to decodetree
      target/arm: Fix short-vector increment behaviour

Richard Henderson (4):
      target/arm: Vectorize USHL and SSHL
      target/arm: Use tcg_gen_gvec_bitsel
      target/arm: Fix output of PAuth Auth
      decodetree: Fix comparison of Field

 target/arm/Makefile.objs          |   13 +
 tests/tcg/aarch64/Makefile.target |    2 +-
 target/arm/cpu.h                  |   11 +
 target/arm/helper.h               |   11 +-
 target/arm/translate-a64.h        |    2 +
 target/arm/translate.h            |    9 +-
 hw/arm/smmuv3.c                   |    2 +-
 hw/core/bus.c                     |   21 +-
 target/arm/cpu.c                  |    6 +
 target/arm/helper.c               |   75 +-
 target/arm/neon_helper.c          |   33 -
 target/arm/pauth_helper.c         |    4 +-
 target/arm/translate-a64.c        |   33 +-
 target/arm/translate-vfp.inc.c    | 2672 +++++++++++++++++++++++++++++++++++++
 target/arm/translate.c            | 1881 +++++---------------------
 target/arm/vec_helper.c           |   88 ++
 tests/tcg/aarch64/pauth-2.c       |   61 +
 scripts/decodetree.py             |    2 +-
 target/arm/vfp-uncond.decode      |   63 +
 target/arm/vfp.decode             |  242 ++++
 20 files changed, 3593 insertions(+), 1638 deletions(-)
 create mode 100644 target/arm/translate-vfp.inc.c
 create mode 100644 tests/tcg/aarch64/pauth-2.c
 create mode 100644 target/arm/vfp-uncond.decode
 create mode 100644 target/arm/vfp.decode


Last handful of arm patches before softfreeze...

thanks
-- PMM

The following changes since commit 4747524f9f243ca5ff1f146d37e423c00e923ee1:

  Merge remote-tracking branch 'remotes/kraxel/tags/vga-20200316-pull-request' into staging (2020-03-16 14:55:59 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20200317

for you to fetch changes up to e88d3671e3bbd59d385838a4101ea19cdcf47309:

  hw/arm/pxa2xx: Do not wire up OHCI for PXA255 (2020-03-17 11:36:48 +0000)

----------------------------------------------------------------
target-arm:
 * hw/arm/pxa2xx: Do not wire up OHCI for PXA255
 * aspeed/smc: Fix number of dummy cycles for FAST_READ_4 command
 * m25p80: Improve command handling for Jedec and unsupported commands
 * hw/net/imx_fec: write TGSR and TCSR3 in imx_enet_write()
 * hw/arm/fsl-imx6, imx6ul: Wire up USB controllers
 * hw/arm/fsl-imx6ul: Instantiate unimplemented pwm and can devices

----------------------------------------------------------------
Chen Qun (1):
      hw/net/imx_fec: write TGSR and TCSR3 in imx_enet_write()

Guenter Roeck (10):
      hw/usb: Add basic i.MX USB Phy support
      hw/arm/fsl-imx6ul: Fix USB interrupt numbers
      hw/arm/fsl-imx6ul: Instantiate unimplemented pwm and can devices
      hw/arm/fsl-imx6ul: Wire up USB controllers
      hw/arm/fsl-imx6: Wire up USB controllers
      m25p80: Convert to support tracing
      m25p80: Improve command handling for Jedec commands
      m25p80: Improve command handling for unsupported commands
      aspeed/smc: Fix number of dummy cycles for FAST_READ_4 command
      hw/arm/pxa2xx: Do not wire up OHCI for PXA255

 hw/usb/Makefile.objs         |   2 +
 include/hw/arm/fsl-imx6.h    |   6 ++
 include/hw/arm/fsl-imx6ul.h  |  16 ++-
 include/hw/usb/imx-usb-phy.h |  53 ++++++++++
 hw/arm/fsl-imx6.c            |  36 +++++++
 hw/arm/fsl-imx6ul.c          |  49 ++++++++++
 hw/arm/pxa2xx.c              |   3 -
 hw/block/m25p80.c            |  58 +++++------
 hw/net/imx_fec.c             |   6 +-
 hw/ssi/aspeed_smc.c          |   2 +-
 hw/usb/imx-usb-phy.c         | 225 +++++++++++++++++++++++++++++++++++++
 MAINTAINERS                  |   2 +
 hw/arm/Kconfig               |   1 +
 hw/block/trace-events        |  16 +++
 hw/usb/Kconfig               |   5 +
 15 files changed, 444 insertions(+), 36 deletions(-)
 create mode 100644 include/hw/usb/imx-usb-phy.h
 create mode 100644 hw/usb/imx-usb-phy.c
Add the infrastructure for building and invoking a decodetree decoder
for the AArch32 VFP encodings. At the moment the new decoder covers
nothing, so we always fall back to the existing hand-written decode.

We need to have one decoder for the unconditional insns and one for
the conditional insns, as otherwise the patterns for conditional
insns would incorrectly match against the unconditional ones too.

Since translate.c is over 14,000 lines long and we're going to be
touching pretty much every line of the VFP code as part of the
decodetree conversion, we create a new translate-vfp.inc.c to hold
the code which deals with VFP in the new scheme. It should be
possible to convert this into a standalone translation unit
eventually, but the conversion process will be much simpler if we
simply #include it midway through translate.c to start with.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/Makefile.objs       | 13 +++++++++++++
 target/arm/translate-vfp.inc.c | 31 +++++++++++++++++++++++++++++++
 target/arm/translate.c         | 19 +++++++++++++++++++
 target/arm/vfp-uncond.decode   | 28 ++++++++++++++++++++++++++++
 target/arm/vfp.decode          | 28 ++++++++++++++++++++++++++++
 5 files changed, 119 insertions(+)
 create mode 100644 target/arm/translate-vfp.inc.c
 create mode 100644 target/arm/vfp-uncond.decode
 create mode 100644 target/arm/vfp.decode

diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/Makefile.objs
+++ b/target/arm/Makefile.objs
@@ -XXX,XX +XXX,XX @@ target/arm/decode-sve.inc.c: $(SRC_PATH)/target/arm/sve.decode $(DECODETREE)
 	  $(PYTHON) $(DECODETREE) --decode disas_sve -o $@ $<,\
 	  "GEN", $(TARGET_DIR)$@)
 
+target/arm/decode-vfp.inc.c: $(SRC_PATH)/target/arm/vfp.decode $(DECODETREE)
+	$(call quiet-command,\
+	  $(PYTHON) $(DECODETREE) --static-decode disas_vfp -o $@ $<,\
+	  "GEN", $(TARGET_DIR)$@)
+
+target/arm/decode-vfp-uncond.inc.c: $(SRC_PATH)/target/arm/vfp-uncond.decode $(DECODETREE)
+	$(call quiet-command,\
+	  $(PYTHON) $(DECODETREE) --static-decode disas_vfp_uncond -o $@ $<,\
+	  "GEN", $(TARGET_DIR)$@)
+
 target/arm/translate-sve.o: target/arm/decode-sve.inc.c
+target/arm/translate.o: target/arm/decode-vfp.inc.c
+target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c
+
 obj-$(TARGET_AARCH64) += translate-sve.o sve_helper.o
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * ARM translation: AArch32 VFP instructions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Copyright (c) 2007 OpenedHand, Ltd.
+ * Copyright (c) 2019 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This file is intended to be included from translate.c; it uses
+ * some macros and definitions provided by that file.
+ * It might be possible to convert it to a standalone .c file eventually.
+ */
+
+/* Include the generated VFP decoder */
+#include "decode-vfp.inc.c"
+#include "decode-vfp-uncond.inc.c"
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_mov_vreg_F0(int dp, int reg)
 
 #define ARM_CP_RW_BIT   (1 << 20)
 
+/* Include the VFP decoder */
+#include "translate-vfp.inc.c"
+
 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
 {
     tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         return 1;
     }
 
+    /*
+     * If the decodetree decoder handles this insn it will always
+     * emit code to either execute the insn or generate an appropriate
+     * exception; so we don't need to ever return non-zero to tell
+     * the calling code to emit an UNDEF exception.
+     */
+    if (extract32(insn, 28, 4) == 0xf) {
+        if (disas_vfp_uncond(s, insn)) {
+            return 0;
+        }
+    } else {
+        if (disas_vfp(s, insn)) {
+            return 0;
+        }
+    }
+
     /* FIXME: this access check should not take precedence over UNDEF
      * for invalid encodings; we will generate incorrect syndrome information
      * for attempts to execute invalid vfp/neon encodings with FP disabled.
diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/vfp-uncond.decode
@@ -XXX,XX +XXX,XX @@
+# AArch32 VFP instruction descriptions (unconditional insns)
+#
+#  Copyright (c) 2019 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+# Encodings for the unconditional VFP instructions are here:
+# generally anything matching A32
+#  1111 1110 .... .... .... 101. ...0 ....
+# and T32
+#  1111 110. .... .... .... 101. .... ....
+#  1111 1110 .... .... .... 101. .... ....
+# (but those patterns might also cover some Neon instructions,
+# which do not live in this file.)
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@
+# AArch32 VFP instruction descriptions (conditional insns)
+#
+#  Copyright (c) 2019 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+# Encodings for the conditional VFP instructions are here:
+# generally anything matching A32
+#  cccc 11.. .... .... .... 101. .... ....
+# and T32
+#  1110 110. .... .... .... 101. .... ....
+#  1110 1110 .... .... .... 101. .... ....
+# (but those patterns might also cover some Neon instructions,
+# which do not live in this file.)
-- 
2.20.1


From: Guenter Roeck <linux@roeck-us.net>

Add basic USB PHY support as implemented in i.MX23, i.MX28, i.MX6,
and i.MX7 SoCs.

The only support really needed - at least to boot Linux - is support
for soft reset, which needs to reset various registers to their initial
value. Otherwise, just record register values.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Message-id: 20200313014551.12554-2-linux@roeck-us.net
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/usb/Makefile.objs         |   2 +
 include/hw/usb/imx-usb-phy.h |  53 +++++++++
 hw/usb/imx-usb-phy.c         | 225 +++++++++++++++++++++++++++++++++++
 MAINTAINERS                  |   2 +
 hw/arm/Kconfig               |   1 +
 hw/usb/Kconfig               |   5 +
 6 files changed, 288 insertions(+)
 create mode 100644 include/hw/usb/imx-usb-phy.h
 create mode 100644 hw/usb/imx-usb-phy.c

diff --git a/hw/usb/Makefile.objs b/hw/usb/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/hw/usb/Makefile.objs
+++ b/hw/usb/Makefile.objs
@@ -XXX,XX +XXX,XX @@ common-obj-$(CONFIG_XEN) += xen-usb.o
 xen-usb.o-cflags := $(LIBUSB_CFLAGS)
 xen-usb.o-libs := $(LIBUSB_LIBS)
 endif
+
+common-obj-$(CONFIG_IMX_USBPHY) += imx-usb-phy.o
diff --git a/include/hw/usb/imx-usb-phy.h b/include/hw/usb/imx-usb-phy.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/hw/usb/imx-usb-phy.h
@@ -XXX,XX +XXX,XX @@
+#ifndef IMX_USB_PHY_H
+#define IMX_USB_PHY_H
+
+#include "hw/sysbus.h"
+#include "qemu/bitops.h"
+
+enum IMXUsbPhyRegisters {
+    USBPHY_PWD,
+    USBPHY_PWD_SET,
+    USBPHY_PWD_CLR,
+    USBPHY_PWD_TOG,
+    USBPHY_TX,
+    USBPHY_TX_SET,
+    USBPHY_TX_CLR,
+    USBPHY_TX_TOG,
+    USBPHY_RX,
+    USBPHY_RX_SET,
+    USBPHY_RX_CLR,
+    USBPHY_RX_TOG,
+    USBPHY_CTRL,
+    USBPHY_CTRL_SET,
+    USBPHY_CTRL_CLR,
+    USBPHY_CTRL_TOG,
+    USBPHY_STATUS,
+    USBPHY_DEBUG = 0x14,
+    USBPHY_DEBUG_SET,
+    USBPHY_DEBUG_CLR,
+    USBPHY_DEBUG_TOG,
+    USBPHY_DEBUG0_STATUS,
+    USBPHY_DEBUG1 = 0x1c,
+    USBPHY_DEBUG1_SET,
+    USBPHY_DEBUG1_CLR,
+    USBPHY_DEBUG1_TOG,
+    USBPHY_VERSION,
+    USBPHY_MAX
+};
+
+#define USBPHY_CTRL_SFTRST BIT(31)
+
+#define TYPE_IMX_USBPHY "imx.usbphy"
+#define IMX_USBPHY(obj) OBJECT_CHECK(IMXUSBPHYState, (obj), TYPE_IMX_USBPHY)
+
+typedef struct IMXUSBPHYState {
+    /* <private> */
+    SysBusDevice parent_obj;
+
+    /* <public> */
+    MemoryRegion iomem;
+
+    uint32_t usbphy[USBPHY_MAX];
+} IMXUSBPHYState;
+
+#endif /* IMX_USB_PHY_H */
diff --git a/hw/usb/imx-usb-phy.c b/hw/usb/imx-usb-phy.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/usb/imx-usb-phy.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * i.MX USB PHY
+ *
+ * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * We need to implement basic reset control in the PHY control register.
+ * For everything else, it is sufficient to set whatever is written.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/usb/imx-usb-phy.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+static const VMStateDescription vmstate_imx_usbphy = {
+    .name = TYPE_IMX_USBPHY,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(usbphy, IMXUSBPHYState, USBPHY_MAX),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static void imx_usbphy_softreset(IMXUSBPHYState *s)
+{
+    s->usbphy[USBPHY_PWD] = 0x001e1c00;
+    s->usbphy[USBPHY_TX] = 0x10060607;
+    s->usbphy[USBPHY_RX] = 0x00000000;
+    s->usbphy[USBPHY_CTRL] = 0xc0200000;
+}
+
+static void imx_usbphy_reset(DeviceState *dev)
+{
+    IMXUSBPHYState *s = IMX_USBPHY(dev);
+
+    s->usbphy[USBPHY_STATUS] = 0x00000000;
+    s->usbphy[USBPHY_DEBUG] = 0x7f180000;
+    s->usbphy[USBPHY_DEBUG0_STATUS] = 0x00000000;
+    s->usbphy[USBPHY_DEBUG1] = 0x00001000;
+    s->usbphy[USBPHY_VERSION] = 0x04020000;
+
+    imx_usbphy_softreset(s);
+}
+
+static uint64_t imx_usbphy_read(void *opaque, hwaddr offset, unsigned size)
+{
+    IMXUSBPHYState *s = (IMXUSBPHYState *)opaque;
+    uint32_t index = offset >> 2;
+    uint32_t value;
+
+    switch (index) {
+    case USBPHY_PWD_SET:
+    case USBPHY_TX_SET:
+    case USBPHY_RX_SET:
+    case USBPHY_CTRL_SET:
+    case USBPHY_DEBUG_SET:
+    case USBPHY_DEBUG1_SET:
+        /*
+         * All REG_NAME_SET register access are in fact targeting the
+         * REG_NAME register.
+         */
+        value = s->usbphy[index - 1];
+        break;
+    case USBPHY_PWD_CLR:
+    case USBPHY_TX_CLR:
+    case USBPHY_RX_CLR:
+    case USBPHY_CTRL_CLR:
+    case USBPHY_DEBUG_CLR:
+    case USBPHY_DEBUG1_CLR:
+        /*
+         * All REG_NAME_CLR register access are in fact targeting the
+         * REG_NAME register.
+         */
+        value = s->usbphy[index - 2];
+        break;
+    case USBPHY_PWD_TOG:
+    case USBPHY_TX_TOG:
+    case USBPHY_RX_TOG:
+    case USBPHY_CTRL_TOG:
+    case USBPHY_DEBUG_TOG:
+    case USBPHY_DEBUG1_TOG:
+        /*
+         * All REG_NAME_TOG register access are in fact targeting the
+         * REG_NAME register.
+         */
+        value = s->usbphy[index - 3];
+        break;
+    default:
+        value = s->usbphy[index];
+        break;
+    }
+    return (uint64_t)value;
+}
+
+static void imx_usbphy_write(void *opaque, hwaddr offset, uint64_t value,
+                             unsigned size)
+{
+    IMXUSBPHYState *s = (IMXUSBPHYState *)opaque;
+    uint32_t index = offset >> 2;
+
+    switch (index) {
+    case USBPHY_CTRL:
+        s->usbphy[index] = value;
+        if (value & USBPHY_CTRL_SFTRST) {
+            imx_usbphy_softreset(s);
+        }
+        break;
+    case USBPHY_PWD:
+    case USBPHY_TX:
+    case USBPHY_RX:
+    case USBPHY_STATUS:
+    case USBPHY_DEBUG:
+    case USBPHY_DEBUG1:
+        s->usbphy[index] = value;
+        break;
+    case USBPHY_CTRL_SET:
+        s->usbphy[index - 1] |= value;
+        if (value & USBPHY_CTRL_SFTRST) {
+            imx_usbphy_softreset(s);
+        }
+        break;
+    case USBPHY_PWD_SET:
+    case USBPHY_TX_SET:
+    case USBPHY_RX_SET:
+    case USBPHY_DEBUG_SET:
+    case USBPHY_DEBUG1_SET:
+        /*
+         * All REG_NAME_SET register access are in fact targeting the
+         * REG_NAME register. So we change the value of the REG_NAME
+         * register, setting bits passed in the value.
+         */
+        s->usbphy[index - 1] |= value;
+        break;
+    case USBPHY_PWD_CLR:
+    case USBPHY_TX_CLR:
+    case USBPHY_RX_CLR:
+    case USBPHY_CTRL_CLR:
+    case USBPHY_DEBUG_CLR:
+    case USBPHY_DEBUG1_CLR:
+        /*
+         * All REG_NAME_CLR register access are in fact targeting the
+         * REG_NAME register. So we change the value of the REG_NAME
+         * register, unsetting bits passed in the value.
+         */
+        s->usbphy[index - 2] &= ~value;
+        break;
+    case USBPHY_CTRL_TOG:
+        s->usbphy[index - 3] ^= value;
+        if ((value & USBPHY_CTRL_SFTRST) &&
+            (s->usbphy[index - 3] & USBPHY_CTRL_SFTRST)) {
+            imx_usbphy_softreset(s);
+        }
+        break;
+    case USBPHY_PWD_TOG:
+    case USBPHY_TX_TOG:
+    case USBPHY_RX_TOG:
+    case USBPHY_DEBUG_TOG:
+    case USBPHY_DEBUG1_TOG:
+        /*
+         * All REG_NAME_TOG register access are in fact targeting the
+         * REG_NAME register. So we change the value of the REG_NAME
+         * register, toggling bits passed in the value.
+         */
+        s->usbphy[index - 3] ^= value;
+        break;
+    default:
+        /* Other registers are read-only */
+        break;
+    }
+}
+
+static const struct MemoryRegionOps imx_usbphy_ops = {
+    .read = imx_usbphy_read,
+    .write = imx_usbphy_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        /*
+         * Our device would not work correctly if the guest was doing
+         * unaligned access. This might not be a limitation on the real
+         * device but in practice there is no reason for a guest to access
+         * this device unaligned.
+         */
+        .min_access_size = 4,
+        .max_access_size = 4,
+        .unaligned = false,
+    },
+};
+
+static void imx_usbphy_realize(DeviceState *dev, Error **errp)
+{
+    IMXUSBPHYState *s = IMX_USBPHY(dev);
+
+    memory_region_init_io(&s->iomem, OBJECT(s), &imx_usbphy_ops, s,
+                          "imx-usbphy", 0x1000);
+    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
+}
+
+static void imx_usbphy_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = imx_usbphy_reset;
+    dc->vmsd = &vmstate_imx_usbphy;
+    dc->desc = "i.MX USB PHY Module";
+    dc->realize = imx_usbphy_realize;
+}
+
+static const TypeInfo imx_usbphy_info = {
+    .name = TYPE_IMX_USBPHY,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(IMXUSBPHYState),
+    .class_init = imx_usbphy_class_init,
+};
+
+static void imx_usbphy_register_types(void)
+{
+    type_register_static(&imx_usbphy_info);
+}
+
+type_init(imx_usbphy_register_types)
diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: hw/arm/sabrelite.c
 F: hw/arm/fsl-imx6.c
 F: hw/misc/imx6_*.c
 F: hw/ssi/imx_spi.c
+F: hw/usb/imx-usb-phy.c
+F: include/hw/usb/imx-usb-phy.h
 F: include/hw/arm/fsl-imx6.h
 F: include/hw/misc/imx6_*.h
 F: include/hw/ssi/imx_spi.h
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/Kconfig
+++ b/hw/arm/Kconfig
@@ -XXX,XX +XXX,XX @@ config FSL_IMX6
     select IMX
     select IMX_FEC
    select IMX_I2C
+    select IMX_USBPHY
     select SDHCI
 
 config ASPEED_SOC
diff --git a/hw/usb/Kconfig b/hw/usb/Kconfig
index XXXXXXX..XXXXXXX 100644
--- a/hw/usb/Kconfig
+++ b/hw/usb/Kconfig
@@ -XXX,XX +XXX,XX @@ config USB_STORAGE_MTP
     bool
     default y
     depends on USB
+
+config IMX_USBPHY
+    bool
+    default y
+    depends on USB
-- 
2.20.1
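A note for readers on the SET/CLR/TOG scheme the PHY model above implements:
as on most i.MX peripherals, each register is followed by three write-only
aliases at offsets +4, +8 and +0xc which set, clear and toggle the written
bits in the base register. Below is a minimal standalone C sketch of that
behaviour (illustrative only, not part of the patch; the register names and
layout here are invented for the example):

    #include <stdint.h>
    #include <assert.h>

    /* One register plus its SET/CLR/TOG aliases, indexed like the PHY code. */
    enum { REG, REG_SET, REG_CLR, REG_TOG, REG_COUNT };

    static uint32_t regs[REG_COUNT];

    static void mmio_write(unsigned index, uint32_t value)
    {
        switch (index) {
        case REG:
            regs[REG] = value;      /* plain write replaces the register */
            break;
        case REG_SET:
            regs[REG] |= value;     /* write-1-to-set */
            break;
        case REG_CLR:
            regs[REG] &= ~value;    /* write-1-to-clear */
            break;
        case REG_TOG:
            regs[REG] ^= value;     /* write-1-to-toggle */
            break;
        }
    }

    int main(void)
    {
        mmio_write(REG, 0x0);
        mmio_write(REG_SET, 0x5);   /* REG == 0x5 */
        mmio_write(REG_CLR, 0x1);   /* REG == 0x4 */
        mmio_write(REG_TOG, 0x6);   /* REG == 0x2 */
        assert(regs[REG] == 0x2);
        return 0;
    }

This is why reads of the aliases in imx_usbphy_read() simply return the base
register (index - 1, - 2 or - 3): the aliases have no storage of their own.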
For VFP short vectors, the VFP registers are divided into a
series of banks: for single-precision these are s0-s7, s8-s15,
s16-s23 and s24-s31; for double-precision they are d0-d3,
d4-d7, ... d28-d31. Some banks are "scalar" meaning that
use of a register within them triggers a pure-scalar or
mixed vector-scalar operation rather than a full vector
operation. The scalar banks are s0-s7, d0-d3 and d16-d19.
When using a bank as part of a vector operation, we
iterate through it, increasing the register number by
the specified stride each time, and wrapping around to
the beginning of the bank.

Unfortunately our calculation of the "increment" part of this
was incorrect:
     vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask)
will only do the intended thing if bank_mask has exactly
one set high bit. For instance for doubles (bank_mask = 0xc),
if we start with vd = 6 and delta_d = 2 then vd is updated
to 12 rather than the intended 4.

This only causes problems in the unlikely case that the
starting register is not the first in its bank: if the
register number doesn't have to wrap around then the
expression happens to give the right answer.

Fix this bug by abstracting out the "check whether register
is in a scalar bank" and "advance register within bank"
operations to utility functions which use the right
bit masking operations.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 100 ++++++++++++++++++++-------------
 1 file changed, 60 insertions(+), 40 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ typedef void VFPGen3OpDPFn(TCGv_i64 vd,
 typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
 typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
 
+/*
+ * Return true if the specified S reg is in a scalar bank
+ * (ie if it is s0..s7)
+ */
+static inline bool vfp_sreg_is_scalar(int reg)
+{
+    return (reg & 0x18) == 0;
+}
+
+/*
+ * Return true if the specified D reg is in a scalar bank
+ * (ie if it is d0..d3 or d16..d19)
+ */
+static inline bool vfp_dreg_is_scalar(int reg)
+{
+    return (reg & 0xc) == 0;
+}
+
+/*
+ * Advance the S reg number forwards by delta within its bank
+ * (ie increment the low 3 bits but leave the rest the same)
+ */
+static inline int vfp_advance_sreg(int reg, int delta)
+{
+    return ((reg + delta) & 0x7) | (reg & ~0x7);
+}
+
+/*
+ * Advance the D reg number forwards by delta within its bank
+ * (ie increment the low 2 bits but leave the rest the same)
+ */
+static inline int vfp_advance_dreg(int reg, int delta)
+{
+    return ((reg + delta) & 0x3) | (reg & ~0x3);
+}
+
 /*
  * Perform a 3-operand VFP data processing instruction. fn is the
  * callback to do the actual operation; this function deals with the
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
 {
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i32 f0, f1, fd;
     TCGv_ptr fpst;
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
     }
 
     if (veclen > 0) {
-        bank_mask = 0x18;
-
         /* Figure out what type of vector operation this is.  */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = s->vec_stride + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_sreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
+        vn = vfp_advance_sreg(vn, delta_d);
         neon_load_reg32(f0, vn);
         if (delta_m) {
-            vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+            vm = vfp_advance_sreg(vm, delta_m);
             neon_load_reg32(f1, vm);
         }
     }
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
 {
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 f0, f1, fd;
     TCGv_ptr fpst;
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
     }
 
     if (veclen > 0) {
-        bank_mask = 0xc;
-
         /* Figure out what type of vector operation this is.  */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = (s->vec_stride >> 1) + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_dreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
         }
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
+        vn = vfp_advance_dreg(vn, delta_d);
         neon_load_reg64(f0, vn);
         if (delta_m) {
-            vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+            vm = vfp_advance_dreg(vm, delta_m);
             neon_load_reg64(f1, vm);
         }
     }
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
 {
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i32 f0, fd;
 
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
     }
 
     if (veclen > 0) {
-        bank_mask = 0x18;
-
         /* Figure out what type of vector operation this is.  */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = s->vec_stride + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_sreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
         if (delta_m == 0) {
             /* single source one-many */
             while (veclen--) {
-                vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+                vd = vfp_advance_sreg(vd, delta_d);
                 neon_store_reg32(fd, vd);
             }
             break;
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
+        vm = vfp_advance_sreg(vm, delta_m);
         neon_load_reg32(f0, vm);
     }
 
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
 {
     uint32_t delta_m = 0;
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 f0, fd;
 
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
     }
 
     if (veclen > 0) {
-        bank_mask = 0xc;
-
         /* Figure out what type of vector operation this is.  */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
             delta_d = (s->vec_stride >> 1) + 1;
 
-            if ((vm & bank_mask) == 0) {
+            if (vfp_dreg_is_scalar(vm)) {
                 /* mixed scalar/vector */
                 delta_m = 0;
             } else {
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
         if (delta_m == 0) {
             /* single source one-many */
             while (veclen--) {
-                vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+                vd = vfp_advance_dreg(vd, delta_d);
                 neon_store_reg64(fd, vd);
             }
             break;
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
-        vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
+        vm = vfp_advance_dreg(vm, delta_m);
         neon_load_reg64(f0, vm);
     }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_dp(DisasContext *s, arg_VFM_sp *a)
 static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
 {
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i32 fd;
     uint32_t n, i, vd;
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
     }
 
     if (veclen > 0) {
-        bank_mask = 0x18;
         /* Figure out what type of vector operation this is.  */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_sreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vd = vfp_advance_sreg(vd, delta_d);
     }
 
     tcg_temp_free_i32(fd);
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
 static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
 {
     uint32_t delta_d = 0;
-    uint32_t bank_mask = 0;
     int veclen = s->vec_len;
     TCGv_i64 fd;
     uint32_t n, i, vd;
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
     }
 
     if (veclen > 0) {
-        bank_mask = 0xc;
         /* Figure out what type of vector operation this is.  */
-        if ((vd & bank_mask) == 0) {
+        if (vfp_dreg_is_scalar(vd)) {
             /* scalar */
             veclen = 0;
         } else {
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
 
         /* Set up the operands for the next iteration */
         veclen--;
-        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vd = vfp_advance_dreg(vd, delta_d);
     }
 
     tcg_temp_free_i64(fd);
-- 
2.20.1


From: Guenter Roeck <linux@roeck-us.net>

USB1 and USB2 interrupt numbers were swapped. USB_PHY2 interrupt number
is 45. That didn't really matter up to now since the interrupts were not
used, but it needs to be fixed to be able to wire up the USB controllers.

Fixes: 31cbf933f0e ("i.MX6UL: Add i.MX6UL SOC")
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Message-id: 20200313014551.12554-3-linux@roeck-us.net
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/fsl-imx6ul.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/fsl-imx6ul.h
+++ b/include/hw/arm/fsl-imx6ul.h
@@ -XXX,XX +XXX,XX @@ enum FslIMX6ULIRQs {
     FSL_IMX6UL_UART7_IRQ = 39,
     FSL_IMX6UL_UART8_IRQ = 40,
 
-    FSL_IMX6UL_USB1_IRQ = 42,
-    FSL_IMX6UL_USB2_IRQ = 43,
+    FSL_IMX6UL_USB1_IRQ = 43,
+    FSL_IMX6UL_USB2_IRQ = 42,
     FSL_IMX6UL_USB_PHY1_IRQ = 44,
-    FSL_IMX6UL_USB_PHY2_IRQ = 44,
+    FSL_IMX6UL_USB_PHY2_IRQ = 45,
 
     FSL_IMX6UL_CAAM_JQ2_IRQ = 46,
     FSL_IMX6UL_CAAM_ERR_IRQ = 47,
-- 
2.20.1
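A quick standalone check of the arithmetic described in the short-vector fix
above (illustrative only, not part of the patch): the old expression and the
new helper can be compared directly for the d6/stride-2 case from the commit
message.

    #include <stdio.h>

    /* Old (buggy) update: only correct when bank_mask has a single set bit. */
    static int advance_old(int reg, int delta, int bank_mask)
    {
        return ((reg + delta) & (bank_mask - 1)) | (reg & bank_mask);
    }

    /* New helper for D registers: wrap the low 2 bits, keep the bank bits. */
    static int vfp_advance_dreg(int reg, int delta)
    {
        return ((reg + delta) & 0x3) | (reg & ~0x3);
    }

    int main(void)
    {
        /* d6 with stride 2: the next register in the d4..d7 bank is d4. */
        printf("old: d%d\n", advance_old(6, 2, 0xc));  /* prints d12 (wrong) */
        printf("new: d%d\n", vfp_advance_dreg(6, 2));  /* prints d4 (right) */
        return 0;
    }

With bank_mask = 0xc, (6 + 2) & 0xb is 8 and 6 & 0xc is 4, so the old code
produces 12; the new helper masks with 0x3 and ~0x3 and correctly wraps to 4.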
From: Richard Henderson <richard.henderson@linaro.org>

The ARM pseudocode installs the error_code into the original
pointer, not the encrypted pointer. The difference applies
within the 7 bits of pac data; the result should be the sign
extension of bit 55.

Add a testcase to that effect.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/tcg/aarch64/Makefile.target |  2 +-
 target/arm/pauth_helper.c         |  4 +-
 tests/tcg/aarch64/pauth-2.c       | 61 +++++++++++++++++++++++++++++++
 3 files changed, 64 insertions(+), 3 deletions(-)
 create mode 100644 tests/tcg/aarch64/pauth-2.c

diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -XXX,XX +XXX,XX @@ run-fcvt: fcvt
 	$(call run-test,$<,$(QEMU) $<, "$< on $(TARGET_NAME)")
 	$(call diff-out,$<,$(AARCH64_SRC)/fcvt.ref)
 
-AARCH64_TESTS += pauth-1
+AARCH64_TESTS += pauth-1 pauth-2
 run-pauth-%: QEMU += -cpu max
 
 TESTS:=$(AARCH64_TESTS)
diff --git a/target/arm/pauth_helper.c b/target/arm/pauth_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/pauth_helper.c
+++ b/target/arm/pauth_helper.c
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
     if (unlikely(extract64(test, bot_bit, top_bit - bot_bit))) {
         int error_code = (keynumber << 1) | (keynumber ^ 1);
         if (param.tbi) {
-            return deposit64(ptr, 53, 2, error_code);
+            return deposit64(orig_ptr, 53, 2, error_code);
         } else {
-            return deposit64(ptr, 61, 2, error_code);
+            return deposit64(orig_ptr, 61, 2, error_code);
         }
     }
     return orig_ptr;
diff --git a/tests/tcg/aarch64/pauth-2.c b/tests/tcg/aarch64/pauth-2.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/aarch64/pauth-2.c
@@ -XXX,XX +XXX,XX @@
+#include <stdint.h>
+#include <assert.h>
+
+asm(".arch armv8.4-a");
+
+void do_test(uint64_t value)
+{
+    uint64_t salt1, salt2;
+    uint64_t encode, decode;
+
+    /*
+     * With TBI enabled and a 48-bit VA, there are 7 bits of auth,
+     * and so a 1/128 chance of encode = pac(value, key, salt)
+     * producing an auth that leaves the value unchanged.
+     * Iterate until we find a salt for which encode != value.
+     */
+    for (salt1 = 1; ; salt1++) {
+        asm volatile("pacda %0, %2" : "=r"(encode) : "0"(value), "r"(salt1));
+        if (encode != value) {
+            break;
+        }
+    }
+
+    /* A valid salt must produce a valid authorization.  */
+    asm volatile("autda %0, %2" : "=r"(decode) : "0"(encode), "r"(salt1));
+    assert(decode == value);
+
+    /*
+     * An invalid salt usually fails authorization, but again there
+     * is a chance of choosing another salt that works.
+     * Iterate until we find another salt which does fail.
+     */
+    for (salt2 = salt1 + 1; ; salt2++) {
+        asm volatile("autda %0, %2" : "=r"(decode) : "0"(encode), "r"(salt2));
+        if (decode != value) {
+            break;
+        }
+    }
+
+    /* The VA bits, bit 55, and the TBI bits, should be unchanged.  */
+    assert(((decode ^ value) & 0xff80ffffffffffffull) == 0);
+
+    /*
+     * Bits [54:53] are an error indicator based on the key used;
+     * the DA key above is keynumber 0, so error == 0b01.  Otherwise
+     * bit 55 of the original is sign-extended into the rest of the auth.
+     */
+    if ((value >> 55) & 1) {
+        assert(((decode >> 48) & 0xff) == 0b10111111);
+    } else {
+        assert(((decode >> 48) & 0xff) == 0b00100000);
+    }
+}
+
+int main()
+{
+    do_test(0);
+    do_test(-1);
+    do_test(0xda004acedeadbeefull);
+    return 0;
+}
-- 
2.20.1


From: Guenter Roeck <linux@roeck-us.net>

Recent Linux kernels (post v4.20) crash due to accesses to flexcan
and pwm controllers. Instantiate as unimplemented devices to work
around the problem.

Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Message-id: 20200313014551.12554-4-linux@roeck-us.net
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/fsl-imx6ul.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/fsl-imx6ul.c
+++ b/hw/arm/fsl-imx6ul.c
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
      */
     create_unimplemented_device("sdma", FSL_IMX6UL_SDMA_ADDR, 0x4000);
 
+    /*
+     * PWM
+     */
+    create_unimplemented_device("pwm1", FSL_IMX6UL_PWM1_ADDR, 0x4000);
+    create_unimplemented_device("pwm2", FSL_IMX6UL_PWM2_ADDR, 0x4000);
+    create_unimplemented_device("pwm3", FSL_IMX6UL_PWM3_ADDR, 0x4000);
+    create_unimplemented_device("pwm4", FSL_IMX6UL_PWM4_ADDR, 0x4000);
+
+    /*
+     * CAN
+     */
+    create_unimplemented_device("can1", FSL_IMX6UL_CAN1_ADDR, 0x4000);
+    create_unimplemented_device("can2", FSL_IMX6UL_CAN2_ADDR, 0x4000);
+
     /*
      * APHB_DMA
      */
-- 
2.20.1
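For readers working through the pauth-2 assertions above, the expected values
0b10111111 and 0b00100000 follow mechanically from the rules in the commit
message. A small illustrative helper (not part of the patch) that computes the
byte at bits [55:48] after a failed AUTDA:

    #include <stdio.h>

    /*
     * Bits 54:53 hold the error code for the key (DA is keynumber 0, so
     * (0 << 1) | (0 ^ 1) == 0b01); the remaining auth bits 52:48 are the
     * sign extension of bit 55, and bit 55 itself is unchanged.
     */
    static unsigned expected_auth_byte(int bit55, int keynumber)
    {
        unsigned error_code = (keynumber << 1) | (keynumber ^ 1);
        unsigned sign_ext = bit55 ? 0x1f : 0x00;   /* bits 52:48 */
        return (bit55 << 7) | (error_code << 5) | sign_ext;
    }

    int main(void)
    {
        printf("bit55=1: 0x%02x\n", expected_auth_byte(1, 0)); /* 0xbf = 0b10111111 */
        printf("bit55=0: 0x%02x\n", expected_auth_byte(0, 0)); /* 0x20 = 0b00100000 */
        return 0;
    }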
1
Convert the float-to-integer VCVT instructions to decodetree.
1
From: Guenter Roeck <linux@roeck-us.net>
2
Since these are the last unconverted instructions, we can
3
delete the old decoder structure entirely now.
4
2
3
IMX6UL USB controllers are quite similar to IMX7 USB controllers.
4
Wire them up the same way.
5
6
The only real difference is that wiring up phy devices is necessary
7
to avoid phy reset timeouts in the Linux kernel.
8
9
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
10
Message-id: 20200313014551.12554-5-linux@roeck-us.net
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
---
13
---
8
target/arm/translate-vfp.inc.c | 72 ++++++++++
14
include/hw/arm/fsl-imx6ul.h | 10 ++++++++++
9
target/arm/translate.c | 241 +--------------------------------
15
hw/arm/fsl-imx6ul.c | 35 +++++++++++++++++++++++++++++++++++
10
target/arm/vfp.decode | 6 +
16
2 files changed, 45 insertions(+)
11
3 files changed, 80 insertions(+), 239 deletions(-)
12
17
13
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
18
diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h
14
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-vfp.inc.c
20
--- a/include/hw/arm/fsl-imx6ul.h
16
+++ b/target/arm/translate-vfp.inc.c
21
+++ b/include/hw/arm/fsl-imx6ul.h
17
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
22
@@ -XXX,XX +XXX,XX @@
18
tcg_temp_free_ptr(fpst);
23
#include "hw/sd/sdhci.h"
19
return true;
24
#include "hw/ssi/imx_spi.h"
20
}
25
#include "hw/net/imx_fec.h"
21
+
26
+#include "hw/usb/chipidea.h"
22
+static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
27
+#include "hw/usb/imx-usb-phy.h"
23
+{
28
#include "exec/memory.h"
24
+ TCGv_i32 vm;
29
#include "cpu.h"
25
+ TCGv_ptr fpst;
30
26
+
31
@@ -XXX,XX +XXX,XX @@ enum FslIMX6ULConfiguration {
27
+ if (!vfp_access_check(s)) {
32
FSL_IMX6UL_NUM_I2CS = 4,
28
+ return true;
33
FSL_IMX6UL_NUM_ECSPIS = 4,
34
FSL_IMX6UL_NUM_ADCS = 2,
35
+ FSL_IMX6UL_NUM_USB_PHYS = 2,
36
+ FSL_IMX6UL_NUM_USBS = 2,
37
};
38
39
typedef struct FslIMX6ULState {
40
@@ -XXX,XX +XXX,XX @@ typedef struct FslIMX6ULState {
41
IMXFECState eth[FSL_IMX6UL_NUM_ETHS];
42
SDHCIState usdhc[FSL_IMX6UL_NUM_USDHCS];
43
IMX2WdtState wdt[FSL_IMX6UL_NUM_WDTS];
44
+ IMXUSBPHYState usbphy[FSL_IMX6UL_NUM_USB_PHYS];
45
+ ChipideaState usb[FSL_IMX6UL_NUM_USBS];
46
MemoryRegion rom;
47
MemoryRegion caam;
48
MemoryRegion ocram;
49
@@ -XXX,XX +XXX,XX @@ enum FslIMX6ULMemoryMap {
50
FSL_IMX6UL_EPIT2_ADDR = 0x020D4000,
51
FSL_IMX6UL_EPIT1_ADDR = 0x020D0000,
52
FSL_IMX6UL_SNVS_HP_ADDR = 0x020CC000,
53
+ FSL_IMX6UL_USBPHY2_ADDR = 0x020CA000,
54
+ FSL_IMX6UL_USBPHY2_SIZE = (4 * 1024),
55
+ FSL_IMX6UL_USBPHY1_ADDR = 0x020C9000,
56
+ FSL_IMX6UL_USBPHY1_SIZE = (4 * 1024),
57
FSL_IMX6UL_ANALOG_ADDR = 0x020C8000,
58
FSL_IMX6UL_CCM_ADDR = 0x020C4000,
59
FSL_IMX6UL_WDOG2_ADDR = 0x020C0000,
60
diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/hw/arm/fsl-imx6ul.c
63
+++ b/hw/arm/fsl-imx6ul.c
64
@@ -XXX,XX +XXX,XX @@
65
#include "qapi/error.h"
66
#include "hw/arm/fsl-imx6ul.h"
67
#include "hw/misc/unimp.h"
68
+#include "hw/usb/imx-usb-phy.h"
69
#include "hw/boards.h"
70
#include "sysemu/sysemu.h"
71
#include "qemu/error-report.h"
72
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_init(Object *obj)
73
TYPE_IMX_ENET);
74
}
75
76
+ /* USB */
77
+ for (i = 0; i < FSL_IMX6UL_NUM_USB_PHYS; i++) {
78
+ snprintf(name, NAME_SIZE, "usbphy%d", i);
79
+ sysbus_init_child_obj(obj, name, &s->usbphy[i], sizeof(s->usbphy[i]),
80
+ TYPE_IMX_USBPHY);
81
+ }
82
+ for (i = 0; i < FSL_IMX6UL_NUM_USBS; i++) {
83
+ snprintf(name, NAME_SIZE, "usb%d", i);
84
+ sysbus_init_child_obj(obj, name, &s->usb[i], sizeof(s->usb[i]),
85
+ TYPE_CHIPIDEA);
29
+ }
86
+ }
30
+
87
+
31
+ fpst = get_fpstatus_ptr(false);
88
/*
32
+ vm = tcg_temp_new_i32();
89
* SDHCI
33
+ neon_load_reg32(vm, a->vm);
90
*/
34
+
91
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
35
+ if (a->s) {
92
FSL_IMX6UL_ENETn_TIMER_IRQ[i]));
36
+ if (a->rz) {
93
}
37
+ gen_helper_vfp_tosizs(vm, vm, fpst);
94
38
+ } else {
95
+ /* USB */
39
+ gen_helper_vfp_tosis(vm, vm, fpst);
96
+ for (i = 0; i < FSL_IMX6UL_NUM_USB_PHYS; i++) {
40
+ }
97
+ object_property_set_bool(OBJECT(&s->usbphy[i]), true, "realized",
41
+ } else {
98
+ &error_abort);
42
+ if (a->rz) {
99
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->usbphy[i]), 0,
43
+ gen_helper_vfp_touizs(vm, vm, fpst);
100
+ FSL_IMX6UL_USBPHY1_ADDR + i * 0x1000);
44
+ } else {
45
+ gen_helper_vfp_touis(vm, vm, fpst);
46
+ }
47
+ }
48
+ neon_store_reg32(vm, a->vd);
49
+ tcg_temp_free_i32(vm);
50
+ tcg_temp_free_ptr(fpst);
51
+ return true;
52
+}
53
+
54
+static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
55
+{
56
+ TCGv_i32 vd;
57
+ TCGv_i64 vm;
58
+ TCGv_ptr fpst;
59
+
60
+ /* UNDEF accesses to D16-D31 if they don't exist. */
61
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
62
+ return false;
63
+ }
101
+ }
64
+
102
+
65
+ if (!vfp_access_check(s)) {
103
+ for (i = 0; i < FSL_IMX6UL_NUM_USBS; i++) {
66
+ return true;
104
+ static const int FSL_IMX6UL_USBn_IRQ[] = {
105
+ FSL_IMX6UL_USB1_IRQ,
106
+ FSL_IMX6UL_USB2_IRQ,
107
+ };
108
+ object_property_set_bool(OBJECT(&s->usb[i]), true, "realized",
109
+ &error_abort);
110
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0,
111
+ FSL_IMX6UL_USBO2_USB_ADDR + i * 0x200);
112
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0,
113
+ qdev_get_gpio_in(DEVICE(&s->a7mpcore),
114
+ FSL_IMX6UL_USBn_IRQ[i]));
67
+ }
115
+ }
68
+
116
+
69
+ fpst = get_fpstatus_ptr(false);
117
/*
70
+ vm = tcg_temp_new_i64();
118
* USDHC
71
+ vd = tcg_temp_new_i32();
119
*/
72
+ neon_load_reg64(vm, a->vm);
73
+
74
+ if (a->s) {
75
+ if (a->rz) {
76
+ gen_helper_vfp_tosizd(vd, vm, fpst);
77
+ } else {
78
+ gen_helper_vfp_tosid(vd, vm, fpst);
79
+ }
80
+ } else {
81
+ if (a->rz) {
82
+ gen_helper_vfp_touizd(vd, vm, fpst);
83
+ } else {
84
+ gen_helper_vfp_touid(vd, vm, fpst);
85
+ }
86
+ }
87
+ neon_store_reg32(vd, a->vd);
88
+ tcg_temp_free_i32(vd);
89
+ tcg_temp_free_i64(vm);
90
+ tcg_temp_free_ptr(fpst);
91
+ return true;
92
+}
93
diff --git a/target/arm/translate.c b/target/arm/translate.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/arm/translate.c
96
+++ b/target/arm/translate.c
97
@@ -XXX,XX +XXX,XX @@ static inline void gen_vfp_##name(int dp, int neon) \
98
tcg_temp_free_ptr(statusptr); \
99
}
100
101
-VFP_GEN_FTOI(toui)
102
VFP_GEN_FTOI(touiz)
103
-VFP_GEN_FTOI(tosi)
104
VFP_GEN_FTOI(tosiz)
105
#undef VFP_GEN_FTOI
106
107
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
108
}
109
110
#define tcg_gen_ld_f32 tcg_gen_ld_i32
111
-#define tcg_gen_ld_f64 tcg_gen_ld_i64
112
#define tcg_gen_st_f32 tcg_gen_st_i32
113
-#define tcg_gen_st_f64 tcg_gen_st_i64
114
-
115
-static inline void gen_mov_F0_vreg(int dp, int reg)
116
-{
117
- if (dp)
118
- tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
119
- else
120
- tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
121
-}
122
-
123
-static inline void gen_mov_F1_vreg(int dp, int reg)
124
-{
125
- if (dp)
126
- tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
127
- else
128
- tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
129
-}
130
-
131
-static inline void gen_mov_vreg_F0(int dp, int reg)
132
-{
133
- if (dp)
134
- tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
135
- else
136
- tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
137
-}
138
139
#define ARM_CP_RW_BIT (1 << 20)
140
141
@@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var)
142
*/
143
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
144
{
145
- uint32_t rd, rn, rm, op, delta_d, delta_m, bank_mask;
146
- int dp, veclen;
147
-
148
if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
149
return 1;
150
}
151
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
152
return 0;
153
}
154
}
155
-
156
- if (extract32(insn, 28, 4) == 0xf) {
157
- /*
158
- * Encodings with T=1 (Thumb) or unconditional (ARM): these
159
- * were all handled by the decodetree decoder, so any insn
160
- * patterns which get here must be UNDEF.
161
- */
162
- return 1;
163
- }
164
-
165
- /*
166
- * FIXME: this access check should not take precedence over UNDEF
167
- * for invalid encodings; we will generate incorrect syndrome information
168
- * for attempts to execute invalid vfp/neon encodings with FP disabled.
169
- */
170
- if (!vfp_access_check(s)) {
171
- return 0;
172
- }
173
-
174
- dp = ((insn & 0xf00) == 0xb00);
175
- switch ((insn >> 24) & 0xf) {
176
- case 0xe:
177
- if (insn & (1 << 4)) {
178
- /* already handled by decodetree */
179
- return 1;
180
- } else {
181
- /* data processing */
182
- bool rd_is_dp = dp;
183
- bool rm_is_dp = dp;
184
- bool no_output = false;
185
-
186
- /* The opcode is in bits 23, 21, 20 and 6. */
187
- op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
188
- rn = VFP_SREG_N(insn);
189
-
190
- switch (op) {
191
- case 0 ... 14:
192
- /* Already handled by decodetree */
193
- return 1;
194
- case 15:
195
- switch (rn) {
196
- case 0 ... 23:
197
- case 28 ... 31:
198
- /* Already handled by decodetree */
199
- return 1;
200
- default:
201
- break;
202
- }
203
- default:
204
- break;
205
- }
206
-
207
- if (op == 15) {
208
- /* rn is opcode, encoded as per VFP_SREG_N. */
209
- switch (rn) {
210
- case 0x18: /* vcvtr.u32.fxx */
211
- case 0x19: /* vcvtz.u32.fxx */
212
- case 0x1a: /* vcvtr.s32.fxx */
213
- case 0x1b: /* vcvtz.s32.fxx */
214
- rd_is_dp = false;
215
- break;
216
-
217
- default:
218
- return 1;
219
- }
220
- } else if (dp) {
221
- /* rn is register number */
222
- VFP_DREG_N(rn, insn);
223
- }
224
-
225
- if (rd_is_dp) {
226
- VFP_DREG_D(rd, insn);
227
- } else {
228
- rd = VFP_SREG_D(insn);
229
- }
230
- if (rm_is_dp) {
231
- VFP_DREG_M(rm, insn);
232
- } else {
233
- rm = VFP_SREG_M(insn);
234
- }
235
-
236
- veclen = s->vec_len;
237
- if (op == 15 && rn > 3) {
238
- veclen = 0;
239
- }
240
-
241
- /* Shut up compiler warnings. */
242
- delta_m = 0;
243
- delta_d = 0;
244
- bank_mask = 0;
245
-
246
- if (veclen > 0) {
247
- if (dp)
248
- bank_mask = 0xc;
249
- else
250
- bank_mask = 0x18;
251
-
252
- /* Figure out what type of vector operation this is. */
253
- if ((rd & bank_mask) == 0) {
254
- /* scalar */
255
- veclen = 0;
256
- } else {
257
- if (dp)
258
- delta_d = (s->vec_stride >> 1) + 1;
259
- else
260
- delta_d = s->vec_stride + 1;
261
-
262
- if ((rm & bank_mask) == 0) {
263
- /* mixed scalar/vector */
264
- delta_m = 0;
265
- } else {
266
- /* vector */
267
- delta_m = delta_d;
268
- }
269
- }
270
- }
271
-
272
- /* Load the initial operands. */
273
- if (op == 15) {
274
- switch (rn) {
275
- default:
276
- /* One source operand. */
277
- gen_mov_F0_vreg(rm_is_dp, rm);
278
- break;
279
- }
280
- } else {
281
- /* Two source operands. */
282
- gen_mov_F0_vreg(dp, rn);
283
- gen_mov_F1_vreg(dp, rm);
284
- }
285
-
286
- for (;;) {
287
- /* Perform the calculation. */
288
- switch (op) {
289
- case 15: /* extension space */
290
- switch (rn) {
291
- case 24: /* ftoui */
292
- gen_vfp_toui(dp, 0);
293
- break;
294
- case 25: /* ftouiz */
295
- gen_vfp_touiz(dp, 0);
296
- break;
297
- case 26: /* ftosi */
298
- gen_vfp_tosi(dp, 0);
299
- break;
300
- case 27: /* ftosiz */
301
- gen_vfp_tosiz(dp, 0);
302
- break;
303
- default: /* undefined */
304
- g_assert_not_reached();
305
- }
306
- break;
307
- default: /* undefined */
308
- return 1;
309
- }
310
-
311
- /* Write back the result, if any. */
312
- if (!no_output) {
313
- gen_mov_vreg_F0(rd_is_dp, rd);
314
- }
315
-
316
- /* break out of the loop if we have finished */
317
- if (veclen == 0) {
318
- break;
319
- }
320
-
321
- if (op == 15 && delta_m == 0) {
322
- /* single source one-many */
323
- while (veclen--) {
324
- rd = ((rd + delta_d) & (bank_mask - 1))
325
- | (rd & bank_mask);
326
- gen_mov_vreg_F0(dp, rd);
327
- }
328
- break;
329
- }
330
- /* Setup the next operands. */
331
- veclen--;
332
- rd = ((rd + delta_d) & (bank_mask - 1))
333
- | (rd & bank_mask);
334
-
335
- if (op == 15) {
336
- /* One source operand. */
337
- rm = ((rm + delta_m) & (bank_mask - 1))
338
- | (rm & bank_mask);
339
- gen_mov_F0_vreg(dp, rm);
340
- } else {
341
- /* Two source operands. */
342
- rn = ((rn + delta_d) & (bank_mask - 1))
343
- | (rn & bank_mask);
344
- gen_mov_F0_vreg(dp, rn);
345
- if (delta_m) {
346
- rm = ((rm + delta_m) & (bank_mask - 1))
347
- | (rm & bank_mask);
348
- gen_mov_F1_vreg(dp, rm);
349
- }
350
- }
351
- }
352
- }
353
- break;
354
- case 0xc:
355
- case 0xd:
356
- /* Already handled by decodetree */
357
- return 1;
358
- default:
359
- /* Should never happen. */
360
- return 1;
361
- }
362
- return 0;
363
+ /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
364
+ return 1;
365
}
366
367
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
368
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VCVT_fix_sp ---- 1110 1.11 1.1. .... 1010 .1.0 .... \
              vd=%vd_sp imm=%vm_sp opc=%vcvt_fix_op
 VCVT_fix_dp ---- 1110 1.11 1.1. .... 1011 .1.0 .... \
              vd=%vd_dp imm=%vm_sp opc=%vcvt_fix_op
+
+# VCVT float to integer (VCVT and VCVTR): Vd always single; Vm depends on size
+VCVT_sp_int ---- 1110 1.11 110 s:1 .... 1010 rz:1 1.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VCVT_dp_int ---- 1110 1.11 110 s:1 .... 1011 rz:1 1.0 .... \
+             vd=%vd_sp vm=%vm_dp
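The new s and rz fields map directly onto the four "extension space" opcodes
removed from translate.c above (24/25/26/27, i.e. ftoui/ftouiz/ftosi/ftosiz).
A hedged sketch of the correspondence (helper name hypothetical, not part of
the patch):

    /* Sketch: s selects signed, rz selects round-to-zero (VCVT vs VCVTR). */
    static int vcvt_legacy_rn(int s, int rz)
    {
        return 24 + (s ? 2 : 0) + (rz ? 1 : 0);    /* 24..27 */
    }

So s=1, rz=1 yields 27, the old ftosiz case.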
--
2.20.1
Convert the "double-precision" register moves to decodetree:
this covers VMOV scalar-to-gpreg, VMOV gpreg-to-scalar and VDUP.

Note that the conversion process has tightened up a few of the
UNDEF encoding checks: we now correctly forbid:
 * VMOV-to-gpr with U:opc1:opc2 == 10x00 or x0x10
 * VMOV-from-gpr with opc1:opc2 == 0x10
 * VDUP with B:E == 11
 * VDUP with Q == 1 and Vn<0> == 1

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
The accesses of elements < 32 bits could be improved by doing
direct ld/st of the right size rather than 32-bit read-and-shift
or read-modify-write, but we leave this for later cleanup,
since this series is generally trying to stick to fixing
the decode.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 147 +++++++++++++++++++++++++++++++++
 target/arm/translate.c | 83 +------------------
 target/arm/vfp.decode | 36 ++++++++
 3 files changed, 185 insertions(+), 81 deletions(-)

From: Guenter Roeck <linux@roeck-us.net>

With this patch, the USB controllers on 'sabrelite' are detected
and can be used to boot the system.

Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Message-id: 20200313014551.12554-6-linux@roeck-us.net
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/fsl-imx6.h | 6 ++++++
 hw/arm/fsl-imx6.c | 36 ++++++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)
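The element addressing used by the new trans_VMOV_to_gp()/trans_VMOV_from_gp()
functions below is compact; here is a hedged sketch of the same arithmetic
(function name hypothetical, logic mirroring the extract32() sequence in the
patch):

    /*
     * Sketch: how (index, size) select a scalar inside a 64-bit Dn.
     * size: 0 = 8-bit, 1 = 16-bit, 2 = 32-bit element; "pass" picks
     * the 32-bit half of the D register, as in the Neon load/store helpers.
     */
    static void vmov_scalar_position(int index, int size,
                                     int *pass, int *bit_offset)
    {
        int byte_offset = index << size;     /* byte offset into Dn */
        *pass = (byte_offset >> 2) & 1;      /* which 32-bit half */
        *bit_offset = (byte_offset & 3) * 8; /* bit offset in that half */
    }

For example, a 16-bit scalar at index 3 gives pass = 1 and bit offset 16,
which is exactly what the shift-and-extend paths in the patch operate on.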
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
15
diff --git a/include/hw/arm/fsl-imx6.h b/include/hw/arm/fsl-imx6.h
26
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/translate-vfp.inc.c
17
--- a/include/hw/arm/fsl-imx6.h
28
+++ b/target/arm/translate-vfp.inc.c
18
+++ b/include/hw/arm/fsl-imx6.h
29
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
19
@@ -XXX,XX +XXX,XX @@
30
20
#include "hw/sd/sdhci.h"
31
return true;
21
#include "hw/ssi/imx_spi.h"
32
}
22
#include "hw/net/imx_fec.h"
33
+
23
+#include "hw/usb/chipidea.h"
34
+static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
24
+#include "hw/usb/imx-usb-phy.h"
35
+{
25
#include "exec/memory.h"
36
+ /* VMOV scalar to general purpose register */
26
#include "cpu.h"
37
+ TCGv_i32 tmp;
27
38
+ int pass;
28
@@ -XXX,XX +XXX,XX @@
39
+ uint32_t offset;
29
#define FSL_IMX6_NUM_ESDHCS 4
40
+
30
#define FSL_IMX6_NUM_ECSPIS 5
41
+ /* UNDEF accesses to D16-D31 if they don't exist */
31
#define FSL_IMX6_NUM_WDTS 2
42
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
32
+#define FSL_IMX6_NUM_USB_PHYS 2
43
+ return false;
33
+#define FSL_IMX6_NUM_USBS 4
34
35
typedef struct FslIMX6State {
36
/*< private >*/
37
@@ -XXX,XX +XXX,XX @@ typedef struct FslIMX6State {
38
SDHCIState esdhc[FSL_IMX6_NUM_ESDHCS];
39
IMXSPIState spi[FSL_IMX6_NUM_ECSPIS];
40
IMX2WdtState wdt[FSL_IMX6_NUM_WDTS];
41
+ IMXUSBPHYState usbphy[FSL_IMX6_NUM_USB_PHYS];
42
+ ChipideaState usb[FSL_IMX6_NUM_USBS];
43
IMXFECState eth;
44
MemoryRegion rom;
45
MemoryRegion caam;
46
diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/hw/arm/fsl-imx6.c
49
+++ b/hw/arm/fsl-imx6.c
50
@@ -XXX,XX +XXX,XX @@
51
#include "qemu/osdep.h"
52
#include "qapi/error.h"
53
#include "hw/arm/fsl-imx6.h"
54
+#include "hw/usb/imx-usb-phy.h"
55
#include "hw/boards.h"
56
#include "hw/qdev-properties.h"
57
#include "sysemu/sysemu.h"
58
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6_init(Object *obj)
59
TYPE_IMX_USDHC);
60
}
61
62
+ for (i = 0; i < FSL_IMX6_NUM_USB_PHYS; i++) {
63
+ snprintf(name, NAME_SIZE, "usbphy%d", i);
64
+ sysbus_init_child_obj(obj, name, &s->usbphy[i], sizeof(s->usbphy[i]),
65
+ TYPE_IMX_USBPHY);
66
+ }
67
+ for (i = 0; i < FSL_IMX6_NUM_USBS; i++) {
68
+ snprintf(name, NAME_SIZE, "usb%d", i);
69
+ sysbus_init_child_obj(obj, name, &s->usb[i], sizeof(s->usb[i]),
70
+ TYPE_CHIPIDEA);
44
+ }
71
+ }
45
+
72
+
46
+ offset = a->index << a->size;
73
for (i = 0; i < FSL_IMX6_NUM_ECSPIS; i++) {
47
+ pass = extract32(offset, 2, 1);
74
snprintf(name, NAME_SIZE, "spi%d", i + 1);
48
+ offset = extract32(offset, 0, 2) * 8;
75
sysbus_init_child_obj(obj, name, &s->spi[i], sizeof(s->spi[i]),
76
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
77
esdhc_table[i].irq));
78
}
79
80
+ /* USB */
81
+ for (i = 0; i < FSL_IMX6_NUM_USB_PHYS; i++) {
82
+ object_property_set_bool(OBJECT(&s->usbphy[i]), true, "realized",
83
+ &error_abort);
84
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->usbphy[i]), 0,
85
+ FSL_IMX6_USBPHY1_ADDR + i * 0x1000);
86
+ }
87
+ for (i = 0; i < FSL_IMX6_NUM_USBS; i++) {
88
+ static const int FSL_IMX6_USBn_IRQ[] = {
89
+ FSL_IMX6_USB_OTG_IRQ,
90
+ FSL_IMX6_USB_HOST1_IRQ,
91
+ FSL_IMX6_USB_HOST2_IRQ,
92
+ FSL_IMX6_USB_HOST3_IRQ,
93
+ };
49
+
94
+
50
+ if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
95
+ object_property_set_bool(OBJECT(&s->usb[i]), true, "realized",
51
+ return false;
96
+ &error_abort);
97
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0,
98
+ FSL_IMX6_USBOH3_USB_ADDR + i * 0x200);
99
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0,
100
+ qdev_get_gpio_in(DEVICE(&s->a9mpcore),
101
+ FSL_IMX6_USBn_IRQ[i]));
52
+ }
102
+ }
53
+
103
+
54
+ if (!vfp_access_check(s)) {
104
/* Initialize all ECSPI */
55
+ return true;
105
for (i = 0; i < FSL_IMX6_NUM_ECSPIS; i++) {
56
+ }
106
static const struct {
57
+
58
+ tmp = neon_load_reg(a->vn, pass);
59
+ switch (a->size) {
60
+ case 0:
61
+ if (offset) {
62
+ tcg_gen_shri_i32(tmp, tmp, offset);
63
+ }
64
+ if (a->u) {
65
+ gen_uxtb(tmp);
66
+ } else {
67
+ gen_sxtb(tmp);
68
+ }
69
+ break;
70
+ case 1:
71
+ if (a->u) {
72
+ if (offset) {
73
+ tcg_gen_shri_i32(tmp, tmp, 16);
74
+ } else {
75
+ gen_uxth(tmp);
76
+ }
77
+ } else {
78
+ if (offset) {
79
+ tcg_gen_sari_i32(tmp, tmp, 16);
80
+ } else {
81
+ gen_sxth(tmp);
82
+ }
83
+ }
84
+ break;
85
+ case 2:
86
+ break;
87
+ }
88
+ store_reg(s, a->rt, tmp);
89
+
90
+ return true;
91
+}
92
+
93
+static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
94
+{
95
+ /* VMOV general purpose register to scalar */
96
+ TCGv_i32 tmp, tmp2;
97
+ int pass;
98
+ uint32_t offset;
99
+
100
+ /* UNDEF accesses to D16-D31 if they don't exist */
101
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
102
+ return false;
103
+ }
104
+
105
+ offset = a->index << a->size;
106
+ pass = extract32(offset, 2, 1);
107
+ offset = extract32(offset, 0, 2) * 8;
108
+
109
+ if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
110
+ return false;
111
+ }
112
+
113
+ if (!vfp_access_check(s)) {
114
+ return true;
115
+ }
116
+
117
+ tmp = load_reg(s, a->rt);
118
+ switch (a->size) {
119
+ case 0:
120
+ tmp2 = neon_load_reg(a->vn, pass);
121
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
122
+ tcg_temp_free_i32(tmp2);
123
+ break;
124
+ case 1:
125
+ tmp2 = neon_load_reg(a->vn, pass);
126
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
127
+ tcg_temp_free_i32(tmp2);
128
+ break;
129
+ case 2:
130
+ break;
131
+ }
132
+ neon_store_reg(a->vn, pass, tmp);
133
+
134
+ return true;
135
+}
136
+
137
+static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
138
+{
139
+ /* VDUP (general purpose register) */
140
+ TCGv_i32 tmp;
141
+ int size, vec_size;
142
+
143
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
144
+ return false;
145
+ }
146
+
147
+ /* UNDEF accesses to D16-D31 if they don't exist */
148
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
149
+ return false;
150
+ }
151
+
152
+ if (a->b && a->e) {
153
+ return false;
154
+ }
155
+
156
+ if (a->q && (a->vn & 1)) {
157
+ return false;
158
+ }
159
+
160
+ vec_size = a->q ? 16 : 8;
161
+ if (a->b) {
162
+ size = 0;
163
+ } else if (a->e) {
164
+ size = 1;
165
+ } else {
166
+ size = 2;
167
+ }
168
+
169
+ if (!vfp_access_check(s)) {
170
+ return true;
171
+ }
172
+
173
+ tmp = load_reg(s, a->rt);
174
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
175
+ vec_size, vec_size, tmp);
176
+ tcg_temp_free_i32(tmp);
177
+
178
+ return true;
179
+}
180
diff --git a/target/arm/translate.c b/target/arm/translate.c
181
index XXXXXXX..XXXXXXX 100644
182
--- a/target/arm/translate.c
183
+++ b/target/arm/translate.c
184
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
185
/* single register transfer */
186
rd = (insn >> 12) & 0xf;
187
if (dp) {
188
- int size;
189
- int pass;
190
-
191
- VFP_DREG_N(rn, insn);
192
- if (insn & 0xf)
193
- return 1;
194
- if (insn & 0x00c00060
195
- && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
196
- return 1;
197
- }
198
-
199
- pass = (insn >> 21) & 1;
200
- if (insn & (1 << 22)) {
201
- size = 0;
202
- offset = ((insn >> 5) & 3) * 8;
203
- } else if (insn & (1 << 5)) {
204
- size = 1;
205
- offset = (insn & (1 << 6)) ? 16 : 0;
206
- } else {
207
- size = 2;
208
- offset = 0;
209
- }
210
- if (insn & ARM_CP_RW_BIT) {
211
- /* vfp->arm */
212
- tmp = neon_load_reg(rn, pass);
213
- switch (size) {
214
- case 0:
215
- if (offset)
216
- tcg_gen_shri_i32(tmp, tmp, offset);
217
- if (insn & (1 << 23))
218
- gen_uxtb(tmp);
219
- else
220
- gen_sxtb(tmp);
221
- break;
222
- case 1:
223
- if (insn & (1 << 23)) {
224
- if (offset) {
225
- tcg_gen_shri_i32(tmp, tmp, 16);
226
- } else {
227
- gen_uxth(tmp);
228
- }
229
- } else {
230
- if (offset) {
231
- tcg_gen_sari_i32(tmp, tmp, 16);
232
- } else {
233
- gen_sxth(tmp);
234
- }
235
- }
236
- break;
237
- case 2:
238
- break;
239
- }
240
- store_reg(s, rd, tmp);
241
- } else {
242
- /* arm->vfp */
243
- tmp = load_reg(s, rd);
244
- if (insn & (1 << 23)) {
245
- /* VDUP */
246
- int vec_size = pass ? 16 : 8;
247
- tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
248
- vec_size, vec_size, tmp);
249
- tcg_temp_free_i32(tmp);
250
- } else {
251
- /* VMOV */
252
- switch (size) {
253
- case 0:
254
- tmp2 = neon_load_reg(rn, pass);
255
- tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
256
- tcg_temp_free_i32(tmp2);
257
- break;
258
- case 1:
259
- tmp2 = neon_load_reg(rn, pass);
260
- tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
261
- tcg_temp_free_i32(tmp2);
262
- break;
263
- case 2:
264
- break;
265
- }
266
- neon_store_reg(rn, pass, tmp);
267
- }
268
- }
269
+ /* already handled by decodetree */
270
+ return 1;
271
} else { /* !dp */
272
bool is_sysreg;
273
274
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@
 # 1110 1110 .... .... .... 101. .... ....
 # (but those patterns might also cover some Neon instructions,
 # which do not live in this file.)
+
+# VFP registers have an odd encoding with a four-bit field
+# and a one-bit field which are assembled in different orders
+# depending on whether the register is double or single precision.
+# Each individual instruction function must do the checks for
+# "double register selected but CPU does not have double support"
+# and "double register number has bit 4 set but CPU does not
+# support D16-D31" (which should UNDEF).
+%vm_dp 5:1 0:4
+%vm_sp 0:4 5:1
+%vn_dp 7:1 16:4
+%vn_sp 16:4 7:1
+%vd_dp 22:1 12:4
+%vd_sp 12:4 22:1
+
+%vmov_idx_b 21:1 5:2
+%vmov_idx_h 21:1 6:1
+
+# VMOV scalar to general-purpose register; note that this does
+# include some Neon cases.
+VMOV_to_gp ---- 1110 u:1 1. 1 .... rt:4 1011 ... 1 0000 \
+             vn=%vn_dp size=0 index=%vmov_idx_b
+VMOV_to_gp ---- 1110 u:1 0. 1 .... rt:4 1011 ..1 1 0000 \
+             vn=%vn_dp size=1 index=%vmov_idx_h
+VMOV_to_gp ---- 1110 0 0 index:1 1 .... rt:4 1011 .00 1 0000 \
+             vn=%vn_dp size=2 u=0
+
+VMOV_from_gp ---- 1110 0 1. 0 .... rt:4 1011 ... 1 0000 \
+             vn=%vn_dp size=0 index=%vmov_idx_b
+VMOV_from_gp ---- 1110 0 0. 0 .... rt:4 1011 ..1 1 0000 \
+             vn=%vn_dp size=1 index=%vmov_idx_h
+VMOV_from_gp ---- 1110 0 0 index:1 0 .... rt:4 1011 .00 1 0000 \
+             vn=%vn_dp size=2
+
+VDUP ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \
+             vn=%vn_dp
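The register field definitions above are easier to read with the bit
arithmetic spelled out. In decodetree the first subfield is the most
significant, so "%vm_dp 5:1 0:4" puts insn[5] on top of insn[3:0]. A hedged
sketch using QEMU's extract32() (function names hypothetical):

    /* Sketch: M:Vm for double precision, Vm:M for single precision. */
    static int vfp_vm_dp(uint32_t insn)
    {
        return (extract32(insn, 5, 1) << 4) | extract32(insn, 0, 4);
    }

    static int vfp_vm_sp(uint32_t insn)
    {
        return (extract32(insn, 0, 4) << 1) | extract32(insn, 5, 1);
    }

So M=1 with Vm=0b0110 names D22 in double precision but S13 in single
precision; %vn_* and %vd_* follow the same scheme with bits 7 and 22.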
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>

These instructions shift left or right depending on the sign
of the input, and 7 bits are significant to the shift. This
requires several masks and selects in addition to the actual
shifts to form the complete answer.

That said, the operation is still a small improvement even for
two 64-bit elements -- 13 vector operations instead of 2 * 7
integer operations.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190603232209.20704-1-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.h | 11 +-
 target/arm/translate.h | 6 +
 target/arm/neon_helper.c | 33 ----
 target/arm/translate-a64.c | 18 +--
 target/arm/translate.c | 300 +++++++++++++++++++++++++++++++++++--
 target/arm/vec_helper.c | 88 +++++++++++
 6 files changed, 390 insertions(+), 66 deletions(-)

From: Chen Qun <kuhn.chenqun@huawei.com>

The current code causes the clang static code analyzer to generate warnings:
hw/net/imx_fec.c:858:9: warning: Value stored to 'value' is never read
        value = value & 0x0000000f;
        ^       ~~~~~~~~~~~~~~~~~~
hw/net/imx_fec.c:864:9: warning: Value stored to 'value' is never read
        value = value & 0x000000fd;
        ^       ~~~~~~~~~~~~~~~~~~

According to the definition of the function, the two “value” assignments
should be written to registers.

Reported-by: Euler Robot <euler.robot@huawei.com>
Signed-off-by: Chen Qun <kuhn.chenqun@huawei.com>
Message-id: 20200313123242.13236-1-kuhn.chenqun@huawei.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/net/imx_fec.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
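The USHL/SSHL semantics are easier to see as a scalar model. A hedged sketch
for 8-bit elements, mirroring the gvec_ushl_b/gvec_sshl_b helpers added by
the patch below (the shift count is the low byte of each element of the
second operand, interpreted as signed; negative means shift right):

    /* Sketch: per-element USHL for 8-bit elements. */
    static uint8_t ushl8_model(uint8_t nn, int8_t mm)
    {
        if (mm >= 8 || mm <= -8) {
            return 0;                   /* everything shifted out */
        }
        return mm >= 0 ? nn << mm : nn >> -mm;
    }

    /* Sketch: per-element SSHL for 8-bit elements. */
    static int8_t sshl8_model(int8_t nn, int8_t mm)
    {
        if (mm >= 8) {
            return 0;                   /* left shift of 8+ clears the lane */
        }
        if (mm >= 0) {
            return nn << mm;
        }
        /* arithmetic right shifts bottom out at 7: only sign bits remain */
        return nn >> (mm > -8 ? -mm : 7);
    }

For example, ushl8_model(0x80, -1) is 0x40, while sshl8_model(-128, -1) is
-64 because the right shift is arithmetic.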
diff --git a/target/arm/helper.h b/target/arm/helper.h
23
diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
26
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/helper.h
25
--- a/hw/net/imx_fec.c
28
+++ b/target/arm/helper.h
26
+++ b/hw/net/imx_fec.c
29
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(neon_abd_s16, i32, i32, i32)
27
@@ -XXX,XX +XXX,XX @@ static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
30
DEF_HELPER_2(neon_abd_u32, i32, i32, i32)
31
DEF_HELPER_2(neon_abd_s32, i32, i32, i32)
32
33
-DEF_HELPER_2(neon_shl_u8, i32, i32, i32)
34
-DEF_HELPER_2(neon_shl_s8, i32, i32, i32)
35
DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
36
DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
37
-DEF_HELPER_2(neon_shl_u32, i32, i32, i32)
38
-DEF_HELPER_2(neon_shl_s32, i32, i32, i32)
39
-DEF_HELPER_2(neon_shl_u64, i64, i64, i64)
40
-DEF_HELPER_2(neon_shl_s64, i64, i64, i64)
41
DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
42
DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
43
DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
44
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, ptr)
45
DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, ptr)
46
DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, ptr)
47
48
+DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
49
+DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
50
+DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
51
+DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
52
+
53
#ifdef TARGET_AARCH64
54
#include "helper-a64.h"
55
#include "helper-sve.h"
56
diff --git a/target/arm/translate.h b/target/arm/translate.h
57
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/translate.h
59
+++ b/target/arm/translate.h
60
@@ -XXX,XX +XXX,XX @@ extern const GVecGen3 bif_op;
61
extern const GVecGen3 mla_op[4];
62
extern const GVecGen3 mls_op[4];
63
extern const GVecGen3 cmtst_op[4];
64
+extern const GVecGen3 sshl_op[4];
65
+extern const GVecGen3 ushl_op[4];
66
extern const GVecGen2i ssra_op[4];
67
extern const GVecGen2i usra_op[4];
68
extern const GVecGen2i sri_op[4];
69
@@ -XXX,XX +XXX,XX @@ extern const GVecGen4 sqadd_op[4];
70
extern const GVecGen4 uqsub_op[4];
71
extern const GVecGen4 sqsub_op[4];
72
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
73
+void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
74
+void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
75
+void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
76
+void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
77
78
/*
79
* Forward to the isar_feature_* tests given a DisasContext pointer.
80
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/target/arm/neon_helper.c
83
+++ b/target/arm/neon_helper.c
84
@@ -XXX,XX +XXX,XX @@ NEON_VOP(abd_u32, neon_u32, 1)
85
} else { \
86
dest = src1 << tmp; \
87
}} while (0)
88
-NEON_VOP(shl_u8, neon_u8, 4)
89
NEON_VOP(shl_u16, neon_u16, 2)
90
-NEON_VOP(shl_u32, neon_u32, 1)
91
#undef NEON_FN
92
93
-uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
94
-{
95
- int8_t shift = (int8_t)shiftop;
96
- if (shift >= 64 || shift <= -64) {
97
- val = 0;
98
- } else if (shift < 0) {
99
- val >>= -shift;
100
- } else {
101
- val <<= shift;
102
- }
103
- return val;
104
-}
105
-
106
#define NEON_FN(dest, src1, src2) do { \
107
int8_t tmp; \
108
tmp = (int8_t)src2; \
109
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
110
} else { \
111
dest = src1 << tmp; \
112
}} while (0)
113
-NEON_VOP(shl_s8, neon_s8, 4)
114
NEON_VOP(shl_s16, neon_s16, 2)
115
-NEON_VOP(shl_s32, neon_s32, 1)
116
#undef NEON_FN
117
118
-uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
119
-{
120
- int8_t shift = (int8_t)shiftop;
121
- int64_t val = valop;
122
- if (shift >= 64) {
123
- val = 0;
124
- } else if (shift <= -64) {
125
- val >>= 63;
126
- } else if (shift < 0) {
127
- val >>= -shift;
128
- } else {
129
- val <<= shift;
130
- }
131
- return val;
132
-}
133
-
134
#define NEON_FN(dest, src1, src2) do { \
135
int8_t tmp; \
136
tmp = (int8_t)src2; \
137
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
138
index XXXXXXX..XXXXXXX 100644
139
--- a/target/arm/translate-a64.c
140
+++ b/target/arm/translate-a64.c
141
@@ -XXX,XX +XXX,XX @@ static void handle_3same_64(DisasContext *s, int opcode, bool u,
142
break;
28
break;
143
case 0x8: /* SSHL, USHL */
29
case ENET_TGSR:
144
if (u) {
30
/* implement clear timer flag */
145
- gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
31
- value = value & 0x0000000f;
146
+ gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
32
+ s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
147
} else {
148
- gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
149
+ gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
150
}
151
break;
33
break;
152
case 0x9: /* SQSHL, UQSHL */
34
case ENET_TCSR0:
153
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
35
case ENET_TCSR1:
154
is_q ? 16 : 8, vec_full_reg_size(s),
36
case ENET_TCSR2:
155
(u ? uqsub_op : sqsub_op) + size);
37
case ENET_TCSR3:
156
return;
38
- value = value & 0x000000fd;
157
+ case 0x08: /* SSHL, USHL */
39
+ s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
158
+ gen_gvec_op3(s, is_q, rd, rn, rm,
40
+ s->regs[index] &= ~0x0000007d; /* writable fields */
159
+ u ? &ushl_op[size] : &sshl_op[size]);
41
+ s->regs[index] |= (value & 0x0000007d);
160
+ return;
42
break;
161
case 0x0c: /* SMAX, UMAX */
43
case ENET_TCCR0:
162
if (u) {
44
case ENET_TCCR1:
163
gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
164
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
165
genfn = fns[size][u];
166
break;
167
}
168
- case 0x8: /* SSHL, USHL */
169
- {
170
- static NeonGenTwoOpFn * const fns[3][2] = {
171
- { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
172
- { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
173
- { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
174
- };
175
- genfn = fns[size][u];
176
- break;
177
- }
178
case 0x9: /* SQSHL, UQSHL */
179
{
180
static NeonGenTwoOpEnvFn * const fns[3][2] = {
181
diff --git a/target/arm/translate.c b/target/arm/translate.c
182
index XXXXXXX..XXXXXXX 100644
183
--- a/target/arm/translate.c
184
+++ b/target/arm/translate.c
185
@@ -XXX,XX +XXX,XX @@ static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
186
if (u) {
187
switch (size) {
188
case 1: gen_helper_neon_shl_u16(var, var, shift); break;
189
- case 2: gen_helper_neon_shl_u32(var, var, shift); break;
190
+ case 2: gen_ushl_i32(var, var, shift); break;
191
default: abort();
192
}
193
} else {
194
switch (size) {
195
case 1: gen_helper_neon_shl_s16(var, var, shift); break;
196
- case 2: gen_helper_neon_shl_s32(var, var, shift); break;
197
+ case 2: gen_sshl_i32(var, var, shift); break;
198
default: abort();
199
}
200
}
201
@@ -XXX,XX +XXX,XX @@ const GVecGen3 cmtst_op[4] = {
202
.vece = MO_64 },
203
};
204
205
+void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
206
+{
207
+ TCGv_i32 lval = tcg_temp_new_i32();
208
+ TCGv_i32 rval = tcg_temp_new_i32();
209
+ TCGv_i32 lsh = tcg_temp_new_i32();
210
+ TCGv_i32 rsh = tcg_temp_new_i32();
211
+ TCGv_i32 zero = tcg_const_i32(0);
212
+ TCGv_i32 max = tcg_const_i32(32);
213
+
214
+ /*
215
+ * Rely on the TCG guarantee that out of range shifts produce
216
+ * unspecified results, not undefined behaviour (i.e. no trap).
217
+ * Discard out-of-range results after the fact.
218
+ */
219
+ tcg_gen_ext8s_i32(lsh, b);
220
+ tcg_gen_neg_i32(rsh, lsh);
221
+ tcg_gen_shl_i32(lval, a, lsh);
222
+ tcg_gen_shr_i32(rval, a, rsh);
223
+ tcg_gen_movcond_i32(TCG_COND_LTU, d, lsh, max, lval, zero);
224
+ tcg_gen_movcond_i32(TCG_COND_LTU, d, rsh, max, rval, d);
225
+
226
+ tcg_temp_free_i32(lval);
227
+ tcg_temp_free_i32(rval);
228
+ tcg_temp_free_i32(lsh);
229
+ tcg_temp_free_i32(rsh);
230
+ tcg_temp_free_i32(zero);
231
+ tcg_temp_free_i32(max);
232
+}
233
+
234
+void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
235
+{
236
+ TCGv_i64 lval = tcg_temp_new_i64();
237
+ TCGv_i64 rval = tcg_temp_new_i64();
238
+ TCGv_i64 lsh = tcg_temp_new_i64();
239
+ TCGv_i64 rsh = tcg_temp_new_i64();
240
+ TCGv_i64 zero = tcg_const_i64(0);
241
+ TCGv_i64 max = tcg_const_i64(64);
242
+
243
+ /*
244
+ * Rely on the TCG guarantee that out of range shifts produce
245
+ * unspecified results, not undefined behaviour (i.e. no trap).
246
+ * Discard out-of-range results after the fact.
247
+ */
248
+ tcg_gen_ext8s_i64(lsh, b);
249
+ tcg_gen_neg_i64(rsh, lsh);
250
+ tcg_gen_shl_i64(lval, a, lsh);
251
+ tcg_gen_shr_i64(rval, a, rsh);
252
+ tcg_gen_movcond_i64(TCG_COND_LTU, d, lsh, max, lval, zero);
253
+ tcg_gen_movcond_i64(TCG_COND_LTU, d, rsh, max, rval, d);
254
+
255
+ tcg_temp_free_i64(lval);
256
+ tcg_temp_free_i64(rval);
257
+ tcg_temp_free_i64(lsh);
258
+ tcg_temp_free_i64(rsh);
259
+ tcg_temp_free_i64(zero);
260
+ tcg_temp_free_i64(max);
261
+}
262
+
263
+static void gen_ushl_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
264
+{
265
+ TCGv_vec lval = tcg_temp_new_vec_matching(d);
266
+ TCGv_vec rval = tcg_temp_new_vec_matching(d);
267
+ TCGv_vec lsh = tcg_temp_new_vec_matching(d);
268
+ TCGv_vec rsh = tcg_temp_new_vec_matching(d);
269
+ TCGv_vec msk, max;
270
+
271
+ /*
272
+ * Rely on the TCG guarantee that out of range shifts produce
273
+ * unspecified results, not undefined behaviour (i.e. no trap).
274
+ * Discard out-of-range results after the fact.
275
+ */
276
+ tcg_gen_neg_vec(vece, rsh, b);
277
+ if (vece == MO_8) {
278
+ tcg_gen_mov_vec(lsh, b);
279
+ } else {
280
+ msk = tcg_temp_new_vec_matching(d);
281
+ tcg_gen_dupi_vec(vece, msk, 0xff);
282
+ tcg_gen_and_vec(vece, lsh, b, msk);
283
+ tcg_gen_and_vec(vece, rsh, rsh, msk);
284
+ tcg_temp_free_vec(msk);
285
+ }
286
+
287
+ /*
288
+ * Perform possibly out of range shifts, trusting that the operation
289
+ * does not trap. Discard unused results after the fact.
290
+ */
291
+ tcg_gen_shlv_vec(vece, lval, a, lsh);
292
+ tcg_gen_shrv_vec(vece, rval, a, rsh);
293
+
294
+ max = tcg_temp_new_vec_matching(d);
295
+ tcg_gen_dupi_vec(vece, max, 8 << vece);
296
+
297
+ /*
298
+ * The choice of LT (signed) and GEU (unsigned) are biased toward
299
+ * the instructions of the x86_64 host. For MO_8, the whole byte
300
+ * is significant so we must use an unsigned compare; otherwise we
301
+ * have already masked to a byte and so a signed compare works.
302
+ * Other tcg hosts have a full set of comparisons and do not care.
303
+ */
304
+ if (vece == MO_8) {
305
+ tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max);
306
+ tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max);
307
+ tcg_gen_andc_vec(vece, lval, lval, lsh);
308
+ tcg_gen_andc_vec(vece, rval, rval, rsh);
309
+ } else {
310
+ tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max);
311
+ tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max);
312
+ tcg_gen_and_vec(vece, lval, lval, lsh);
313
+ tcg_gen_and_vec(vece, rval, rval, rsh);
314
+ }
315
+ tcg_gen_or_vec(vece, d, lval, rval);
316
+
317
+ tcg_temp_free_vec(max);
318
+ tcg_temp_free_vec(lval);
319
+ tcg_temp_free_vec(rval);
320
+ tcg_temp_free_vec(lsh);
321
+ tcg_temp_free_vec(rsh);
322
+}
323
+
324
+static const TCGOpcode ushl_list[] = {
325
+ INDEX_op_neg_vec, INDEX_op_shlv_vec,
326
+ INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
327
+};
328
+
329
+const GVecGen3 ushl_op[4] = {
330
+ { .fniv = gen_ushl_vec,
331
+ .fno = gen_helper_gvec_ushl_b,
332
+ .opt_opc = ushl_list,
333
+ .vece = MO_8 },
334
+ { .fniv = gen_ushl_vec,
335
+ .fno = gen_helper_gvec_ushl_h,
336
+ .opt_opc = ushl_list,
337
+ .vece = MO_16 },
338
+ { .fni4 = gen_ushl_i32,
339
+ .fniv = gen_ushl_vec,
340
+ .opt_opc = ushl_list,
341
+ .vece = MO_32 },
342
+ { .fni8 = gen_ushl_i64,
343
+ .fniv = gen_ushl_vec,
344
+ .opt_opc = ushl_list,
345
+ .vece = MO_64 },
346
+};
347
+
348
+void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
349
+{
350
+ TCGv_i32 lval = tcg_temp_new_i32();
351
+ TCGv_i32 rval = tcg_temp_new_i32();
352
+ TCGv_i32 lsh = tcg_temp_new_i32();
353
+ TCGv_i32 rsh = tcg_temp_new_i32();
354
+ TCGv_i32 zero = tcg_const_i32(0);
355
+ TCGv_i32 max = tcg_const_i32(31);
356
+
357
+ /*
358
+ * Rely on the TCG guarantee that out of range shifts produce
359
+ * unspecified results, not undefined behaviour (i.e. no trap).
360
+ * Discard out-of-range results after the fact.
361
+ */
362
+ tcg_gen_ext8s_i32(lsh, b);
363
+ tcg_gen_neg_i32(rsh, lsh);
364
+ tcg_gen_shl_i32(lval, a, lsh);
365
+ tcg_gen_umin_i32(rsh, rsh, max);
366
+ tcg_gen_sar_i32(rval, a, rsh);
367
+ tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero);
368
+ tcg_gen_movcond_i32(TCG_COND_LT, d, lsh, zero, rval, lval);
369
+
370
+ tcg_temp_free_i32(lval);
371
+ tcg_temp_free_i32(rval);
372
+ tcg_temp_free_i32(lsh);
373
+ tcg_temp_free_i32(rsh);
374
+ tcg_temp_free_i32(zero);
375
+ tcg_temp_free_i32(max);
376
+}
377
+
378
+void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
379
+{
380
+ TCGv_i64 lval = tcg_temp_new_i64();
381
+ TCGv_i64 rval = tcg_temp_new_i64();
382
+ TCGv_i64 lsh = tcg_temp_new_i64();
383
+ TCGv_i64 rsh = tcg_temp_new_i64();
384
+ TCGv_i64 zero = tcg_const_i64(0);
385
+ TCGv_i64 max = tcg_const_i64(63);
386
+
387
+ /*
388
+ * Rely on the TCG guarantee that out of range shifts produce
389
+ * unspecified results, not undefined behaviour (i.e. no trap).
390
+ * Discard out-of-range results after the fact.
391
+ */
392
+ tcg_gen_ext8s_i64(lsh, b);
393
+ tcg_gen_neg_i64(rsh, lsh);
394
+ tcg_gen_shl_i64(lval, a, lsh);
395
+ tcg_gen_umin_i64(rsh, rsh, max);
396
+ tcg_gen_sar_i64(rval, a, rsh);
397
+ tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero);
398
+ tcg_gen_movcond_i64(TCG_COND_LT, d, lsh, zero, rval, lval);
399
+
400
+ tcg_temp_free_i64(lval);
401
+ tcg_temp_free_i64(rval);
402
+ tcg_temp_free_i64(lsh);
403
+ tcg_temp_free_i64(rsh);
404
+ tcg_temp_free_i64(zero);
405
+ tcg_temp_free_i64(max);
406
+}
407
+
408
+static void gen_sshl_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
409
+{
410
+ TCGv_vec lval = tcg_temp_new_vec_matching(d);
411
+ TCGv_vec rval = tcg_temp_new_vec_matching(d);
412
+ TCGv_vec lsh = tcg_temp_new_vec_matching(d);
413
+ TCGv_vec rsh = tcg_temp_new_vec_matching(d);
414
+ TCGv_vec tmp = tcg_temp_new_vec_matching(d);
415
+
416
+ /*
417
+ * Rely on the TCG guarantee that out of range shifts produce
418
+ * unspecified results, not undefined behaviour (i.e. no trap).
419
+ * Discard out-of-range results after the fact.
420
+ */
421
+ tcg_gen_neg_vec(vece, rsh, b);
422
+ if (vece == MO_8) {
423
+ tcg_gen_mov_vec(lsh, b);
424
+ } else {
425
+ tcg_gen_dupi_vec(vece, tmp, 0xff);
426
+ tcg_gen_and_vec(vece, lsh, b, tmp);
427
+ tcg_gen_and_vec(vece, rsh, rsh, tmp);
428
+ }
429
+
430
+ /* Bound rsh so out of bound right shift gets -1. */
431
+ tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
432
+ tcg_gen_umin_vec(vece, rsh, rsh, tmp);
433
+ tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);
434
+
435
+ tcg_gen_shlv_vec(vece, lval, a, lsh);
436
+ tcg_gen_sarv_vec(vece, rval, a, rsh);
437
+
438
+ /* Select in-bound left shift. */
439
+ tcg_gen_andc_vec(vece, lval, lval, tmp);
440
+
441
+ /* Select between left and right shift. */
442
+ if (vece == MO_8) {
443
+ tcg_gen_dupi_vec(vece, tmp, 0);
444
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, d, lsh, tmp, rval, lval);
445
+ } else {
446
+ tcg_gen_dupi_vec(vece, tmp, 0x80);
447
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, d, lsh, tmp, lval, rval);
448
+ }
449
+
450
+ tcg_temp_free_vec(lval);
451
+ tcg_temp_free_vec(rval);
452
+ tcg_temp_free_vec(lsh);
453
+ tcg_temp_free_vec(rsh);
454
+ tcg_temp_free_vec(tmp);
455
+}
456
+
457
+static const TCGOpcode sshl_list[] = {
458
+ INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
459
+ INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
460
+};
461
+
462
+const GVecGen3 sshl_op[4] = {
463
+ { .fniv = gen_sshl_vec,
464
+ .fno = gen_helper_gvec_sshl_b,
465
+ .opt_opc = sshl_list,
466
+ .vece = MO_8 },
467
+ { .fniv = gen_sshl_vec,
468
+ .fno = gen_helper_gvec_sshl_h,
469
+ .opt_opc = sshl_list,
470
+ .vece = MO_16 },
471
+ { .fni4 = gen_sshl_i32,
472
+ .fniv = gen_sshl_vec,
473
+ .opt_opc = sshl_list,
474
+ .vece = MO_32 },
475
+ { .fni8 = gen_sshl_i64,
476
+ .fniv = gen_sshl_vec,
477
+ .opt_opc = sshl_list,
478
+ .vece = MO_64 },
479
+};
480
+
481
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
482
TCGv_vec a, TCGv_vec b)
483
{
484
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
485
vec_size, vec_size);
486
}
487
return 0;
488
+
489
+ case NEON_3R_VSHL:
490
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
491
+ u ? &ushl_op[size] : &sshl_op[size]);
492
+ return 0;
493
}
494
495
if (size == 3) {
496
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
497
neon_load_reg64(cpu_V0, rn + pass);
498
neon_load_reg64(cpu_V1, rm + pass);
499
switch (op) {
500
- case NEON_3R_VSHL:
501
- if (u) {
502
- gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
503
- } else {
504
- gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
505
- }
506
- break;
507
case NEON_3R_VQSHL:
508
if (u) {
509
gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
510
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
511
}
512
pairwise = 0;
513
switch (op) {
514
- case NEON_3R_VSHL:
515
case NEON_3R_VQSHL:
516
case NEON_3R_VRSHL:
517
case NEON_3R_VQRSHL:
518
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
519
case NEON_3R_VHSUB:
520
GEN_NEON_INTEGER_OP(hsub);
521
break;
522
- case NEON_3R_VSHL:
523
- GEN_NEON_INTEGER_OP(shl);
524
- break;
525
case NEON_3R_VQSHL:
526
GEN_NEON_INTEGER_OP_ENV(qshl);
527
break;
528
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
529
}
530
} else {
531
if (input_unsigned) {
532
- gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
533
+ gen_ushl_i64(cpu_V0, in, tmp64);
534
} else {
535
- gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
536
+ gen_sshl_i64(cpu_V0, in, tmp64);
537
}
538
}
539
tmp = tcg_temp_new_i32();
540
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
541
index XXXXXXX..XXXXXXX 100644
542
--- a/target/arm/vec_helper.c
543
+++ b/target/arm/vec_helper.c
544
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
545
do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc,
546
get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
547
}
548
+
549
+void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
550
+{
551
+ intptr_t i, opr_sz = simd_oprsz(desc);
552
+ int8_t *d = vd, *n = vn, *m = vm;
553
+
554
+ for (i = 0; i < opr_sz; ++i) {
555
+ int8_t mm = m[i];
556
+ int8_t nn = n[i];
557
+ int8_t res = 0;
558
+ if (mm >= 0) {
559
+ if (mm < 8) {
560
+ res = nn << mm;
561
+ }
562
+ } else {
563
+ res = nn >> (mm > -8 ? -mm : 7);
564
+ }
565
+ d[i] = res;
566
+ }
567
+ clear_tail(d, opr_sz, simd_maxsz(desc));
568
+}
569
+
570
+void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc)
571
+{
572
+ intptr_t i, opr_sz = simd_oprsz(desc);
573
+ int16_t *d = vd, *n = vn, *m = vm;
574
+
575
+ for (i = 0; i < opr_sz / 2; ++i) {
576
+ int8_t mm = m[i]; /* only 8 bits of shift are significant */
577
+ int16_t nn = n[i];
578
+ int16_t res = 0;
579
+ if (mm >= 0) {
580
+ if (mm < 16) {
581
+ res = nn << mm;
582
+ }
583
+ } else {
584
+ res = nn >> (mm > -16 ? -mm : 15);
585
+ }
586
+ d[i] = res;
587
+ }
588
+ clear_tail(d, opr_sz, simd_maxsz(desc));
589
+}
590
+
591
+void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc)
592
+{
593
+ intptr_t i, opr_sz = simd_oprsz(desc);
594
+ uint8_t *d = vd, *n = vn, *m = vm;
595
+
596
+ for (i = 0; i < opr_sz; ++i) {
597
+ int8_t mm = m[i];
598
+ uint8_t nn = n[i];
599
+ uint8_t res = 0;
600
+ if (mm >= 0) {
601
+ if (mm < 8) {
602
+ res = nn << mm;
603
+ }
604
+ } else {
605
+ if (mm > -8) {
606
+ res = nn >> -mm;
607
+ }
608
+ }
609
+ d[i] = res;
610
+ }
611
+ clear_tail(d, opr_sz, simd_maxsz(desc));
612
+}
613
+
614
+void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc)
615
+{
616
+ intptr_t i, opr_sz = simd_oprsz(desc);
617
+ uint16_t *d = vd, *n = vn, *m = vm;
618
+
619
+ for (i = 0; i < opr_sz / 2; ++i) {
620
+ int8_t mm = m[i]; /* only 8 bits of shift are significant */
621
+ uint16_t nn = n[i];
622
+ uint16_t res = 0;
623
+ if (mm >= 0) {
624
+ if (mm < 16) {
625
+ res = nn << mm;
626
+ }
627
+ } else {
628
+ if (mm > -16) {
629
+ res = nn >> -mm;
630
+ }
631
+ }
632
+ d[i] = res;
633
+ }
634
+ clear_tail(d, opr_sz, simd_maxsz(desc));
635
+}
636
--
2.20.1
Convert the VRINTA/VRINTN/VRINTP/VRINTM instructions to decodetree.
Again, trans_VRINT() is temporarily left in translate.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate.c | 60 +++++++++++++++++++++++-------------
 target/arm/vfp-uncond.decode | 5 +++
 2 files changed, 43 insertions(+), 22 deletions(-)

From: Guenter Roeck <linux@roeck-us.net>

While at it, add some trace messages to help debug problems
seen when running the latest Linux kernel.

Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/block/m25p80.c | 48 ++++++++++++++++++++-----------------------
 hw/block/trace-events | 16 +++++++++++++++
 2 files changed, 38 insertions(+), 26 deletions(-)
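For reference, the mapping encoded by fp_decode_rm[] (which follows the
ARM ARM FPDecodeRM() pseudocode, as the comment moved by this patch notes)
works out to:

    rm == 0 -> FPROUNDING_TIEAWAY  (VRINTA)
    rm == 1 -> FPROUNDING_TIEEVEN  (VRINTN)
    rm == 2 -> FPROUNDING_POSINF   (VRINTP)
    rm == 3 -> FPROUNDING_NEGINF   (VRINTM)

so trans_VRINT() can simply index it with the two-bit rm field from the
decode pattern: int rounding = fp_decode_rm[a->rm];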
diff --git a/target/arm/translate.c b/target/arm/translate.c
16
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
11
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
12
--- a/target/arm/translate.c
18
--- a/hw/block/m25p80.c
13
+++ b/target/arm/translate.c
19
+++ b/hw/block/m25p80.c
14
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
20
@@ -XXX,XX +XXX,XX @@
15
return true;
21
#include "qemu/module.h"
22
#include "qemu/error-report.h"
23
#include "qapi/error.h"
24
-
25
-#ifndef M25P80_ERR_DEBUG
26
-#define M25P80_ERR_DEBUG 0
27
-#endif
28
-
29
-#define DB_PRINT_L(level, ...) do { \
30
- if (M25P80_ERR_DEBUG > (level)) { \
31
- fprintf(stderr, ": %s: ", __func__); \
32
- fprintf(stderr, ## __VA_ARGS__); \
33
- } \
34
-} while (0)
35
+#include "trace.h"
36
37
/* Fields for FlashPartInfo->flags */
38
39
@@ -XXX,XX +XXX,XX @@ static void flash_erase(Flash *s, int offset, FlashCMD cmd)
40
abort();
41
}
42
43
- DB_PRINT_L(0, "offset = %#x, len = %d\n", offset, len);
44
+ trace_m25p80_flash_erase(s, offset, len);
45
+
46
if ((s->pi->flags & capa_to_assert) != capa_to_assert) {
47
qemu_log_mask(LOG_GUEST_ERROR, "M25P80: %d erase size not supported by"
48
" device\n", len);
49
@@ -XXX,XX +XXX,XX @@ void flash_write8(Flash *s, uint32_t addr, uint8_t data)
50
}
51
52
if ((prev ^ data) & data) {
53
- DB_PRINT_L(1, "programming zero to one! addr=%" PRIx32 " %" PRIx8
54
- " -> %" PRIx8 "\n", addr, prev, data);
55
+ trace_m25p80_programming_zero_to_one(s, addr, prev, data);
56
}
57
58
if (s->pi->flags & EEPROM) {
59
@@ -XXX,XX +XXX,XX @@ static void complete_collecting_data(Flash *s)
60
61
s->state = STATE_IDLE;
62
63
+ trace_m25p80_complete_collecting(s, s->cmd_in_progress, n, s->ear,
64
+ s->cur_addr);
65
+
66
switch (s->cmd_in_progress) {
67
case DPP:
68
case QPP:
69
@@ -XXX,XX +XXX,XX @@ static void reset_memory(Flash *s)
70
break;
71
}
72
73
- DB_PRINT_L(0, "Reset done.\n");
74
+ trace_m25p80_reset_done(s);
16
}
75
}
17
76
18
-static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
77
static void decode_fast_read_cmd(Flash *s)
19
- int rounding)
78
@@ -XXX,XX +XXX,XX @@ static void decode_qio_read_cmd(Flash *s)
20
+/*
79
21
+ * Table for converting the most common AArch32 encoding of
80
static void decode_new_cmd(Flash *s, uint32_t value)
22
+ * rounding mode to arm_fprounding order (which matches the
81
{
23
+ * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
82
- s->cmd_in_progress = value;
24
+ */
83
int i;
25
+static const uint8_t fp_decode_rm[] = {
84
- DB_PRINT_L(0, "decoded new command:%x\n", value);
26
+ FPROUNDING_TIEAWAY,
27
+ FPROUNDING_TIEEVEN,
28
+ FPROUNDING_POSINF,
29
+ FPROUNDING_NEGINF,
30
+};
31
+
85
+
32
+static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
86
+ s->cmd_in_progress = value;
33
{
87
+ trace_m25p80_command_decoded(s, value);
34
- TCGv_ptr fpst = get_fpstatus_ptr(0);
88
35
+ uint32_t rd, rm;
89
if (value != RESET_MEMORY) {
36
+ bool dp = a->dp;
90
s->reset_enable = false;
37
+ TCGv_ptr fpst;
91
@@ -XXX,XX +XXX,XX @@ static void decode_new_cmd(Flash *s, uint32_t value)
38
TCGv_i32 tcg_rmode;
92
break;
39
+ int rounding = fp_decode_rm[a->rm];
93
40
+
94
case JEDEC_READ:
41
+ if (!dc_isar_feature(aa32_vrint, s)) {
95
- DB_PRINT_L(0, "populated jedec code\n");
42
+ return false;
96
+ trace_m25p80_populated_jedec(s);
43
+ }
97
for (i = 0; i < s->pi->id_len; i++) {
44
+
98
s->data[i] = s->pi->id[i];
45
+ /* UNDEF accesses to D16-D31 if they don't exist */
99
}
46
+ if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
100
@@ -XXX,XX +XXX,XX @@ static void decode_new_cmd(Flash *s, uint32_t value)
47
+ ((a->vm | a->vd) & 0x10)) {
101
case BULK_ERASE_60:
48
+ return false;
102
case BULK_ERASE:
49
+ }
103
if (s->write_enable) {
50
+ rd = a->vd;
104
- DB_PRINT_L(0, "chip erase\n");
51
+ rm = a->vm;
105
+ trace_m25p80_chip_erase(s);
52
+
106
flash_erase(s, 0, BULK_ERASE);
53
+ if (!vfp_access_check(s)) {
107
} else {
54
+ return true;
108
qemu_log_mask(LOG_GUEST_ERROR, "M25P80: chip erase with write "
55
+ }
109
@@ -XXX,XX +XXX,XX @@ static int m25p80_cs(SSISlave *ss, bool select)
56
+
110
s->data_read_loop = false;
57
+ fpst = get_fpstatus_ptr(0);
111
}
58
112
59
tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
113
- DB_PRINT_L(0, "%sselect\n", select ? "de" : "");
60
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
114
+ trace_m25p80_select(s, select ? "de" : "");
61
@@ -XXX,XX +XXX,XX @@ static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
115
62
tcg_temp_free_i32(tcg_rmode);
63
64
tcg_temp_free_ptr(fpst);
65
- return 0;
66
+ return true;
67
}
68
69
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
70
@@ -XXX,XX +XXX,XX @@ static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
71
return 0;
116
return 0;
72
}
117
}
73
118
@@ -XXX,XX +XXX,XX @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx)
74
-/* Table for converting the most common AArch32 encoding of
119
Flash *s = M25P80(ss);
75
- * rounding mode to arm_fprounding order (which matches the
120
uint32_t r = 0;
76
- * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
121
77
- */
122
+ trace_m25p80_transfer(s, s->state, s->len, s->needed_bytes, s->pos,
78
-static const uint8_t fp_decode_rm[] = {
123
+ s->cur_addr, (uint8_t)tx);
79
- FPROUNDING_TIEAWAY,
124
+
80
- FPROUNDING_TIEEVEN,
125
switch (s->state) {
81
- FPROUNDING_POSINF,
126
82
- FPROUNDING_NEGINF,
127
case STATE_PAGE_PROGRAM:
83
-};
128
- DB_PRINT_L(1, "page program cur_addr=%#" PRIx32 " data=%" PRIx8 "\n",
84
-
129
- s->cur_addr, (uint8_t)tx);
85
static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn)
130
+ trace_m25p80_page_program(s, s->cur_addr, (uint8_t)tx);
86
{
131
flash_write8(s, s->cur_addr, (uint8_t)tx);
87
uint32_t rd, rm, dp = extract32(insn, 8, 1);
132
s->cur_addr = (s->cur_addr + 1) & (s->size - 1);
88
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn)
133
break;
89
rm = VFP_SREG_M(insn);
134
135
case STATE_READ:
136
r = s->storage[s->cur_addr];
137
- DB_PRINT_L(1, "READ 0x%" PRIx32 "=%" PRIx8 "\n", s->cur_addr,
138
- (uint8_t)r);
139
+ trace_m25p80_read_byte(s, s->cur_addr, (uint8_t)r);
140
s->cur_addr = (s->cur_addr + 1) & (s->size - 1);
141
break;
142
143
@@ -XXX,XX +XXX,XX @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx)
144
}
145
146
r = s->data[s->pos];
147
+ trace_m25p80_read_data(s, s->pos, (uint8_t)r);
148
s->pos++;
149
if (s->pos == s->len) {
150
s->pos = 0;
151
@@ -XXX,XX +XXX,XX @@ static void m25p80_realize(SSISlave *ss, Error **errp)
152
return;
153
}
154
155
- DB_PRINT_L(0, "Binding to IF_MTD drive\n");
156
+ trace_m25p80_binding(s);
157
s->storage = blk_blockalign(s->blk, s->size);
158
159
if (blk_pread(s->blk, 0, s->storage, s->size) != s->size) {
160
@@ -XXX,XX +XXX,XX @@ static void m25p80_realize(SSISlave *ss, Error **errp)
161
return;
162
}
163
} else {
164
- DB_PRINT_L(0, "No BDRV - binding to RAM\n");
165
+ trace_m25p80_binding_no_bdrv(s);
166
s->storage = blk_blockalign(NULL, s->size);
167
memset(s->storage, 0xFF, s->size);
90
}
168
}
91
169
diff --git a/hw/block/trace-events b/hw/block/trace-events
92
- if ((insn & 0x0fbc0ed0) == 0x0eb80a40 &&
93
- dc_isar_feature(aa32_vrint, s)) {
94
- /* VRINTA, VRINTN, VRINTP, VRINTM */
95
- int rounding = fp_decode_rm[extract32(insn, 16, 2)];
96
- return handle_vrint(insn, rd, rm, dp, rounding);
97
- } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40 &&
98
- dc_isar_feature(aa32_vcvt_dr, s)) {
99
+ if ((insn & 0x0fbc0e50) == 0x0ebc0a40 &&
100
+ dc_isar_feature(aa32_vcvt_dr, s)) {
101
/* VCVTA, VCVTN, VCVTP, VCVTM */
102
int rounding = fp_decode_rm[extract32(insn, 16, 2)];
103
return handle_vcvt(insn, rd, rm, dp, rounding);
104
diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
105
index XXXXXXX..XXXXXXX 100644
170
index XXXXXXX..XXXXXXX 100644
106
--- a/target/arm/vfp-uncond.decode
171
--- a/hw/block/trace-events
107
+++ b/target/arm/vfp-uncond.decode
172
+++ b/hw/block/trace-events
108
@@ -XXX,XX +XXX,XX @@ VMINMAXNM 1111 1110 1.00 .... .... 1010 . op:1 .0 .... \
173
@@ -XXX,XX +XXX,XX @@ xen_block_blockdev_add(char *str) "%s"
109
vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0
174
xen_block_blockdev_del(const char *node_name) "%s"
110
VMINMAXNM 1111 1110 1.00 .... .... 1011 . op:1 .0 .... \
175
xen_block_device_create(unsigned int number) "%u"
111
vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1
176
xen_block_device_destroy(unsigned int number) "%u"
112
+
177
+
113
+VRINT 1111 1110 1.11 10 rm:2 .... 1010 01.0 .... \
178
+# m25p80.c
114
+ vm=%vm_sp vd=%vd_sp dp=0
179
+m25p80_flash_erase(void *s, int offset, uint32_t len) "[%p] offset = 0x%"PRIx32", len = %u"
115
+VRINT 1111 1110 1.11 10 rm:2 .... 1011 01.0 .... \
180
+m25p80_programming_zero_to_one(void *s, uint32_t addr, uint8_t prev, uint8_t data) "[%p] programming zero to one! addr=0x%"PRIx32" 0x%"PRIx8" -> 0x%"PRIx8
116
+ vm=%vm_dp vd=%vd_dp dp=1
181
+m25p80_reset_done(void *s) "[%p] Reset done."
182
+m25p80_command_decoded(void *s, uint32_t cmd) "[%p] new command:0x%"PRIx32
183
+m25p80_complete_collecting(void *s, uint32_t cmd, int n, uint8_t ear, uint32_t cur_addr) "[%p] decode cmd: 0x%"PRIx32" len %d ear 0x%"PRIx8" addr 0x%"PRIx32
184
+m25p80_populated_jedec(void *s) "[%p] populated jedec code"
185
+m25p80_chip_erase(void *s) "[%p] chip erase"
186
+m25p80_select(void *s, const char *what) "[%p] %sselect"
187
+m25p80_page_program(void *s, uint32_t addr, uint8_t tx) "[%p] page program cur_addr=0x%"PRIx32" data=0x%"PRIx8
188
+m25p80_transfer(void *s, uint8_t state, uint32_t len, uint8_t needed, uint32_t pos, uint32_t cur_addr, uint8_t t) "[%p] Transfer state 0x%"PRIx8" len 0x%"PRIx32" needed 0x%"PRIx8" pos 0x%"PRIx32" addr 0x%"PRIx32" tx 0x%"PRIx8
189
+m25p80_read_byte(void *s, uint32_t addr, uint8_t v) "[%p] Read byte 0x%"PRIx32"=0x%"PRIx8
190
+m25p80_read_data(void *s, uint32_t pos, uint8_t v) "[%p] Read data 0x%"PRIx32"=0x%"PRIx8
191
+m25p80_binding(void *s) "[%p] Binding to IF_MTD drive"
192
+m25p80_binding_no_bdrv(void *s) "[%p] No BDRV - binding to RAM"
117
--
2.20.1
1
From: Richard Henderson <richard.henderson@linaro.org>

This replaces 3 target-specific implementations for BIT, BIF, and BSL.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20190518191934.21887-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h | 2 +
 target/arm/translate.h | 3 --
 target/arm/translate-a64.c | 15 ++++++--
 target/arm/translate.c | 78 +++-----------------------------------
 4 files changed, 20 insertions(+), 78 deletions(-)

From: Guenter Roeck <linux@roeck-us.net>

When requesting JEDEC data using the JEDEC_READ command, the Linux kernel
always requests 6 bytes. The current implementation only returns three
bytes, and interprets the remaining three bytes as new commands.
While this does not matter most of the time, it is at the very least
confusing. To avoid the problem, always report up to 6 bytes of JEDEC
data. Fill remaining data with 0.

Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/block/m25p80.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
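All three operations reduce to one bitwise select, with only the operand
order changing. A hedged scalar model of what tcg_gen_gvec_bitsel()
computes, d = (b & a) | (c & ~a) with a as the mask (model name
hypothetical):

    /* Sketch: scalar equivalent of bitsel(a, b, c). */
    static uint64_t bitsel_model(uint64_t a, uint64_t b, uint64_t c)
    {
        return (b & a) | (c & ~a);
    }

Matching the gen_gvec_fn4() calls in the patch: BSL uses rd as the mask
(bitsel(rd, rn, rm)), BIT uses rm as the mask and keeps rd where the mask
is clear (bitsel(rm, rn, rd)), and BIF inserts rn where the mask is clear
(bitsel(rm, rd, rn)).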
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
19
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
17
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate-a64.h
21
--- a/hw/block/m25p80.c
19
+++ b/target/arm/translate-a64.h
22
+++ b/hw/block/m25p80.c
20
@@ -XXX,XX +XXX,XX @@ typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
23
@@ -XXX,XX +XXX,XX @@ static void decode_new_cmd(Flash *s, uint32_t value)
21
uint32_t, uint32_t);
24
for (i = 0; i < s->pi->id_len; i++) {
22
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
25
s->data[i] = s->pi->id[i];
23
uint32_t, uint32_t, uint32_t);
26
}
24
+typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
27
+ for (; i < SPI_NOR_MAX_ID_LEN; i++) {
25
+ uint32_t, uint32_t, uint32_t);
28
+ s->data[i] = 0;
26
29
+ }
27
#endif /* TARGET_ARM_TRANSLATE_A64_H */
30
28
diff --git a/target/arm/translate.h b/target/arm/translate.h
31
- s->len = s->pi->id_len;
29
index XXXXXXX..XXXXXXX 100644
32
+ s->len = SPI_NOR_MAX_ID_LEN;
30
--- a/target/arm/translate.h
33
s->pos = 0;
31
+++ b/target/arm/translate.h
34
s->state = STATE_READING_DATA;
32
@@ -XXX,XX +XXX,XX @@ static inline void gen_ss_advance(DisasContext *s)
35
break;
33
}
34
35
/* Vector operations shared between ARM and AArch64. */
36
-extern const GVecGen3 bsl_op;
37
-extern const GVecGen3 bit_op;
38
-extern const GVecGen3 bif_op;
39
extern const GVecGen3 mla_op[4];
40
extern const GVecGen3 mls_op[4];
41
extern const GVecGen3 cmtst_op[4];
42
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/translate-a64.c
45
+++ b/target/arm/translate-a64.c
46
@@ -XXX,XX +XXX,XX @@ static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
47
vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
48
}
49
50
+/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
51
+static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
52
+ int rx, GVecGen4Fn *gvec_fn, int vece)
53
+{
54
+ gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
55
+ vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
56
+ is_q ? 16 : 8, vec_full_reg_size(s));
57
+}
58
+
59
/* Expand a 2-operand + immediate AdvSIMD vector operation using
60
* an op descriptor.
61
*/
62
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
63
return;
64
65
case 5: /* BSL bitwise select */
66
- gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
67
+ gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
68
return;
69
case 6: /* BIT, bitwise insert if true */
70
- gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
71
+ gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
72
return;
73
case 7: /* BIF, bitwise insert if false */
74
- gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
75
+ gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
76
return;
77
78
default:
79
diff --git a/target/arm/translate.c b/target/arm/translate.c
80
index XXXXXXX..XXXXXXX 100644
81
--- a/target/arm/translate.c
82
+++ b/target/arm/translate.c
83
@@ -XXX,XX +XXX,XX @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
84
return 1;
85
}
86
87
-/*
88
- * Expanders for VBitOps_VBIF, VBIT, VBSL.
89
- */
90
-static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
91
-{
92
- tcg_gen_xor_i64(rn, rn, rm);
93
- tcg_gen_and_i64(rn, rn, rd);
94
- tcg_gen_xor_i64(rd, rm, rn);
95
-}
96
-
97
-static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
98
-{
99
- tcg_gen_xor_i64(rn, rn, rd);
100
- tcg_gen_and_i64(rn, rn, rm);
101
- tcg_gen_xor_i64(rd, rd, rn);
102
-}
103
-
104
-static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
105
-{
106
- tcg_gen_xor_i64(rn, rn, rd);
107
- tcg_gen_andc_i64(rn, rn, rm);
108
- tcg_gen_xor_i64(rd, rd, rn);
109
-}
110
-
111
-static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
112
-{
113
- tcg_gen_xor_vec(vece, rn, rn, rm);
114
- tcg_gen_and_vec(vece, rn, rn, rd);
115
- tcg_gen_xor_vec(vece, rd, rm, rn);
116
-}
117
-
118
-static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
119
-{
120
- tcg_gen_xor_vec(vece, rn, rn, rd);
121
- tcg_gen_and_vec(vece, rn, rn, rm);
122
- tcg_gen_xor_vec(vece, rd, rd, rn);
123
-}
124
-
125
-static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
126
-{
127
- tcg_gen_xor_vec(vece, rn, rn, rd);
128
- tcg_gen_andc_vec(vece, rn, rn, rm);
129
- tcg_gen_xor_vec(vece, rd, rd, rn);
130
-}
131
-
132
-const GVecGen3 bsl_op = {
133
- .fni8 = gen_bsl_i64,
134
- .fniv = gen_bsl_vec,
135
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
136
- .load_dest = true
137
-};
138
-
139
-const GVecGen3 bit_op = {
140
- .fni8 = gen_bit_i64,
141
- .fniv = gen_bit_vec,
142
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
143
- .load_dest = true
144
-};
145
-
146
-const GVecGen3 bif_op = {
147
- .fni8 = gen_bif_i64,
148
- .fniv = gen_bif_vec,
149
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
150
- .load_dest = true
151
-};
152
-
153
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
154
{
155
tcg_gen_vec_sar8i_i64(a, a, shift);
156
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
157
vec_size, vec_size);
158
break;
159
case 5: /* VBSL */
160
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
161
- vec_size, vec_size, &bsl_op);
162
+ tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
163
+ vec_size, vec_size);
164
break;
165
case 6: /* VBIT */
166
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
167
- vec_size, vec_size, &bit_op);
168
+ tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
169
+ vec_size, vec_size);
170
break;
171
case 7: /* VBIF */
172
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
173
- vec_size, vec_size, &bif_op);
174
+ tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
175
+ vec_size, vec_size);
176
break;
177
}
178
return 0;
179
--
2.20.1
1
The NSACR register allows secure code to configure the FPU
to be inaccessible to non-secure code. If the NSACR.CP10
bit is set then:
 * NS accesses to the FPU trap as UNDEF (ie to NS EL1 or EL2)
 * CPACR.{CP10,CP11} behave as if RAZ/WI
 * HCPTR.{TCP11,TCP10} behave as if RAO/WI

Note that we do not implement the NSACR.NSASEDIS bit which
gates only access to Advanced SIMD, in the same way that
we don't implement the equivalent CPACR.ASEDIS and HCPTR.TASE.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20190510110357.18825-1-peter.maydell@linaro.org

From: Guenter Roeck <linux@roeck-us.net>

Whenever an unsupported command is encountered, the current code
interprets each transferred byte as a new command. Most of the time, those
'commands' are interpreted as new unknown commands. However, in rare
cases, it may be that for example address or length information
passed with the original command is by itself a valid command.
If that happens, the state machine may get completely confused and,
worst case, start writing data into the flash or even erase it.

To avoid the problem, transition into STATE_READING_DATA and keep
sending a value of 0 until the chip is deselected after encountering
an unsupported command.

Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
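The CPACR half of this reduces to masking the CP11:CP10 field (bits
[23:20]) whenever the NSACR gates FP for non-secure state. A hedged sketch
of the read path (helper name hypothetical; the write path keeps the
previous field value instead of forcing zero):

    /* Sketch: CPACR.{CP11,CP10} read as 0b00 when NSACR.CP10 is 0. */
    static uint64_t cpacr_apply_nsacr_gate(uint64_t cpacr, bool fp_gated)
    {
        if (fp_gated) {
            cpacr &= ~(0xf << 20);      /* CP11:CP10 -> 0b00 (RAZ) */
        }
        return cpacr;
    }

The HCPTR direction is the mirror image: TCP11:TCP10 read as 1 (RAO)
under the same condition.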
15
---
19
---
16
target/arm/helper.c | 75 +++++++++++++++++++++++++++++++++++++++++++--
20
hw/block/m25p80.c | 5 +++++
17
1 file changed, 73 insertions(+), 2 deletions(-)
21
1 file changed, 5 insertions(+)
18
22
19
diff --git a/target/arm/helper.c b/target/arm/helper.c
23
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
20
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/helper.c
25
--- a/hw/block/m25p80.c
22
+++ b/target/arm/helper.c
26
+++ b/hw/block/m25p80.c
23
@@ -XXX,XX +XXX,XX @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
27
@@ -XXX,XX +XXX,XX @@ static void decode_new_cmd(Flash *s, uint32_t value)
24
}
28
s->quad_enable = false;
25
value &= mask;
29
break;
26
}
30
default:
27
+
31
+ s->pos = 0;
28
+ /*
32
+ s->len = 1;
29
+ * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
33
+ s->state = STATE_READING_DATA;
30
+ * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
34
+ s->data_read_loop = true;
31
+ */
35
+ s->data[0] = 0;
32
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
36
qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Unknown cmd %x\n", value);
33
+ !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
34
+ value &= ~(0xf << 20);
35
+ value |= env->cp15.cpacr_el1 & (0xf << 20);
36
+ }
37
+
38
env->cp15.cpacr_el1 = value;
39
}
40
41
+static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
42
+{
43
+ /*
44
+ * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
45
+ * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
46
+ */
47
+ uint64_t value = env->cp15.cpacr_el1;
48
+
49
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
50
+ !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
51
+ value &= ~(0xf << 20);
52
+ }
53
+ return value;
54
+}
55
+
56
+
57
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
58
{
59
/* Call cpacr_write() so that we reset with the correct RAO bits set
60
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
61
{ .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
62
.crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
63
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
64
- .resetfn = cpacr_reset, .writefn = cpacr_write },
65
+ .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
66
REGINFO_SENTINEL
67
};
68
69
@@ -XXX,XX +XXX,XX @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
70
return ret;
71
}
72
73
+static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
74
+ uint64_t value)
75
+{
76
+ /*
77
+ * For A-profile AArch32 EL3, if NSACR.CP10
78
+ * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
79
+ */
80
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
81
+ !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
82
+ value &= ~(0x3 << 10);
83
+ value |= env->cp15.cptr_el[2] & (0x3 << 10);
84
+ }
85
+ env->cp15.cptr_el[2] = value;
86
+}
87
+
88
+static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
89
+{
90
+ /*
91
+ * For A-profile AArch32 EL3, if NSACR.CP10
92
+ * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
93
+ */
94
+ uint64_t value = env->cp15.cptr_el[2];
95
+
96
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
97
+ !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
98
+ value |= 0x3 << 10;
99
+ }
100
+ return value;
101
+}
102
+
103
static const ARMCPRegInfo el2_cp_reginfo[] = {
104
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
105
.type = ARM_CP_IO,
106
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
107
{ .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
108
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
109
.access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
110
- .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
111
+ .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
112
+ .readfn = cptr_el2_read, .writefn = cptr_el2_write },
113
{ .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
114
.opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
115
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
116
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)
117
break;
37
break;
118
}
38
}
119
120
+ /*
121
+ * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
122
+ * to control non-secure access to the FPU. It doesn't have any
123
+ * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
124
+ */
125
+ if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
126
+ cur_el <= 2 && !arm_is_secure_below_el3(env))) {
127
+ if (!extract32(env->cp15.nsacr, 10, 1)) {
128
+ /* FP insns act as UNDEF */
129
+ return cur_el == 2 ? 2 : 1;
130
+ }
131
+ }
132
+
133
/* For the CPTR registers we don't need to guard with an ARM_FEATURE
134
* check because zero bits in the registers mean "don't trap".
135
*/
136
--
39
--
137
2.20.1
40
2.20.1
138
41
139
42
diff view generated by jsdifflib
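The mechanism the m25p80 fix relies on can be pictured with a small sketch
(field names as in the hunk above; the transfer logic shown is a
simplification for illustration, not the actual m25p80.c byte-transfer
code):

    /* With len = 1, pos = 0, data[0] = 0 and data_read_loop set, every
     * byte clocked out after an unknown command re-reads data[0] == 0,
     * so no later byte of the transfer can be misparsed as a command. */
    static uint8_t transfer_byte_sketch(Flash *s)
    {
        uint8_t r = s->data[s->pos];

        if (++s->pos == s->len) {
            s->pos = 0;
            if (!s->data_read_loop) {
                s->state = STATE_IDLE;  /* finite reads terminate... */
            }                           /* ...looping reads do not */
        }
        return r;
    }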
The SMMUv3 ID registers cover an area 0x30 bytes in size
(12 registers, 4 bytes each). We were incorrectly decoding
only the first 0x20 bytes.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20190524124829.2589-1-peter.maydell@linaro.org
---
 hw/arm/smmuv3.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -XXX,XX +XXX,XX @@ static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
 {
     switch (offset) {
-    case A_IDREGS ... A_IDREGS + 0x1f:
+    case A_IDREGS ... A_IDREGS + 0x2f:
         *data = smmuv3_idreg(offset - A_IDREGS);
         return MEMTX_OK;
     case A_IDR0 ... A_IDR5:
--
2.20.1
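The arithmetic behind the fix, spelled out (illustrative snippet, not from
smmuv3.c):

    #define SMMU_NUM_IDREGS 12
    /* 12 registers of 4 bytes occupy offsets 0x00..0x2f inclusive;
     * the old bound of 0x1f only covered the first 8 registers. */
    static inline unsigned smmu_idreg_index(unsigned offset)
    {
        return offset >> 2;   /* byte offset -> register index 0..11 */
    }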
In commit 80376c3fc2c38fdd453 in 2010 we added a workaround for
some qbus buses not being connected to qdev devices -- if the
bus has no parent object then we register a reset function which
resets the bus on system reset (and unregister it when the
bus is unparented).

Nearly a decade later, we now have no buses in the tree which
are created with NULL parents (other than the main system bus),
so we can remove the workaround and instead just assert that if
the bus has a NULL parent then it is the main system bus.

(The absence of other parentless buses was confirmed by
code inspection of all the callsites of qbus_create() and
qbus_create_inplace() and cross-checked by 'make check'.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Damien Hedde <damien.hedde@greensocs.com>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190523150543.22676-1-peter.maydell@linaro.org
---
 hw/core/bus.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/hw/core/bus.c b/hw/core/bus.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/bus.c
+++ b/hw/core/bus.c
@@ -XXX,XX +XXX,XX @@ static void qbus_realize(BusState *bus, DeviceState *parent, const char *name)
         bus->parent->num_child_bus++;
         object_property_add_child(OBJECT(bus->parent), bus->name, OBJECT(bus), NULL);
         object_unref(OBJECT(bus));
-    } else if (bus != sysbus_get_default()) {
-        /* TODO: once all bus devices are qdevified,
-           only reset handler for main_system_bus should be registered here. */
-        qemu_register_reset(qbus_reset_all_fn, bus);
+    } else {
+        /* The only bus without a parent is the main system bus */
+        assert(bus == sysbus_get_default());
     }
 }

@@ -XXX,XX +XXX,XX @@ static void bus_unparent(Object *obj)
     BusState *bus = BUS(obj);
     BusChild *kid;

+    /* Only the main system bus has no parent, and that bus is never freed */
+    assert(bus->parent);
+
     while ((kid = QTAILQ_FIRST(&bus->children)) != NULL) {
         DeviceState *dev = kid->child;
         object_unparent(OBJECT(dev));
     }
-    if (bus->parent) {
-        QLIST_REMOVE(bus, sibling);
-        bus->parent->num_child_bus--;
-        bus->parent = NULL;
-    } else {
-        assert(bus != sysbus_get_default()); /* main_system_bus is never freed */
-        qemu_unregister_reset(qbus_reset_all_fn, bus);
-    }
+    QLIST_REMOVE(bus, sibling);
+    bus->parent->num_child_bus--;
+    bus->parent = NULL;
 }

 void qbus_create_inplace(void *bus, size_t size, const char *typename,
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>

Typo comparing the sign of the field, twice, instead of also comparing
the mask of the field (which itself encodes both position and length).

Reported-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20190604154225.26992-1-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 scripts/decodetree.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/decodetree.py b/scripts/decodetree.py
index XXXXXXX..XXXXXXX 100755
--- a/scripts/decodetree.py
+++ b/scripts/decodetree.py
@@ -XXX,XX +XXX,XX @@ class Field:
         return '{0}(insn, {1}, {2})'.format(extr, self.pos, self.len)

     def __eq__(self, other):
-        return self.sign == other.sign and self.sign == other.sign
+        return self.sign == other.sign and self.mask == other.mask

     def __ne__(self, other):
         return not self.__eq__(other)
--
2.20.1

From: Guenter Roeck <linux@roeck-us.net>

The Linux kernel recently started using FAST_READ_4 commands.
This results in flash read failures. At the same time, the m25p80
emulation is seen to read 8 more bytes than expected. Adjusting the
expected number of dummy cycles to match FAST_READ fixes the problem.

Fixes: f95c4bffdc4c ("aspeed/smc: snoop SPI transfers to fake dummy cycles")
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/ssi/aspeed_smc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/ssi/aspeed_smc.c
+++ b/hw/ssi/aspeed_smc.c
@@ -XXX,XX +XXX,XX @@ static int aspeed_smc_num_dummies(uint8_t command)
     case FAST_READ:
     case DOR:
     case QOR:
+    case FAST_READ_4:
     case DOR_4:
     case QOR_4:
         return 1;
     case DIOR:
-    case FAST_READ_4:
     case DIOR_4:
         return 2;
     case QIOR:
--
2.20.1
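For context, FAST_READ_4 is simply the 4-byte-address variant of FAST_READ:
the "_4" suffix widens the address phase, not the dummy phase, so it must be
grouped with the 1-dummy-byte commands rather than the dual-I/O ones. A
sketch of the frame layout this implies (illustration only; the constants
and helper below are not from aspeed_smc.c):

    enum { ADDR_3B = 3, ADDR_4B = 4 };

    /* opcode + address + dummy byte(s) + data */
    static int fast_read_frame_bytes(int addr_bytes, int data_bytes)
    {
        return 1 + addr_bytes + 1 /* one dummy byte */ + data_bytes;
    }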
Factor out the VFP access checking code so that we can use it in the
leaf functions of the decodetree decoder.

We call the function full_vfp_access_check() so we can keep
the more natural vfp_access_check() for a version which doesn't
have the 'ignore_vfp_enabled' flag -- that way almost all VFP
insns will be able to use vfp_access_check(s) and only the
special-register access function will have to use
full_vfp_access_check(s, ignore_vfp_enabled).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 100 ++++++++++++++++++++++++++++++++
 target/arm/translate.c         | 101 +++++----------------------------
 2 files changed, 113 insertions(+), 88 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@
 /* Include the generated VFP decoder */
 #include "decode-vfp.inc.c"
 #include "decode-vfp-uncond.inc.c"
+
+/*
+ * Check that VFP access is enabled. If it is, do the necessary
+ * M-profile lazy-FP handling and then return true.
+ * If not, emit code to generate an appropriate exception and
+ * return false.
+ * The ignore_vfp_enabled argument specifies that we should ignore
+ * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
+ * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
+ */
+static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
+{
+    if (s->fp_excp_el) {
+        if (arm_dc_feature(s, ARM_FEATURE_M)) {
+            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+                               s->fp_excp_el);
+        } else {
+            gen_exception_insn(s, 4, EXCP_UDEF,
+                               syn_fp_access_trap(1, 0xe, false),
+                               s->fp_excp_el);
+        }
+        return false;
+    }
+
+    if (!s->vfp_enabled && !ignore_vfp_enabled) {
+        assert(!arm_dc_feature(s, ARM_FEATURE_M));
+        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+                           default_exception_el(s));
+        return false;
+    }
+
+    if (arm_dc_feature(s, ARM_FEATURE_M)) {
+        /* Handle M-profile lazy FP state mechanics */
+
+        /* Trigger lazy-state preservation if necessary */
+        if (s->v7m_lspact) {
+            /*
+             * Lazy state saving affects external memory and also the NVIC,
+             * so we must mark it as an IO operation for icount.
+             */
+            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+                gen_io_start();
+            }
+            gen_helper_v7m_preserve_fp_state(cpu_env);
+            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+                gen_io_end();
+            }
+            /*
+             * If the preserve_fp_state helper doesn't throw an exception
+             * then it will clear LSPACT; we don't need to repeat this for
+             * any further FP insns in this TB.
+             */
+            s->v7m_lspact = false;
+        }
+
+        /* Update ownership of FP context: set FPCCR.S to match current state */
+        if (s->v8m_fpccr_s_wrong) {
+            TCGv_i32 tmp;
+
+            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
+            if (s->v8m_secure) {
+                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
+            } else {
+                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
+            }
+            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
+            /* Don't need to do this for any further FP insns in this TB */
+            s->v8m_fpccr_s_wrong = false;
+        }
+
+        if (s->v7m_new_fp_ctxt_needed) {
+            /*
+             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
+             * and the FPSCR.
+             */
+            TCGv_i32 control, fpscr;
+            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
+
+            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
+            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+            tcg_temp_free_i32(fpscr);
+            /*
+             * We don't need to arrange to end the TB, because the only
+             * parts of FPSCR which we cache in the TB flags are the VECLEN
+             * and VECSTRIDE, and those don't exist for M-profile.
+             */
+
+            if (s->v8m_secure) {
+                bits |= R_V7M_CONTROL_SFPA_MASK;
+            }
+            control = load_cpu_field(v7m.control[M_REG_S]);
+            tcg_gen_ori_i32(control, control, bits);
+            store_cpu_field(control, v7m.control[M_REG_S]);
+            /* Don't need to do this for any further FP insns in this TB */
+            s->v7m_new_fp_ctxt_needed = false;
+        }
+    }
+
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn)
     return 1;
 }

-/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
-   (ie. an undefined instruction). */
+/*
+ * Disassemble a VFP instruction.  Returns nonzero if an error occurred
+ * (ie. an undefined instruction).
+ */
 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
 {
     uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     TCGv_i32 addr;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
+    bool ignore_vfp_enabled = false;

     if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
         return 1;
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         }
     }

-    /* FIXME: this access check should not take precedence over UNDEF
+    /*
+     * FIXME: this access check should not take precedence over UNDEF
      * for invalid encodings; we will generate incorrect syndrome information
      * for attempts to execute invalid vfp/neon encodings with FP disabled.
      */
-    if (s->fp_excp_el) {
-        if (arm_dc_feature(s, ARM_FEATURE_M)) {
-            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
-                               s->fp_excp_el);
-        } else {
-            gen_exception_insn(s, 4, EXCP_UDEF,
-                               syn_fp_access_trap(1, 0xe, false),
-                               s->fp_excp_el);
-        }
-        return 0;
-    }
-
-    if (!s->vfp_enabled) {
-        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
-        if ((insn & 0x0fe00fff) != 0x0ee00a10)
-            return 1;
+    if ((insn & 0x0fe00fff) == 0x0ee00a10) {
         rn = (insn >> 16) & 0xf;
-        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
-            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
-            return 1;
+        if (rn == ARM_VFP_FPSID || rn == ARM_VFP_FPEXC || rn == ARM_VFP_MVFR2
+            || rn == ARM_VFP_MVFR1 || rn == ARM_VFP_MVFR0) {
+            ignore_vfp_enabled = true;
         }
     }
-
-    if (arm_dc_feature(s, ARM_FEATURE_M)) {
-        /* Handle M-profile lazy FP state mechanics */
-
-        /* Trigger lazy-state preservation if necessary */
-        if (s->v7m_lspact) {
-            /*
-             * Lazy state saving affects external memory and also the NVIC,
-             * so we must mark it as an IO operation for icount.
-             */
-            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
-                gen_io_start();
-            }
-            gen_helper_v7m_preserve_fp_state(cpu_env);
-            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
-                gen_io_end();
-            }
-            /*
-             * If the preserve_fp_state helper doesn't throw an exception
-             * then it will clear LSPACT; we don't need to repeat this for
-             * any further FP insns in this TB.
-             */
-            s->v7m_lspact = false;
-        }
-
-        /* Update ownership of FP context: set FPCCR.S to match current state */
-        if (s->v8m_fpccr_s_wrong) {
-            TCGv_i32 tmp;
-
-            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
-            if (s->v8m_secure) {
-                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
-            } else {
-                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
-            }
-            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
-            /* Don't need to do this for any further FP insns in this TB */
-            s->v8m_fpccr_s_wrong = false;
-        }
-
-        if (s->v7m_new_fp_ctxt_needed) {
-            /*
-             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
-             * and the FPSCR.
-             */
-            TCGv_i32 control, fpscr;
-            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
-
-            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
-            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
-            tcg_temp_free_i32(fpscr);
-            /*
-             * We don't need to arrange to end the TB, because the only
-             * parts of FPSCR which we cache in the TB flags are the VECLEN
-             * and VECSTRIDE, and those don't exist for M-profile.
-             */
-
-            if (s->v8m_secure) {
-                bits |= R_V7M_CONTROL_SFPA_MASK;
-            }
-            control = load_cpu_field(v7m.control[M_REG_S]);
-            tcg_gen_ori_i32(control, control, bits);
-            store_cpu_field(control, v7m.control[M_REG_S]);
-            /* Don't need to do this for any further FP insns in this TB */
-            s->v7m_new_fp_ctxt_needed = false;
-        }
+    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
+        return 0;
     }

     if (extract32(insn, 28, 4) == 0xf) {
--
2.20.1

From: Guenter Roeck <linux@roeck-us.net>

PXA255 does not support a USB OHCI controller, so don't wire it up.

Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Message-id: 20200313160215.28155-1-linux@roeck-us.net
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/pxa2xx.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -XXX,XX +XXX,XX @@ PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size)
         s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi");
     }

-    sysbus_create_simple("sysbus-ohci", 0x4c000000,
-                         qdev_get_gpio_in(s->pic, PXA2XX_PIC_USBH1));
-
     s->pcmcia[0] = pxa2xx_pcmcia_init(address_space, 0x20000000);
     s->pcmcia[1] = pxa2xx_pcmcia_init(address_space, 0x30000000);
--
2.20.1
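The calling convention this split establishes for decodetree leaf functions
looks like the following sketch (trans_VFOO, arg_VFOO and the aa32_vfoo
feature gate are placeholders for illustration, not real insns):

    static bool trans_VFOO(DisasContext *s, arg_VFOO *a)
    {
        if (!dc_isar_feature(aa32_vfoo, s)) {  /* hypothetical gate */
            return false;  /* pattern doesn't exist here: decode as UNDEF */
        }
        if (!vfp_access_check(s)) {
            return true;   /* exception was emitted: insn is handled */
        }
        /* ... emit the TCG ops for the instruction ... */
        return true;
    }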
The Cortex-R5F initfn was not correctly setting up the MVFR
ID register values. Fill these in, since some subsequent patches
will use ID register checks rather than CPU feature bit checks.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void cortex_r5f_initfn(Object *obj)

     cortex_r5_initfn(obj);
     set_feature(&cpu->env, ARM_FEATURE_VFP3);
+    cpu->isar.mvfr0 = 0x10110221;
+    cpu->isar.mvfr1 = 0x00000011;
 }

 static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
--
2.20.1
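The low nibble of the MVFR0 value is what the later ID-register checks look
at; a small illustration (the SIMDReg encoding is per the Arm ARM):

    /* MVFR0[3:0] is SIMDReg: 1 => 16 double registers, 2 => 32.
     * 0x10110221 & 0xf == 1, so Cortex-R5F advertises D0-D15 only,
     * and an "SIMDReg >= 2" test correctly rejects D16-D31 uses. */
    static inline int mvfr0_simdreg(uint32_t mvfr0)
    {
        return extract32(mvfr0, 0, 4);
    }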
At the moment our -cpu max for AArch32 supports VFP short-vectors
because we always implement them, even for CPUs which should
not have them. The following commits are going to switch to
using the correct ID-register-check to enable or disable short
vector support, so we need to turn it on explicitly for -cpu max,
because Cortex-A15 doesn't implement it.

We don't enable this for the AArch64 -cpu max, because the v8A
architecture never supports short-vectors.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
         kvm_arm_set_cpu_features_from_host(cpu);
     } else {
         cortex_a15_initfn(obj);
+
+        /* old-style VFP short-vector support */
+        cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
+
 #ifdef CONFIG_USER_ONLY
         /* We don't set these in system emulation mode for the moment,
          * since we don't correctly set (all of) the ID registers to
--
2.20.1
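FIELD_DP32() deposits a value into a named register field; expanded by hand
for this case (a sketch of what the macro generates, assuming the usual
definition of MVFR0.FPShVec as bits [27:24]):

    /* Equivalent open-coded form of
     *   mvfr0 = FIELD_DP32(mvfr0, MVFR0, FPSHVEC, 1);
     */
    mvfr0 = (mvfr0 & ~(0xfu << 24)) | (1u << 24);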
Convert the VSEL instructions to decodetree.
We leave trans_VSEL() in translate.c for now as this allows
the patch to show just the changes from the old handle_vsel().

In the old code the check for "do D16-D31 exist" was hidden in
the VFP_DREG macro, and assumed that VFPv3 always implied that
D16-D31 exist. In the new code we do the correct ID register test.
This gives identical behaviour for most of our CPUs, and fixes
previously incorrect handling for Cortex-R5F, Cortex-M4 and
Cortex-M33, which all implement VFPv3 or better with only 16
double-precision registers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu.h               |  6 ++++++
 target/arm/translate-vfp.inc.c |  9 +++++++++
 target/arm/translate.c         | 35 ++++++++++++++++++++++----------
 target/arm/vfp-uncond.decode   | 19 ++++++++++++++++++
 4 files changed, 59 insertions(+), 10 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
 }

+static inline bool isar_feature_aa32_fp_d32(const ARMISARegisters *id)
+{
+    /* Return true if D16-D31 are implemented */
+    return FIELD_EX64(id->mvfr0, MVFR0, SIMDREG) >= 2;
+}
+
 /*
  * We always set the FP and SIMD FP16 fields to indicate identical
  * levels of support (assuming SIMD is implemented at all), so
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)

     return true;
 }
+
+/*
+ * The most usual kind of VFP access check, for everything except
+ * FMXR/FMRX to the always-available special registers.
+ */
+static bool vfp_access_check(DisasContext *s)
+{
+    return full_vfp_access_check(s, false);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var)
     tcg_temp_free_i32(tmp);
 }

-static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
-                       uint32_t dp)
+static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
 {
-    uint32_t cc = extract32(insn, 20, 2);
+    uint32_t rd, rn, rm;
+    bool dp = a->dp;
+
+    if (!dc_isar_feature(aa32_vsel, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
+        ((a->vm | a->vn | a->vd) & 0x10)) {
+        return false;
+    }
+    rd = a->vd;
+    rn = a->vn;
+    rm = a->vm;
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }

     if (dp) {
         TCGv_i64 frn, frm, dest;
@@ -XXX,XX +XXX,XX @@ static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,

         tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
         tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
-        switch (cc) {
+        switch (a->cc) {
         case 0: /* eq: Z */
             tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                 frn, frm);
@@ -XXX,XX +XXX,XX @@ static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
         dest = tcg_temp_new_i32();
         tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
         tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
-        switch (cc) {
+        switch (a->cc) {
         case 0: /* eq: Z */
             tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                 frn, frm);
@@ -XXX,XX +XXX,XX @@ static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
         tcg_temp_free_i32(zero);
     }

-    return 0;
+    return true;
 }

 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn)
         rm = VFP_SREG_M(insn);
     }

-    if ((insn & 0x0f800e50) == 0x0e000a00 && dc_isar_feature(aa32_vsel, s)) {
-        return handle_vsel(insn, rd, rn, rm, dp);
-    } else if ((insn & 0x0fb00e10) == 0x0e800a00 &&
-               dc_isar_feature(aa32_vminmaxnm, s)) {
+    if ((insn & 0x0fb00e10) == 0x0e800a00 &&
+        dc_isar_feature(aa32_vminmaxnm, s)) {
         return handle_vminmaxnm(insn, rd, rn, rm, dp);
     } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40 &&
                dc_isar_feature(aa32_vrint, s)) {
diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp-uncond.decode
+++ b/target/arm/vfp-uncond.decode
@@ -XXX,XX +XXX,XX @@
 # 1111 1110 .... .... .... 101. .... ....
 # (but those patterns might also cover some Neon instructions,
 # which do not live in this file.)
+
+# VFP registers have an odd encoding with a four-bit field
+# and a one-bit field which are assembled in different orders
+# depending on whether the register is double or single precision.
+# Each individual instruction function must do the checks for
+# "double register selected but CPU does not have double support"
+# and "double register number has bit 4 set but CPU does not
+# support D16-D31" (which should UNDEF).
+%vm_dp  5:1 0:4
+%vm_sp  0:4 5:1
+%vn_dp  7:1 16:4
+%vn_sp  16:4 7:1
+%vd_dp  22:1 12:4
+%vd_sp  12:4 22:1
+
+VSEL    1111 1110 0. cc:2 .... .... 1010 .0.0 ....   \
+        vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0
+VSEL    1111 1110 0. cc:2 .... .... 1011 .0.0 ....   \
+        vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1
--
2.20.1
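The decodetree field definitions concatenate their sub-fields MSB-first,
which matches the VFP register-number packing exactly; open-coded
equivalents of the two %vm variants (illustration only):

    /* %vm_dp 5:1 0:4 -- bit 5 (M) is the HIGH bit of a D register number */
    static inline int vm_dp(uint32_t insn)
    {
        return (extract32(insn, 5, 1) << 4) | extract32(insn, 0, 4);
    }

    /* %vm_sp 0:4 5:1 -- bit 5 (M) is the LOW bit of an S register number */
    static inline int vm_sp(uint32_t insn)
    {
        return (extract32(insn, 0, 4) << 1) | extract32(insn, 5, 1);
    }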
Convert the VMINNM and VMAXNM instructions to decodetree.
As with VSEL, we leave the trans_VMINMAXNM() function
in translate.c for the moment.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate.c       | 41 ++++++++++++++++++++++++------------
 target/arm/vfp-uncond.decode |  5 +++++
 2 files changed, 33 insertions(+), 13 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
     return true;
 }

-static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
-                            uint32_t rm, uint32_t dp)
+static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
 {
-    uint32_t vmin = extract32(insn, 6, 1);
-    TCGv_ptr fpst = get_fpstatus_ptr(0);
+    uint32_t rd, rn, rm;
+    bool dp = a->dp;
+    bool vmin = a->op;
+    TCGv_ptr fpst;
+
+    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
+        ((a->vm | a->vn | a->vd) & 0x10)) {
+        return false;
+    }
+    rd = a->vd;
+    rn = a->vn;
+    rm = a->vm;
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    fpst = get_fpstatus_ptr(0);

     if (dp) {
         TCGv_i64 frn, frm, dest;
@@ -XXX,XX +XXX,XX @@ static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
     }

     tcg_temp_free_ptr(fpst);
-    return 0;
+    return true;
 }

 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
@@ -XXX,XX +XXX,XX @@ static const uint8_t fp_decode_rm[] = {

 static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn)
 {
-    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
+    uint32_t rd, rm, dp = extract32(insn, 8, 1);

     if (dp) {
         VFP_DREG_D(rd, insn);
-        VFP_DREG_N(rn, insn);
         VFP_DREG_M(rm, insn);
     } else {
         rd = VFP_SREG_D(insn);
-        rn = VFP_SREG_N(insn);
         rm = VFP_SREG_M(insn);
     }

-    if ((insn & 0x0fb00e10) == 0x0e800a00 &&
-        dc_isar_feature(aa32_vminmaxnm, s)) {
-        return handle_vminmaxnm(insn, rd, rn, rm, dp);
-    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40 &&
-               dc_isar_feature(aa32_vrint, s)) {
+    if ((insn & 0x0fbc0ed0) == 0x0eb80a40 &&
+        dc_isar_feature(aa32_vrint, s)) {
         /* VRINTA, VRINTN, VRINTP, VRINTM */
         int rounding = fp_decode_rm[extract32(insn, 16, 2)];
         return handle_vrint(insn, rd, rm, dp, rounding);
diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp-uncond.decode
+++ b/target/arm/vfp-uncond.decode
@@ -XXX,XX +XXX,XX @@ VSEL    1111 1110 0. cc:2 .... .... 1010 .0.0 ....   \
         vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0
 VSEL    1111 1110 0. cc:2 .... .... 1011 .0.0 ....   \
         vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1
+
+VMINMAXNM 1111 1110 1.00 .... .... 1010 . op:1 .0 ....   \
+          vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0
+VMINMAXNM 1111 1110 1.00 .... .... 1011 . op:1 .0 ....   \
+          vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1
--
2.20.1
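The vfp_minnum/maxnum helpers implement IEEE 754-2008 minNum()/maxNum()
rather than the C '<' operator; the practically important difference is
the NaN handling. A scalar sketch, ignoring signaling-NaN and signed-zero
subtleties:

    #include <math.h>

    static double minnum_sketch(double a, double b)
    {
        if (isnan(a)) {
            return b;          /* a quiet NaN loses to a number; if both
                                * are NaN the result is still NaN */
        }
        if (isnan(b)) {
            return a;
        }
        return a < b ? a : b;
    }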
Convert the VCVTA/VCVTN/VCVTP/VCVTM instructions to decodetree.
trans_VCVT() is temporarily left in translate.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate.c       | 72 +++++++++++++++++-------------------
 target/arm/vfp-uncond.decode |  6 +++
 2 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
     return true;
 }

-static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
-                       int rounding)
+static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
 {
-    bool is_signed = extract32(insn, 7, 1);
-    TCGv_ptr fpst = get_fpstatus_ptr(0);
+    uint32_t rd, rm;
+    bool dp = a->dp;
+    TCGv_ptr fpst;
     TCGv_i32 tcg_rmode, tcg_shift;
+    int rounding = fp_decode_rm[a->rm];
+    bool is_signed = a->op;
+
+    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
+        return false;
+    }
+    rd = a->vd;
+    rm = a->vm;
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    fpst = get_fpstatus_ptr(0);

     tcg_shift = tcg_const_i32(0);

@@ -XXX,XX +XXX,XX @@ static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
     if (dp) {
         TCGv_i64 tcg_double, tcg_res;
         TCGv_i32 tcg_tmp;
-        /* Rd is encoded as a single precision register even when the source
-         * is double precision.
-         */
-        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
         tcg_double = tcg_temp_new_i64();
         tcg_res = tcg_temp_new_i64();
         tcg_tmp = tcg_temp_new_i32();
@@ -XXX,XX +XXX,XX @@ static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,

     tcg_temp_free_ptr(fpst);

-    return 0;
-}
-
-static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn)
-{
-    uint32_t rd, rm, dp = extract32(insn, 8, 1);
-
-    if (dp) {
-        VFP_DREG_D(rd, insn);
-        VFP_DREG_M(rm, insn);
-    } else {
-        rd = VFP_SREG_D(insn);
-        rm = VFP_SREG_M(insn);
-    }
-
-    if ((insn & 0x0fbc0e50) == 0x0ebc0a40 &&
-        dc_isar_feature(aa32_vcvt_dr, s)) {
-        /* VCVTA, VCVTN, VCVTP, VCVTM */
-        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
-        return handle_vcvt(insn, rd, rm, dp, rounding);
-    }
-    return 1;
+    return true;
 }

 /*
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         }
     }

+    if (extract32(insn, 28, 4) == 0xf) {
+        /*
+         * Encodings with T=1 (Thumb) or unconditional (ARM): these
+         * were all handled by the decodetree decoder, so any insn
+         * patterns which get here must be UNDEF.
+         */
+        return 1;
+    }
+
     /*
      * FIXME: this access check should not take precedence over UNDEF
      * for invalid encodings; we will generate incorrect syndrome information
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         return 0;
     }

-    if (extract32(insn, 28, 4) == 0xf) {
-        /*
-         * Encodings with T=1 (Thumb) or unconditional (ARM):
-         * only used for the "miscellaneous VFP features" added in v8A
-         * and v7M (and gated on the MVFR2.FPMisc field).
-         */
-        return disas_vfp_misc_insn(s, insn);
-    }
-
     dp = ((insn & 0xf00) == 0xb00);
     switch ((insn >> 24) & 0xf) {
     case 0xe:
diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp-uncond.decode
+++ b/target/arm/vfp-uncond.decode
@@ -XXX,XX +XXX,XX @@ VRINT   1111 1110 1.11 10 rm:2 .... 1010 01.0 ....   \
         vm=%vm_sp vd=%vd_sp dp=0
 VRINT   1111 1110 1.11 10 rm:2 .... 1011 01.0 ....   \
         vm=%vm_dp vd=%vd_dp dp=1
+
+# VCVT float to int with specified rounding mode; Vd is always single-precision
+VCVT    1111 1110 1.11 11 rm:2 .... 1010 op:1 1.0 ....   \
+        vm=%vm_sp vd=%vd_sp dp=0
+VCVT    1111 1110 1.11 11 rm:2 .... 1011 op:1 1.0 ....   \
+        vm=%vm_dp vd=%vd_sp dp=1
--
2.20.1
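The rm field in these encodings indexes the fp_decode_rm[] table quoted in
the next patch; the mapping it performs, spelled out per instruction
suffix (this matches the Arm ARM FPDecodeRM() pseudocode):

    /* rm == 0b00 -> FPROUNDING_TIEAWAY  (VCVTA / VRINTA)
     * rm == 0b01 -> FPROUNDING_TIEEVEN  (VCVTN / VRINTN)
     * rm == 0b10 -> FPROUNDING_POSINF   (VCVTP / VRINTP)
     * rm == 0b11 -> FPROUNDING_NEGINF   (VCVTM / VRINTM)
     */
    int rounding = fp_decode_rm[a->rm];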
Move the trans_*() functions we've just created from translate.c
to translate-vfp.inc.c. This is pure code motion with no textual
changes (this can be checked with 'git show --color-moved').

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 337 +++++++++++++++++++++++++++++++
 target/arm/translate.c         | 337 ---------------------------------
 2 files changed, 337 insertions(+), 337 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check(DisasContext *s)
17
{
18
return full_vfp_access_check(s, false);
19
}
20
+
21
+static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
22
+{
23
+ uint32_t rd, rn, rm;
24
+ bool dp = a->dp;
25
+
26
+ if (!dc_isar_feature(aa32_vsel, s)) {
27
+ return false;
28
+ }
29
+
30
+ /* UNDEF accesses to D16-D31 if they don't exist */
31
+ if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
32
+ ((a->vm | a->vn | a->vd) & 0x10)) {
33
+ return false;
34
+ }
35
+ rd = a->vd;
36
+ rn = a->vn;
37
+ rm = a->vm;
38
+
39
+ if (!vfp_access_check(s)) {
40
+ return true;
41
+ }
42
+
43
+ if (dp) {
44
+ TCGv_i64 frn, frm, dest;
45
+ TCGv_i64 tmp, zero, zf, nf, vf;
46
+
47
+ zero = tcg_const_i64(0);
48
+
49
+ frn = tcg_temp_new_i64();
50
+ frm = tcg_temp_new_i64();
51
+ dest = tcg_temp_new_i64();
52
+
53
+ zf = tcg_temp_new_i64();
54
+ nf = tcg_temp_new_i64();
55
+ vf = tcg_temp_new_i64();
56
+
57
+ tcg_gen_extu_i32_i64(zf, cpu_ZF);
58
+ tcg_gen_ext_i32_i64(nf, cpu_NF);
59
+ tcg_gen_ext_i32_i64(vf, cpu_VF);
60
+
61
+ tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
62
+ tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
63
+ switch (a->cc) {
64
+ case 0: /* eq: Z */
65
+ tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
66
+ frn, frm);
67
+ break;
68
+ case 1: /* vs: V */
69
+ tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
70
+ frn, frm);
71
+ break;
72
+ case 2: /* ge: N == V -> N ^ V == 0 */
73
+ tmp = tcg_temp_new_i64();
74
+ tcg_gen_xor_i64(tmp, vf, nf);
75
+ tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
76
+ frn, frm);
77
+ tcg_temp_free_i64(tmp);
78
+ break;
79
+ case 3: /* gt: !Z && N == V */
80
+ tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
81
+ frn, frm);
82
+ tmp = tcg_temp_new_i64();
83
+ tcg_gen_xor_i64(tmp, vf, nf);
84
+ tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
85
+ dest, frm);
86
+ tcg_temp_free_i64(tmp);
87
+ break;
88
+ }
89
+ tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
90
+ tcg_temp_free_i64(frn);
91
+ tcg_temp_free_i64(frm);
92
+ tcg_temp_free_i64(dest);
93
+
94
+ tcg_temp_free_i64(zf);
95
+ tcg_temp_free_i64(nf);
96
+ tcg_temp_free_i64(vf);
97
+
98
+ tcg_temp_free_i64(zero);
99
+ } else {
100
+ TCGv_i32 frn, frm, dest;
101
+ TCGv_i32 tmp, zero;
102
+
103
+ zero = tcg_const_i32(0);
104
+
105
+ frn = tcg_temp_new_i32();
106
+ frm = tcg_temp_new_i32();
107
+ dest = tcg_temp_new_i32();
108
+ tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
109
+ tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
110
+ switch (a->cc) {
111
+ case 0: /* eq: Z */
112
+ tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
113
+ frn, frm);
114
+ break;
115
+ case 1: /* vs: V */
116
+ tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
117
+ frn, frm);
118
+ break;
119
+ case 2: /* ge: N == V -> N ^ V == 0 */
120
+ tmp = tcg_temp_new_i32();
121
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
122
+ tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
123
+ frn, frm);
124
+ tcg_temp_free_i32(tmp);
125
+ break;
126
+ case 3: /* gt: !Z && N == V */
127
+ tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
128
+ frn, frm);
129
+ tmp = tcg_temp_new_i32();
130
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
131
+ tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
132
+ dest, frm);
133
+ tcg_temp_free_i32(tmp);
134
+ break;
135
+ }
136
+ tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
137
+ tcg_temp_free_i32(frn);
138
+ tcg_temp_free_i32(frm);
139
+ tcg_temp_free_i32(dest);
140
+
141
+ tcg_temp_free_i32(zero);
142
+ }
143
+
144
+ return true;
145
+}
146
+
147
+static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
148
+{
149
+ uint32_t rd, rn, rm;
150
+ bool dp = a->dp;
151
+ bool vmin = a->op;
152
+ TCGv_ptr fpst;
153
+
154
+ if (!dc_isar_feature(aa32_vminmaxnm, s)) {
155
+ return false;
156
+ }
157
+
158
+ /* UNDEF accesses to D16-D31 if they don't exist */
159
+ if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
160
+ ((a->vm | a->vn | a->vd) & 0x10)) {
161
+ return false;
162
+ }
163
+ rd = a->vd;
164
+ rn = a->vn;
165
+ rm = a->vm;
166
+
167
+ if (!vfp_access_check(s)) {
168
+ return true;
169
+ }
170
+
171
+ fpst = get_fpstatus_ptr(0);
172
+
173
+ if (dp) {
174
+ TCGv_i64 frn, frm, dest;
175
+
176
+ frn = tcg_temp_new_i64();
177
+ frm = tcg_temp_new_i64();
178
+ dest = tcg_temp_new_i64();
179
+
180
+ tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
181
+ tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
182
+ if (vmin) {
183
+ gen_helper_vfp_minnumd(dest, frn, frm, fpst);
184
+ } else {
185
+ gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
186
+ }
187
+ tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
188
+ tcg_temp_free_i64(frn);
189
+ tcg_temp_free_i64(frm);
190
+ tcg_temp_free_i64(dest);
191
+ } else {
192
+ TCGv_i32 frn, frm, dest;
193
+
194
+ frn = tcg_temp_new_i32();
195
+ frm = tcg_temp_new_i32();
196
+ dest = tcg_temp_new_i32();
197
+
198
+ tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
199
+ tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
200
+ if (vmin) {
201
+ gen_helper_vfp_minnums(dest, frn, frm, fpst);
202
+ } else {
203
+ gen_helper_vfp_maxnums(dest, frn, frm, fpst);
204
+ }
205
+ tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
206
+ tcg_temp_free_i32(frn);
207
+ tcg_temp_free_i32(frm);
208
+ tcg_temp_free_i32(dest);
209
+ }
210
+
211
+ tcg_temp_free_ptr(fpst);
212
+ return true;
213
+}
214
+
215
+/*
216
+ * Table for converting the most common AArch32 encoding of
217
+ * rounding mode to arm_fprounding order (which matches the
218
+ * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
219
+ */
220
+static const uint8_t fp_decode_rm[] = {
221
+ FPROUNDING_TIEAWAY,
222
+ FPROUNDING_TIEEVEN,
223
+ FPROUNDING_POSINF,
224
+ FPROUNDING_NEGINF,
225
+};
226
+
227
+static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
228
+{
229
+ uint32_t rd, rm;
230
+ bool dp = a->dp;
231
+ TCGv_ptr fpst;
232
+ TCGv_i32 tcg_rmode;
233
+ int rounding = fp_decode_rm[a->rm];
234
+
235
+ if (!dc_isar_feature(aa32_vrint, s)) {
236
+ return false;
237
+ }
238
+
239
+ /* UNDEF accesses to D16-D31 if they don't exist */
240
+ if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
241
+ ((a->vm | a->vd) & 0x10)) {
242
+ return false;
243
+ }
244
+ rd = a->vd;
245
+ rm = a->vm;
246
+
247
+ if (!vfp_access_check(s)) {
248
+ return true;
249
+ }
250
+
251
+ fpst = get_fpstatus_ptr(0);
252
+
253
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
254
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
255
+
256
+ if (dp) {
257
+ TCGv_i64 tcg_op;
258
+ TCGv_i64 tcg_res;
259
+ tcg_op = tcg_temp_new_i64();
260
+ tcg_res = tcg_temp_new_i64();
261
+ tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
262
+ gen_helper_rintd(tcg_res, tcg_op, fpst);
263
+ tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
264
+ tcg_temp_free_i64(tcg_op);
265
+ tcg_temp_free_i64(tcg_res);
266
+ } else {
267
+ TCGv_i32 tcg_op;
268
+ TCGv_i32 tcg_res;
269
+ tcg_op = tcg_temp_new_i32();
270
+ tcg_res = tcg_temp_new_i32();
271
+ tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
272
+ gen_helper_rints(tcg_res, tcg_op, fpst);
273
+ tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
274
+ tcg_temp_free_i32(tcg_op);
275
+ tcg_temp_free_i32(tcg_res);
276
+ }
277
+
278
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
279
+ tcg_temp_free_i32(tcg_rmode);
280
+
281
+ tcg_temp_free_ptr(fpst);
282
+ return true;
283
+}
284
+
285
+static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
286
+{
287
+ uint32_t rd, rm;
288
+ bool dp = a->dp;
289
+ TCGv_ptr fpst;
290
+ TCGv_i32 tcg_rmode, tcg_shift;
291
+ int rounding = fp_decode_rm[a->rm];
292
+ bool is_signed = a->op;
293
+
294
+ if (!dc_isar_feature(aa32_vcvt_dr, s)) {
295
+ return false;
296
+ }
297
+
298
+ /* UNDEF accesses to D16-D31 if they don't exist */
299
+ if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
300
+ return false;
301
+ }
302
+ rd = a->vd;
303
+ rm = a->vm;
304
+
305
+ if (!vfp_access_check(s)) {
306
+ return true;
307
+ }
308
+
309
+ fpst = get_fpstatus_ptr(0);
310
+
311
+ tcg_shift = tcg_const_i32(0);
312
+
313
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
314
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
315
+
316
+ if (dp) {
317
+ TCGv_i64 tcg_double, tcg_res;
318
+ TCGv_i32 tcg_tmp;
319
+ tcg_double = tcg_temp_new_i64();
320
+ tcg_res = tcg_temp_new_i64();
321
+ tcg_tmp = tcg_temp_new_i32();
322
+ tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
323
+ if (is_signed) {
324
+ gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
325
+ } else {
326
+ gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
327
+ }
328
+ tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
329
+ tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
330
+ tcg_temp_free_i32(tcg_tmp);
331
+ tcg_temp_free_i64(tcg_res);
332
+ tcg_temp_free_i64(tcg_double);
333
+ } else {
334
+ TCGv_i32 tcg_single, tcg_res;
335
+ tcg_single = tcg_temp_new_i32();
336
+ tcg_res = tcg_temp_new_i32();
337
+ tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
338
+ if (is_signed) {
339
+ gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
340
+ } else {
341
+ gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
342
+ }
343
+ tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
344
+ tcg_temp_free_i32(tcg_res);
345
+ tcg_temp_free_i32(tcg_single);
346
+ }
347
+
348
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
349
+ tcg_temp_free_i32(tcg_rmode);
350
+
351
+ tcg_temp_free_i32(tcg_shift);
352
+
353
+ tcg_temp_free_ptr(fpst);
354
+
355
+ return true;
356
+}
357
diff --git a/target/arm/translate.c b/target/arm/translate.c
358
index XXXXXXX..XXXXXXX 100644
359
--- a/target/arm/translate.c
360
+++ b/target/arm/translate.c
361
@@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var)
362
tcg_temp_free_i32(tmp);
363
}
364
365
-static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
366
-{
367
- uint32_t rd, rn, rm;
368
- bool dp = a->dp;
369
-
370
- if (!dc_isar_feature(aa32_vsel, s)) {
371
- return false;
372
- }
373
-
374
- /* UNDEF accesses to D16-D31 if they don't exist */
375
- if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
376
- ((a->vm | a->vn | a->vd) & 0x10)) {
377
- return false;
378
- }
379
- rd = a->vd;
380
- rn = a->vn;
381
- rm = a->vm;
382
-
383
- if (!vfp_access_check(s)) {
384
- return true;
385
- }
386
-
387
- if (dp) {
388
- TCGv_i64 frn, frm, dest;
389
- TCGv_i64 tmp, zero, zf, nf, vf;
390
-
391
- zero = tcg_const_i64(0);
392
-
393
- frn = tcg_temp_new_i64();
394
- frm = tcg_temp_new_i64();
395
- dest = tcg_temp_new_i64();
396
-
397
- zf = tcg_temp_new_i64();
398
- nf = tcg_temp_new_i64();
399
- vf = tcg_temp_new_i64();
400
-
401
- tcg_gen_extu_i32_i64(zf, cpu_ZF);
402
- tcg_gen_ext_i32_i64(nf, cpu_NF);
403
- tcg_gen_ext_i32_i64(vf, cpu_VF);
404
-
405
- tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
406
- tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
407
- switch (a->cc) {
408
- case 0: /* eq: Z */
409
- tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
410
- frn, frm);
411
- break;
412
- case 1: /* vs: V */
413
- tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
414
- frn, frm);
415
- break;
416
- case 2: /* ge: N == V -> N ^ V == 0 */
417
- tmp = tcg_temp_new_i64();
418
- tcg_gen_xor_i64(tmp, vf, nf);
419
- tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
420
- frn, frm);
421
- tcg_temp_free_i64(tmp);
422
- break;
423
- case 3: /* gt: !Z && N == V */
424
- tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
425
- frn, frm);
426
- tmp = tcg_temp_new_i64();
427
- tcg_gen_xor_i64(tmp, vf, nf);
428
- tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
429
- dest, frm);
430
- tcg_temp_free_i64(tmp);
431
- break;
432
- }
433
- tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
434
- tcg_temp_free_i64(frn);
435
- tcg_temp_free_i64(frm);
436
- tcg_temp_free_i64(dest);
437
-
438
- tcg_temp_free_i64(zf);
439
- tcg_temp_free_i64(nf);
440
- tcg_temp_free_i64(vf);
441
-
442
- tcg_temp_free_i64(zero);
443
- } else {
444
- TCGv_i32 frn, frm, dest;
445
- TCGv_i32 tmp, zero;
446
-
447
- zero = tcg_const_i32(0);
448
-
449
- frn = tcg_temp_new_i32();
450
- frm = tcg_temp_new_i32();
451
- dest = tcg_temp_new_i32();
452
- tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
453
- tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
454
- switch (a->cc) {
455
- case 0: /* eq: Z */
456
- tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
457
- frn, frm);
458
- break;
459
- case 1: /* vs: V */
460
- tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
461
- frn, frm);
462
- break;
463
- case 2: /* ge: N == V -> N ^ V == 0 */
464
- tmp = tcg_temp_new_i32();
465
- tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
466
- tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
467
- frn, frm);
468
- tcg_temp_free_i32(tmp);
469
- break;
470
- case 3: /* gt: !Z && N == V */
471
- tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
472
- frn, frm);
473
- tmp = tcg_temp_new_i32();
474
- tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
475
- tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
476
- dest, frm);
477
- tcg_temp_free_i32(tmp);
478
- break;
479
- }
480
- tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
481
- tcg_temp_free_i32(frn);
482
- tcg_temp_free_i32(frm);
483
- tcg_temp_free_i32(dest);
484
-
485
- tcg_temp_free_i32(zero);
486
- }
487
-
488
- return true;
489
-}
490
-
491
-static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
492
-{
493
- uint32_t rd, rn, rm;
494
- bool dp = a->dp;
495
- bool vmin = a->op;
496
- TCGv_ptr fpst;
497
-
498
- if (!dc_isar_feature(aa32_vminmaxnm, s)) {
499
- return false;
500
- }
501
-
502
- /* UNDEF accesses to D16-D31 if they don't exist */
503
- if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
504
- ((a->vm | a->vn | a->vd) & 0x10)) {
505
- return false;
506
- }
507
- rd = a->vd;
508
- rn = a->vn;
509
- rm = a->vm;
510
-
511
- if (!vfp_access_check(s)) {
512
- return true;
513
- }
514
-
515
- fpst = get_fpstatus_ptr(0);
516
-
517
- if (dp) {
518
- TCGv_i64 frn, frm, dest;
519
-
520
- frn = tcg_temp_new_i64();
521
- frm = tcg_temp_new_i64();
522
- dest = tcg_temp_new_i64();
523
-
524
- tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
525
- tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
526
- if (vmin) {
527
- gen_helper_vfp_minnumd(dest, frn, frm, fpst);
528
- } else {
529
- gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
530
- }
531
- tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
532
- tcg_temp_free_i64(frn);
533
- tcg_temp_free_i64(frm);
534
- tcg_temp_free_i64(dest);
535
- } else {
536
- TCGv_i32 frn, frm, dest;
537
-
538
- frn = tcg_temp_new_i32();
539
- frm = tcg_temp_new_i32();
540
- dest = tcg_temp_new_i32();
541
-
542
- tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
543
- tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
544
- if (vmin) {
545
- gen_helper_vfp_minnums(dest, frn, frm, fpst);
546
- } else {
547
- gen_helper_vfp_maxnums(dest, frn, frm, fpst);
548
- }
549
- tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
550
- tcg_temp_free_i32(frn);
551
- tcg_temp_free_i32(frm);
552
- tcg_temp_free_i32(dest);
553
- }
554
-
555
- tcg_temp_free_ptr(fpst);
556
- return true;
557
-}
558
-
559
-/*
560
- * Table for converting the most common AArch32 encoding of
561
- * rounding mode to arm_fprounding order (which matches the
562
- * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
563
- */
564
-static const uint8_t fp_decode_rm[] = {
565
- FPROUNDING_TIEAWAY,
566
- FPROUNDING_TIEEVEN,
567
- FPROUNDING_POSINF,
568
- FPROUNDING_NEGINF,
569
-};
570
-
571
-static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
572
-{
573
- uint32_t rd, rm;
574
- bool dp = a->dp;
575
- TCGv_ptr fpst;
576
- TCGv_i32 tcg_rmode;
577
- int rounding = fp_decode_rm[a->rm];
578
-
579
- if (!dc_isar_feature(aa32_vrint, s)) {
580
- return false;
581
- }
582
-
583
- /* UNDEF accesses to D16-D31 if they don't exist */
584
- if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
585
- ((a->vm | a->vd) & 0x10)) {
586
- return false;
587
- }
588
- rd = a->vd;
589
- rm = a->vm;
590
-
591
- if (!vfp_access_check(s)) {
592
- return true;
593
- }
594
-
595
- fpst = get_fpstatus_ptr(0);
596
-
597
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
598
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
599
-
600
- if (dp) {
601
- TCGv_i64 tcg_op;
602
- TCGv_i64 tcg_res;
603
- tcg_op = tcg_temp_new_i64();
604
- tcg_res = tcg_temp_new_i64();
605
- tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
606
- gen_helper_rintd(tcg_res, tcg_op, fpst);
607
- tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
608
- tcg_temp_free_i64(tcg_op);
609
- tcg_temp_free_i64(tcg_res);
610
- } else {
611
- TCGv_i32 tcg_op;
612
- TCGv_i32 tcg_res;
613
- tcg_op = tcg_temp_new_i32();
614
- tcg_res = tcg_temp_new_i32();
615
- tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
616
- gen_helper_rints(tcg_res, tcg_op, fpst);
617
- tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
618
- tcg_temp_free_i32(tcg_op);
619
- tcg_temp_free_i32(tcg_res);
620
- }
621
-
622
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
623
- tcg_temp_free_i32(tcg_rmode);
624
-
625
- tcg_temp_free_ptr(fpst);
626
- return true;
627
-}
628
-
629
-static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
630
-{
631
- uint32_t rd, rm;
632
- bool dp = a->dp;
633
- TCGv_ptr fpst;
634
- TCGv_i32 tcg_rmode, tcg_shift;
635
- int rounding = fp_decode_rm[a->rm];
636
- bool is_signed = a->op;
637
-
638
- if (!dc_isar_feature(aa32_vcvt_dr, s)) {
639
- return false;
640
- }
641
-
642
- /* UNDEF accesses to D16-D31 if they don't exist */
643
- if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
644
- return false;
645
- }
646
- rd = a->vd;
647
- rm = a->vm;
648
-
649
- if (!vfp_access_check(s)) {
650
- return true;
651
- }
652
-
653
- fpst = get_fpstatus_ptr(0);
654
-
655
- tcg_shift = tcg_const_i32(0);
656
-
657
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
658
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
659
-
660
- if (dp) {
661
- TCGv_i64 tcg_double, tcg_res;
662
- TCGv_i32 tcg_tmp;
663
- tcg_double = tcg_temp_new_i64();
664
- tcg_res = tcg_temp_new_i64();
665
- tcg_tmp = tcg_temp_new_i32();
666
- tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
667
- if (is_signed) {
668
- gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
669
- } else {
670
- gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
671
- }
672
- tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
673
- tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
674
- tcg_temp_free_i32(tcg_tmp);
675
- tcg_temp_free_i64(tcg_res);
676
- tcg_temp_free_i64(tcg_double);
677
- } else {
678
- TCGv_i32 tcg_single, tcg_res;
679
- tcg_single = tcg_temp_new_i32();
680
- tcg_res = tcg_temp_new_i32();
681
- tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
682
- if (is_signed) {
683
- gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
684
- } else {
685
- gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
686
- }
687
- tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
688
- tcg_temp_free_i32(tcg_res);
689
- tcg_temp_free_i32(tcg_single);
690
- }
691
-
692
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
693
- tcg_temp_free_i32(tcg_rmode);
694
-
695
- tcg_temp_free_i32(tcg_shift);
696
-
697
- tcg_temp_free_ptr(fpst);
698
-
699
- return true;
700
-}
701
-
702
/*
703
* Disassemble a VFP instruction. Returns nonzero if an error occurred
704
* (ie. an undefined instruction).
705
--
2.20.1
The current VFP code has two different idioms for
loading and storing from the VFP register file:

 1. using the gen_mov_F0_vreg() and similar functions,
    which load and store to a fixed set of TCG globals
    cpu_F0s, cpu_F0d, etc
 2. by direct calls to tcg_gen_ld_f64() and friends

We want to phase out idiom 1 (because the use of the
fixed globals is a relic of a much older version of TCG),
but idiom 2 is quite longwinded:
    tcg_gen_ld_f64(tmp, cpu_env, vfp_reg_offset(true, reg))
requires us to specify the 64-bitness twice, once in
the function name and once by passing 'true' to
vfp_reg_offset(). There's no guard against accidentally
passing the wrong flag.

Instead, let's move to a convention of accessing 64-bit
registers via the existing neon_load_reg64() and
neon_store_reg64(), and provide new neon_load_reg32()
and neon_store_reg32() for the 32-bit equivalents.

Implement the new functions and use them in the code in
translate-vfp.inc.c. We will convert the rest of the VFP
code as we do the decodetree conversion in subsequent
commits.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 40 +++++++++++++++++-----------------
 target/arm/translate.c         | 10 +++++++++
 2 files changed, 30 insertions(+), 20 deletions(-)
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/translate-vfp.inc.c
37
+++ b/target/arm/translate-vfp.inc.c
38
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
39
tcg_gen_ext_i32_i64(nf, cpu_NF);
40
tcg_gen_ext_i32_i64(vf, cpu_VF);
41
42
- tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
43
- tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
44
+ neon_load_reg64(frn, rn);
45
+ neon_load_reg64(frm, rm);
46
switch (a->cc) {
47
case 0: /* eq: Z */
48
tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
49
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
50
tcg_temp_free_i64(tmp);
51
break;
52
}
53
- tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
54
+ neon_store_reg64(dest, rd);
55
tcg_temp_free_i64(frn);
56
tcg_temp_free_i64(frm);
57
tcg_temp_free_i64(dest);
58
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
59
frn = tcg_temp_new_i32();
60
frm = tcg_temp_new_i32();
61
dest = tcg_temp_new_i32();
62
- tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
63
- tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
64
+ neon_load_reg32(frn, rn);
65
+ neon_load_reg32(frm, rm);
66
switch (a->cc) {
67
case 0: /* eq: Z */
68
tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
69
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
70
tcg_temp_free_i32(tmp);
71
break;
72
}
73
- tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
74
+ neon_store_reg32(dest, rd);
75
tcg_temp_free_i32(frn);
76
tcg_temp_free_i32(frm);
77
tcg_temp_free_i32(dest);
78
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
79
frm = tcg_temp_new_i64();
80
dest = tcg_temp_new_i64();
81
82
- tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
83
- tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
84
+ neon_load_reg64(frn, rn);
85
+ neon_load_reg64(frm, rm);
86
if (vmin) {
87
gen_helper_vfp_minnumd(dest, frn, frm, fpst);
88
} else {
89
gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
90
}
91
- tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
92
+ neon_store_reg64(dest, rd);
93
tcg_temp_free_i64(frn);
94
tcg_temp_free_i64(frm);
95
tcg_temp_free_i64(dest);
96
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
97
frm = tcg_temp_new_i32();
98
dest = tcg_temp_new_i32();
99
100
- tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
101
- tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
102
+ neon_load_reg32(frn, rn);
103
+ neon_load_reg32(frm, rm);
104
if (vmin) {
105
gen_helper_vfp_minnums(dest, frn, frm, fpst);
106
} else {
107
gen_helper_vfp_maxnums(dest, frn, frm, fpst);
108
}
109
- tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
110
+ neon_store_reg32(dest, rd);
111
tcg_temp_free_i32(frn);
112
tcg_temp_free_i32(frm);
113
tcg_temp_free_i32(dest);
114
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
115
TCGv_i64 tcg_res;
116
tcg_op = tcg_temp_new_i64();
117
tcg_res = tcg_temp_new_i64();
118
- tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
119
+ neon_load_reg64(tcg_op, rm);
120
gen_helper_rintd(tcg_res, tcg_op, fpst);
121
- tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
122
+ neon_store_reg64(tcg_res, rd);
123
tcg_temp_free_i64(tcg_op);
124
tcg_temp_free_i64(tcg_res);
125
} else {
126
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
127
TCGv_i32 tcg_res;
128
tcg_op = tcg_temp_new_i32();
129
tcg_res = tcg_temp_new_i32();
130
- tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
131
+ neon_load_reg32(tcg_op, rm);
132
gen_helper_rints(tcg_res, tcg_op, fpst);
133
- tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
134
+ neon_store_reg32(tcg_res, rd);
135
tcg_temp_free_i32(tcg_op);
136
tcg_temp_free_i32(tcg_res);
137
}
138
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
139
tcg_double = tcg_temp_new_i64();
140
tcg_res = tcg_temp_new_i64();
141
tcg_tmp = tcg_temp_new_i32();
142
- tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
143
+ neon_load_reg64(tcg_double, rm);
144
if (is_signed) {
145
gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
146
} else {
147
gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
148
}
149
tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
150
- tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
151
+ neon_store_reg32(tcg_tmp, rd);
152
tcg_temp_free_i32(tcg_tmp);
153
tcg_temp_free_i64(tcg_res);
154
tcg_temp_free_i64(tcg_double);
155
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
156
TCGv_i32 tcg_single, tcg_res;
157
tcg_single = tcg_temp_new_i32();
158
tcg_res = tcg_temp_new_i32();
159
- tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
160
+ neon_load_reg32(tcg_single, rm);
161
if (is_signed) {
162
gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
163
} else {
164
gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
165
}
166
- tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
167
+ neon_store_reg32(tcg_res, rd);
168
tcg_temp_free_i32(tcg_res);
169
tcg_temp_free_i32(tcg_single);
170
}
171
diff --git a/target/arm/translate.c b/target/arm/translate.c
172
index XXXXXXX..XXXXXXX 100644
173
--- a/target/arm/translate.c
174
+++ b/target/arm/translate.c
175
@@ -XXX,XX +XXX,XX @@ static inline void neon_store_reg64(TCGv_i64 var, int reg)
176
tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
177
}
178
179
+static inline void neon_load_reg32(TCGv_i32 var, int reg)
180
+{
181
+ tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
182
+}
183
+
184
+static inline void neon_store_reg32(TCGv_i32 var, int reg)
185
+{
186
+ tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
187
+}
188
+
189
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
190
{
191
TCGv_ptr ret = tcg_temp_new_ptr();
192
--
193
2.20.1
194
195
diff view generated by jsdifflib
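As a usage illustration of the 32-bit accessors this patch introduces (not part of the patch itself; the register indexes are made up), a single-precision copy of S1 into S0 would look like:

    TCGv_i32 tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, 1);    /* read S1 out of the VFP register file */
    neon_store_reg32(tmp, 0);   /* write the value back as S0 */
    tcg_temp_free_i32(tmp);

Since the width is now implied by the TCGv_i32/TCGv_i64 type of the temporary, passing a 64-bit temp to a 32-bit helper (the analogue of getting the vfp_reg_offset() bool wrong) should fail to compile rather than silently accessing the wrong slice of a D register.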
Deleted patch

Convert the "single-precision" register moves to decodetree:
 * VMSR
 * VMRS
 * VMOV between general purpose register and single precision

Note that the VMSR/VMRS conversions make our handling of
the "should this UNDEF?" checks consistent between the two
instructions:
 * VMSR to MVFR0, MVFR1, MVFR2 now UNDEF from EL0
   (previously was a nop)
 * VMSR to FPSID now UNDEFs from EL0 or if VFPv3 or better
   (previously was a nop)
 * VMSR to FPINST and FPINST2 now UNDEF if VFPv3 or better
   (previously would write to the register, which had no
   guest-visible effect because we always UNDEF reads)

We also tighten up the decode: we were previously underdecoding
some SBZ or SBO bits.

The conversion of VMOV_single includes the expansion out of the
gen_mov_F0_vreg()/gen_vfp_mrs() and gen_mov_vreg_F0()/gen_vfp_msr()
sequences into the simpler direct load/store of the TCG temp via
neon_{load,store}_reg32(): we know in the new function that we're
always single-precision, we don't need to use the old-and-deprecated
cpu_F0* TCG globals, and we don't happen to have the declaration of
gen_vfp_msr() and gen_vfp_mrs() at the point in the file where the
new function is.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 161 +++++++++++++++++++++++++++++++++
 target/arm/translate.c | 148 +-----------------------------
 target/arm/vfp.decode | 4 +
 3 files changed, 168 insertions(+), 145 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)

     return true;
 }
+
+static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
+{
+    TCGv_i32 tmp;
+    bool ignore_vfp_enabled = false;
+
+    if (arm_dc_feature(s, ARM_FEATURE_M)) {
+        /*
+         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
+         * Writes to R15 are UNPREDICTABLE; we choose to undef.
+         */
+        if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
+            return false;
+        }
+    }
+
+    switch (a->reg) {
+    case ARM_VFP_FPSID:
+        /*
+         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
+         * all ID registers to privileged access only.
+         */
+        if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+            return false;
+        }
+        ignore_vfp_enabled = true;
+        break;
+    case ARM_VFP_MVFR0:
+    case ARM_VFP_MVFR1:
+        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
+            return false;
+        }
+        ignore_vfp_enabled = true;
+        break;
+    case ARM_VFP_MVFR2:
+        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
+            return false;
+        }
+        ignore_vfp_enabled = true;
+        break;
+    case ARM_VFP_FPSCR:
+        break;
+    case ARM_VFP_FPEXC:
+        if (IS_USER(s)) {
+            return false;
+        }
+        ignore_vfp_enabled = true;
+        break;
+    case ARM_VFP_FPINST:
+    case ARM_VFP_FPINST2:
+        /* Not present in VFPv3 */
+        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+            return false;
+        }
+        break;
+    default:
+        return false;
+    }
+
+    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
+        return true;
+    }
+
+    if (a->l) {
+        /* VMRS, move VFP special register to gp register */
+        switch (a->reg) {
+        case ARM_VFP_FPSID:
+        case ARM_VFP_FPEXC:
+        case ARM_VFP_FPINST:
+        case ARM_VFP_FPINST2:
+        case ARM_VFP_MVFR0:
+        case ARM_VFP_MVFR1:
+        case ARM_VFP_MVFR2:
+            tmp = load_cpu_field(vfp.xregs[a->reg]);
+            break;
+        case ARM_VFP_FPSCR:
+            if (a->rt == 15) {
+                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
+            } else {
+                tmp = tcg_temp_new_i32();
+                gen_helper_vfp_get_fpscr(tmp, cpu_env);
+            }
+            break;
+        default:
+            g_assert_not_reached();
+        }
+
+        if (a->rt == 15) {
+            /* Set the 4 flag bits in the CPSR. */
+            gen_set_nzcv(tmp);
+            tcg_temp_free_i32(tmp);
+        } else {
+            store_reg(s, a->rt, tmp);
+        }
+    } else {
+        /* VMSR, move gp register to VFP special register */
+        switch (a->reg) {
+        case ARM_VFP_FPSID:
+        case ARM_VFP_MVFR0:
+        case ARM_VFP_MVFR1:
+        case ARM_VFP_MVFR2:
+            /* Writes are ignored. */
+            break;
+        case ARM_VFP_FPSCR:
+            tmp = load_reg(s, a->rt);
+            gen_helper_vfp_set_fpscr(cpu_env, tmp);
+            tcg_temp_free_i32(tmp);
+            gen_lookup_tb(s);
+            break;
+        case ARM_VFP_FPEXC:
+            /*
+             * TODO: VFP subarchitecture support.
+             * For now, keep the EN bit only
+             */
+            tmp = load_reg(s, a->rt);
+            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
+            store_cpu_field(tmp, vfp.xregs[a->reg]);
+            gen_lookup_tb(s);
+            break;
+        case ARM_VFP_FPINST:
+        case ARM_VFP_FPINST2:
+            tmp = load_reg(s, a->rt);
+            store_cpu_field(tmp, vfp.xregs[a->reg]);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    }
+
+    return true;
+}
+
+static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
+{
+    TCGv_i32 tmp;
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (a->l) {
+        /* VFP to general purpose register */
+        tmp = tcg_temp_new_i32();
+        neon_load_reg32(tmp, a->vn);
+        if (a->rt == 15) {
+            /* Set the 4 flag bits in the CPSR. */
+            gen_set_nzcv(tmp);
+            tcg_temp_free_i32(tmp);
+        } else {
+            store_reg(s, a->rt, tmp);
+        }
+    } else {
+        /* general purpose register to VFP */
+        tmp = load_reg(s, a->rt);
+        neon_store_reg32(tmp, a->vn);
+        tcg_temp_free_i32(tmp);
+    }
+
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     TCGv_i32 addr;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
-    bool ignore_vfp_enabled = false;

     if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
         return 1;
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
      * for invalid encodings; we will generate incorrect syndrome information
      * for attempts to execute invalid vfp/neon encodings with FP disabled.
      */
-    if ((insn & 0x0fe00fff) == 0x0ee00a10) {
-        rn = (insn >> 16) & 0xf;
-        if (rn == ARM_VFP_FPSID || rn == ARM_VFP_FPEXC || rn == ARM_VFP_MVFR2
-            || rn == ARM_VFP_MVFR1 || rn == ARM_VFP_MVFR0) {
-            ignore_vfp_enabled = true;
-        }
-    }
-    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
+    if (!vfp_access_check(s)) {
         return 0;
     }

@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     switch ((insn >> 24) & 0xf) {
     case 0xe:
         if (insn & (1 << 4)) {
-            /* single register transfer */
-            rd = (insn >> 12) & 0xf;
-            if (dp) {
-                /* already handled by decodetree */
-                return 1;
-            } else { /* !dp */
-                bool is_sysreg;
-
-                if ((insn & 0x6f) != 0x00)
-                    return 1;
-                rn = VFP_SREG_N(insn);
-
-                is_sysreg = extract32(insn, 21, 1);
-
-                if (arm_dc_feature(s, ARM_FEATURE_M)) {
-                    /*
-                     * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
-                     * Writes to R15 are UNPREDICTABLE; we choose to undef.
-                     */
-                    if (is_sysreg && (rd == 15 || (rn >> 1) != ARM_VFP_FPSCR)) {
-                        return 1;
-                    }
-                }
-
-                if (insn & ARM_CP_RW_BIT) {
-                    /* vfp->arm */
-                    if (is_sysreg) {
-                        /* system register */
-                        rn >>= 1;
-
-                        switch (rn) {
-                        case ARM_VFP_FPSID:
-                            /* VFP2 allows access to FSID from userspace.
-                               VFP3 restricts all id registers to privileged
-                               accesses. */
-                            if (IS_USER(s)
-                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
-                                return 1;
-                            }
-                            tmp = load_cpu_field(vfp.xregs[rn]);
-                            break;
-                        case ARM_VFP_FPEXC:
-                            if (IS_USER(s))
-                                return 1;
-                            tmp = load_cpu_field(vfp.xregs[rn]);
-                            break;
-                        case ARM_VFP_FPINST:
-                        case ARM_VFP_FPINST2:
-                            /* Not present in VFP3. */
-                            if (IS_USER(s)
-                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
-                                return 1;
-                            }
-                            tmp = load_cpu_field(vfp.xregs[rn]);
-                            break;
-                        case ARM_VFP_FPSCR:
-                            if (rd == 15) {
-                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
-                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
-                            } else {
-                                tmp = tcg_temp_new_i32();
-                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
-                            }
-                            break;
-                        case ARM_VFP_MVFR2:
-                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
-                                return 1;
-                            }
-                            /* fall through */
-                        case ARM_VFP_MVFR0:
-                        case ARM_VFP_MVFR1:
-                            if (IS_USER(s)
-                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
-                                return 1;
-                            }
-                            tmp = load_cpu_field(vfp.xregs[rn]);
-                            break;
-                        default:
-                            return 1;
-                        }
-                    } else {
-                        gen_mov_F0_vreg(0, rn);
-                        tmp = gen_vfp_mrs();
-                    }
-                    if (rd == 15) {
-                        /* Set the 4 flag bits in the CPSR. */
-                        gen_set_nzcv(tmp);
-                        tcg_temp_free_i32(tmp);
-                    } else {
-                        store_reg(s, rd, tmp);
-                    }
-                } else {
-                    /* arm->vfp */
-                    if (is_sysreg) {
-                        rn >>= 1;
-                        /* system register */
-                        switch (rn) {
-                        case ARM_VFP_FPSID:
-                        case ARM_VFP_MVFR0:
-                        case ARM_VFP_MVFR1:
-                            /* Writes are ignored. */
-                            break;
-                        case ARM_VFP_FPSCR:
-                            tmp = load_reg(s, rd);
-                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
-                            tcg_temp_free_i32(tmp);
-                            gen_lookup_tb(s);
-                            break;
-                        case ARM_VFP_FPEXC:
-                            if (IS_USER(s))
-                                return 1;
-                            /* TODO: VFP subarchitecture support.
-                             * For now, keep the EN bit only */
-                            tmp = load_reg(s, rd);
-                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
-                            store_cpu_field(tmp, vfp.xregs[rn]);
-                            gen_lookup_tb(s);
-                            break;
-                        case ARM_VFP_FPINST:
-                        case ARM_VFP_FPINST2:
-                            if (IS_USER(s)) {
-                                return 1;
-                            }
-                            tmp = load_reg(s, rd);
-                            store_cpu_field(tmp, vfp.xregs[rn]);
-                            break;
-                        default:
-                            return 1;
-                        }
-                    } else {
-                        tmp = load_reg(s, rd);
-                        gen_vfp_msr(tmp);
-                        gen_mov_vreg_F0(0, rn);
-                    }
-                }
-            }
+            /* already handled by decodetree */
+            return 1;
         } else {
             /* data processing */
             bool rd_is_dp = dp;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VMOV_from_gp ---- 1110 0 0 index:1 0 .... rt:4 1011 .00 1 0000 \

 VDUP ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \
   vn=%vn_dp
+
+VMSR_VMRS ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
+VMOV_single ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 \
+  vn=%vn_sp
--
2.20.1
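For readers new to decodetree: for a pattern such as VMSR_VMRS above, the generated decoder pulls each named field out of the instruction word into a per-pattern argument struct and calls the hand-written trans_ function with it. A rough sketch of the generated shape (field order and exact declarations are illustrative, not the precise script output):

    typedef struct {
        int l;     /* bit 20: 1 for VMRS (read), 0 for VMSR (write) */
        int reg;   /* bits 19:16: which VFP system register */
        int rt;    /* bits 15:12: the general-purpose register */
    } arg_VMSR_VMRS;

    /*
     * On a match the decoder effectively does:
     *     arg_VMSR_VMRS a = { .l = ..., .reg = ..., .rt = ... };
     *     if (trans_VMSR_VMRS(s, &a)) { ... insn handled ... }
     */

Because the generated decoder runs first, each conversion can delete the corresponding logic from the legacy disas_vfp_insn(): any encoding in that space which still reaches the legacy decoder was not matched and can simply be rejected (the "return 1" above).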
Deleted patch

Convert the VFP two-register transfer instructions to decodetree
(in the v8 Arm ARM these are the "Advanced SIMD and floating-point
64-bit move" encoding group).

Again, we expand out the sequences involving gen_vfp_msr() and
gen_vfp_mrs().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 70 ++++++++++++++++++++++++++++++++++
 target/arm/translate.c | 46 +---------------------
 target/arm/vfp.decode | 5 +++
 3 files changed, 77 insertions(+), 44 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)

     return true;
 }
+
+static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
+{
+    TCGv_i32 tmp;
+
+    /*
+     * VMOV between two general-purpose registers and two single precision
+     * floating point registers
+     */
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (a->op) {
+        /* fpreg to gpreg */
+        tmp = tcg_temp_new_i32();
+        neon_load_reg32(tmp, a->vm);
+        store_reg(s, a->rt, tmp);
+        tmp = tcg_temp_new_i32();
+        neon_load_reg32(tmp, a->vm + 1);
+        store_reg(s, a->rt2, tmp);
+    } else {
+        /* gpreg to fpreg */
+        tmp = load_reg(s, a->rt);
+        neon_store_reg32(tmp, a->vm);
+        tmp = load_reg(s, a->rt2);
+        neon_store_reg32(tmp, a->vm + 1);
+    }
+
+    return true;
+}
+
+static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)
+{
+    TCGv_i32 tmp;
+
+    /*
+     * VMOV between two general-purpose registers and one double precision
+     * floating point register
+     */
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (a->op) {
+        /* fpreg to gpreg */
+        tmp = tcg_temp_new_i32();
+        neon_load_reg32(tmp, a->vm * 2);
+        store_reg(s, a->rt, tmp);
+        tmp = tcg_temp_new_i32();
+        neon_load_reg32(tmp, a->vm * 2 + 1);
+        store_reg(s, a->rt2, tmp);
+    } else {
+        /* gpreg to fpreg */
+        tmp = load_reg(s, a->rt);
+        neon_store_reg32(tmp, a->vm * 2);
+        tcg_temp_free_i32(tmp);
+        tmp = load_reg(s, a->rt2);
+        neon_store_reg32(tmp, a->vm * 2 + 1);
+        tcg_temp_free_i32(tmp);
+    }
+
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     case 0xc:
     case 0xd:
         if ((insn & 0x03e00000) == 0x00400000) {
-            /* two-register transfer */
-            rn = (insn >> 16) & 0xf;
-            rd = (insn >> 12) & 0xf;
-            if (dp) {
-                VFP_DREG_M(rm, insn);
-            } else {
-                rm = VFP_SREG_M(insn);
-            }
-
-            if (insn & ARM_CP_RW_BIT) {
-                /* vfp->arm */
-                if (dp) {
-                    gen_mov_F0_vreg(0, rm * 2);
-                    tmp = gen_vfp_mrs();
-                    store_reg(s, rd, tmp);
-                    gen_mov_F0_vreg(0, rm * 2 + 1);
-                    tmp = gen_vfp_mrs();
-                    store_reg(s, rn, tmp);
-                } else {
-                    gen_mov_F0_vreg(0, rm);
-                    tmp = gen_vfp_mrs();
-                    store_reg(s, rd, tmp);
-                    gen_mov_F0_vreg(0, rm + 1);
-                    tmp = gen_vfp_mrs();
-                    store_reg(s, rn, tmp);
-                }
-            } else {
-                /* arm->vfp */
-                if (dp) {
-                    tmp = load_reg(s, rd);
-                    gen_vfp_msr(tmp);
-                    gen_mov_vreg_F0(0, rm * 2);
-                    tmp = load_reg(s, rn);
-                    gen_vfp_msr(tmp);
-                    gen_mov_vreg_F0(0, rm * 2 + 1);
-                } else {
-                    tmp = load_reg(s, rd);
-                    gen_vfp_msr(tmp);
-                    gen_mov_vreg_F0(0, rm);
-                    tmp = load_reg(s, rn);
-                    gen_vfp_msr(tmp);
-                    gen_mov_vreg_F0(0, rm + 1);
-                }
-            }
+            /* Already handled by decodetree */
+            return 1;
         } else {
             /* Load/store */
             rn = (insn >> 16) & 0xf;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VDUP ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \
 VMSR_VMRS ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
 VMOV_single ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 \
   vn=%vn_sp
+
+VMOV_64_sp ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... \
+  vm=%vm_sp
+VMOV_64_dp ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... \
+  vm=%vm_dp
--
2.20.1
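The dp variant above relies on the architectural overlap Dn == {S[2n+1]:S[2n]} for the low sixteen D registers, which is why one double can be moved as two 32-bit halves; a tiny restatement of the index arithmetic (values illustrative):

    int vm = 5;               /* D5 */
    int lo = vm * 2;          /* 10: the low word, i.e. S10 */
    int hi = vm * 2 + 1;      /* 11: the high word, i.e. S11 */

For D16-D31 there is no S-register alias, but vfp_reg_offset(false, reg) indexes 32-bit slots of the whole register file, so the same *2 / *2+1 arithmetic still picks out the right halves.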
Deleted patch

Convert the VFP single load/store insns VLDR and VSTR to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 73 ++++++++++++++++++++++++++++++++++
 target/arm/translate.c | 22 +---------
 target/arm/vfp.decode | 7 ++++
 3 files changed, 82 insertions(+), 20 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)

     return true;
 }
+
+static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
+{
+    uint32_t offset;
+    TCGv_i32 addr;
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    offset = a->imm << 2;
+    if (!a->u) {
+        offset = -offset;
+    }
+
+    if (s->thumb && a->rn == 15) {
+        /* This is actually UNPREDICTABLE */
+        addr = tcg_temp_new_i32();
+        tcg_gen_movi_i32(addr, s->pc & ~2);
+    } else {
+        addr = load_reg(s, a->rn);
+    }
+    tcg_gen_addi_i32(addr, addr, offset);
+    if (a->l) {
+        gen_vfp_ld(s, false, addr);
+        gen_mov_vreg_F0(false, a->vd);
+    } else {
+        gen_mov_F0_vreg(false, a->vd);
+        gen_vfp_st(s, false, addr);
+    }
+    tcg_temp_free_i32(addr);
+
+    return true;
+}
+
+static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
+{
+    uint32_t offset;
+    TCGv_i32 addr;
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    offset = a->imm << 2;
+    if (!a->u) {
+        offset = -offset;
+    }
+
+    if (s->thumb && a->rn == 15) {
+        /* This is actually UNPREDICTABLE */
+        addr = tcg_temp_new_i32();
+        tcg_gen_movi_i32(addr, s->pc & ~2);
+    } else {
+        addr = load_reg(s, a->rn);
+    }
+    tcg_gen_addi_i32(addr, addr, offset);
+    if (a->l) {
+        gen_vfp_ld(s, true, addr);
+        gen_mov_vreg_F0(true, a->vd);
+    } else {
+        gen_mov_F0_vreg(true, a->vd);
+        gen_vfp_st(s, true, addr);
+    }
+    tcg_temp_free_i32(addr);
+
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
             else
                 rd = VFP_SREG_D(insn);
             if ((insn & 0x01200000) == 0x01000000) {
-                /* Single load/store */
-                offset = (insn & 0xff) << 2;
-                if ((insn & (1 << 23)) == 0)
-                    offset = -offset;
-                if (s->thumb && rn == 15) {
-                    /* This is actually UNPREDICTABLE */
-                    addr = tcg_temp_new_i32();
-                    tcg_gen_movi_i32(addr, s->pc & ~2);
-                } else {
-                    addr = load_reg(s, rn);
-                }
-                tcg_gen_addi_i32(addr, addr, offset);
-                if (insn & (1 << 20)) {
-                    gen_vfp_ld(s, dp, addr);
-                    gen_mov_vreg_F0(dp, rd);
-                } else {
-                    gen_mov_F0_vreg(dp, rd);
-                    gen_vfp_st(s, dp, addr);
-                }
-                tcg_temp_free_i32(addr);
+                /* Already handled by decodetree */
+                return 1;
             } else {
                 /* load/store multiple */
                 int w = insn & (1 << 21);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VMOV_64_sp ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... \
   vm=%vm_sp
 VMOV_64_dp ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... \
   vm=%vm_dp
+
+# Note that the half-precision variants of VLDR and VSTR are
+# not part of this decodetree at all because they have bits [9:8] == 0b01
+VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 \
+  vd=%vd_sp
+VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 \
+  vd=%vd_dp
--
2.20.1
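In these trans functions the 8-bit immediate is a word offset, so the effective address is Rn +/- (imm << 2); a small worked example of the unsigned arithmetic (values illustrative):

    uint32_t imm = 3, u = 0;       /* U bit clear: subtract */
    uint32_t offset = imm << 2;    /* 12 bytes */
    if (!u) {
        offset = -offset;          /* unsigned negate: 0xfffffff4 */
    }
    /* tcg_gen_addi_i32(addr, addr, offset) then computes Rn - 12 */

The Thumb rn == 15 case uses s->pc & ~2 so PC-relative loads see a word-aligned base, keeping the behaviour the old decoder already chose for this UNPREDICTABLE corner.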
Deleted patch

Convert the VFP load/store multiple insns to decodetree.
This includes tightening up the UNDEF checking for pre-VFPv3
CPUs which only have D0-D15: they now UNDEF for any access
to D16-D31, not merely when the smallest register in the
transfer list is in D16-D31.

This conversion does not try to share code between the single
precision and the double precision versions; this looks a bit
duplicative of code, but it leaves the door open for a future
refactoring which gets rid of the use of the "F0" registers
by inlining the various functions like gen_vfp_ld() and
gen_mov_F0_vreg() which are hiding "if (dp) { ... } else { ... }"
conditionalisation.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 162 +++++++++++++++++++++++++++++++++
 target/arm/translate.c | 97 +-------------------
 target/arm/vfp.decode | 18 ++++
 3 files changed, 183 insertions(+), 94 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)

     return true;
 }
+
+static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
+{
+    uint32_t offset;
+    TCGv_i32 addr;
+    int i, n;
+
+    n = a->imm;
+
+    if (n == 0 || (a->vd + n) > 32) {
+        /*
+         * UNPREDICTABLE cases for bad immediates: we choose to
+         * UNDEF to avoid generating huge numbers of TCG ops
+         */
+        return false;
+    }
+    if (a->rn == 15 && a->w) {
+        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (s->thumb && a->rn == 15) {
+        /* This is actually UNPREDICTABLE */
+        addr = tcg_temp_new_i32();
+        tcg_gen_movi_i32(addr, s->pc & ~2);
+    } else {
+        addr = load_reg(s, a->rn);
+    }
+    if (a->p) {
+        /* pre-decrement */
+        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
+    }
+
+    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+        /*
+         * Here 'addr' is the lowest address we will store to,
+         * and is either the old SP (if post-increment) or
+         * the new SP (if pre-decrement). For post-increment
+         * where the old value is below the limit and the new
+         * value is above, it is UNKNOWN whether the limit check
+         * triggers; we choose to trigger.
+         */
+        gen_helper_v8m_stackcheck(cpu_env, addr);
+    }
+
+    offset = 4;
+    for (i = 0; i < n; i++) {
+        if (a->l) {
+            /* load */
+            gen_vfp_ld(s, false, addr);
+            gen_mov_vreg_F0(false, a->vd + i);
+        } else {
+            /* store */
+            gen_mov_F0_vreg(false, a->vd + i);
+            gen_vfp_st(s, false, addr);
+        }
+        tcg_gen_addi_i32(addr, addr, offset);
+    }
+    if (a->w) {
+        /* writeback */
+        if (a->p) {
+            offset = -offset * n;
+            tcg_gen_addi_i32(addr, addr, offset);
+        }
+        store_reg(s, a->rn, addr);
+    } else {
+        tcg_temp_free_i32(addr);
+    }
+
+    return true;
+}
+
+static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
+{
+    uint32_t offset;
+    TCGv_i32 addr;
+    int i, n;
+
+    n = a->imm >> 1;
+
+    if (n == 0 || (a->vd + n) > 32 || n > 16) {
+        /*
+         * UNPREDICTABLE cases for bad immediates: we choose to
+         * UNDEF to avoid generating huge numbers of TCG ops
+         */
+        return false;
+    }
+    if (a->rn == 15 && a->w) {
+        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (s->thumb && a->rn == 15) {
+        /* This is actually UNPREDICTABLE */
+        addr = tcg_temp_new_i32();
+        tcg_gen_movi_i32(addr, s->pc & ~2);
+    } else {
+        addr = load_reg(s, a->rn);
+    }
+    if (a->p) {
+        /* pre-decrement */
+        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
+    }
+
+    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+        /*
+         * Here 'addr' is the lowest address we will store to,
+         * and is either the old SP (if post-increment) or
+         * the new SP (if pre-decrement). For post-increment
+         * where the old value is below the limit and the new
+         * value is above, it is UNKNOWN whether the limit check
+         * triggers; we choose to trigger.
+         */
+        gen_helper_v8m_stackcheck(cpu_env, addr);
+    }
+
+    offset = 8;
+    for (i = 0; i < n; i++) {
+        if (a->l) {
+            /* load */
+            gen_vfp_ld(s, true, addr);
+            gen_mov_vreg_F0(true, a->vd + i);
+        } else {
+            /* store */
+            gen_mov_F0_vreg(true, a->vd + i);
+            gen_vfp_st(s, true, addr);
+        }
+        tcg_gen_addi_i32(addr, addr, offset);
+    }
+    if (a->w) {
+        /* writeback */
+        if (a->p) {
+            offset = -offset * n;
+        } else if (a->imm & 1) {
+            offset = 4;
+        } else {
+            offset = 0;
+        }
+
+        if (offset != 0) {
+            tcg_gen_addi_i32(addr, addr, offset);
+        }
+        store_reg(s, a->rn, addr);
+    } else {
+        tcg_temp_free_i32(addr);
+    }
+
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var)
  */
 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
 {
-    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
+    uint32_t rd, rn, rm, op, i, n, delta_d, delta_m, bank_mask;
     int dp, veclen;
-    TCGv_i32 addr;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;

@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         break;
     case 0xc:
     case 0xd:
-        if ((insn & 0x03e00000) == 0x00400000) {
-            /* Already handled by decodetree */
-            return 1;
-        } else {
-            /* Load/store */
-            rn = (insn >> 16) & 0xf;
-            if (dp)
-                VFP_DREG_D(rd, insn);
-            else
-                rd = VFP_SREG_D(insn);
-            if ((insn & 0x01200000) == 0x01000000) {
-                /* Already handled by decodetree */
-                return 1;
-            } else {
-                /* load/store multiple */
-                int w = insn & (1 << 21);
-                if (dp)
-                    n = (insn >> 1) & 0x7f;
-                else
-                    n = insn & 0xff;
-
-                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
-                    /* P == U , W == 1  => UNDEF */
-                    return 1;
-                }
-                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
-                    /* UNPREDICTABLE cases for bad immediates: we choose to
-                     * UNDEF to avoid generating huge numbers of TCG ops
-                     */
-                    return 1;
-                }
-                if (rn == 15 && w) {
-                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
-                    return 1;
-                }
-
-                if (s->thumb && rn == 15) {
-                    /* This is actually UNPREDICTABLE */
-                    addr = tcg_temp_new_i32();
-                    tcg_gen_movi_i32(addr, s->pc & ~2);
-                } else {
-                    addr = load_reg(s, rn);
-                }
-                if (insn & (1 << 24)) /* pre-decrement */
-                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
-
-                if (s->v8m_stackcheck && rn == 13 && w) {
-                    /*
-                     * Here 'addr' is the lowest address we will store to,
-                     * and is either the old SP (if post-increment) or
-                     * the new SP (if pre-decrement). For post-increment
-                     * where the old value is below the limit and the new
-                     * value is above, it is UNKNOWN whether the limit check
-                     * triggers; we choose to trigger.
-                     */
-                    gen_helper_v8m_stackcheck(cpu_env, addr);
-                }
-
-                if (dp)
-                    offset = 8;
-                else
-                    offset = 4;
-                for (i = 0; i < n; i++) {
-                    if (insn & ARM_CP_RW_BIT) {
-                        /* load */
-                        gen_vfp_ld(s, dp, addr);
-                        gen_mov_vreg_F0(dp, rd + i);
-                    } else {
-                        /* store */
-                        gen_mov_F0_vreg(dp, rd + i);
-                        gen_vfp_st(s, dp, addr);
-                    }
-                    tcg_gen_addi_i32(addr, addr, offset);
-                }
-                if (w) {
-                    /* writeback */
-                    if (insn & (1 << 24))
-                        offset = -offset * n;
-                    else if (dp && (insn & 1))
-                        offset = 4;
-                    else
-                        offset = 0;
-
-                    if (offset != 0)
-                        tcg_gen_addi_i32(addr, addr, offset);
-                    store_reg(s, rn, addr);
-                } else {
-                    tcg_temp_free_i32(addr);
-                }
-            }
-        }
-        break;
+        /* Already handled by decodetree */
+        return 1;
     default:
         /* Should never happen. */
         return 1;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 \
   vd=%vd_sp
 VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 \
   vd=%vd_dp
+
+# We split the load/store multiple up into two patterns to avoid
+# overlap with other insns in the "Advanced SIMD load/store and 64-bit move"
+# grouping:
+#   P=0 U=0 W=0 is 64-bit VMOV
+#   P=1 W=0 is VLDR/VSTR
+#   P=U W=1 is UNDEF
+# leaving P=0 U=1 W=x and P=1 U=0 W=1 for load/store multiple.
+# These include FSTM/FLDM.
+VLDM_VSTM_sp ---- 1100 1 . w:1 l:1 rn:4 .... 1010 imm:8 \
+  vd=%vd_sp p=0 u=1
+VLDM_VSTM_dp ---- 1100 1 . w:1 l:1 rn:4 .... 1011 imm:8 \
+  vd=%vd_dp p=0 u=1
+
+VLDM_VSTM_sp ---- 1101 0.1 l:1 rn:4 .... 1010 imm:8 \
+  vd=%vd_sp p=1 u=0 w=1
+VLDM_VSTM_dp ---- 1101 0.1 l:1 rn:4 .... 1011 imm:8 \
+  vd=%vd_dp p=1 u=0 w=1
--
2.20.1
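The P/U/W comment added to vfp.decode above is a small truth table; a throwaway sketch that just restates it in executable form (not part of the patch):

    /* Classify {P,U,W} in the AArch32 VFP load/store encoding space. */
    static const char *classify(int p, int u, int w)
    {
        if (p == 0 && u == 0 && w == 0) {
            return "64-bit VMOV";
        }
        if (p == 1 && w == 0) {
            return "VLDR/VSTR";
        }
        if (p == u && w == 1) {
            return "UNDEF";
        }
        return "VLDM/VSTM";    /* P=0 U=1 W=x and P=1 U=0 W=1 */
    }

Splitting VLDM_VSTM into the two patterns shown encodes exactly those two remaining rows without overlapping the VMOV and VLDR/VSTR patterns.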
Deleted patch

Expand out the sequences in the new decoder VLDR/VSTR/VLDM/VSTM trans
functions which perform the memory accesses by going via the TCG
globals cpu_F0s and cpu_F0d, to use local TCG temps instead.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 46 +++++++++++++++++++++-------------
 target/arm/translate.c | 18 -------------
 2 files changed, 28 insertions(+), 36 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)
 static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
 {
     uint32_t offset;
-    TCGv_i32 addr;
+    TCGv_i32 addr, tmp;

     if (!vfp_access_check(s)) {
         return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
         addr = load_reg(s, a->rn);
     }
     tcg_gen_addi_i32(addr, addr, offset);
+    tmp = tcg_temp_new_i32();
     if (a->l) {
-        gen_vfp_ld(s, false, addr);
-        gen_mov_vreg_F0(false, a->vd);
+        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+        neon_store_reg32(tmp, a->vd);
     } else {
-        gen_mov_F0_vreg(false, a->vd);
-        gen_vfp_st(s, false, addr);
+        neon_load_reg32(tmp, a->vd);
+        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
     }
+    tcg_temp_free_i32(tmp);
     tcg_temp_free_i32(addr);

     return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
 {
     uint32_t offset;
     TCGv_i32 addr;
+    TCGv_i64 tmp;

     /* UNDEF accesses to D16-D31 if they don't exist */
     if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
         addr = load_reg(s, a->rn);
     }
     tcg_gen_addi_i32(addr, addr, offset);
+    tmp = tcg_temp_new_i64();
     if (a->l) {
-        gen_vfp_ld(s, true, addr);
-        gen_mov_vreg_F0(true, a->vd);
+        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
+        neon_store_reg64(tmp, a->vd);
     } else {
-        gen_mov_F0_vreg(true, a->vd);
-        gen_vfp_st(s, true, addr);
+        neon_load_reg64(tmp, a->vd);
+        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
     }
+    tcg_temp_free_i64(tmp);
     tcg_temp_free_i32(addr);

     return true;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
 static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
 {
     uint32_t offset;
-    TCGv_i32 addr;
+    TCGv_i32 addr, tmp;
     int i, n;

     n = a->imm;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
     }

     offset = 4;
+    tmp = tcg_temp_new_i32();
     for (i = 0; i < n; i++) {
         if (a->l) {
             /* load */
-            gen_vfp_ld(s, false, addr);
-            gen_mov_vreg_F0(false, a->vd + i);
+            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+            neon_store_reg32(tmp, a->vd + i);
         } else {
             /* store */
-            gen_mov_F0_vreg(false, a->vd + i);
-            gen_vfp_st(s, false, addr);
+            neon_load_reg32(tmp, a->vd + i);
+            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
         }
         tcg_gen_addi_i32(addr, addr, offset);
     }
+    tcg_temp_free_i32(tmp);
     if (a->w) {
         /* writeback */
         if (a->p) {
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
 {
     uint32_t offset;
     TCGv_i32 addr;
+    TCGv_i64 tmp;
     int i, n;

     n = a->imm >> 1;
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
     }

     offset = 8;
+    tmp = tcg_temp_new_i64();
     for (i = 0; i < n; i++) {
         if (a->l) {
             /* load */
-            gen_vfp_ld(s, true, addr);
-            gen_mov_vreg_F0(true, a->vd + i);
+            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
+            neon_store_reg64(tmp, a->vd + i);
         } else {
             /* store */
-            gen_mov_F0_vreg(true, a->vd + i);
-            gen_vfp_st(s, true, addr);
+            neon_load_reg64(tmp, a->vd + i);
+            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
         }
         tcg_gen_addi_i32(addr, addr, offset);
     }
+    tcg_temp_free_i64(tmp);
     if (a->w) {
         /* writeback */
         if (a->p) {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ VFP_GEN_FIX(uhto, )
 VFP_GEN_FIX(ulto, )
 #undef VFP_GEN_FIX

-static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
-{
-    if (dp) {
-        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
-    } else {
-        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
-    }
-}
-
-static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
-{
-    if (dp) {
-        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
-    } else {
-        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
-    }
-}
-
 static inline long vfp_reg_offset(bool dp, unsigned reg)
 {
     if (dp) {
--
2.20.1
Deleted patch

Convert the VFP VMLA instruction to decodetree.

This is the first of the VFP 3-operand data processing instructions,
so we include in this patch the code which loops over the elements
for an old-style VFP vector operation. The existing code to do this
looping uses the deprecated cpu_F0s/F0d/F1s/F1d TCG globals; since
we are going to be converting instructions one at a time anyway
we can take the opportunity to make the new loop use TCG temporaries,
which means we can do that conversion one operation at a time
rather than needing to do it all in one go.

We include an UNDEF check which was missing in the old code:
short-vector operations (with stride or length non-zero) were
deprecated in v7A and must UNDEF in v8A, so if the MVFR0 FPShVec
field does not indicate that support for short vectors is present
we UNDEF the operations that would use them. (This is a change
of behaviour for Cortex-A7, Cortex-A15 and the v8 CPUs, which
previously were all incorrectly allowing short-vector operations.)

Note that the conversion fixes a bug in the old code for the
case of VFP short-vector "mixed scalar/vector operations". These
happen where the destination register is in a vector bank but
the second operand is in a scalar bank. For example
    vmla.f64 d10, d1, d16 with length 2 stride 2
is equivalent to the pair of scalar operations
    vmla.f64 d10, d1, d16
    vmla.f64 d8, d3, d16
where the destination and first input register cycle through
their vector but the second input is scalar (d16). In the
old decoder the gen_vfp_F1_mul() operation uses cpu_F1{s,d}
as a temporary output for the multiply, which trashes the
second input operand. For the fully-scalar case (where we
never do a second iteration) and the fully-vector case
(where the loop loads the new second input operand) this
doesn't matter, but for the mixed scalar/vector case we
will end up using the wrong value for later loop iterations.
In the new code we use TCG temporaries and so avoid the bug.
This bug is present for all the multiply-accumulate insns
that operate on short vectors: VMLA, VMLS, VNMLA, VNMLS.

Note 2: the expression used to calculate the next register
number in the vector bank is not in fact correct; we leave
this behaviour unchanged from the old decoder and will
fix this bug later in the series.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu.h | 5 +
 target/arm/translate-vfp.inc.c | 205 +++++++++++++++++++++++++++++++++
 target/arm/translate.c | 14 ++-
 target/arm/vfp.decode | 6 +
 4 files changed, 224 insertions(+), 6 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fp_d32(const ARMISARegisters *id)
     return FIELD_EX64(id->mvfr0, MVFR0, SIMDREG) >= 2;
 }

+static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->mvfr0, MVFR0, FPSHVEC) > 0;
+}
+
 /*
  * We always set the FP and SIMD FP16 fields to indicate identical
  * levels of support (assuming SIMD is implemented at all), so
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)

     return true;
 }
+
+/*
+ * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
+ * The callback should emit code to write a value to vd. If
+ * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
+ * will contain the old value of the relevant VFP register;
+ * otherwise it must be written to only.
+ */
+typedef void VFPGen3OpSPFn(TCGv_i32 vd,
+                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
+typedef void VFPGen3OpDPFn(TCGv_i64 vd,
+                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
+
+/*
+ * Perform a 3-operand VFP data processing instruction. fn is the
+ * callback to do the actual operation; this function deals with the
+ * code to handle looping around for VFP vector processing.
+ */
+static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
+                          int vd, int vn, int vm, bool reads_vd)
+{
+    uint32_t delta_m = 0;
+    uint32_t delta_d = 0;
+    uint32_t bank_mask = 0;
+    int veclen = s->vec_len;
+    TCGv_i32 f0, f1, fd;
+    TCGv_ptr fpst;
+
+    if (!dc_isar_feature(aa32_fpshvec, s) &&
+        (veclen != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (veclen > 0) {
+        bank_mask = 0x18;
+
+        /* Figure out what type of vector operation this is. */
+        if ((vd & bank_mask) == 0) {
+            /* scalar */
+            veclen = 0;
+        } else {
+            delta_d = s->vec_stride + 1;
+
+            if ((vm & bank_mask) == 0) {
+                /* mixed scalar/vector */
+                delta_m = 0;
+            } else {
+                /* vector */
+                delta_m = delta_d;
+            }
+        }
+    }
+
+    f0 = tcg_temp_new_i32();
+    f1 = tcg_temp_new_i32();
+    fd = tcg_temp_new_i32();
+    fpst = get_fpstatus_ptr(0);
+
+    neon_load_reg32(f0, vn);
+    neon_load_reg32(f1, vm);
+
+    for (;;) {
+        if (reads_vd) {
+            neon_load_reg32(fd, vd);
+        }
+        fn(fd, f0, f1, fpst);
+        neon_store_reg32(fd, vd);
+
+        if (veclen == 0) {
+            break;
+        }
+
+        /* Set up the operands for the next iteration */
+        veclen--;
+        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
+        neon_load_reg32(f0, vn);
+        if (delta_m) {
+            vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+            neon_load_reg32(f1, vm);
+        }
+    }
+
+    tcg_temp_free_i32(f0);
+    tcg_temp_free_i32(f1);
+    tcg_temp_free_i32(fd);
+    tcg_temp_free_ptr(fpst);
+
+    return true;
+}
+
+static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
+                          int vd, int vn, int vm, bool reads_vd)
+{
+    uint32_t delta_m = 0;
+    uint32_t delta_d = 0;
+    uint32_t bank_mask = 0;
+    int veclen = s->vec_len;
+    TCGv_i64 f0, f1, fd;
+    TCGv_ptr fpst;
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
+        return false;
+    }
+
+    if (!dc_isar_feature(aa32_fpshvec, s) &&
+        (veclen != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (veclen > 0) {
+        bank_mask = 0xc;
+
+        /* Figure out what type of vector operation this is. */
+        if ((vd & bank_mask) == 0) {
+            /* scalar */
+            veclen = 0;
+        } else {
+            delta_d = (s->vec_stride >> 1) + 1;
+
+            if ((vm & bank_mask) == 0) {
+                /* mixed scalar/vector */
+                delta_m = 0;
+            } else {
+                /* vector */
+                delta_m = delta_d;
+            }
+        }
+    }
+
+    f0 = tcg_temp_new_i64();
+    f1 = tcg_temp_new_i64();
+    fd = tcg_temp_new_i64();
+    fpst = get_fpstatus_ptr(0);
+
+    neon_load_reg64(f0, vn);
+    neon_load_reg64(f1, vm);
+
+    for (;;) {
+        if (reads_vd) {
+            neon_load_reg64(fd, vd);
+        }
+        fn(fd, f0, f1, fpst);
+        neon_store_reg64(fd, vd);
+
+        if (veclen == 0) {
+            break;
+        }
+        /* Set up the operands for the next iteration */
+        veclen--;
+        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
+        neon_load_reg64(f0, vn);
+        if (delta_m) {
+            vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+            neon_load_reg64(f1, vm);
+        }
+    }
+
+    tcg_temp_free_i64(f0);
+    tcg_temp_free_i64(f1);
+    tcg_temp_free_i64(fd);
+    tcg_temp_free_ptr(fpst);
+
+    return true;
+}
+
+static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /* Note that order of inputs to the add matters for NaNs */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    gen_helper_vfp_muls(tmp, vn, vm, fpst);
+    gen_helper_vfp_adds(vd, vd, tmp, fpst);
+    tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+    /* Note that order of inputs to the add matters for NaNs */
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    gen_helper_vfp_muld(tmp, vn, vm, fpst);
+    gen_helper_vfp_addd(vd, vd, tmp, fpst);
+    tcg_temp_free_i64(tmp);
+}
+
+static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_sp *a)
+{
+    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
         rn = VFP_SREG_N(insn);

+        switch (op) {
+        case 0:
+            /* Already handled by decodetree */
+            return 1;
+        default:
+            break;
+        }
+
         if (op == 15) {
             /* rn is opcode, encoded as per VFP_SREG_N. */
             switch (rn) {
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         for (;;) {
             /* Perform the calculation. */
             switch (op) {
-            case 0: /* VMLA: fd + (fn * fm) */
-                /* Note that order of inputs to the add matters for NaNs */
-                gen_vfp_F1_mul(dp);
-                gen_mov_F0_vreg(dp, rd);
-                gen_vfp_add(dp);
-                break;
             case 1: /* VMLS: fd + -(fn * fm) */
                 gen_vfp_mul(dp);
                 gen_vfp_F1_neg(dp);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VLDM_VSTM_sp ---- 1101 0.1 l:1 rn:4 .... 1010 imm:8 \
   vd=%vd_sp p=1 u=0 w=1
 VLDM_VSTM_dp ---- 1101 0.1 l:1 rn:4 .... 1011 imm:8 \
   vd=%vd_dp p=1 u=0 w=1
+
+# 3-register VFP data-processing; bits [23,21:20,6] identify the operation.
+VMLA_sp ---- 1110 0.00 .... .... 1010 .0.0 .... \
+  vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VMLA_dp ---- 1110 0.00 .... .... 1011 .0.0 .... \
+  vm=%vm_dp vn=%vn_dp vd=%vd_dp
--
2.20.1
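A standalone replay of the bank-stepping arithmetic in do_vfp_3op_dp(), driven by the commit-message example (d10, d1, d16 with length 2 and stride 2, which per the code above gives delta_d == 2, delta_m == 0 and bank_mask == 0xc); as Note 2 warns, the wrap expression is knowingly imperfect and is fixed later in the series:

    #include <stdio.h>

    int main(void)
    {
        int bank_mask = 0xc, delta_d = 2, delta_m = 0;  /* d16: scalar bank */
        int vd = 10, vn = 1, vm = 16;

        for (int veclen = 1; ; veclen--) {
            printf("vmla.f64 d%d, d%d, d%d\n", vd, vn, vm);
            if (veclen == 0) {
                break;
            }
            /* Step destination and first operand through the vector bank. */
            vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
            vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
            if (delta_m) {
                vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
            }
        }
        return 0;    /* prints: d10, d1, d16 then d8, d3, d16 */
    }

This reproduces the pair of scalar operations from the commit message, and shows why a stale cpu_F1d in the old decoder corrupted the scalar operand on the second iteration.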
Deleted patch

Convert the VFP VMLS instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 38 ++++++++++++++++++++++++++++++++++
 target/arm/translate.c | 8 +------
 target/arm/vfp.decode | 5 +++++
 3 files changed, 44 insertions(+), 7 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_sp *a)
 {
     return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
 }
+
+static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /*
+     * VMLS: vd = vd + -(vn * vm)
+     * Note that order of inputs to the add matters for NaNs.
+     */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    gen_helper_vfp_muls(tmp, vn, vm, fpst);
+    gen_helper_vfp_negs(tmp, tmp);
+    gen_helper_vfp_adds(vd, vd, tmp, fpst);
+    tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+    /*
+     * VMLS: vd = vd + -(vn * vm)
+     * Note that order of inputs to the add matters for NaNs.
+     */
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    gen_helper_vfp_muld(tmp, vn, vm, fpst);
+    gen_helper_vfp_negd(tmp, tmp);
+    gen_helper_vfp_addd(vd, vd, tmp, fpst);
+    tcg_temp_free_i64(tmp);
+}
+
+static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_sp *a)
+{
+    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         rn = VFP_SREG_N(insn);

         switch (op) {
-        case 0:
+        case 0 ... 1:
             /* Already handled by decodetree */
             return 1;
         default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         for (;;) {
             /* Perform the calculation. */
             switch (op) {
-            case 1: /* VMLS: fd + -(fn * fm) */
-                gen_vfp_mul(dp);
-                gen_vfp_F1_neg(dp);
-                gen_mov_F0_vreg(dp, rd);
-                gen_vfp_add(dp);
-                break;
             case 2: /* VNMLS: -fd + (fn * fm) */
                 /* Note that it isn't valid to replace (-A + B) with (B - A)
                  * or similar plausible looking simplifications
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VMLA_sp ---- 1110 0.00 .... .... 1010 .0.0 .... \
   vm=%vm_sp vn=%vn_sp vd=%vd_sp
 VMLA_dp ---- 1110 0.00 .... .... 1011 .0.0 .... \
   vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VMLS_sp ---- 1110 0.00 .... .... 1010 .1.0 .... \
+  vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VMLS_dp ---- 1110 0.00 .... .... 1011 .1.0 .... \
+  vm=%vm_dp vn=%vn_dp vd=%vd_dp
--
2.20.1
Deleted patch

Convert the VFP VNMLS instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 42 ++++++++++++++++++++++++++++++++++
 target/arm/translate.c         | 24 +------------------
 target/arm/vfp.decode          |  5 ++++
 3 files changed, 48 insertions(+), 23 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_sp *a)
 {
     return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
 }
+
+static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /*
+     * VNMLS: -fd + (fn * fm)
+     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
+     * plausible looking simplifications because this will give wrong results
+     * for NaNs.
+     */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    gen_helper_vfp_muls(tmp, vn, vm, fpst);
+    gen_helper_vfp_negs(vd, vd);
+    gen_helper_vfp_adds(vd, vd, tmp, fpst);
+    tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+    /*
+     * VNMLS: -fd + (fn * fm)
+     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
+     * plausible looking simplifications because this will give wrong results
+     * for NaNs.
+     */
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    gen_helper_vfp_muld(tmp, vn, vm, fpst);
+    gen_helper_vfp_negd(vd, vd);
+    gen_helper_vfp_addd(vd, vd, tmp, fpst);
+    tcg_temp_free_i64(tmp);
+}
+
+static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_sp *a)
+{
+    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ VFP_OP2(div)
 
 #undef VFP_OP2
 
-static inline void gen_vfp_F1_mul(int dp)
-{
-    /* Like gen_vfp_mul() but put result in F1 */
-    TCGv_ptr fpst = get_fpstatus_ptr(0);
-    if (dp) {
-        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
-    } else {
-        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
-    }
-    tcg_temp_free_ptr(fpst);
-}
-
 static inline void gen_vfp_F1_neg(int dp)
 {
     /* Like gen_vfp_neg() but put result in F1 */
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     rn = VFP_SREG_N(insn);
 
     switch (op) {
-    case 0 ... 1:
+    case 0 ... 2:
         /* Already handled by decodetree */
         return 1;
     default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     for (;;) {
         /* Perform the calculation. */
         switch (op) {
-        case 2: /* VNMLS: -fd + (fn * fm) */
-            /* Note that it isn't valid to replace (-A + B) with (B - A)
-             * or similar plausible looking simplifications
-             * because this will give wrong results for NaNs.
-             */
-            gen_vfp_F1_mul(dp);
-            gen_mov_F0_vreg(dp, rd);
-            gen_vfp_neg(dp);
-            gen_vfp_add(dp);
-            break;
         case 3: /* VNMLA: -fd + -(fn * fm) */
             gen_vfp_mul(dp);
             gen_vfp_F1_neg(dp);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VMLS_sp      ---- 1110 0.00 .... .... 1010 .1.0 .... \
              vm=%vm_sp vn=%vn_sp vd=%vd_sp
 VMLS_dp      ---- 1110 0.00 .... .... 1011 .1.0 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VNMLS_sp     ---- 1110 0.01 .... .... 1010 .0.0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VNMLS_dp     ---- 1110 0.01 .... .... 1011 .0.0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp
--
2.20.1

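The "(-A + B) is not (B - A)" caveat is easiest to see from what negation actually does: the VFP neg helpers flip only the sign bit, NaNs included, so the two forms can hand different NaN bit patterns to the add. A minimal standalone illustration (not QEMU code; this mirrors what softfloat's float32_chs does):

#include <stdint.h>
#include <string.h>

/* Negate a float the way the VFP neg helpers do: flip the sign bit
 * only, even when the input is a NaN. */
static float float_neg(float f)
{
    uint32_t bits;

    memcpy(&bits, &f, sizeof(bits));
    bits ^= 0x80000000u;
    memcpy(&f, &bits, sizeof(f));
    return f;
}
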
Deleted patch

Convert the VFP VNMLA instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 34 ++++++++++++++++++++++++++++++++++
 target/arm/translate.c         | 19 +------------------
 target/arm/vfp.decode          |  5 +++++
 3 files changed, 40 insertions(+), 18 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_sp *a)
 {
     return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
 }
+
+static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /* VNMLA: -fd + -(fn * fm) */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    gen_helper_vfp_muls(tmp, vn, vm, fpst);
+    gen_helper_vfp_negs(tmp, tmp);
+    gen_helper_vfp_negs(vd, vd);
+    gen_helper_vfp_adds(vd, vd, tmp, fpst);
+    tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+    /* VNMLA: -fd + -(fn * fm) */
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    gen_helper_vfp_muld(tmp, vn, vm, fpst);
+    gen_helper_vfp_negd(tmp, tmp);
+    gen_helper_vfp_negd(vd, vd);
+    gen_helper_vfp_addd(vd, vd, tmp, fpst);
+    tcg_temp_free_i64(tmp);
+}
+
+static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_sp *a)
+{
+    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ VFP_OP2(div)
 
 #undef VFP_OP2
 
-static inline void gen_vfp_F1_neg(int dp)
-{
-    /* Like gen_vfp_neg() but put result in F1 */
-    if (dp) {
-        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
-    } else {
-        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
-    }
-}
-
 static inline void gen_vfp_abs(int dp)
 {
     if (dp)
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     rn = VFP_SREG_N(insn);
 
     switch (op) {
-    case 0 ... 2:
+    case 0 ... 3:
         /* Already handled by decodetree */
         return 1;
     default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     for (;;) {
         /* Perform the calculation. */
         switch (op) {
-        case 3: /* VNMLA: -fd + -(fn * fm) */
-            gen_vfp_mul(dp);
-            gen_vfp_F1_neg(dp);
-            gen_mov_F0_vreg(dp, rd);
-            gen_vfp_neg(dp);
-            gen_vfp_add(dp);
-            break;
         case 4: /* mul: fn * fm */
             gen_vfp_mul(dp);
             break;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VNMLS_sp     ---- 1110 0.01 .... .... 1010 .0.0 .... \
              vm=%vm_sp vn=%vn_sp vd=%vd_sp
 VNMLS_dp     ---- 1110 0.01 .... .... 1011 .0.0 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VNMLA_sp     ---- 1110 0.01 .... .... 1010 .1.0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VNMLA_dp     ---- 1110 0.01 .... .... 1011 .1.0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp
--
2.20.1

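With VNMLA converted, all four accumulate forms are out of the legacy decoder. Their shapes as plain-C reference expressions (a sketch only; the helpers above go through softfloat so that rounding and NaN propagation follow the Arm rules, which plain C arithmetic does not guarantee):

static float vmla_ref(float vd, float vn, float vm)  { return  vd +  (vn * vm); }
static float vmls_ref(float vd, float vn, float vm)  { return  vd + -(vn * vm); }
static float vnmls_ref(float vd, float vn, float vm) { return -vd +  (vn * vm); }
static float vnmla_ref(float vd, float vn, float vm) { return -vd + -(vn * vm); }
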
Deleted patch

Convert the VMUL instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 10 ++++++++++
 target/arm/translate.c         |  5 +----
 target/arm/vfp.decode          |  5 +++++
 3 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_sp *a)
 {
     return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
 }
+
+static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_sp *a)
+{
+    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     rn = VFP_SREG_N(insn);
 
     switch (op) {
-    case 0 ... 3:
+    case 0 ... 4:
         /* Already handled by decodetree */
         return 1;
     default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     for (;;) {
         /* Perform the calculation. */
         switch (op) {
-        case 4: /* mul: fn * fm */
-            gen_vfp_mul(dp);
-            break;
         case 5: /* nmul: -(fn * fm) */
             gen_vfp_mul(dp);
             gen_vfp_neg(dp);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VNMLA_sp     ---- 1110 0.01 .... .... 1010 .1.0 .... \
              vm=%vm_sp vn=%vn_sp vd=%vd_sp
 VNMLA_dp     ---- 1110 0.01 .... .... 1011 .1.0 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VMUL_sp      ---- 1110 0.10 .... .... 1010 .0.0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VMUL_dp      ---- 1110 0.10 .... .... 1011 .0.0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp
--
2.20.1

Deleted patch

Convert the VNMUL instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 24 ++++++++++++++++++++++++
 target/arm/translate.c         |  7 +------
 target/arm/vfp.decode          |  5 +++++
 3 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_sp *a)
 {
     return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
 }
+
+static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+    /* VNMUL: -(fn * fm) */
+    gen_helper_vfp_muls(vd, vn, vm, fpst);
+    gen_helper_vfp_negs(vd, vd);
+}
+
+static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
+}
+
+static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+    /* VNMUL: -(fn * fm) */
+    gen_helper_vfp_muld(vd, vn, vm, fpst);
+    gen_helper_vfp_negd(vd, vd);
+}
+
+static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_sp *a)
+{
+    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_vfp_##name(int dp) \
 
 VFP_OP2(add)
 VFP_OP2(sub)
-VFP_OP2(mul)
 VFP_OP2(div)
 
 #undef VFP_OP2
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     rn = VFP_SREG_N(insn);
 
     switch (op) {
-    case 0 ... 4:
+    case 0 ... 5:
         /* Already handled by decodetree */
         return 1;
     default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     for (;;) {
         /* Perform the calculation. */
         switch (op) {
-        case 5: /* nmul: -(fn * fm) */
-            gen_vfp_mul(dp);
-            gen_vfp_neg(dp);
-            break;
         case 6: /* add: fn + fm */
             gen_vfp_add(dp);
             break;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VMUL_sp      ---- 1110 0.10 .... .... 1010 .0.0 .... \
              vm=%vm_sp vn=%vn_sp vd=%vd_sp
 VMUL_dp      ---- 1110 0.10 .... .... 1011 .0.0 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VNMUL_sp     ---- 1110 0.10 .... .... 1010 .1.0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VNMUL_dp     ---- 1110 0.10 .... .... 1011 .1.0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp
--
2.20.1

Deleted patch

Convert the VADD instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 10 ++++++++++
 target/arm/translate.c         |  6 +-----
 target/arm/vfp.decode          |  5 +++++
 3 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_sp *a)
 {
     return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
 }
+
+static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VADD_dp(DisasContext *s, arg_VADD_sp *a)
+{
+    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_vfp_##name(int dp) \
     tcg_temp_free_ptr(fpst); \
 }
 
-VFP_OP2(add)
 VFP_OP2(sub)
 VFP_OP2(div)
 
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     rn = VFP_SREG_N(insn);
 
     switch (op) {
-    case 0 ... 5:
+    case 0 ... 6:
         /* Already handled by decodetree */
         return 1;
     default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     for (;;) {
         /* Perform the calculation. */
         switch (op) {
-        case 6: /* add: fn + fm */
-            gen_vfp_add(dp);
-            break;
         case 7: /* sub: fn - fm */
             gen_vfp_sub(dp);
             break;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VNMUL_sp     ---- 1110 0.10 .... .... 1010 .1.0 .... \
              vm=%vm_sp vn=%vn_sp vd=%vd_sp
 VNMUL_dp     ---- 1110 0.10 .... .... 1011 .1.0 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VADD_sp      ---- 1110 0.11 .... .... 1010 .0.0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VADD_dp      ---- 1110 0.11 .... .... 1011 .0.0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp
--
2.20.1

Deleted patch

Convert the VSUB instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 10 ++++++++++
 target/arm/translate.c         |  6 +-----
 target/arm/vfp.decode          |  5 +++++
 3 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VADD_dp(DisasContext *s, arg_VADD_sp *a)
 {
     return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
 }
+
+static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_sp *a)
+{
+    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_vfp_##name(int dp) \
     tcg_temp_free_ptr(fpst); \
 }
 
-VFP_OP2(sub)
 VFP_OP2(div)
 
 #undef VFP_OP2
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     rn = VFP_SREG_N(insn);
 
     switch (op) {
-    case 0 ... 6:
+    case 0 ... 7:
         /* Already handled by decodetree */
         return 1;
     default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     for (;;) {
         /* Perform the calculation. */
         switch (op) {
-        case 7: /* sub: fn - fm */
-            gen_vfp_sub(dp);
-            break;
         case 8: /* div: fn / fm */
             gen_vfp_div(dp);
             break;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VADD_sp      ---- 1110 0.11 .... .... 1010 .0.0 .... \
              vm=%vm_sp vn=%vn_sp vd=%vd_sp
 VADD_dp      ---- 1110 0.11 .... .... 1011 .0.0 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VSUB_sp      ---- 1110 0.11 .... .... 1010 .1.0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VSUB_dp      ---- 1110 0.11 .... .... 1011 .1.0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp
--
2.20.1

Deleted patch

Convert the VDIV instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 10 ++++++++++
 target/arm/translate.c         | 21 +--------------------
 target/arm/vfp.decode          |  5 +++++
 3 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_sp *a)
 {
     return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
 }
+
+static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
+{
+    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_sp *a)
+{
+    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
     return statusptr;
 }
 
-#define VFP_OP2(name) \
-static inline void gen_vfp_##name(int dp) \
-{ \
-    TCGv_ptr fpst = get_fpstatus_ptr(0); \
-    if (dp) { \
-        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
-    } else { \
-        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
-    } \
-    tcg_temp_free_ptr(fpst); \
-}
-
-VFP_OP2(div)
-
-#undef VFP_OP2
-
 static inline void gen_vfp_abs(int dp)
 {
     if (dp)
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     rn = VFP_SREG_N(insn);
 
     switch (op) {
-    case 0 ... 7:
+    case 0 ... 8:
         /* Already handled by decodetree */
         return 1;
     default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     for (;;) {
         /* Perform the calculation. */
         switch (op) {
-        case 8: /* div: fn / fm */
-            gen_vfp_div(dp);
-            break;
         case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
         case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
         case 12: /* VFMA  : fd = muladd( fd, fn, fm) */
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VSUB_sp      ---- 1110 0.11 .... .... 1010 .1.0 .... \
              vm=%vm_sp vn=%vn_sp vd=%vd_sp
 VSUB_dp      ---- 1110 0.11 .... .... 1011 .1.0 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VDIV_sp      ---- 1110 1.00 .... .... 1010 .0.0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp
+VDIV_dp      ---- 1110 1.00 .... .... 1011 .0.0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp
--
2.20.1

Deleted patch

Convert the VFP fused multiply-add instructions (VFNMA, VFNMS,
VFMA, VFMS) to decodetree.

Note that in the old decode structure we were implementing
these to honour the VFP vector stride/length. These instructions
were introduced in VFPv4, and in the v7A architecture they
are UNPREDICTABLE if the vector stride or length are non-zero.
In v8A they must UNDEF if stride or length are non-zero, like
all VFP instructions; we choose to UNDEF always.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 121 +++++++++++++++++++++++++++++++++
 target/arm/translate.c         |  53 +--------------
 target/arm/vfp.decode          |   9 +++
 3 files changed, 131 insertions(+), 52 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_sp *a)
 {
     return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
 }
+
+static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
+{
+    /*
+     * VFNMA : fd = muladd(-fd, fn, fm)
+     * VFNMS : fd = muladd(-fd, -fn, fm)
+     * VFMA  : fd = muladd( fd, fn, fm)
+     * VFMS  : fd = muladd( fd, -fn, fm)
+     *
+     * These are fused multiply-add, and must be done as one floating
+     * point operation with no rounding between the multiplication and
+     * addition steps. NB that doing the negations here as separate
+     * steps is correct: an input NaN should come out with its sign
+     * bit flipped if it is a negated input.
+     */
+    TCGv_ptr fpst;
+    TCGv_i32 vn, vm, vd;
+
+    /*
+     * Present in VFPv4 only.
+     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
+     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
+     */
+    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
+        (s->vec_len != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    vn = tcg_temp_new_i32();
+    vm = tcg_temp_new_i32();
+    vd = tcg_temp_new_i32();
+
+    neon_load_reg32(vn, a->vn);
+    neon_load_reg32(vm, a->vm);
+    if (a->o2) {
+        /* VFNMS, VFMS */
+        gen_helper_vfp_negs(vn, vn);
+    }
+    neon_load_reg32(vd, a->vd);
+    if (a->o1 & 1) {
+        /* VFNMA, VFNMS */
+        gen_helper_vfp_negs(vd, vd);
+    }
+    fpst = get_fpstatus_ptr(0);
+    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
+    neon_store_reg32(vd, a->vd);
+
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i32(vn);
+    tcg_temp_free_i32(vm);
+    tcg_temp_free_i32(vd);
+
+    return true;
+}
+
+static bool trans_VFM_dp(DisasContext *s, arg_VFM_sp *a)
+{
+    /*
+     * VFNMA : fd = muladd(-fd, fn, fm)
+     * VFNMS : fd = muladd(-fd, -fn, fm)
+     * VFMA  : fd = muladd( fd, fn, fm)
+     * VFMS  : fd = muladd( fd, -fn, fm)
+     *
+     * These are fused multiply-add, and must be done as one floating
+     * point operation with no rounding between the multiplication and
+     * addition steps. NB that doing the negations here as separate
+     * steps is correct: an input NaN should come out with its sign
+     * bit flipped if it is a negated input.
+     */
+    TCGv_ptr fpst;
+    TCGv_i64 vn, vm, vd;
+
+    /*
+     * Present in VFPv4 only.
+     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
+     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
+     */
+    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
+        (s->vec_len != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vn | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    vn = tcg_temp_new_i64();
+    vm = tcg_temp_new_i64();
+    vd = tcg_temp_new_i64();
+
+    neon_load_reg64(vn, a->vn);
+    neon_load_reg64(vm, a->vm);
+    if (a->o2) {
+        /* VFNMS, VFMS */
+        gen_helper_vfp_negd(vn, vn);
+    }
+    neon_load_reg64(vd, a->vd);
+    if (a->o1 & 1) {
+        /* VFNMA, VFNMS */
+        gen_helper_vfp_negd(vd, vd);
+    }
+    fpst = get_fpstatus_ptr(0);
+    gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
+    neon_store_reg64(vd, a->vd);
+
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i64(vn);
+    tcg_temp_free_i64(vm);
+    tcg_temp_free_i64(vd);
+
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     rn = VFP_SREG_N(insn);
 
     switch (op) {
-    case 0 ... 8:
+    case 0 ... 13:
         /* Already handled by decodetree */
         return 1;
     default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     for (;;) {
         /* Perform the calculation. */
         switch (op) {
-        case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
-        case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
-        case 12: /* VFMA  : fd = muladd( fd, fn, fm) */
-        case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
-            /* These are fused multiply-add, and must be done as one
-             * floating point operation with no rounding between the
-             * multiplication and addition steps.
-             * NB that doing the negations here as separate steps is
-             * correct : an input NaN should come out with its sign bit
-             * flipped if it is a negated-input.
-             */
-            if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
-                return 1;
-            }
-            if (dp) {
-                TCGv_ptr fpst;
-                TCGv_i64 frd;
-                if (op & 1) {
-                    /* VFNMS, VFMS */
-                    gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
-                }
-                frd = tcg_temp_new_i64();
-                tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
-                if (op & 2) {
-                    /* VFNMA, VFNMS */
-                    gen_helper_vfp_negd(frd, frd);
-                }
-                fpst = get_fpstatus_ptr(0);
-                gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
-                                       cpu_F1d, frd, fpst);
-                tcg_temp_free_ptr(fpst);
-                tcg_temp_free_i64(frd);
-            } else {
-                TCGv_ptr fpst;
-                TCGv_i32 frd;
-                if (op & 1) {
-                    /* VFNMS, VFMS */
-                    gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
-                }
-                frd = tcg_temp_new_i32();
-                tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
-                if (op & 2) {
-                    gen_helper_vfp_negs(frd, frd);
-                }
-                fpst = get_fpstatus_ptr(0);
-                gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
-                                       cpu_F1s, frd, fpst);
-                tcg_temp_free_ptr(fpst);
-                tcg_temp_free_i32(frd);
-            }
-            break;
         case 14: /* fconst */
             if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                 return 1;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VDIV_sp      ---- 1110 1.00 .... .... 1010 .0.0 .... \
              vm=%vm_sp vn=%vn_sp vd=%vd_sp
 VDIV_dp      ---- 1110 1.00 .... .... 1011 .0.0 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VFM_sp       ---- 1110 1.01 .... .... 1010 . o2:1 . 0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp o1=1
+VFM_dp       ---- 1110 1.01 .... .... 1011 . o2:1 . 0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=1
+VFM_sp       ---- 1110 1.10 .... .... 1010 . o2:1 . 0 .... \
+             vm=%vm_sp vn=%vn_sp vd=%vd_sp o1=2
+VFM_dp       ---- 1110 1.10 .... .... 1011 . o2:1 . 0 .... \
+             vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=2
--
2.20.1

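The o1/o2 handling in trans_VFM_sp() maps directly onto C99 fused multiply-add. A standalone sketch (not QEMU code; fmaf() uses the host's rounding, where the helpers use softfloat's muladd with the guest FP status):

#include <math.h>

/* vn * vm + vd, with o2 selecting the VFMS/VFNMS negation of vn and
 * (o1 & 1) selecting the VFNMA/VFNMS negation of vd, as above. */
static float vfm_ref(float vd, float vn, float vm, int o1, int o2)
{
    if (o2) {
        vn = -vn;       /* VFMS, VFNMS */
    }
    if (o1 & 1) {
        vd = -vd;       /* VFNMA, VFNMS */
    }
    return fmaf(vn, vm, vd);
}
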
Deleted patch

Convert the VFP VMOV (immediate) instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 129 +++++++++++++++++++++++++++++++++
 target/arm/translate.c         |  27 +------
 target/arm/vfp.decode          |   5 ++
 3 files changed, 136 insertions(+), 25 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_dp(DisasContext *s, arg_VFM_sp *a)
 
     return true;
 }
+
+static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
+{
+    uint32_t delta_d = 0;
+    uint32_t bank_mask = 0;
+    int veclen = s->vec_len;
+    TCGv_i32 fd;
+    uint32_t n, i, vd;
+
+    vd = a->vd;
+
+    if (!dc_isar_feature(aa32_fpshvec, s) &&
+        (veclen != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (veclen > 0) {
+        bank_mask = 0x18;
+        /* Figure out what type of vector operation this is. */
+        if ((vd & bank_mask) == 0) {
+            /* scalar */
+            veclen = 0;
+        } else {
+            delta_d = s->vec_stride + 1;
+        }
+    }
+
+    n = (a->imm4h << 28) & 0x80000000;
+    i = ((a->imm4h << 4) & 0x70) | a->imm4l;
+    if (i & 0x40) {
+        i |= 0x780;
+    } else {
+        i |= 0x800;
+    }
+    n |= i << 19;
+
+    fd = tcg_temp_new_i32();
+    tcg_gen_movi_i32(fd, n);
+
+    for (;;) {
+        neon_store_reg32(fd, vd);
+
+        if (veclen == 0) {
+            break;
+        }
+
+        /* Set up the operands for the next iteration */
+        veclen--;
+        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+    }
+
+    tcg_temp_free_i32(fd);
+    return true;
+}
+
+static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
+{
+    uint32_t delta_d = 0;
+    uint32_t bank_mask = 0;
+    int veclen = s->vec_len;
+    TCGv_i64 fd;
+    uint32_t n, i, vd;
+
+    vd = a->vd;
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) {
+        return false;
+    }
+
+    if (!dc_isar_feature(aa32_fpshvec, s) &&
+        (veclen != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (veclen > 0) {
+        bank_mask = 0xc;
+        /* Figure out what type of vector operation this is. */
+        if ((vd & bank_mask) == 0) {
+            /* scalar */
+            veclen = 0;
+        } else {
+            delta_d = (s->vec_stride >> 1) + 1;
+        }
+    }
+
+    n = (a->imm4h << 28) & 0x80000000;
+    i = ((a->imm4h << 4) & 0x70) | a->imm4l;
+    if (i & 0x40) {
+        i |= 0x3f80;
+    } else {
+        i |= 0x4000;
+    }
+    n |= i << 16;
+
+    fd = tcg_temp_new_i64();
+    tcg_gen_movi_i64(fd, ((uint64_t)n) << 32);
+
+    for (;;) {
+        neon_store_reg64(fd, vd);
+
+        if (veclen == 0) {
+            break;
+        }
+
+        /* Set up the operands for the next iteration */
+        veclen--;
+        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+    }
+
+    tcg_temp_free_i64(fd);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var)
  */
 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
 {
-    uint32_t rd, rn, rm, op, i, n, delta_d, delta_m, bank_mask;
+    uint32_t rd, rn, rm, op, delta_d, delta_m, bank_mask;
     int dp, veclen;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     rn = VFP_SREG_N(insn);
 
     switch (op) {
-    case 0 ... 13:
+    case 0 ... 14:
         /* Already handled by decodetree */
         return 1;
     default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     for (;;) {
         /* Perform the calculation. */
         switch (op) {
-        case 14: /* fconst */
-            if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
-                return 1;
-            }
-
-            n = (insn << 12) & 0x80000000;
-            i = ((insn >> 12) & 0x70) | (insn & 0xf);
-            if (dp) {
-                if (i & 0x40)
-                    i |= 0x3f80;
-                else
-                    i |= 0x4000;
-                n |= i << 16;
-                tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
-            } else {
-                if (i & 0x40)
-                    i |= 0x780;
-                else
-                    i |= 0x800;
-                n |= i << 19;
-                tcg_gen_movi_i32(cpu_F0s, n);
-            }
-            break;
         case 15: /* extension space */
             switch (rn) {
             case 0: /* cpy */
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VFM_sp       ---- 1110 1.10 .... .... 1010 . o2:1 . 0 .... \
              vm=%vm_sp vn=%vn_sp vd=%vd_sp o1=2
 VFM_dp       ---- 1110 1.10 .... .... 1011 . o2:1 . 0 .... \
              vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=2
+
+VMOV_imm_sp  ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \
+             vd=%vd_sp
+VMOV_imm_dp  ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \
+             vd=%vd_dp
--
2.20.1

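The bit manipulation on imm4h:imm4l above is the Arm ARM's VFPExpandImm. The single-precision case as a standalone sketch (not QEMU code), producing the same value as the 0x780/0x800 constants and the << 19 shift do:

#include <stdint.h>

/* Expand the 8-bit modified immediate abcdefgh into the float32 bit
 * pattern a NOT(b) bbbbb cdefgh followed by 19 zero bits. */
static uint32_t vfp_expand_imm_sp(uint32_t imm8)
{
    uint32_t a = (imm8 >> 7) & 1;
    uint32_t b = (imm8 >> 6) & 1;

    return (a << 31) |
           ((b ^ 1) << 30) |
           (b ? 0x1fu << 25 : 0) |
           ((imm8 & 0x3f) << 19);
}
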
Deleted patch

Convert the VFP VABS instruction to decodetree.

Unlike the 3-op versions, we don't pass fpst to the VFPGen2OpSPFn or
VFPGen2OpDPFn because none of the operations which use this format
and support short vectors will need it.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 167 +++++++++++++++++++++++++++++++++
 target/arm/translate.c         |  12 ++-
 target/arm/vfp.decode          |   5 +
 3 files changed, 180 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ typedef void VFPGen3OpSPFn(TCGv_i32 vd,
 typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                            TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
 
+/*
+ * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
+ * The callback should emit code to write a value to vd (which
+ * should be written to only).
+ */
+typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
+typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
+
 /*
  * Perform a 3-operand VFP data processing instruction. fn is the
  * callback to do the actual operation; this function deals with the
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
     return true;
 }
 
+static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
+{
+    uint32_t delta_m = 0;
+    uint32_t delta_d = 0;
+    uint32_t bank_mask = 0;
+    int veclen = s->vec_len;
+    TCGv_i32 f0, fd;
+
+    if (!dc_isar_feature(aa32_fpshvec, s) &&
+        (veclen != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (veclen > 0) {
+        bank_mask = 0x18;
+
+        /* Figure out what type of vector operation this is. */
+        if ((vd & bank_mask) == 0) {
+            /* scalar */
+            veclen = 0;
+        } else {
+            delta_d = s->vec_stride + 1;
+
+            if ((vm & bank_mask) == 0) {
+                /* mixed scalar/vector */
+                delta_m = 0;
+            } else {
+                /* vector */
+                delta_m = delta_d;
+            }
+        }
+    }
+
+    f0 = tcg_temp_new_i32();
+    fd = tcg_temp_new_i32();
+
+    neon_load_reg32(f0, vm);
+
+    for (;;) {
+        fn(fd, f0);
+        neon_store_reg32(fd, vd);
+
+        if (veclen == 0) {
+            break;
+        }
+
+        if (delta_m == 0) {
+            /* single source one-many */
+            while (veclen--) {
+                vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+                neon_store_reg32(fd, vd);
+            }
+            break;
+        }
+
+        /* Set up the operands for the next iteration */
+        veclen--;
+        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+        neon_load_reg32(f0, vm);
+    }
+
+    tcg_temp_free_i32(f0);
+    tcg_temp_free_i32(fd);
+
+    return true;
+}
+
+static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
+{
+    uint32_t delta_m = 0;
+    uint32_t delta_d = 0;
+    uint32_t bank_mask = 0;
+    int veclen = s->vec_len;
+    TCGv_i64 f0, fd;
+
+    /* UNDEF accesses to D16-D31 if they don't exist */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
+        return false;
+    }
+
+    if (!dc_isar_feature(aa32_fpshvec, s) &&
+        (veclen != 0 || s->vec_stride != 0)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    if (veclen > 0) {
+        bank_mask = 0xc;
+
+        /* Figure out what type of vector operation this is. */
+        if ((vd & bank_mask) == 0) {
+            /* scalar */
+            veclen = 0;
+        } else {
+            delta_d = (s->vec_stride >> 1) + 1;
+
+            if ((vm & bank_mask) == 0) {
+                /* mixed scalar/vector */
+                delta_m = 0;
+            } else {
+                /* vector */
+                delta_m = delta_d;
+            }
+        }
+    }
+
+    f0 = tcg_temp_new_i64();
+    fd = tcg_temp_new_i64();
+
+    neon_load_reg64(f0, vm);
+
+    for (;;) {
+        fn(fd, f0);
+        neon_store_reg64(fd, vd);
+
+        if (veclen == 0) {
+            break;
+        }
+
+        if (delta_m == 0) {
+            /* single source one-many */
+            while (veclen--) {
+                vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+                neon_store_reg64(fd, vd);
+            }
+            break;
+        }
+
+        /* Set up the operands for the next iteration */
+        veclen--;
+        vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+        vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+        neon_load_reg64(f0, vm);
+    }
+
+    tcg_temp_free_i64(f0);
+    tcg_temp_free_i64(fd);
+
+    return true;
+}
+
 static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
 {
     /* Note that order of inputs to the add matters for NaNs */
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
     tcg_temp_free_i64(fd);
     return true;
 }
+
+static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
+{
+    return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
+}
+
+static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
+{
+    return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     case 0 ... 14:
         /* Already handled by decodetree */
         return 1;
+    case 15:
+        switch (rn) {
+        case 1:
+            /* Already handled by decodetree */
+            return 1;
+        default:
+            break;
+        }
     default:
         break;
     }
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     /* rn is opcode, encoded as per VFP_SREG_N. */
     switch (rn) {
     case 0x00: /* vmov */
-    case 0x01: /* vabs */
     case 0x02: /* vneg */
     case 0x03: /* vsqrt */
         break;
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     case 0: /* cpy */
         /* no-op */
         break;
-    case 1: /* abs */
-        gen_vfp_abs(dp);
-        break;
     case 2: /* neg */
         gen_vfp_neg(dp);
         break;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VMOV_imm_sp  ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \
              vd=%vd_sp
 VMOV_imm_dp  ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \
              vd=%vd_dp
+
+VABS_sp      ---- 1110 1.11 0000 .... 1010 11.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VABS_dp      ---- 1110 1.11 0000 .... 1011 11.0 .... \
+             vd=%vd_dp vm=%vm_dp
--
2.20.1

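For reference, the veclen/bank_mask logic at the top of do_vfp_2op_sp() encodes the FPSCR.LEN/STRIDE short-vector rules: a destination in the first register bank is always scalar, and a source in the first bank gives the mixed one-to-many form. Just that classification step, as a sketch (not QEMU code):

/* How do_vfp_2op_sp() above classifies an operation: the
 * single-precision bank bits are 0x18, and bank 0 (S0-S7) forces
 * scalar behaviour whatever FPSCR.LEN requests. */
static const char *classify_sp_op(unsigned vd, unsigned vm, int veclen)
{
    const unsigned bank_mask = 0x18;

    if (veclen == 0 || (vd & bank_mask) == 0) {
        return "scalar";
    }
    return (vm & bank_mask) == 0 ? "mixed scalar/vector" : "vector";
}
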
Deleted patch

Convert the VNEG instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 10 ++++++++++
 target/arm/translate.c         |  6 +-----
 target/arm/vfp.decode          |  5 +++++
 3 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
 {
     return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
 }
+
+static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
+{
+    return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
+}
+
+static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
+{
+    return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         return 1;
     case 15:
         switch (rn) {
-        case 1:
+        case 1 ... 2:
             /* Already handled by decodetree */
             return 1;
         default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     /* rn is opcode, encoded as per VFP_SREG_N. */
     switch (rn) {
     case 0x00: /* vmov */
-    case 0x02: /* vneg */
     case 0x03: /* vsqrt */
         break;
 
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     case 0: /* cpy */
         /* no-op */
         break;
-    case 2: /* neg */
-        gen_vfp_neg(dp);
-        break;
     case 3: /* sqrt */
         gen_vfp_sqrt(dp);
         break;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VABS_sp      ---- 1110 1.11 0000 .... 1010 11.0 .... \
              vd=%vd_sp vm=%vm_sp
 VABS_dp      ---- 1110 1.11 0000 .... 1011 11.0 .... \
              vd=%vd_dp vm=%vm_dp
+
+VNEG_sp      ---- 1110 1.11 0001 .... 1010 01.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VNEG_dp      ---- 1110 1.11 0001 .... 1011 01.0 .... \
+             vd=%vd_dp vm=%vm_dp
--
2.20.1

Deleted patch

Convert the VSQRT instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 20 ++++++++++++++++++++
 target/arm/translate.c         | 14 +-------------
 target/arm/vfp.decode          |  5 +++++
 3 files changed, 26 insertions(+), 13 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
 {
     return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
 }
+
+static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
+{
+    gen_helper_vfp_sqrts(vd, vm, cpu_env);
+}
+
+static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a)
+{
+    return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm);
+}
+
+static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
+{
+    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
+}
+
+static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
+{
+    return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_vfp_neg(int dp)
     gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
 }
 
-static inline void gen_vfp_sqrt(int dp)
-{
-    if (dp)
-        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
-    else
-        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
-}
-
 static inline void gen_vfp_cmp(int dp)
 {
     if (dp)
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         return 1;
     case 15:
         switch (rn) {
-        case 1 ... 2:
+        case 1 ... 3:
             /* Already handled by decodetree */
             return 1;
         default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     /* rn is opcode, encoded as per VFP_SREG_N. */
     switch (rn) {
     case 0x00: /* vmov */
-    case 0x03: /* vsqrt */
         break;
 
     case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     case 0: /* cpy */
         /* no-op */
         break;
-    case 3: /* sqrt */
-        gen_vfp_sqrt(dp);
-        break;
     case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
     {
         TCGv_ptr fpst = get_fpstatus_ptr(false);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VNEG_sp      ---- 1110 1.11 0001 .... 1010 01.0 .... \
              vd=%vd_sp vm=%vm_sp
 VNEG_dp      ---- 1110 1.11 0001 .... 1011 01.0 .... \
              vd=%vd_dp vm=%vm_dp
+
+VSQRT_sp     ---- 1110 1.11 0001 .... 1010 11.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VSQRT_dp     ---- 1110 1.11 0001 .... 1011 11.0 .... \
+             vd=%vd_dp vm=%vm_dp
--
2.20.1

Deleted patch

Convert the VFP VMOV (register) instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 10 ++++++++++
 target/arm/translate.c         |  8 +-------
 target/arm/vfp.decode          |  5 +++++
 3 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
     return true;
 }
 
+static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a)
+{
+    return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm);
+}
+
+static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a)
+{
+    return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm);
+}
+
 static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
 {
     return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         return 1;
     case 15:
         switch (rn) {
-        case 1 ... 3:
+        case 0 ... 3:
             /* Already handled by decodetree */
             return 1;
         default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     if (op == 15) {
         /* rn is opcode, encoded as per VFP_SREG_N. */
         switch (rn) {
-        case 0x00: /* vmov */
-            break;
-
         case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */
         case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */
             /*
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     switch (op) {
     case 15: /* extension space */
         switch (rn) {
-        case 0: /* cpy */
-            /* no-op */
-            break;
         case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
         {
             TCGv_ptr fpst = get_fpstatus_ptr(false);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VMOV_imm_sp  ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \
 VMOV_imm_dp  ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \
              vd=%vd_dp
 
+VMOV_reg_sp  ---- 1110 1.11 0000 .... 1010 01.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VMOV_reg_dp  ---- 1110 1.11 0000 .... 1011 01.0 .... \
+             vd=%vd_dp vm=%vm_dp
+
 VABS_sp      ---- 1110 1.11 0000 .... 1010 11.0 .... \
              vd=%vd_sp vm=%vm_sp
 VABS_dp      ---- 1110 1.11 0000 .... 1011 11.0 .... \
--
2.20.1

Deleted patch

Convert the VFP comparison instructions to decodetree.

Note that comparison instructions should not honour the VFP
short-vector length and stride information: they are scalar-only
operations. This applies to all the 2-operand instructions except
for VMOV, VABS, VNEG and VSQRT. (In the old decoder this is
implemented via the "if (op == 15 && rn > 3) { veclen = 0; }" check.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c | 75 ++++++++++++++++++++++++++++++++++
 target/arm/translate.c         | 51 +----------------------
 target/arm/vfp.decode          |  5 +++
 3 files changed, 81 insertions(+), 50 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
 {
     return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
 }
+
+static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
+{
+    TCGv_i32 vd, vm;
+
+    /* Vm/M bits must be zero for the Z variant */
+    if (a->z && a->vm != 0) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    vd = tcg_temp_new_i32();
+    vm = tcg_temp_new_i32();
+
+    neon_load_reg32(vd, a->vd);
+    if (a->z) {
+        tcg_gen_movi_i32(vm, 0);
+    } else {
+        neon_load_reg32(vm, a->vm);
+    }
+
+    if (a->e) {
+        gen_helper_vfp_cmpes(vd, vm, cpu_env);
+    } else {
+        gen_helper_vfp_cmps(vd, vm, cpu_env);
+    }
+
+    tcg_temp_free_i32(vd);
+    tcg_temp_free_i32(vm);
+
+    return true;
+}
+
+static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
+{
+    TCGv_i64 vd, vm;
+
+    /* Vm/M bits must be zero for the Z variant */
+    if (a->z && a->vm != 0) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    vd = tcg_temp_new_i64();
+    vm = tcg_temp_new_i64();
+
+    neon_load_reg64(vd, a->vd);
+    if (a->z) {
+        tcg_gen_movi_i64(vm, 0);
+    } else {
+        neon_load_reg64(vm, a->vm);
+    }
+
+    if (a->e) {
+        gen_helper_vfp_cmped(vd, vm, cpu_env);
+    } else {
+        gen_helper_vfp_cmpd(vd, vm, cpu_env);
+    }
+
+    tcg_temp_free_i64(vd);
+    tcg_temp_free_i64(vm);
+
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_vfp_neg(int dp)
     gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
 }
 
-static inline void gen_vfp_cmp(int dp)
-{
-    if (dp)
-        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
-    else
-        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
-}
-
-static inline void gen_vfp_cmpe(int dp)
-{
-    if (dp)
-        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
-    else
-        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
-}
-
-static inline void gen_vfp_F1_ld0(int dp)
-{
-    if (dp)
-        tcg_gen_movi_i64(cpu_F1d, 0);
-    else
-        tcg_gen_movi_i32(cpu_F1s, 0);
-}
-
 #define VFP_GEN_ITOF(name) \
 static inline void gen_vfp_##name(int dp, int neon) \
 { \
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     case 15:
         switch (rn) {
         case 0 ... 3:
+        case 8 ... 11:
             /* Already handled by decodetree */
             return 1;
         default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         rd_is_dp = false;
         break;
 
-    case 0x08: case 0x0a: /* vcmp, vcmpz */
-    case 0x09: case 0x0b: /* vcmpe, vcmpez */
-        no_output = true;
-        break;
-
     case 0x0c: /* vrintr */
     case 0x0d: /* vrintz */
     case 0x0e: /* vrintx */
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     /* Load the initial operands. */
     if (op == 15) {
         switch (rn) {
-        case 0x08: case 0x09: /* Compare */
-            gen_mov_F0_vreg(dp, rd);
-            gen_mov_F1_vreg(dp, rm);
-            break;
-        case 0x0a: case 0x0b: /* Compare with zero */
-            gen_mov_F0_vreg(dp, rd);
-            gen_vfp_F1_ld0(dp);
-            break;
         case 0x14: /* vcvt fp <-> fixed */
         case 0x15:
         case 0x16:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
             gen_vfp_msr(tmp);
             break;
         }
-    case 8: /* cmp */
-        gen_vfp_cmp(dp);
-        break;
-    case 9: /* cmpe */
-        gen_vfp_cmpe(dp);
-        break;
-    case 10: /* cmpz */
-        gen_vfp_cmp(dp);
-        break;
-    case 11: /* cmpez */
-        gen_vfp_F1_ld0(dp);
-        gen_vfp_cmpe(dp);
-        break;
     case 12: /* vrintr */
     {
         TCGv_ptr fpst = get_fpstatus_ptr(0);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VSQRT_sp     ---- 1110 1.11 0001 .... 1010 11.0 .... \
              vd=%vd_sp vm=%vm_sp
 VSQRT_dp     ---- 1110 1.11 0001 .... 1011 11.0 .... \
              vd=%vd_dp vm=%vm_dp
+
+VCMP_sp      ---- 1110 1.11 010 z:1 .... 1010 e:1 1.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VCMP_dp      ---- 1110 1.11 010 z:1 .... 1011 e:1 1.0 .... \
+             vd=%vd_dp vm=%vm_dp
--
2.20.1

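The single VCMP pattern above covers four architectural forms via the z and e bits; summarised as data (a sketch, not QEMU code):

/* e selects the signalling compare, which raises Invalid Operation
 * for quiet NaNs as well as signalling ones; z compares against +0.0
 * and requires the Vm/M bits to be zero, as trans_VCMP_sp() checks. */
static const struct {
    int e, z;
    const char *name;
} vcmp_forms[] = {
    { 0, 0, "VCMP" },
    { 1, 0, "VCMPE" },
    { 0, 1, "VCMP (with zero)" },
    { 1, 1, "VCMPE (with zero)" },
};
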
Deleted patch
1
Convert the VCVTT, VCVTB instructions that deal with conversion
2
from half-precision floats to f32 or 64 to decodetree.
3
1
4
Since we're no longer constrained to the old decoder's style
5
using cpu_F0s and cpu_F0d we can perform a direct 16 bit
6
load of the right half of the input single-precision register
7
rather than loading the full 32 bits and then doing a
8
separate shift or sign-extension.
9
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
---
13
target/arm/translate-vfp.inc.c | 82 ++++++++++++++++++++++++++++++++++
14
target/arm/translate.c | 56 +----------------------
15
target/arm/vfp.decode | 6 +++
16
3 files changed, 89 insertions(+), 55 deletions(-)
17
18
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@
#include "decode-vfp.inc.c"
#include "decode-vfp-uncond.inc.c"

+/*
+ * Return the offset of a 16-bit half of the specified VFP single-precision
+ * register. If top is true, returns the top 16 bits; otherwise the bottom
+ * 16 bits.
+ */
+static inline long vfp_f16_offset(unsigned reg, bool top)
+{
+ long offs = vfp_reg_offset(false, reg);
+#ifdef HOST_WORDS_BIGENDIAN
+ if (!top) {
+ offs += 2;
+ }
+#else
+ if (top) {
+ offs += 2;
+ }
+#endif
+ return offs;
+}
+
/*
* Check that VFP access is enabled. If it is, do the necessary
* M-profile lazy-FP handling and then return true.
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)

return true;
}
+
+static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 ahp_mode;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fp16_spconv, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = get_fpstatus_ptr(false);
+ ahp_mode = get_ahp_flag();
+ tmp = tcg_temp_new_i32();
+ /* The T bit tells us if we want the low or high 16 bits of Vm */
+ tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
+ gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
+ neon_store_reg32(tmp, a->vd);
+ tcg_temp_free_i32(ahp_mode);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 ahp_mode;
+ TCGv_i32 tmp;
+ TCGv_i64 vd;
+
+ if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = get_fpstatus_ptr(false);
+ ahp_mode = get_ahp_flag();
+ tmp = tcg_temp_new_i32();
+ /* The T bit tells us if we want the low or high 16 bits of Vm */
+ tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
+ vd = tcg_temp_new_i64();
+ gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
+ neon_store_reg64(vd, a->vd);
+ tcg_temp_free_i32(ahp_mode);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i64(vd);
+ return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
return 1;
case 15:
switch (rn) {
- case 0 ... 3:
+ case 0 ... 5:
case 8 ... 11:
/* Already handled by decodetree */
return 1;
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
if (op == 15) {
/* rn is opcode, encoded as per VFP_SREG_N. */
switch (rn) {
- case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */
- case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */
- /*
- * VCVTB, VCVTT: only present with the halfprec extension
- * UNPREDICTABLE if bit 8 is set prior to ARMv8
- * (we choose to UNDEF)
- */
- if (dp) {
- if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
- return 1;
- }
- } else {
- if (!dc_isar_feature(aa32_fp16_spconv, s)) {
- return 1;
- }
- }
- rm_is_dp = false;
- break;
case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */
case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */
if (dp) {
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
switch (op) {
case 15: /* extension space */
switch (rn) {
- case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(false);
- TCGv_i32 ahp_mode = get_ahp_flag();
- tmp = gen_vfp_mrs();
- tcg_gen_ext16u_i32(tmp, tmp);
- if (dp) {
- gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
- fpst, ahp_mode);
- } else {
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
- fpst, ahp_mode);
- }
- tcg_temp_free_i32(ahp_mode);
- tcg_temp_free_ptr(fpst);
- tcg_temp_free_i32(tmp);
- break;
- }
- case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(false);
- TCGv_i32 ahp = get_ahp_flag();
- tmp = gen_vfp_mrs();
- tcg_gen_shri_i32(tmp, tmp, 16);
- if (dp) {
- gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
- fpst, ahp);
- } else {
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
- fpst, ahp);
- }
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(ahp);
- tcg_temp_free_ptr(fpst);
- break;
- }
case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
{
TCGv_ptr fpst = get_fpstatus_ptr(false);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VCMP_sp ---- 1110 1.11 010 z:1 .... 1010 e:1 1.0 .... \
vd=%vd_sp vm=%vm_sp
VCMP_dp ---- 1110 1.11 010 z:1 .... 1011 e:1 1.0 .... \
vd=%vd_dp vm=%vm_dp
+
+# VCVTT and VCVTB from f16: Vd format depends on size bit; Vm is always vm_sp
+VCVT_f32_f16 ---- 1110 1.11 0010 .... 1010 t:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 .... \
+ vd=%vd_dp vm=%vm_sp
--
2.20.1

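The vfp_f16_offset() helper added above works because the 16-bit half of interest sits two bytes further into the 32-bit register slot on exactly one of the two host endiannesses. A standalone sketch of the same idea, detecting endianness at runtime instead of via HOST_WORDS_BIGENDIAN (illustrative only, outside any QEMU infrastructure):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Byte offset of the top or bottom 16-bit half of a 32-bit slot,
     * detecting host endianness at runtime instead of compile time. */
    static size_t half_offset(bool top)
    {
        const uint32_t probe = 1;
        uint8_t bytes[sizeof(probe)];
        bool big_endian;

        memcpy(bytes, &probe, sizeof(probe));
        big_endian = (bytes[0] == 0);    /* LSB is stored last on BE */
        /* The requested half is 2 bytes in on exactly one endianness. */
        return (top != big_endian) ? 2 : 0;
    }
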
Deleted patch
Convert the VCVTT and VCVTB instructions which convert from
f32 and f64 to f16 to decodetree.

Since we're no longer constrained to the old decoder's style
using cpu_F0s and cpu_F0d, we can perform a direct 16-bit
store of the right half of the destination single-precision
register rather than doing a load/modify/store sequence on
the full 32 bits.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-vfp.inc.c | 62 ++++++++++++++++++++++++++
target/arm/translate.c | 79 +---------------------------------
target/arm/vfp.decode | 6 +++
3 files changed, 69 insertions(+), 78 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
tcg_temp_free_i64(vd);
return true;
}
+
+static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 ahp_mode;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fp16_spconv, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = get_fpstatus_ptr(false);
+ ahp_mode = get_ahp_flag();
+ tmp = tcg_temp_new_i32();
+
+ neon_load_reg32(tmp, a->vm);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
+ tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+ tcg_temp_free_i32(ahp_mode);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 ahp_mode;
+ TCGv_i32 tmp;
+ TCGv_i64 vm;
+
+ if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = get_fpstatus_ptr(false);
+ ahp_mode = get_ahp_flag();
+ tmp = tcg_temp_new_i32();
+ vm = tcg_temp_new_i64();
+
+ neon_load_reg64(vm, a->vm);
+ gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
+ tcg_temp_free_i64(vm);
+ tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+ tcg_temp_free_i32(ahp_mode);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)

-/* Move between integer and VFP cores. */
-static TCGv_i32 gen_vfp_mrs(void)
-{
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_mov_i32(tmp, cpu_F0s);
- return tmp;
-}
-
-static void gen_vfp_msr(TCGv_i32 tmp)
-{
- tcg_gen_mov_i32(cpu_F0s, tmp);
- tcg_temp_free_i32(tmp);
-}
-
static void gen_neon_dup_low16(TCGv_i32 var)
{
TCGv_i32 tmp = tcg_temp_new_i32();
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
uint32_t rd, rn, rm, op, delta_d, delta_m, bank_mask;
int dp, veclen;
- TCGv_i32 tmp;
- TCGv_i32 tmp2;

if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
return 1;
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
return 1;
case 15:
switch (rn) {
- case 0 ... 5:
- case 8 ... 11:
+ case 0 ... 11:
/* Already handled by decodetree */
return 1;
default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
if (op == 15) {
/* rn is opcode, encoded as per VFP_SREG_N. */
switch (rn) {
- case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */
- case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */
- if (dp) {
- if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
- return 1;
- }
- } else {
- if (!dc_isar_feature(aa32_fp16_spconv, s)) {
- return 1;
- }
- }
- rd_is_dp = false;
- break;
-
case 0x0c: /* vrintr */
case 0x0d: /* vrintz */
case 0x0e: /* vrintx */
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
switch (op) {
case 15: /* extension space */
switch (rn) {
- case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(false);
- TCGv_i32 ahp = get_ahp_flag();
- tmp = tcg_temp_new_i32();
-
- if (dp) {
- gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
- fpst, ahp);
- } else {
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
- fpst, ahp);
- }
- tcg_temp_free_i32(ahp);
- tcg_temp_free_ptr(fpst);
- gen_mov_F0_vreg(0, rd);
- tmp2 = gen_vfp_mrs();
- tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
- tcg_gen_or_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
- gen_vfp_msr(tmp);
- break;
- }
- case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(false);
- TCGv_i32 ahp = get_ahp_flag();
- tmp = tcg_temp_new_i32();
- if (dp) {
- gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
- fpst, ahp);
- } else {
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
- fpst, ahp);
- }
- tcg_temp_free_i32(ahp);
- tcg_temp_free_ptr(fpst);
- tcg_gen_shli_i32(tmp, tmp, 16);
- gen_mov_F0_vreg(0, rd);
- tmp2 = gen_vfp_mrs();
- tcg_gen_ext16u_i32(tmp2, tmp2);
- tcg_gen_or_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
- gen_vfp_msr(tmp);
- break;
- }
case 12: /* vrintr */
{
TCGv_ptr fpst = get_fpstatus_ptr(0);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VCVT_f32_f16 ---- 1110 1.11 0010 .... 1010 t:1 1.0 .... \
vd=%vd_sp vm=%vm_sp
VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 .... \
vd=%vd_dp vm=%vm_sp
+
+# VCVTB and VCVTT to f16: Vd format is always vd_sp; Vm format depends on size bit
+VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \
+ vd=%vd_sp vm=%vm_dp
--
2.20.1

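The payoff of the direct store is visible in the trans_VCVT_f16_f32() body above: one tcg_gen_st16_i32() replaces the old load/mask/or/store dance through cpu_F0s. In plain C terms, the difference is roughly the following (illustrative sketch; the little-endian offset is hard-coded here, where the real code computes it with vfp_f16_offset()):

    #include <stdint.h>
    #include <string.h>

    /* Old approach (via cpu_F0s): read-modify-write the whole 32 bits. */
    static void set_half_rmw(uint32_t *reg, uint16_t val, int top)
    {
        uint32_t keep = top ? 0x0000ffffu : 0xffff0000u;
        *reg = (*reg & keep) | ((uint32_t)val << (top ? 16 : 0));
    }

    /* New approach: store just the 16 bits at the right byte offset.
     * Little-endian layout assumed for brevity. */
    static void set_half_direct(uint32_t *reg, uint16_t val, int top)
    {
        memcpy((unsigned char *)reg + (top ? 2 : 0), &val, sizeof(val));
    }
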
Deleted patch
Convert the VFP round-to-integer instructions VRINTR, VRINTZ and
VRINTX to decodetree.

These instructions were only introduced as part of the "VFP misc"
additions in v8A, so we check for that feature. The old decoder
incorrectly provided them even on v7A CPUs.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-vfp.inc.c | 163 +++++++++++++++++++++++++++++++++
target/arm/translate.c | 45 +--------
target/arm/vfp.decode | 15 +++
3 files changed, 179 insertions(+), 44 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
tcg_temp_free_i32(tmp);
return true;
}
+
+static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ neon_load_reg32(tmp, a->vm);
+ fpst = get_fpstatus_ptr(false);
+ gen_helper_rints(tmp, tmp, fpst);
+ neon_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i64 tmp;
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i64();
+ neon_load_reg64(tmp, a->vm);
+ fpst = get_fpstatus_ptr(false);
+ gen_helper_rintd(tmp, tmp, fpst);
+ neon_store_reg64(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tmp);
+ return true;
+}
+
+static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+ TCGv_i32 tcg_rmode;
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ neon_load_reg32(tmp, a->vm);
+ fpst = get_fpstatus_ptr(false);
+ tcg_rmode = tcg_const_i32(float_round_to_zero);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_helper_rints(tmp, tmp, fpst);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ neon_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_rmode);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i64 tmp;
+ TCGv_i32 tcg_rmode;
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i64();
+ neon_load_reg64(tmp, a->vm);
+ fpst = get_fpstatus_ptr(false);
+ tcg_rmode = tcg_const_i32(float_round_to_zero);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_helper_rintd(tmp, tmp, fpst);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ neon_store_reg64(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tcg_rmode);
+ return true;
+}
+
+static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ neon_load_reg32(tmp, a->vm);
+ fpst = get_fpstatus_ptr(false);
+ gen_helper_rints_exact(tmp, tmp, fpst);
+ neon_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i64 tmp;
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i64();
+ neon_load_reg64(tmp, a->vm);
+ fpst = get_fpstatus_ptr(false);
+ gen_helper_rintd_exact(tmp, tmp, fpst);
+ neon_store_reg64(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tmp);
+ return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
return 1;
case 15:
switch (rn) {
- case 0 ... 11:
+ case 0 ... 14:
/* Already handled by decodetree */
return 1;
default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
if (op == 15) {
/* rn is opcode, encoded as per VFP_SREG_N. */
switch (rn) {
- case 0x0c: /* vrintr */
- case 0x0d: /* vrintz */
- case 0x0e: /* vrintx */
- break;
-
case 0x0f: /* vcvt double<->single */
rd_is_dp = !dp;
break;
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
switch (op) {
case 15: /* extension space */
switch (rn) {
- case 12: /* vrintr */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(0);
- if (dp) {
- gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
- } else {
- gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
- }
- tcg_temp_free_ptr(fpst);
- break;
- }
- case 13: /* vrintz */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(0);
- TCGv_i32 tcg_rmode;
- tcg_rmode = tcg_const_i32(float_round_to_zero);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
- if (dp) {
- gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
- } else {
- gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
- }
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
- tcg_temp_free_i32(tcg_rmode);
- tcg_temp_free_ptr(fpst);
- break;
- }
- case 14: /* vrintx */
- {
- TCGv_ptr fpst = get_fpstatus_ptr(0);
- if (dp) {
- gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
- } else {
- gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
- }
- tcg_temp_free_ptr(fpst);
- break;
- }
case 15: /* single<->double conversion */
if (dp) {
gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \
vd=%vd_sp vm=%vm_sp
VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \
vd=%vd_sp vm=%vm_dp
+
+VRINTR_sp ---- 1110 1.11 0110 .... 1010 01.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VRINTR_dp ---- 1110 1.11 0110 .... 1011 01.0 .... \
+ vd=%vd_dp vm=%vm_dp
+
+VRINTZ_sp ---- 1110 1.11 0110 .... 1010 11.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VRINTZ_dp ---- 1110 1.11 0110 .... 1011 11.0 .... \
+ vd=%vd_dp vm=%vm_dp
+
+VRINTX_sp ---- 1110 1.11 0111 .... 1010 01.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VRINTX_dp ---- 1110 1.11 0111 .... 1011 01.0 .... \
+ vd=%vd_dp vm=%vm_dp
--
2.20.1

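The VRINTZ handlers above use a common TCG idiom: gen_helper_set_rmode() installs a rounding mode and hands back the previous one in the same temporary, so a second identical call restores the caller's mode around the operation. The C-library analogue of that bracket looks like this (illustrative only; strictly speaking FENV_ACCESS must be enabled for this to be well-defined):

    #include <fenv.h>
    #include <math.h>

    /* Round to integral toward zero, then restore the caller's mode. */
    static double rint_toward_zero(double x)
    {
        int old_mode = fegetround();
        double r;

        fesetround(FE_TOWARDZERO);
        r = rint(x);            /* rint() honours the current mode */
        fesetround(old_mode);   /* restore, like the second set_rmode */
        return r;
    }
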
Deleted patch
Convert the VCVT double/single precision conversion insns to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-vfp.inc.c | 48 ++++++++++++++++++++++++++++++++++
target/arm/translate.c | 13 +--------
target/arm/vfp.decode | 6 +++++
3 files changed, 55 insertions(+), 12 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
tcg_temp_free_i64(tmp);
return true;
}
+
+static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
+{
+ TCGv_i64 vd;
+ TCGv_i32 vm;
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vm = tcg_temp_new_i32();
+ vd = tcg_temp_new_i64();
+ neon_load_reg32(vm, a->vm);
+ gen_helper_vfp_fcvtds(vd, vm, cpu_env);
+ neon_store_reg64(vd, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_i64(vd);
+ return true;
+}
+
+static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
+{
+ TCGv_i64 vm;
+ TCGv_i32 vd;
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vd = tcg_temp_new_i32();
+ vm = tcg_temp_new_i64();
+ neon_load_reg64(vm, a->vm);
+ gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
+ neon_store_reg32(vd, a->vd);
+ tcg_temp_free_i32(vd);
+ tcg_temp_free_i64(vm);
+ return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
return 1;
case 15:
switch (rn) {
- case 0 ... 14:
+ case 0 ... 15:
/* Already handled by decodetree */
return 1;
default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
if (op == 15) {
/* rn is opcode, encoded as per VFP_SREG_N. */
switch (rn) {
- case 0x0f: /* vcvt double<->single */
- rd_is_dp = !dp;
- break;
-
case 0x10: /* vcvt.fxx.u32 */
case 0x11: /* vcvt.fxx.s32 */
rm_is_dp = false;
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
switch (op) {
case 15: /* extension space */
switch (rn) {
- case 15: /* single<->double conversion */
- if (dp) {
- gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
- } else {
- gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
- }
- break;
case 16: /* fuito */
gen_vfp_uito(dp, 0);
break;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VRINTX_sp ---- 1110 1.11 0111 .... 1010 01.0 .... \
vd=%vd_sp vm=%vm_sp
VRINTX_dp ---- 1110 1.11 0111 .... 1011 01.0 .... \
vd=%vd_dp vm=%vm_dp
+
+# VCVT between single and double: Vm precision depends on size; Vd is its reverse
+VCVT_sp ---- 1110 1.11 0111 .... 1010 11.0 .... \
+ vd=%vd_dp vm=%vm_sp
+VCVT_dp ---- 1110 1.11 0111 .... 1011 11.0 .... \
+ vd=%vd_sp vm=%vm_dp
--
2.20.1

Deleted patch
Convert the VCVT integer-to-float instructions to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-vfp.inc.c | 58 ++++++++++++++++++++++++++++++++++
target/arm/translate.c | 12 +------
target/arm/vfp.decode | 6 ++++
3 files changed, 65 insertions(+), 11 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
tcg_temp_free_i64(vm);
return true;
}
+
+static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
+{
+ TCGv_i32 vm;
+ TCGv_ptr fpst;
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vm = tcg_temp_new_i32();
+ neon_load_reg32(vm, a->vm);
+ fpst = get_fpstatus_ptr(false);
+ if (a->s) {
+ /* i32 -> f32 */
+ gen_helper_vfp_sitos(vm, vm, fpst);
+ } else {
+ /* u32 -> f32 */
+ gen_helper_vfp_uitos(vm, vm, fpst);
+ }
+ neon_store_reg32(vm, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
+{
+ TCGv_i32 vm;
+ TCGv_i64 vd;
+ TCGv_ptr fpst;
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vm = tcg_temp_new_i32();
+ vd = tcg_temp_new_i64();
+ neon_load_reg32(vm, a->vm);
+ fpst = get_fpstatus_ptr(false);
+ if (a->s) {
+ /* i32 -> f64 */
+ gen_helper_vfp_sitod(vd, vm, fpst);
+ } else {
+ /* u32 -> f64 */
+ gen_helper_vfp_uitod(vd, vm, fpst);
+ }
+ neon_store_reg64(vd, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_i64(vd);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
return 1;
case 15:
switch (rn) {
- case 0 ... 15:
+ case 0 ... 17:
/* Already handled by decodetree */
return 1;
default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
if (op == 15) {
/* rn is opcode, encoded as per VFP_SREG_N. */
switch (rn) {
- case 0x10: /* vcvt.fxx.u32 */
- case 0x11: /* vcvt.fxx.s32 */
- rm_is_dp = false;
- break;
case 0x18: /* vcvtr.u32.fxx */
case 0x19: /* vcvtz.u32.fxx */
case 0x1a: /* vcvtr.s32.fxx */
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
switch (op) {
case 15: /* extension space */
switch (rn) {
- case 16: /* fuito */
- gen_vfp_uito(dp, 0);
- break;
- case 17: /* fsito */
- gen_vfp_sito(dp, 0);
- break;
case 19: /* vjcvt */
gen_helper_vjcvt(cpu_F0s, cpu_F0d, cpu_env);
break;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VCVT_sp ---- 1110 1.11 0111 .... 1010 11.0 .... \
vd=%vd_dp vm=%vm_sp
VCVT_dp ---- 1110 1.11 0111 .... 1011 11.0 .... \
vd=%vd_sp vm=%vm_dp
+
+# VCVT from integer to floating point: Vm always single; Vd depends on size
+VCVT_int_sp ---- 1110 1.11 1000 .... 1010 s:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCVT_int_dp ---- 1110 1.11 1000 .... 1011 s:1 1.0 .... \
+ vd=%vd_dp vm=%vm_sp
--
2.20.1

Deleted patch
Convert the VJCVT instruction to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-vfp.inc.c | 28 ++++++++++++++++++++++++++++
target/arm/translate.c | 12 +-----------
target/arm/vfp.decode | 4 ++++
3 files changed, 33 insertions(+), 11 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
tcg_temp_free_ptr(fpst);
return true;
}
+
+static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
+{
+ TCGv_i32 vd;
+ TCGv_i64 vm;
+
+ if (!dc_isar_feature(aa32_jscvt, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vm = tcg_temp_new_i64();
+ vd = tcg_temp_new_i32();
+ neon_load_reg64(vm, a->vm);
+ gen_helper_vjcvt(vd, vm, cpu_env);
+ neon_store_reg32(vd, a->vd);
+ tcg_temp_free_i64(vm);
+ tcg_temp_free_i32(vd);
+ return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
return 1;
case 15:
switch (rn) {
- case 0 ... 17:
+ case 0 ... 19:
/* Already handled by decodetree */
return 1;
default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
rm_is_dp = false;
break;

- case 0x13: /* vjcvt */
- if (!dp || !dc_isar_feature(aa32_jscvt, s)) {
- return 1;
- }
- rd_is_dp = false;
- break;
-
default:
return 1;
}
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
switch (op) {
case 15: /* extension space */
switch (rn) {
- case 19: /* vjcvt */
- gen_helper_vjcvt(cpu_F0s, cpu_F0d, cpu_env);
- break;
case 20: /* fshto */
gen_vfp_shto(dp, 16 - rm, 0);
break;
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VCVT_int_sp ---- 1110 1.11 1000 .... 1010 s:1 1.0 .... \
vd=%vd_sp vm=%vm_sp
VCVT_int_dp ---- 1110 1.11 1000 .... 1011 s:1 1.0 .... \
vd=%vd_dp vm=%vm_sp
+
+# VJCVT is always dp to sp
+VJCVT ---- 1110 1.11 1001 .... 1011 11.0 .... \
+ vd=%vd_sp vm=%vm_dp
--
2.20.1

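For context, VJCVT is the v8.3 "JavaScript conversion": the double is converted with round-toward-zero and the result is the low 32 bits of the integer, with NaN and infinities going to 0, which is what ECMAScript's ToInt32 needs. A rough scalar model of those semantics (illustrative only; it ignores the FPSCR flag-setting side of the real instruction):

    #include <math.h>
    #include <stdint.h>

    static int32_t js_to_int32(double d)
    {
        if (!isfinite(d)) {
            return 0;                  /* NaN, +Inf, -Inf -> 0 */
        }
        d = trunc(d);                  /* round toward zero */
        d = fmod(d, 4294967296.0);     /* keep the low 32 bits... */
        if (d < 0) {
            d += 4294967296.0;
        }
        return (int32_t)(uint32_t)d;   /* ...reinterpreted as signed */
    }
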
Deleted patch
Convert the VCVT (between floating-point and fixed-point) instructions
to decodetree.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-vfp.inc.c | 124 +++++++++++++++++++++++++++++++++
target/arm/translate.c | 57 +--------------
target/arm/vfp.decode | 10 +++
3 files changed, 136 insertions(+), 55 deletions(-)

diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
tcg_temp_free_i32(vd);
return true;
}
+
+static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
+{
+ TCGv_i32 vd, shift;
+ TCGv_ptr fpst;
+ int frac_bits;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
+
+ vd = tcg_temp_new_i32();
+ neon_load_reg32(vd, a->vd);
+
+ fpst = get_fpstatus_ptr(false);
+ shift = tcg_const_i32(frac_bits);
+
+ /* Switch on op:U:sx bits */
+ switch (a->opc) {
+ case 0:
+ gen_helper_vfp_shtos(vd, vd, shift, fpst);
+ break;
+ case 1:
+ gen_helper_vfp_sltos(vd, vd, shift, fpst);
+ break;
+ case 2:
+ gen_helper_vfp_uhtos(vd, vd, shift, fpst);
+ break;
+ case 3:
+ gen_helper_vfp_ultos(vd, vd, shift, fpst);
+ break;
+ case 4:
+ gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 5:
+ gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 6:
+ gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 7:
+ gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ neon_store_reg32(vd, a->vd);
+ tcg_temp_free_i32(vd);
+ tcg_temp_free_i32(shift);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
+{
+ TCGv_i64 vd;
+ TCGv_i32 shift;
+ TCGv_ptr fpst;
+ int frac_bits;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
+
+ vd = tcg_temp_new_i64();
+ neon_load_reg64(vd, a->vd);
+
+ fpst = get_fpstatus_ptr(false);
+ shift = tcg_const_i32(frac_bits);
+
+ /* Switch on op:U:sx bits */
+ switch (a->opc) {
+ case 0:
+ gen_helper_vfp_shtod(vd, vd, shift, fpst);
+ break;
+ case 1:
+ gen_helper_vfp_sltod(vd, vd, shift, fpst);
+ break;
+ case 2:
+ gen_helper_vfp_uhtod(vd, vd, shift, fpst);
+ break;
+ case 3:
+ gen_helper_vfp_ultod(vd, vd, shift, fpst);
+ break;
+ case 4:
+ gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 5:
+ gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 6:
+ gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 7:
+ gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ neon_store_reg64(vd, a->vd);
+ tcg_temp_free_i64(vd);
+ tcg_temp_free_i32(shift);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_vfp_##name(int dp, int shift, int neon) \
tcg_temp_free_i32(tmp_shift); \
tcg_temp_free_ptr(statusptr); \
}
-VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
-VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
-VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
-VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
return 1;
case 15:
switch (rn) {
- case 0 ... 19:
+ case 0 ... 23:
+ case 28 ... 31:
/* Already handled by decodetree */
return 1;
default:
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
rd_is_dp = false;
break;

- case 0x14: /* vcvt fp <-> fixed */
- case 0x15:
- case 0x16:
- case 0x17:
- case 0x1c:
- case 0x1d:
- case 0x1e:
- case 0x1f:
- if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
- return 1;
- }
- /* Immediate frac_bits has same format as SREG_M. */
- rm_is_dp = false;
- break;
-
default:
return 1;
}
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
/* Load the initial operands. */
if (op == 15) {
switch (rn) {
- case 0x14: /* vcvt fp <-> fixed */
- case 0x15:
- case 0x16:
- case 0x17:
- case 0x1c:
- case 0x1d:
- case 0x1e:
- case 0x1f:
- /* Source and destination the same. */
- gen_mov_F0_vreg(dp, rd);
- break;
default:
/* One source operand. */
gen_mov_F0_vreg(rm_is_dp, rm);
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
switch (op) {
case 15: /* extension space */
switch (rn) {
- case 20: /* fshto */
- gen_vfp_shto(dp, 16 - rm, 0);
- break;
- case 21: /* fslto */
- gen_vfp_slto(dp, 32 - rm, 0);
- break;
- case 22: /* fuhto */
- gen_vfp_uhto(dp, 16 - rm, 0);
- break;
- case 23: /* fulto */
- gen_vfp_ulto(dp, 32 - rm, 0);
- break;
case 24: /* ftoui */
gen_vfp_toui(dp, 0);
break;
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
case 27: /* ftosiz */
gen_vfp_tosiz(dp, 0);
break;
- case 28: /* ftosh */
- gen_vfp_tosh(dp, 16 - rm, 0);
- break;
- case 29: /* ftosl */
- gen_vfp_tosl(dp, 32 - rm, 0);
- break;
- case 30: /* ftouh */
- gen_vfp_touh(dp, 16 - rm, 0);
- break;
- case 31: /* ftoul */
- gen_vfp_toul(dp, 32 - rm, 0);
- break;
default: /* undefined */
g_assert_not_reached();
}
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -XXX,XX +XXX,XX @@ VCVT_int_dp ---- 1110 1.11 1000 .... 1011 s:1 1.0 .... \
# VJCVT is always dp to sp
VJCVT ---- 1110 1.11 1001 .... 1011 11.0 .... \
vd=%vd_sp vm=%vm_dp
+
+# VCVT between floating-point and fixed-point. The immediate value
+# is in the same format as a Vm single-precision register number.
+# We assemble bits 18 (op), 16 (u) and 7 (sx) into a single opc field
+# for the convenience of the trans_VCVT_fix functions.
+%vcvt_fix_op 18:1 16:1 7:1
+VCVT_fix_sp ---- 1110 1.11 1.1. .... 1010 .1.0 .... \
+ vd=%vd_sp imm=%vm_sp opc=%vcvt_fix_op
+VCVT_fix_dp ---- 1110 1.11 1.1. .... 1011 .1.0 .... \
+ vd=%vd_dp imm=%vm_sp opc=%vcvt_fix_op
--
2.20.1

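The %vcvt_fix_op line above glues three scattered encoding bits (18, 16 and 7) into one opc value, most significant first, so the trans functions can switch on op:U:sx directly; bit 0 of opc is sx, which selects the 16-bit or 32-bit fixed-point format when computing frac_bits. A small model of that packing and of the frac_bits arithmetic (hypothetical helpers for illustration, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    /* opc = op:U:sx as assembled by %vcvt_fix_op (bits 18, 16, 7). */
    static int vcvt_fix_opc(uint32_t insn)
    {
        return (((insn >> 18) & 1) << 2)
             | (((insn >> 16) & 1) << 1)
             |  ((insn >> 7) & 1);
    }

    /* frac_bits as computed in trans_VCVT_fix_sp()/_dp(): sx picks
     * between a 16-bit and a 32-bit fixed-point format. */
    static int vcvt_frac_bits(int opc, int imm)
    {
        return (opc & 1) ? (32 - imm) : (16 - imm);
    }

    int main(void)
    {
        assert(vcvt_frac_bits(5, 0) == 32);  /* ftosl, imm 0 */
        assert(vcvt_frac_bits(4, 4) == 12);  /* ftosh, imm 4 */
        return 0;
    }
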