Latest arm queue, half minor code cleanups and half minor
bug fixes.

thanks
-- PMM

The following changes since commit 5d0e5694470d2952b4f257bc985cac8c89b4fd92:

  Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging (2019-06-17 11:55:14 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20190617

for you to fetch changes up to 1120827fa182f0e76226df7ffe7a86598d1df54f:

  target/arm: Only implement doubles if the FPU supports them (2019-06-17 15:15:06 +0100)

----------------------------------------------------------------
target-arm queue:
 * support large kernel images in bootloader (by avoiding
   putting the initrd over the top of them)
 * correctly disable FPU/DSP in the CPU for the mps2-an521, musca-a boards
 * arm_gicv3: Fix decoding of ID register range
 * arm_gicv3: GICD_TYPER.SecurityExtn is RAZ if GICD_CTLR.DS == 1
 * some code cleanups following on from the VFP decodetree conversion
 * Only implement doubles if the FPU supports them
   (so we now correctly model Cortex-M4, -M33 as single precision only)

----------------------------------------------------------------
Peter Maydell (24):
      hw/arm/boot: Don't assume RAM starts at address zero
      hw/arm/boot: Diagnose layouts that put initrd or DTB off the end of RAM
      hw/arm/boot: Avoid placing the initrd on top of the kernel
      hw/arm/boot: Honour image size field in AArch64 Image format kernels
      target/arm: Allow VFP and Neon to be disabled via a CPU property
      target/arm: Allow M-profile CPUs to disable the DSP extension via CPU property
      hw/arm/armv7m: Forward "vfp" and "dsp" properties to CPU
      hw/arm: Correctly disable FPU/DSP for some ARMSSE-based boards
      hw/intc/arm_gicv3: Fix decoding of ID register range
      hw/intc/arm_gicv3: GICD_TYPER.SecurityExtn is RAZ if GICD_CTLR.DS == 1
      target/arm: Move vfp_expand_imm() to translate.[ch]
      target/arm: Use vfp_expand_imm() for AArch32 VFP VMOV_imm
      target/arm: Stop using cpu_F0s for NEON_2RM_VABS_F
      target/arm: Stop using cpu_F0s for NEON_2RM_VNEG_F
      target/arm: Stop using cpu_F0s for NEON_2RM_VRINT*
      target/arm: Stop using cpu_F0s for NEON_2RM_VCVT[ANPM][US]
      target/arm: Stop using cpu_F0s for NEON_2RM_VRECPE_F and NEON_2RM_VRSQRTE_F
      target/arm: Stop using cpu_F0s for Neon f32/s32 VCVT
      target/arm: Stop using cpu_F0s in Neon VCVT fixed-point ops
      target/arm: stop using deprecated functions in NEON_2RM_VCVT_F16_F32
      target/arm: Stop using deprecated functions in NEON_2RM_VCVT_F32_F16
      target/arm: Remove unused cpu_F0s, cpu_F0d, cpu_F1s, cpu_F1d
      target/arm: Fix typos in trans function prototypes
      target/arm: Only implement doubles if the FPU supports them

 include/hw/arm/armsse.h        |   7 ++
 include/hw/arm/armv7m.h        |   4 +
 target/arm/cpu.h               |  12 +++
 target/arm/translate-a64.h     |   1 -
 target/arm/translate.h         |   7 ++
 hw/arm/armsse.c                |  58 +++++++---
 hw/arm/armv7m.c                |  18 ++++
 hw/arm/boot.c                  |  83 ++++++++++----
 hw/arm/musca.c                 |   8 ++
 hw/intc/arm_gicv3_dist.c       |  12 ++-
 hw/intc/arm_gicv3_redist.c     |   4 +-
 target/arm/cpu.c               | 179 ++++++++++++++++++++++++++++--
 target/arm/translate-a64.c     |  32 ------
 target/arm/translate-vfp.inc.c | 173 ++++++++++++++++++++-------
 target/arm/translate.c         | 240 ++++++++++++++---------------------------
 target/arm/vfp.decode          |  10 +-
 16 files changed, 572 insertions(+), 276 deletions(-)

A last small test of bug fixes before rc1.

-- PMM

The following changes since commit ed8ad9728a9c0eec34db9dff61dfa2f1dd625637:

  Merge tag 'pull-tpm-2023-07-14-1' of https://github.com/stefanberger/qemu-tpm into staging (2023-07-15 14:54:04 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230717

for you to fetch changes up to c2c1c4a35c7c2b1a4140b0942b9797c857e476a4:

  hw/nvram: Avoid unnecessary Xilinx eFuse backstore write (2023-07-17 11:05:52 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/arm/sbsa-ref: set 'slots' property of xhci
 * linux-user: Remove pointless NULL check in clock_adjtime handling
 * ptw: Fix S1_ptw_translate() debug path
 * ptw: Account for FEAT_RME when applying {N}SW, SA bits
 * accel/tcg: Zero-pad PC in TCG CPU exec trace lines
 * hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

----------------------------------------------------------------
Peter Maydell (5):
      linux-user: Remove pointless NULL check in clock_adjtime handling
      target/arm/ptw.c: Add comments to S1Translate struct fields
      target/arm: Fix S1_ptw_translate() debug path
      target/arm/ptw.c: Account for FEAT_RME when applying {N}SW, SA bits
      accel/tcg: Zero-pad PC in TCG CPU exec trace lines

Tong Ho (1):
      hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

Yuquan Wang (1):
      hw/arm/sbsa-ref: set 'slots' property of xhci

 accel/tcg/cpu-exec.c      |  4 +--
 accel/tcg/translate-all.c |  2 +-
 hw/arm/sbsa-ref.c         |  1 +
 hw/nvram/xlnx-efuse.c     | 11 ++++--
 linux-user/syscall.c      | 12 +++----
 target/arm/ptw.c          | 90 +++++++++++++++++++++++++++++++++++++++++------
 6 files changed, 98 insertions(+), 22 deletions(-)
Deleted patch
1
In the Arm kernel/initrd loading code, in some places we make the
2
incorrect assumption that info->ram_size can be treated as the
3
address of the end of RAM, as for instance when we calculate the
4
available space for the initrd using "info->ram_size - info->initrd_start".
5
This is wrong, because many Arm boards (including "virt") specify
6
a non-zero info->loader_start to indicate that their RAM area
7
starts at a non-zero physical address.
8
1
9
Correct the places which make this incorrect assumption.
10
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Tested-by: Mark Rutland <mark.rutland@arm.com>
14
Message-id: 20190516144733.32399-2-peter.maydell@linaro.org
15
---
16
hw/arm/boot.c | 9 ++++-----
17
1 file changed, 4 insertions(+), 5 deletions(-)
18
19
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/arm/boot.c
22
+++ b/hw/arm/boot.c
23
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
24
int elf_machine;
25
hwaddr entry;
26
static const ARMInsnFixup *primary_loader;
27
+ uint64_t ram_end = info->loader_start + info->ram_size;
28
29
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
30
primary_loader = bootloader_aarch64;
31
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
32
/* 32-bit ARM */
33
entry = info->loader_start + KERNEL_LOAD_ADDR;
34
kernel_size = load_image_targphys_as(info->kernel_filename, entry,
35
- info->ram_size - KERNEL_LOAD_ADDR,
36
- as);
37
+ ram_end - KERNEL_LOAD_ADDR, as);
38
is_linux = 1;
39
}
40
if (kernel_size < 0) {
41
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
42
if (info->initrd_filename) {
43
initrd_size = load_ramdisk_as(info->initrd_filename,
44
info->initrd_start,
45
- info->ram_size - info->initrd_start,
46
- as);
47
+ ram_end - info->initrd_start, as);
48
if (initrd_size < 0) {
49
initrd_size = load_image_targphys_as(info->initrd_filename,
50
info->initrd_start,
51
- info->ram_size -
52
+ ram_end -
53
info->initrd_start,
54
as);
55
}
56
--
57
2.20.1
58
59
Deleted patch
1
We calculate the locations in memory where we want to put the
2
initrd and the DTB based on the size of the kernel, since they
3
come after it. Add some explicit checks that these aren't off the
4
end of RAM entirely.
5
1
6
(At the moment the way we calculate the initrd_start means that
7
it can't ever be off the end of RAM, but that will change with
8
the next commit.)
9
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Tested-by: Mark Rutland <mark.rutland@arm.com>
13
Message-id: 20190516144733.32399-3-peter.maydell@linaro.org
14
---
15
hw/arm/boot.c | 23 +++++++++++++++++++++++
16
1 file changed, 23 insertions(+)
17
18
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/arm/boot.c
21
+++ b/hw/arm/boot.c
22
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
23
error_report("could not load kernel '%s'", info->kernel_filename);
24
exit(1);
25
}
26
+
27
+ if (kernel_size > info->ram_size) {
28
+ error_report("kernel '%s' is too large to fit in RAM "
29
+ "(kernel size %d, RAM size %" PRId64 ")",
30
+ info->kernel_filename, kernel_size, info->ram_size);
31
+ exit(1);
32
+ }
33
+
34
info->entry = entry;
35
if (is_linux) {
36
uint32_t fixupcontext[FIXUP_MAX];
37
38
if (info->initrd_filename) {
39
+
40
+ if (info->initrd_start >= ram_end) {
41
+ error_report("not enough space after kernel to load initrd");
42
+ exit(1);
43
+ }
44
+
45
initrd_size = load_ramdisk_as(info->initrd_filename,
46
info->initrd_start,
47
ram_end - info->initrd_start, as);
48
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
49
info->initrd_filename);
50
exit(1);
51
}
52
+ if (info->initrd_start + initrd_size > info->ram_size) {
53
+ error_report("could not load initrd '%s': "
54
+ "too big to fit into RAM after the kernel",
55
+ info->initrd_filename);
56
+ }
57
} else {
58
initrd_size = 0;
59
}
60
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
61
/* Place the DTB after the initrd in memory with alignment. */
62
info->dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size,
63
align);
64
+ if (info->dtb_start >= ram_end) {
65
+ error_report("Not enough space for DTB after kernel/initrd");
66
+ exit(1);
67
+ }
68
fixupcontext[FIXUP_ARGPTR_LO] = info->dtb_start;
69
fixupcontext[FIXUP_ARGPTR_HI] = info->dtb_start >> 32;
70
} else {
71
--
72
2.20.1
73
74
Deleted patch
1
We currently put the initrd at the smaller of:
2
* 128MB into RAM
3
* halfway into the RAM
4
(with the dtb following it).
5
1
6
However for large kernels this might mean that the kernel
7
overlaps the initrd. For some kinds of kernel (self-decompressing
8
32-bit kernels, and ELF images with a BSS section at the end)
9
we don't know the exact size, but even there we have a
10
minimum size. Put the initrd at least further into RAM than
11
that. For image formats that can give us an exact kernel size, this
12
will mean that we definitely avoid overlaying kernel and initrd.
13
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
16
Tested-by: Mark Rutland <mark.rutland@arm.com>
17
Message-id: 20190516144733.32399-4-peter.maydell@linaro.org
18
---
19
hw/arm/boot.c | 34 ++++++++++++++++++++--------------
20
1 file changed, 20 insertions(+), 14 deletions(-)
21
22
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/hw/arm/boot.c
25
+++ b/hw/arm/boot.c
26
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
27
if (info->nb_cpus == 0)
28
info->nb_cpus = 1;
29
30
- /*
31
- * We want to put the initrd far enough into RAM that when the
32
- * kernel is uncompressed it will not clobber the initrd. However
33
- * on boards without much RAM we must ensure that we still leave
34
- * enough room for a decent sized initrd, and on boards with large
35
- * amounts of RAM we must avoid the initrd being so far up in RAM
36
- * that it is outside lowmem and inaccessible to the kernel.
37
- * So for boards with less than 256MB of RAM we put the initrd
38
- * halfway into RAM, and for boards with 256MB of RAM or more we put
39
- * the initrd at 128MB.
40
- */
41
- info->initrd_start = info->loader_start +
42
- MIN(info->ram_size / 2, 128 * 1024 * 1024);
43
-
44
/* Assume that raw images are linux kernels, and ELF images are not. */
45
kernel_size = arm_load_elf(info, &elf_entry, &elf_low_addr,
46
&elf_high_addr, elf_machine, as);
47
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
48
}
49
50
info->entry = entry;
51
+
52
+ /*
53
+ * We want to put the initrd far enough into RAM that when the
54
+ * kernel is uncompressed it will not clobber the initrd. However
55
+ * on boards without much RAM we must ensure that we still leave
56
+ * enough room for a decent sized initrd, and on boards with large
57
+ * amounts of RAM we must avoid the initrd being so far up in RAM
58
+ * that it is outside lowmem and inaccessible to the kernel.
59
+ * So for boards with less than 256MB of RAM we put the initrd
60
+ * halfway into RAM, and for boards with 256MB of RAM or more we put
61
+ * the initrd at 128MB.
62
+ * We also refuse to put the initrd somewhere that will definitely
63
+ * overlay the kernel we just loaded, though for kernel formats which
64
+ * don't tell us their exact size (eg self-decompressing 32-bit kernels)
65
+ * we might still make a bad choice here.
66
+ */
67
+ info->initrd_start = info->loader_start +
68
+ MAX(MIN(info->ram_size / 2, 128 * 1024 * 1024), kernel_size);
69
+ info->initrd_start = TARGET_PAGE_ALIGN(info->initrd_start);
70
+
71
if (is_linux) {
72
uint32_t fixupcontext[FIXUP_MAX];
73
74
--
75
2.20.1
76
77
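As a worked example of the placement calculation above (illustrative numbers, not from the patch): on a board with 512MB of RAM, MIN(ram_size / 2, 128 * 1024 * 1024) is 128MB, so a 200MB kernel would previously have had the initrd loaded on top of it at loader_start + 128MB; with the MAX(..., kernel_size) term the initrd instead starts at loader_start + 200MB (page-aligned), just past the end of the kernel.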
1
The architecture permits FPUs which have only single-precision
1
From: Yuquan Wang <wangyuquan1236@phytium.com.cn>
2
support, not double-precision; Cortex-M4 and Cortex-M33 are
3
both like that. Add the necessary checks on the MVFR0 FPDP
4
field so that we UNDEF any double-precision instructions on
5
CPUs like this.
6
2
7
Note that even if FPDP==0 the insns like VMOV-to/from-gpreg,
3
This extends the slots of xhci to 64, since the default xhci_sysbus
8
VLDM/VSTM, VLDR/VSTR which take double precision registers
4
just supports one slot.
9
still exist.
10
5
6
Signed-off-by: Wang Yuquan <wangyuquan1236@phytium.com.cn>
7
Signed-off-by: Chen Baozi <chenbaozi@phytium.com.cn>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
10
Tested-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
11
Message-id: 20230710063750.473510-2-wangyuquan1236@phytium.com.cn
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20190614104457.24703-3-peter.maydell@linaro.org
14
---
13
---
15
target/arm/cpu.h | 6 +++
14
hw/arm/sbsa-ref.c | 1 +
16
target/arm/translate-vfp.inc.c | 84 ++++++++++++++++++++++++++++++++++
15
1 file changed, 1 insertion(+)
17
2 files changed, 90 insertions(+)
18
16
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
20
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.h
19
--- a/hw/arm/sbsa-ref.c
22
+++ b/target/arm/cpu.h
20
+++ b/hw/arm/sbsa-ref.c
23
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
21
@@ -XXX,XX +XXX,XX @@ static void create_xhci(const SBSAMachineState *sms)
24
return FIELD_EX64(id->mvfr0, MVFR0, FPSHVEC) > 0;
22
hwaddr base = sbsa_ref_memmap[SBSA_XHCI].base;
25
}
23
int irq = sbsa_ref_irqmap[SBSA_XHCI];
26
24
DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS);
27
+static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id)
25
+ qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS);
28
+{
26
29
+ /* Return true if CPU supports double precision floating point */
27
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
30
+ return FIELD_EX64(id->mvfr0, MVFR0, FPDP) > 0;
28
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
31
+}
32
+
33
/*
34
* We always set the FP and SIMD FP16 fields to indicate identical
35
* levels of support (assuming SIMD is implemented at all), so
36
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/translate-vfp.inc.c
39
+++ b/target/arm/translate-vfp.inc.c
40
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
41
((a->vm | a->vn | a->vd) & 0x10)) {
42
return false;
43
}
44
+
45
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
46
+ return false;
47
+ }
48
+
49
rd = a->vd;
50
rn = a->vn;
51
rm = a->vm;
52
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
53
((a->vm | a->vn | a->vd) & 0x10)) {
54
return false;
55
}
56
+
57
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
58
+ return false;
59
+ }
60
+
61
rd = a->vd;
62
rn = a->vn;
63
rm = a->vm;
64
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
65
((a->vm | a->vd) & 0x10)) {
66
return false;
67
}
68
+
69
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
70
+ return false;
71
+ }
72
+
73
rd = a->vd;
74
rm = a->vm;
75
76
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
77
if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
78
return false;
79
}
80
+
81
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
82
+ return false;
83
+ }
84
+
85
rd = a->vd;
86
rm = a->vm;
87
88
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
89
return false;
90
}
91
92
+ if (!dc_isar_feature(aa32_fpdp, s)) {
93
+ return false;
94
+ }
95
+
96
if (!dc_isar_feature(aa32_fpshvec, s) &&
97
(veclen != 0 || s->vec_stride != 0)) {
98
return false;
99
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
100
return false;
101
}
102
103
+ if (!dc_isar_feature(aa32_fpdp, s)) {
104
+ return false;
105
+ }
106
+
107
if (!dc_isar_feature(aa32_fpshvec, s) &&
108
(veclen != 0 || s->vec_stride != 0)) {
109
return false;
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
111
return false;
112
}
113
114
+ if (!dc_isar_feature(aa32_fpdp, s)) {
115
+ return false;
116
+ }
117
+
118
if (!vfp_access_check(s)) {
119
return true;
120
}
121
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
122
return false;
123
}
124
125
+ if (!dc_isar_feature(aa32_fpdp, s)) {
126
+ return false;
127
+ }
128
+
129
if (!dc_isar_feature(aa32_fpshvec, s) &&
130
(veclen != 0 || s->vec_stride != 0)) {
131
return false;
132
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
133
return false;
134
}
135
136
+ if (!dc_isar_feature(aa32_fpdp, s)) {
137
+ return false;
138
+ }
139
+
140
if (!vfp_access_check(s)) {
141
return true;
142
}
143
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
144
return false;
145
}
146
147
+ if (!dc_isar_feature(aa32_fpdp, s)) {
148
+ return false;
149
+ }
150
+
151
if (!vfp_access_check(s)) {
152
return true;
153
}
154
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
155
return false;
156
}
157
158
+ if (!dc_isar_feature(aa32_fpdp, s)) {
159
+ return false;
160
+ }
161
+
162
if (!vfp_access_check(s)) {
163
return true;
164
}
165
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
166
return false;
167
}
168
169
+ if (!dc_isar_feature(aa32_fpdp, s)) {
170
+ return false;
171
+ }
172
+
173
if (!vfp_access_check(s)) {
174
return true;
175
}
176
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
177
return false;
178
}
179
180
+ if (!dc_isar_feature(aa32_fpdp, s)) {
181
+ return false;
182
+ }
183
+
184
if (!vfp_access_check(s)) {
185
return true;
186
}
187
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
188
return false;
189
}
190
191
+ if (!dc_isar_feature(aa32_fpdp, s)) {
192
+ return false;
193
+ }
194
+
195
if (!vfp_access_check(s)) {
196
return true;
197
}
198
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
199
return false;
200
}
201
202
+ if (!dc_isar_feature(aa32_fpdp, s)) {
203
+ return false;
204
+ }
205
+
206
if (!vfp_access_check(s)) {
207
return true;
208
}
209
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
210
return false;
211
}
212
213
+ if (!dc_isar_feature(aa32_fpdp, s)) {
214
+ return false;
215
+ }
216
+
217
if (!vfp_access_check(s)) {
218
return true;
219
}
220
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
221
return false;
222
}
223
224
+ if (!dc_isar_feature(aa32_fpdp, s)) {
225
+ return false;
226
+ }
227
+
228
if (!vfp_access_check(s)) {
229
return true;
230
}
231
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
232
return false;
233
}
234
235
+ if (!dc_isar_feature(aa32_fpdp, s)) {
236
+ return false;
237
+ }
238
+
239
if (!vfp_access_check(s)) {
240
return true;
241
}
242
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
243
return false;
244
}
245
246
+ if (!dc_isar_feature(aa32_fpdp, s)) {
247
+ return false;
248
+ }
249
+
250
if (!vfp_access_check(s)) {
251
return true;
252
}
253
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
254
return false;
255
}
256
257
+ if (!dc_isar_feature(aa32_fpdp, s)) {
258
+ return false;
259
+ }
260
+
261
if (!vfp_access_check(s)) {
262
return true;
263
}
264
--
29
--
265
2.20.1
30
2.34.1
266
267
1
The SSE-200 hardware has configurable integration settings which
1
In the code for TARGET_NR_clock_adjtime, we set the pointer phtx to
2
determine whether its two CPUs have the FPU and DSP:
2
the address of the local variable htx. This means it can never be
3
* CPU0_FPU (default 0)
3
NULL, but later in the code we check it for NULL anyway. Coverity
4
* CPU0_DSP (default 0)
4
complains about this (CID 1507683) because the NULL check comes after
5
* CPU1_FPU (default 1)
5
a call to clock_adjtime() that assumes it is non-NULL.
6
* CPU1_DSP (default 1)
7
6
8
Similarly, the IoTKit has settings for its single CPU:
7
Since phtx is always &htx, and is used only in three places, it's not
9
* CPU0_FPU (default 1)
8
really necessary. Remove it, bringing the code structure into line
10
* CPU0_DSP (default 1)
9
with that for TARGET_NR_clock_adjtime64, which already uses a simple
11
10
'&htx' when it wants a pointer to 'htx'.
12
Of our four boards that use either the IoTKit or the SSE-200:
13
* mps2-an505, mps2-an521 and musca-a use the default settings
14
* musca-b1 enables FPU and DSP on both CPUs
15
16
Currently QEMU models all these boards using CPUs with
17
both FPU and DSP enabled. This means that we are incorrect
18
for mps2-an521 and musca-a, which should not have FPU or DSP
19
on CPU0.
20
21
Create QOM properties on the ARMSSE devices corresponding to the
22
default h/w integration settings, and make the Musca-B1 board
23
enable FPU and DSP on both CPUs. This fixes the mps2-an521
24
and musca-a behaviour, and leaves the musca-b1 and mps2-an505
25
behaviour unchanged.
26
11
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
29
Message-id: 20190517174046.11146-5-peter.maydell@linaro.org
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-id: 20230623144410.1837261-1-peter.maydell@linaro.org
30
---
16
---
31
include/hw/arm/armsse.h | 7 +++++
17
linux-user/syscall.c | 12 +++++-------
32
hw/arm/armsse.c | 58 ++++++++++++++++++++++++++++++++---------
18
1 file changed, 5 insertions(+), 7 deletions(-)
33
hw/arm/musca.c | 8 ++++++
34
3 files changed, 61 insertions(+), 12 deletions(-)
35
19
36
diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h
20
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
37
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
38
--- a/include/hw/arm/armsse.h
22
--- a/linux-user/syscall.c
39
+++ b/include/hw/arm/armsse.h
23
+++ b/linux-user/syscall.c
40
@@ -XXX,XX +XXX,XX @@
24
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
41
* address of each SRAM bank (and thus the total amount of internal SRAM)
25
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
42
* + QOM property "init-svtor" sets the initial value of the CPU SVTOR register
26
case TARGET_NR_clock_adjtime:
43
* (where it expects to load the PC and SP from the vector table on reset)
27
{
44
+ * + QOM properties "CPU0_FPU", "CPU0_DSP", "CPU1_FPU" and "CPU1_DSP" which
28
- struct timex htx, *phtx = &htx;
45
+ * set whether the CPUs have the FPU and DSP features present. The default
29
+ struct timex htx;
46
+ * (matching the hardware) is that for CPU0 in an IoTKit and CPU1 in an
30
47
+ * SSE-200 both are present; CPU0 in an SSE-200 has neither.
31
- if (target_to_host_timex(phtx, arg2) != 0) {
48
+ * Since the IoTKit has only one CPU, it does not have the CPU1_* properties.
32
+ if (target_to_host_timex(&htx, arg2) != 0) {
49
* + Named GPIO inputs "EXP_IRQ" 0..n are the expansion interrupts for CPU 0,
33
return -TARGET_EFAULT;
50
* which are wired to its NVIC lines 32 .. n+32
34
}
51
* + Named GPIO inputs "EXP_CPU1_IRQ" 0..n are the expansion interrupts for
35
- ret = get_errno(clock_adjtime(arg1, phtx));
52
@@ -XXX,XX +XXX,XX @@ typedef struct ARMSSE {
36
- if (!is_error(ret) && phtx) {
53
uint32_t mainclk_frq;
37
- if (host_to_target_timex(arg2, phtx) != 0) {
54
uint32_t sram_addr_width;
38
- return -TARGET_EFAULT;
55
uint32_t init_svtor;
39
- }
56
+ bool cpu_fpu[SSE_MAX_CPUS];
40
+ ret = get_errno(clock_adjtime(arg1, &htx));
57
+ bool cpu_dsp[SSE_MAX_CPUS];
41
+ if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
58
} ARMSSE;
42
+ return -TARGET_EFAULT;
59
60
typedef struct ARMSSEInfo ARMSSEInfo;
61
diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
62
index XXXXXXX..XXXXXXX 100644
63
--- a/hw/arm/armsse.c
64
+++ b/hw/arm/armsse.c
65
@@ -XXX,XX +XXX,XX @@ struct ARMSSEInfo {
66
bool has_cachectrl;
67
bool has_cpusecctrl;
68
bool has_cpuid;
69
+ Property *props;
70
+};
71
+
72
+static Property iotkit_properties[] = {
73
+ DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
74
+ MemoryRegion *),
75
+ DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
76
+ DEFINE_PROP_UINT32("MAINCLK", ARMSSE, mainclk_frq, 0),
77
+ DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
78
+ DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
79
+ DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true),
80
+ DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true),
81
+ DEFINE_PROP_END_OF_LIST()
82
+};
83
+
84
+static Property armsse_properties[] = {
85
+ DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
86
+ MemoryRegion *),
87
+ DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
88
+ DEFINE_PROP_UINT32("MAINCLK", ARMSSE, mainclk_frq, 0),
89
+ DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
90
+ DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
91
+ DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], false),
92
+ DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], false),
93
+ DEFINE_PROP_BOOL("CPU1_FPU", ARMSSE, cpu_fpu[1], true),
94
+ DEFINE_PROP_BOOL("CPU1_DSP", ARMSSE, cpu_dsp[1], true),
95
+ DEFINE_PROP_END_OF_LIST()
96
};
97
98
static const ARMSSEInfo armsse_variants[] = {
99
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
100
.has_cachectrl = false,
101
.has_cpusecctrl = false,
102
.has_cpuid = false,
103
+ .props = iotkit_properties,
104
},
105
{
106
.name = TYPE_SSE200,
107
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
108
.has_cachectrl = true,
109
.has_cpusecctrl = true,
110
.has_cpuid = true,
111
+ .props = armsse_properties,
112
},
113
};
114
115
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
116
return;
117
}
43
}
118
}
44
}
119
+ if (!s->cpu_fpu[i]) {
45
return ret;
120
+ object_property_set_bool(cpuobj, false, "vfp", &err);
121
+ if (err) {
122
+ error_propagate(errp, err);
123
+ return;
124
+ }
125
+ }
126
+ if (!s->cpu_dsp[i]) {
127
+ object_property_set_bool(cpuobj, false, "dsp", &err);
128
+ if (err) {
129
+ error_propagate(errp, err);
130
+ return;
131
+ }
132
+ }
133
134
if (i > 0) {
135
memory_region_add_subregion_overlap(&s->cpu_container[i], 0,
136
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription armsse_vmstate = {
137
}
138
};
139
140
-static Property armsse_properties[] = {
141
- DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
142
- MemoryRegion *),
143
- DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
144
- DEFINE_PROP_UINT32("MAINCLK", ARMSSE, mainclk_frq, 0),
145
- DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
146
- DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
147
- DEFINE_PROP_END_OF_LIST()
148
-};
149
-
150
static void armsse_reset(DeviceState *dev)
151
{
152
ARMSSE *s = ARMSSE(dev);
153
@@ -XXX,XX +XXX,XX @@ static void armsse_class_init(ObjectClass *klass, void *data)
154
DeviceClass *dc = DEVICE_CLASS(klass);
155
IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(klass);
156
ARMSSEClass *asc = ARMSSE_CLASS(klass);
157
+ const ARMSSEInfo *info = data;
158
159
dc->realize = armsse_realize;
160
dc->vmsd = &armsse_vmstate;
161
- dc->props = armsse_properties;
162
+ dc->props = info->props;
163
dc->reset = armsse_reset;
164
iic->check = armsse_idau_check;
165
- asc->info = data;
166
+ asc->info = info;
167
}
168
169
static const TypeInfo armsse_info = {
170
diff --git a/hw/arm/musca.c b/hw/arm/musca.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/hw/arm/musca.c
173
+++ b/hw/arm/musca.c
174
@@ -XXX,XX +XXX,XX @@ static void musca_init(MachineState *machine)
175
qdev_prop_set_uint32(ssedev, "init-svtor", mmc->init_svtor);
176
qdev_prop_set_uint32(ssedev, "SRAM_ADDR_WIDTH", mmc->sram_addr_width);
177
qdev_prop_set_uint32(ssedev, "MAINCLK", SYSCLK_FRQ);
178
+ /*
179
+ * Musca-A takes the default SSE-200 FPU/DSP settings (ie no for
180
+ * CPU0 and yes for CPU1); Musca-B1 explicitly enables them for CPU0.
181
+ */
182
+ if (mmc->type == MUSCA_B1) {
183
+ qdev_prop_set_bit(ssedev, "CPU0_FPU", true);
184
+ qdev_prop_set_bit(ssedev, "CPU0_DSP", true);
185
+ }
186
object_property_set_bool(OBJECT(&mms->sse), true, "realized",
187
&error_fatal);
188
189
--
46
--
190
2.20.1
47
2.34.1
191
48
192
49
1
Remove some old constructs from NEON_2RM_VCVT_F16_F32 code:
1
Add comments to the in_* fields in the S1Translate struct
2
* don't use CPU_F0s
2
that explain what they're doing.
3
* don't use tcg_gen_st_f32
4
3
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Message-id: 20230710152130.3928330-2-peter.maydell@linaro.org
8
Message-id: 20190613163917.28589-12-peter.maydell@linaro.org
9
---
7
---
10
target/arm/translate.c | 26 +++++++++++---------------
8
target/arm/ptw.c | 40 ++++++++++++++++++++++++++++++++++++++++
11
1 file changed, 11 insertions(+), 15 deletions(-)
9
1 file changed, 40 insertions(+)
12
10
13
diff --git a/target/arm/translate.c b/target/arm/translate.c
11
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate.c
13
--- a/target/arm/ptw.c
16
+++ b/target/arm/translate.c
14
+++ b/target/arm/ptw.c
17
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
15
@@ -XXX,XX +XXX,XX @@
18
return ret;
16
#endif
19
}
17
20
18
typedef struct S1Translate {
21
-#define tcg_gen_st_f32 tcg_gen_st_i32
19
+ /*
22
-
20
+ * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk.
23
#define ARM_CP_RW_BIT (1 << 20)
21
+ * Together with in_space, specifies the architectural translation regime.
24
22
+ */
25
/* Include the VFP decoder */
23
ARMMMUIdx in_mmu_idx;
26
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
24
+ /*
27
tmp = neon_load_reg(rm, 0);
25
+ * in_ptw_idx: specifies which mmuidx to use for the actual
28
tmp2 = neon_load_reg(rm, 1);
26
+ * page table descriptor load operations. This will be one of the
29
tcg_gen_ext16u_i32(tmp3, tmp);
27
+ * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
30
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
28
+ * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
31
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
29
+ * this field is updated accordingly.
32
- tcg_gen_shri_i32(tmp3, tmp, 16);
30
+ */
33
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
31
ARMMMUIdx in_ptw_idx;
34
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
32
+ /*
35
- tcg_temp_free_i32(tmp);
33
+ * in_space: the security space for this walk. This plus
36
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
34
+ * the in_mmu_idx specify the architectural translation regime.
37
+ neon_store_reg(rd, 0, tmp3);
35
+ * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
38
+ tcg_gen_shri_i32(tmp, tmp, 16);
36
+ * this field is updated accordingly.
39
+ gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
37
+ *
40
+ neon_store_reg(rd, 1, tmp);
38
+ * Note that the security space for the in_ptw_idx may be different
41
+ tmp3 = tcg_temp_new_i32();
39
+ * from that for the in_mmu_idx. We do not need to explicitly track
42
tcg_gen_ext16u_i32(tmp3, tmp2);
40
+ * the in_ptw_idx security space because:
43
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
41
+ * - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
44
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
42
+ * itself specifies the security space
45
- tcg_gen_shri_i32(tmp3, tmp2, 16);
43
+ * - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
46
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
44
+ * space used for ptw reads is the same as that of the security
47
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
45
+ * space of the stage 1 translation for all cases except where
48
- tcg_temp_free_i32(tmp2);
46
+ * stage 1 is Secure; in that case the only possibilities for
49
- tcg_temp_free_i32(tmp3);
47
+ * the ptw read are Secure and NonSecure, and the in_ptw_idx
50
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
48
+ * value being Stage2 vs Stage2_S distinguishes those.
51
+ neon_store_reg(rd, 2, tmp3);
49
+ */
52
+ tcg_gen_shri_i32(tmp2, tmp2, 16);
50
ARMSecuritySpace in_space;
53
+ gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
51
+ /*
54
+ neon_store_reg(rd, 3, tmp2);
52
+ * in_secure: whether the translation regime is a Secure one.
55
tcg_temp_free_i32(ahp);
53
+ * This is always equal to arm_space_is_secure(in_space).
56
tcg_temp_free_ptr(fpst);
54
+ * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
57
break;
55
+ * this field is updated accordingly.
56
+ */
57
bool in_secure;
58
+ /*
59
+ * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
60
+ * accesses will not update the guest page table access flags
61
+ * and will not change the state of the softmmu TLBs.
62
+ */
63
bool in_debug;
64
/*
65
* If this is stage 2 of a stage 1+2 page table walk, then this must
58
--
66
--
59
2.20.1
67
2.34.1
60
61
1
We want to use vfp_expand_imm() in the AArch32 VFP decode;
1
In commit fe4a5472ccd6 we rearranged the logic in S1_ptw_translate()
2
move it from the a64-only header/source file to the
2
so that the debug-access "call get_phys_addr_*" codepath is used both
3
AArch32 one (which is always compiled even for AArch64).
3
when S1 is doing ptw reads from stage 2 and when it is doing ptw
4
reads from physical memory. However, we didn't update the
5
calculation of s2ptw->in_space and s2ptw->in_secure to account for
6
the "ptw reads from physical memory" case. This meant that debug
7
accesses when in Secure state broke.
4
8
9
Create a new function S2_security_space() which returns the
10
correct security space to use for the ptw load, and use it to
11
determine the correct .in_secure and .in_space fields for the
12
stage 2 lookup for the ptw load.
13
14
Reported-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Tested-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
17
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
18
Message-id: 20230710152130.3928330-3-peter.maydell@linaro.org
8
Message-id: 20190613163917.28589-2-peter.maydell@linaro.org
19
Fixes: fe4a5472ccd6 ("target/arm: Use get_phys_addr_with_struct in S1_ptw_translate")
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
21
---
10
target/arm/translate-a64.h | 1 -
22
target/arm/ptw.c | 37 ++++++++++++++++++++++++++++++++-----
11
target/arm/translate.h | 7 +++++++
23
1 file changed, 32 insertions(+), 5 deletions(-)
12
target/arm/translate-a64.c | 32 --------------------------------
13
target/arm/translate-vfp.inc.c | 33 +++++++++++++++++++++++++++++++++
14
4 files changed, 40 insertions(+), 33 deletions(-)
15
24
16
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
25
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
17
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate-a64.h
27
--- a/target/arm/ptw.c
19
+++ b/target/arm/translate-a64.h
28
+++ b/target/arm/ptw.c
20
@@ -XXX,XX +XXX,XX @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
29
@@ -XXX,XX +XXX,XX @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
21
TCGv_ptr get_fpstatus_ptr(bool);
22
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
23
unsigned int imms, unsigned int immr);
24
-uint64_t vfp_expand_imm(int size, uint8_t imm8);
25
bool sve_access_check(DisasContext *s);
26
27
/* We should have at some point before trying to access an FP register
28
diff --git a/target/arm/translate.h b/target/arm/translate.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/translate.h
31
+++ b/target/arm/translate.h
32
@@ -XXX,XX +XXX,XX @@ static inline void gen_ss_advance(DisasContext *s)
33
}
30
}
34
}
31
}
35
32
36
+/*
33
+static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
37
+ * Given a VFP floating point constant encoded into an 8 bit immediate in an
34
+ ARMMMUIdx s2_mmu_idx)
38
+ * instruction, expand it to the actual constant value of the specified
39
+ * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
40
+ */
41
+uint64_t vfp_expand_imm(int size, uint8_t imm8);
42
+
43
/* Vector operations shared between ARM and AArch64. */
44
extern const GVecGen3 mla_op[4];
45
extern const GVecGen3 mls_op[4];
46
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/translate-a64.c
49
+++ b/target/arm/translate-a64.c
50
@@ -XXX,XX +XXX,XX @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
51
}
52
}
53
54
-/* The imm8 encodes the sign bit, enough bits to represent an exponent in
55
- * the range 01....1xx to 10....0xx, and the most significant 4 bits of
56
- * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
57
- */
58
-uint64_t vfp_expand_imm(int size, uint8_t imm8)
59
-{
60
- uint64_t imm;
61
-
62
- switch (size) {
63
- case MO_64:
64
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
65
- (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
66
- extract32(imm8, 0, 6);
67
- imm <<= 48;
68
- break;
69
- case MO_32:
70
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
71
- (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
72
- (extract32(imm8, 0, 6) << 3);
73
- imm <<= 16;
74
- break;
75
- case MO_16:
76
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
77
- (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
78
- (extract32(imm8, 0, 6) << 6);
79
- break;
80
- default:
81
- g_assert_not_reached();
82
- }
83
- return imm;
84
-}
85
-
86
/* Floating point immediate
87
* 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
88
* +---+---+---+-----------+------+---+------------+-------+------+------+
89
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/target/arm/translate-vfp.inc.c
92
+++ b/target/arm/translate-vfp.inc.c
93
@@ -XXX,XX +XXX,XX @@
94
#include "decode-vfp.inc.c"
95
#include "decode-vfp-uncond.inc.c"
96
97
+/*
98
+ * The imm8 encodes the sign bit, enough bits to represent an exponent in
99
+ * the range 01....1xx to 10....0xx, and the most significant 4 bits of
100
+ * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
101
+ */
102
+uint64_t vfp_expand_imm(int size, uint8_t imm8)
103
+{
35
+{
104
+ uint64_t imm;
36
+ /*
105
+
37
+ * Return the security space to use for stage 2 when doing
106
+ switch (size) {
38
+ * the S1 page table descriptor load.
107
+ case MO_64:
39
+ */
108
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
40
+ if (regime_is_stage2(s2_mmu_idx)) {
109
+ (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
41
+ /*
110
+ extract32(imm8, 0, 6);
42
+ * The security space for ptw reads is almost always the same
111
+ imm <<= 48;
43
+ * as that of the security space of the stage 1 translation.
112
+ break;
44
+ * The only exception is when stage 1 is Secure; in that case
113
+ case MO_32:
45
+ * the ptw read might be to the Secure or the NonSecure space
114
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
46
+ * (but never Realm or Root), and the s2_mmu_idx tells us which.
115
+ (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
47
+ * Root translations are always single-stage.
116
+ (extract32(imm8, 0, 6) << 3);
48
+ */
117
+ imm <<= 16;
49
+ if (s1_space == ARMSS_Secure) {
118
+ break;
50
+ return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
119
+ case MO_16:
51
+ } else {
120
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
52
+ assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
121
+ (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
53
+ assert(s1_space != ARMSS_Root);
122
+ (extract32(imm8, 0, 6) << 6);
54
+ return s1_space;
123
+ break;
55
+ }
124
+ default:
56
+ } else {
125
+ g_assert_not_reached();
57
+ /* ptw loads are from phys: the mmu idx itself says which space */
58
+ return arm_phys_to_space(s2_mmu_idx);
126
+ }
59
+ }
127
+ return imm;
128
+}
60
+}
129
+
61
+
130
/*
62
/* Translate a S1 pagetable walk through S2 if needed. */
131
* Return the offset of a 16-bit half of the specified VFP single-precision
63
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
132
* register. If top is true, returns the top 16 bits; otherwise the bottom
64
hwaddr addr, ARMMMUFaultInfo *fi)
65
{
66
- ARMSecuritySpace space = ptw->in_space;
67
bool is_secure = ptw->in_secure;
68
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
69
ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
70
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
71
* From gdbstub, do not use softmmu so that we don't modify the
72
* state of the cpu at all, including softmmu tlb contents.
73
*/
74
+ ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
75
S1Translate s2ptw = {
76
.in_mmu_idx = s2_mmu_idx,
77
.in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
78
- .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
79
- .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
80
- : space == ARMSS_Realm ? ARMSS_Realm
81
- : ARMSS_NonSecure),
82
+ .in_secure = arm_space_is_secure(s2_space),
83
+ .in_space = s2_space,
84
.in_debug = true,
85
};
86
GetPhysAddrResult s2 = { };
133
--
87
--
134
2.20.1
88
2.34.1
135
136
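As an aside on the VFPExpandImm() encoding quoted in the vfp_expand_imm() patch above: a minimal standalone sketch (not part of either series; expand_imm_sp is a hypothetical helper reproducing only the single-precision arm of the formula) which shows that imm8 = 0x70 expands to 0x3f800000, i.e. single-precision 1.0:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Single-precision VFPExpandImm(): sign bit, NOT(bit 6), bit 6
     * replicated five times, then imm8<5:0>, padded out to 32 bits.
     */
    static uint32_t expand_imm_sp(uint8_t imm8)
    {
        uint32_t imm = ((imm8 >> 7) & 1 ? 0x8000u : 0u) |
                       ((imm8 >> 6) & 1 ? 0x3e00u : 0x4000u) |
                       ((uint32_t)(imm8 & 0x3f) << 3);
        return imm << 16;
    }

    int main(void)
    {
        printf("0x%08" PRIx32 "\n", expand_imm_sp(0x70)); /* 0x3f800000 */
        return 0;
    }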
1
Remove the now unused TCG globals cpu_F0s, cpu_F0d, cpu_F1s, cpu_F1d.
1
In get_phys_addr_twostage() the code that applies the effects of
2
VSTCR.{SA,SW} and VTCR.{NSA,NSW} only updates result->f.attrs.secure.
3
Now we also have f.attrs.space for FEAT_RME, we need to keep the two
4
in sync.
2
5
3
cpu_M0 is still used by the iwmmxt code, and cpu_V0 and
6
These bits only have an effect for Secure space translations, not
4
cpu_V1 are used by both iwmmxt and Neon.
7
for Root, so use the input in_space field to determine whether to
8
apply them rather than the input is_secure. This doesn't actually
9
make a difference because Root translations are never two-stage,
10
but it's a little clearer.
5
11
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
14
Message-id: 20230710152130.3928330-4-peter.maydell@linaro.org
9
Message-id: 20190613163917.28589-13-peter.maydell@linaro.org
10
---
15
---
11
target/arm/translate.c | 12 ++----------
16
target/arm/ptw.c | 13 ++++++++-----
12
1 file changed, 2 insertions(+), 10 deletions(-)
17
1 file changed, 8 insertions(+), 5 deletions(-)
13
18
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
19
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
15
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate.c
21
--- a/target/arm/ptw.c
17
+++ b/target/arm/translate.c
22
+++ b/target/arm/ptw.c
18
@@ -XXX,XX +XXX,XX @@ TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
23
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
19
TCGv_i64 cpu_exclusive_addr;
24
hwaddr ipa;
20
TCGv_i64 cpu_exclusive_val;
25
int s1_prot, s1_lgpgsz;
21
26
bool is_secure = ptw->in_secure;
22
-/* FIXME: These should be removed. */
27
+ ARMSecuritySpace in_space = ptw->in_space;
23
-static TCGv_i32 cpu_F0s, cpu_F1s;
28
bool ret, ipa_secure;
24
-static TCGv_i64 cpu_F0d, cpu_F1d;
29
ARMCacheAttrs cacheattrs1;
25
-
30
ARMSecuritySpace ipa_space;
26
#include "exec/gen-icount.h"
31
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
27
32
* Check if IPA translates to secure or non-secure PA space.
28
static const char * const regnames[] =
33
* Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
29
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
34
*/
30
dc->base.max_insns = MIN(dc->base.max_insns, bound);
35
- result->f.attrs.secure =
31
}
36
- (is_secure
32
37
- && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
33
- cpu_F0s = tcg_temp_new_i32();
38
- && (ipa_secure
34
- cpu_F1s = tcg_temp_new_i32();
39
- || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
35
- cpu_F0d = tcg_temp_new_i64();
40
+ if (in_space == ARMSS_Secure) {
36
- cpu_F1d = tcg_temp_new_i64();
41
+ result->f.attrs.secure =
37
- cpu_V0 = cpu_F0d;
42
+ !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
38
- cpu_V1 = cpu_F1d;
43
+ && (ipa_secure
39
+ cpu_V0 = tcg_temp_new_i64();
44
+ || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
40
+ cpu_V1 = tcg_temp_new_i64();
45
+ result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
41
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
46
+ }
42
cpu_M0 = tcg_temp_new_i64();
47
48
return false;
43
}
49
}
44
--
50
--
45
2.20.1
51
2.34.1
46
47
1
Since Linux v3.17, the kernel's Image header includes a field image_size,
1
In commit f0a08b0913befbd we changed the type of the PC from
2
which gives the total size of the kernel (including unpopulated data
2
target_ulong to vaddr. In doing so we inadvertently dropped the
3
sections such as the BSS). If this is present, then return it from
3
zero-padding on the PC in trace lines (the second item inside the []
4
load_aarch64_image() as the true size of the kernel rather than
4
in these lines). They used to look like this on AArch64, for
5
just using the size of the Image file itself. This allows the code
5
instance:
6
which calculates where to put the initrd to avoid putting it in
7
the kernel's BSS area.
8
6
9
This means that we should be able to reliably load kernel images
7
Trace 0: 0x7f2260000100 [00000000/0000000040000000/00000061/ff200000]
10
which are larger than 128MB without accidentally putting the
11
initrd or dtb in locations that clash with the kernel itself.
12
8
13
Fixes: https://bugs.launchpad.net/qemu/+bug/1823998
9
and now they look like this:
10
Trace 0: 0x7f4f50000100 [00000000/40000000/00000061/ff200000]
11
12
and if the PC happens to be somewhere low like 0x5000
13
then the field is shown as /5000/.
14
15
This is because TARGET_FMT_lx is a "%08x" or "%016x" specifier,
16
depending on TARGET_LONG_SIZE, whereas VADDR_PRIx is just PRIx64
17
with no width specifier.
18
19
Restore the zero-padding by adding an 016 width specifier to
20
this tracing and a couple of others that were similarly recently
21
changed to use VADDR_PRIx without a width specifier.
22
23
We can't unfortunately restore the "32-bit guests are padded to
24
8 hex digits and 64-bit guests to 16 hex digits" behaviour so
25
easily.
26
27
Fixes: f0a08b0913befbd ("accel/tcg/cpu-exec.c: Widen pc to vaddr")
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
29
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
16
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
30
Reviewed-by: Anton Johansson <anjo@rev.ng>
17
Tested-by: Mark Rutland <mark.rutland@arm.com>
31
Message-id: 20230711165434.4123674-1-peter.maydell@linaro.org
18
Message-id: 20190516144733.32399-5-peter.maydell@linaro.org
19
---
32
---
20
hw/arm/boot.c | 17 +++++++++++++++--
33
accel/tcg/cpu-exec.c | 4 ++--
21
1 file changed, 15 insertions(+), 2 deletions(-)
34
accel/tcg/translate-all.c | 2 +-
35
2 files changed, 3 insertions(+), 3 deletions(-)
22
36
23
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
37
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
24
index XXXXXXX..XXXXXXX 100644
38
index XXXXXXX..XXXXXXX 100644
25
--- a/hw/arm/boot.c
39
--- a/accel/tcg/cpu-exec.c
26
+++ b/hw/arm/boot.c
40
+++ b/accel/tcg/cpu-exec.c
27
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
41
@@ -XXX,XX +XXX,XX @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
28
hwaddr *entry, AddressSpace *as)
42
if (qemu_log_in_addr_range(pc)) {
29
{
43
qemu_log_mask(CPU_LOG_EXEC,
30
hwaddr kernel_load_offset = KERNEL64_LOAD_ADDR;
44
"Trace %d: %p [%08" PRIx64
31
+ uint64_t kernel_size = 0;
45
- "/%" VADDR_PRIx "/%08x/%08x] %s\n",
32
uint8_t *buffer;
46
+ "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
33
int size;
47
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
34
48
tb->flags, tb->cflags, lookup_symbol(pc));
35
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
49
36
* is only valid if the image_size is non-zero.
50
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
37
*/
51
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
38
memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals));
52
vaddr pc = log_pc(cpu, last_tb);
39
- if (hdrvals[1] != 0) {
53
if (qemu_log_in_addr_range(pc)) {
40
+
54
- qemu_log("Stopped execution of TB chain before %p [%"
41
+ kernel_size = le64_to_cpu(hdrvals[1]);
55
+ qemu_log("Stopped execution of TB chain before %p [%016"
42
+
56
VADDR_PRIx "] %s\n",
43
+ if (kernel_size != 0) {
57
last_tb->tc.ptr, pc, lookup_symbol(pc));
44
kernel_load_offset = le64_to_cpu(hdrvals[0]);
58
}
45
59
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
46
/*
60
index XXXXXXX..XXXXXXX 100644
47
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
61
--- a/accel/tcg/translate-all.c
62
+++ b/accel/tcg/translate-all.c
63
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
64
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
65
vaddr pc = log_pc(cpu, tb);
66
if (qemu_log_in_addr_range(pc)) {
67
- qemu_log("cpu_io_recompile: rewound execution of TB to %"
68
+ qemu_log("cpu_io_recompile: rewound execution of TB to %016"
69
VADDR_PRIx "\n", pc);
48
}
70
}
49
}
71
}
50
51
+ /*
52
+ * Kernels before v3.17 don't populate the image_size field, and
53
+ * raw images have no header. For those our best guess at the size
54
+ * is the size of the Image file itself.
55
+ */
56
+ if (kernel_size == 0) {
57
+ kernel_size = size;
58
+ }
59
+
60
*entry = mem_base + kernel_load_offset;
61
rom_add_blob_fixed_as(filename, buffer, size, *entry, as);
62
63
g_free(buffer);
64
65
- return size;
66
+ return kernel_size;
67
}
68
69
static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
70
--
72
--
71
2.20.1
73
2.34.1
72
74
73
75
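As an aside on the zero-padding discussed in the trace-line commit message above: a tiny standalone illustration (not part of the patch) of what the added 016 width changes, using plain PRIx64 since the message notes that VADDR_PRIx is just PRIx64 with no width specifier:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pc = 0x5000;
        /* No width: the field shrinks to the significant digits. */
        printf("[%" PRIx64 "]\n", pc);     /* [5000] */
        /* With the 016 width the field is zero-padded to 16 digits. */
        printf("[%016" PRIx64 "]\n", pc);  /* [0000000000005000] */
        return 0;
    }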
Deleted patch
1
Allow VFP and neon to be disabled via a CPU property. As with
2
the "pmu" property, we only allow these features to be removed
3
from CPUs which have it by default, not added to CPUs which
4
don't have it.
5
1
6
The primary motivation here is to be able to optionally
7
create Cortex-M33 CPUs with no FPU, but we provide switches
8
for both VFP and Neon because the two interact:
9
* AArch64 can't have one without the other
10
* Some ID register fields only change if both are disabled
11
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
14
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
15
Message-id: 20190517174046.11146-2-peter.maydell@linaro.org
16
---
17
target/arm/cpu.h | 4 ++
18
target/arm/cpu.c | 150 +++++++++++++++++++++++++++++++++++++++++++++--
19
2 files changed, 148 insertions(+), 6 deletions(-)
20
21
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/arm/cpu.h
24
+++ b/target/arm/cpu.h
25
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
26
bool has_el3;
27
/* CPU has PMU (Performance Monitor Unit) */
28
bool has_pmu;
29
+ /* CPU has VFP */
30
+ bool has_vfp;
31
+ /* CPU has Neon */
32
+ bool has_neon;
33
34
/* CPU has memory protection unit */
35
bool has_mpu;
36
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/cpu.c
39
+++ b/target/arm/cpu.c
40
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_cfgend_property =
41
static Property arm_cpu_has_pmu_property =
42
DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
43
44
+static Property arm_cpu_has_vfp_property =
45
+ DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);
46
+
47
+static Property arm_cpu_has_neon_property =
48
+ DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);
49
+
50
static Property arm_cpu_has_mpu_property =
51
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
52
53
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
54
if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
55
set_feature(&cpu->env, ARM_FEATURE_PMSA);
56
}
57
+ /* Similarly for the VFP feature bits */
58
+ if (arm_feature(&cpu->env, ARM_FEATURE_VFP4)) {
59
+ set_feature(&cpu->env, ARM_FEATURE_VFP3);
60
+ }
61
+ if (arm_feature(&cpu->env, ARM_FEATURE_VFP3)) {
62
+ set_feature(&cpu->env, ARM_FEATURE_VFP);
63
+ }
64
65
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
66
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
67
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
68
&error_abort);
69
}
70
71
+ /*
72
+ * Allow user to turn off VFP and Neon support, but only for TCG --
73
+ * KVM does not currently allow us to lie to the guest about its
74
+ * ID/feature registers, so the guest always sees what the host has.
75
+ */
76
+ if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
77
+ cpu->has_vfp = true;
78
+ if (!kvm_enabled()) {
79
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_vfp_property,
80
+ &error_abort);
81
+ }
82
+ }
83
+
84
+ if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
85
+ cpu->has_neon = true;
86
+ if (!kvm_enabled()) {
87
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property,
88
+ &error_abort);
89
+ }
90
+ }
91
+
92
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
93
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
94
&error_abort);
95
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
96
return;
97
}
98
99
+ if (arm_feature(env, ARM_FEATURE_AARCH64) &&
100
+ cpu->has_vfp != cpu->has_neon) {
101
+ /*
102
+ * This is an architectural requirement for AArch64; AArch32 is
103
+ * more flexible and permits VFP-no-Neon and Neon-no-VFP.
104
+ */
105
+ error_setg(errp,
106
+ "AArch64 CPUs must have both VFP and Neon or neither");
107
+ return;
108
+ }
109
+
110
+ if (!cpu->has_vfp) {
111
+ uint64_t t;
112
+ uint32_t u;
113
+
114
+ unset_feature(env, ARM_FEATURE_VFP);
115
+ unset_feature(env, ARM_FEATURE_VFP3);
116
+ unset_feature(env, ARM_FEATURE_VFP4);
117
+
118
+ t = cpu->isar.id_aa64isar1;
119
+ t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
120
+ cpu->isar.id_aa64isar1 = t;
121
+
122
+ t = cpu->isar.id_aa64pfr0;
123
+ t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
124
+ cpu->isar.id_aa64pfr0 = t;
125
+
126
+ u = cpu->isar.id_isar6;
127
+ u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
128
+ cpu->isar.id_isar6 = u;
129
+
130
+ u = cpu->isar.mvfr0;
131
+ u = FIELD_DP32(u, MVFR0, FPSP, 0);
132
+ u = FIELD_DP32(u, MVFR0, FPDP, 0);
133
+ u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
134
+ u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
135
+ u = FIELD_DP32(u, MVFR0, FPSQRT, 0);
136
+ u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
137
+ u = FIELD_DP32(u, MVFR0, FPROUND, 0);
138
+ cpu->isar.mvfr0 = u;
139
+
140
+ u = cpu->isar.mvfr1;
141
+ u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
142
+ u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
143
+ u = FIELD_DP32(u, MVFR1, FPHP, 0);
144
+ cpu->isar.mvfr1 = u;
145
+
146
+ u = cpu->isar.mvfr2;
147
+ u = FIELD_DP32(u, MVFR2, FPMISC, 0);
148
+ cpu->isar.mvfr2 = u;
149
+ }
150
+
151
+ if (!cpu->has_neon) {
152
+ uint64_t t;
153
+ uint32_t u;
154
+
155
+ unset_feature(env, ARM_FEATURE_NEON);
156
+
157
+ t = cpu->isar.id_aa64isar0;
158
+ t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
159
+ cpu->isar.id_aa64isar0 = t;
160
+
161
+ t = cpu->isar.id_aa64isar1;
162
+ t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
163
+ cpu->isar.id_aa64isar1 = t;
164
+
165
+ t = cpu->isar.id_aa64pfr0;
166
+ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
167
+ cpu->isar.id_aa64pfr0 = t;
168
+
169
+ u = cpu->isar.id_isar5;
170
+ u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
171
+ u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
172
+ cpu->isar.id_isar5 = u;
173
+
174
+ u = cpu->isar.id_isar6;
175
+ u = FIELD_DP32(u, ID_ISAR6, DP, 0);
176
+ u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
177
+ cpu->isar.id_isar6 = u;
178
+
179
+ u = cpu->isar.mvfr1;
180
+ u = FIELD_DP32(u, MVFR1, SIMDLS, 0);
181
+ u = FIELD_DP32(u, MVFR1, SIMDINT, 0);
182
+ u = FIELD_DP32(u, MVFR1, SIMDSP, 0);
183
+ u = FIELD_DP32(u, MVFR1, SIMDHP, 0);
184
+ u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0);
185
+ cpu->isar.mvfr1 = u;
186
+
187
+ u = cpu->isar.mvfr2;
188
+ u = FIELD_DP32(u, MVFR2, SIMDMISC, 0);
189
+ cpu->isar.mvfr2 = u;
190
+ }
191
+
192
+ if (!cpu->has_neon && !cpu->has_vfp) {
193
+ uint64_t t;
194
+ uint32_t u;
195
+
196
+ t = cpu->isar.id_aa64isar0;
197
+ t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
198
+ cpu->isar.id_aa64isar0 = t;
199
+
200
+ t = cpu->isar.id_aa64isar1;
201
+ t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
202
+ cpu->isar.id_aa64isar1 = t;
203
+
204
+ u = cpu->isar.mvfr0;
205
+ u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
206
+ cpu->isar.mvfr0 = u;
207
+ }
208
+
209
/* Some features automatically imply others: */
210
if (arm_feature(env, ARM_FEATURE_V8)) {
211
if (arm_feature(env, ARM_FEATURE_M)) {
212
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
213
if (arm_feature(env, ARM_FEATURE_V5)) {
214
set_feature(env, ARM_FEATURE_V4T);
215
}
216
- if (arm_feature(env, ARM_FEATURE_VFP4)) {
217
- set_feature(env, ARM_FEATURE_VFP3);
218
- }
219
- if (arm_feature(env, ARM_FEATURE_VFP3)) {
220
- set_feature(env, ARM_FEATURE_VFP);
221
- }
222
if (arm_feature(env, ARM_FEATURE_LPAE)) {
223
set_feature(env, ARM_FEATURE_V7MP);
224
set_feature(env, ARM_FEATURE_PXN);
225
--
226
2.20.1
227
228
Deleted patch

Allow the DSP extension to be disabled via a CPU property for
M-profile CPUs. (A and R-profile CPUs don't have this extension
as a defined separate optional architecture extension, so
they don't need the property.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20190517174046.11146-3-peter.maydell@linaro.org
---
 target/arm/cpu.h | 2 ++
 target/arm/cpu.c | 29 +++++++++++++++++++++++++++++
 2 files changed, 31 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
18
+++ b/target/arm/cpu.h
19
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
20
bool has_vfp;
21
/* CPU has Neon */
22
bool has_neon;
23
+ /* CPU has M-profile DSP extension */
24
+ bool has_dsp;
25
26
/* CPU has memory protection unit */
27
bool has_mpu;
28
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/cpu.c
31
+++ b/target/arm/cpu.c
32
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_has_vfp_property =
33
static Property arm_cpu_has_neon_property =
34
DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);
35
36
+static Property arm_cpu_has_dsp_property =
37
+ DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);
38
+
39
static Property arm_cpu_has_mpu_property =
40
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
41
42
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
43
}
44
}
45
46
+ if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
47
+ arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
48
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property,
49
+ &error_abort);
50
+ }
51
+
52
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
53
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
54
&error_abort);
55
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
56
cpu->isar.mvfr0 = u;
57
}
58
59
+ if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) {
60
+ uint32_t u;
61
+
62
+ unset_feature(env, ARM_FEATURE_THUMB_DSP);
63
+
64
+ u = cpu->isar.id_isar1;
65
+ u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
66
+ cpu->isar.id_isar1 = u;
67
+
68
+ u = cpu->isar.id_isar2;
69
+ u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
70
+ u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
71
+ cpu->isar.id_isar2 = u;
72
+
73
+ u = cpu->isar.id_isar3;
74
+ u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
75
+ u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
76
+ cpu->isar.id_isar3 = u;
77
+ }
78
+
79
/* Some features automatically imply others: */
80
if (arm_feature(env, ARM_FEATURE_V8)) {
81
if (arm_feature(env, ARM_FEATURE_M)) {
82
--
83
2.20.1
84
85
Deleted patch

Create "vfp" and "dsp" properties on the armv7m container object
which will be forwarded to its CPU object, so that SoCs can
configure whether the CPU has these features.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20190517174046.11146-4-peter.maydell@linaro.org
---
 include/hw/arm/armv7m.h | 4 ++++
 hw/arm/armv7m.c | 18 ++++++++++++++++++
 2 files changed, 22 insertions(+)
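For instance (sketch only, assuming an SoC state struct that embeds an
ARMv7MState field named "armv7m"), a no-FPU, no-DSP SoC variant could
clear the forwarded properties before realizing the container:

    object_property_set_bool(OBJECT(&s->armv7m), false, "vfp", &error_abort);
    object_property_set_bool(OBJECT(&s->armv7m), false, "dsp", &error_abort);
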
diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/hw/arm/armv7m.h
17
+++ b/include/hw/arm/armv7m.h
18
@@ -XXX,XX +XXX,XX @@ typedef struct {
19
* devices will be automatically layered on top of this view.)
20
* + Property "idau": IDAU interface (forwarded to CPU object)
21
* + Property "init-svtor": secure VTOR reset value (forwarded to CPU object)
22
+ * + Property "vfp": enable VFP (forwarded to CPU object)
23
+ * + Property "dsp": enable DSP (forwarded to CPU object)
24
* + Property "enable-bitband": expose bitbanded IO
25
*/
26
typedef struct ARMv7MState {
27
@@ -XXX,XX +XXX,XX @@ typedef struct ARMv7MState {
28
uint32_t init_svtor;
29
bool enable_bitband;
30
bool start_powered_off;
31
+ bool vfp;
32
+ bool dsp;
33
} ARMv7MState;
34
35
#endif
36
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/hw/arm/armv7m.c
39
+++ b/hw/arm/armv7m.c
40
@@ -XXX,XX +XXX,XX @@ static void armv7m_realize(DeviceState *dev, Error **errp)
41
return;
42
}
43
}
44
+ if (object_property_find(OBJECT(s->cpu), "vfp", NULL)) {
45
+ object_property_set_bool(OBJECT(s->cpu), s->vfp,
46
+ "vfp", &err);
47
+ if (err != NULL) {
48
+ error_propagate(errp, err);
49
+ return;
50
+ }
51
+ }
52
+ if (object_property_find(OBJECT(s->cpu), "dsp", NULL)) {
53
+ object_property_set_bool(OBJECT(s->cpu), s->dsp,
54
+ "dsp", &err);
55
+ if (err != NULL) {
56
+ error_propagate(errp, err);
57
+ return;
58
+ }
59
+ }
60
61
/*
62
* Tell the CPU where the NVIC is; it will fail realize if it doesn't
63
@@ -XXX,XX +XXX,XX @@ static Property armv7m_properties[] = {
64
DEFINE_PROP_BOOL("enable-bitband", ARMv7MState, enable_bitband, false),
65
DEFINE_PROP_BOOL("start-powered-off", ARMv7MState, start_powered_off,
66
false),
67
+ DEFINE_PROP_BOOL("vfp", ARMv7MState, vfp, true),
68
+ DEFINE_PROP_BOOL("dsp", ARMv7MState, dsp, true),
69
DEFINE_PROP_END_OF_LIST(),
70
};
71
72
--
73
2.20.1
74
75
Deleted patch

The GIC ID registers cover an area 0x30 bytes in size
(12 registers, 4 bytes each). We were incorrectly decoding
only the first 0x20 bytes.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190524124248.28394-2-peter.maydell@linaro.org
---
 hw/intc/arm_gicv3_dist.c | 4 ++--
 hw/intc/arm_gicv3_redist.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
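(Worked out: 12 registers * 4 bytes = 48 = 0x30 bytes, so valid offsets
run from GICD_IDREGS up to GICD_IDREGS + 0x2f; the old upper bound of
+0x1f covered only the first 8 of the 12 registers.)
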
diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/hw/intc/arm_gicv3_dist.c
16
+++ b/hw/intc/arm_gicv3_dist.c
17
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset,
18
}
19
return MEMTX_OK;
20
}
21
- case GICD_IDREGS ... GICD_IDREGS + 0x1f:
22
+ case GICD_IDREGS ... GICD_IDREGS + 0x2f:
23
/* ID registers */
24
*data = gicv3_idreg(offset - GICD_IDREGS);
25
return MEMTX_OK;
26
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicd_writel(GICv3State *s, hwaddr offset,
27
gicd_write_irouter(s, attrs, irq, r);
28
return MEMTX_OK;
29
}
30
- case GICD_IDREGS ... GICD_IDREGS + 0x1f:
31
+ case GICD_IDREGS ... GICD_IDREGS + 0x2f:
32
case GICD_TYPER:
33
case GICD_IIDR:
34
/* RO registers, ignore the write */
35
diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/hw/intc/arm_gicv3_redist.c
38
+++ b/hw/intc/arm_gicv3_redist.c
39
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
40
}
41
*data = cs->gicr_nsacr;
42
return MEMTX_OK;
43
- case GICR_IDREGS ... GICR_IDREGS + 0x1f:
44
+ case GICR_IDREGS ... GICR_IDREGS + 0x2f:
45
*data = gicv3_idreg(offset - GICR_IDREGS);
46
return MEMTX_OK;
47
default:
48
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
49
return MEMTX_OK;
50
case GICR_IIDR:
51
case GICR_TYPER:
52
- case GICR_IDREGS ... GICR_IDREGS + 0x1f:
53
+ case GICR_IDREGS ... GICR_IDREGS + 0x2f:
54
/* RO registers, ignore the write */
55
qemu_log_mask(LOG_GUEST_ERROR,
56
"%s: invalid guest write to RO register at offset "
57
--
58
2.20.1
59
60
Deleted patch

The GICv3 specification says that the GICD_TYPER.SecurityExtn bit
is RAZ if GICD_CTLR.DS is 1. We were incorrectly making it RAZ
if the security extension is unsupported. "Security extension
unsupported" always implies GICD_CTLR.DS == 1, but the guest can
also set DS on a GIC which does support the security extension.
Fix the condition to correctly check the GICD_CTLR.DS bit.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20190524124248.28394-3-peter.maydell@linaro.org
---
 hw/intc/arm_gicv3_dist.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/intc/arm_gicv3_dist.c
17
+++ b/hw/intc/arm_gicv3_dist.c
18
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset,
19
* ITLinesNumber == (num external irqs / 32) - 1
20
*/
21
int itlinesnumber = ((s->num_irq - GIC_INTERNAL) / 32) - 1;
22
+ /*
23
+ * SecurityExtn must be RAZ if GICD_CTLR.DS == 1, and
24
+ * "security extensions not supported" always implies DS == 1,
25
+ * so we only need to check the DS bit.
26
+ */
27
+ bool sec_extn = !(s->gicd_ctlr & GICD_CTLR_DS);
28
29
- *data = (1 << 25) | (1 << 24) | (s->security_extn << 10) |
30
+ *data = (1 << 25) | (1 << 24) | (sec_extn << 10) |
31
(0xf << 19) | itlinesnumber;
32
return MEMTX_OK;
33
}
34
--
35
2.20.1
36
37
Deleted patch

The AArch32 VMOV (immediate) instruction uses the same VFP encoded
immediate format we already handle in vfp_expand_imm(). Use that
function rather than hand-decoding it.

Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-3-peter.maydell@linaro.org
---
 target/arm/translate-vfp.inc.c | 28 ++++------------------------
 target/arm/vfp.decode | 10 ++++++----
 2 files changed, 10 insertions(+), 28 deletions(-)
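For reference, the encoded constant is the usual 8-bit VFP immediate
(abcdefgh). A stand-alone re-derivation of the single-precision case,
purely to illustrate what vfp_expand_imm(MO_32, imm) yields (this is
not the QEMU helper itself):

    /* Illustration only: imm8 = 0x70 expands to 0x3f800000, i.e. 1.0f */
    static uint32_t vfp_imm8_to_f32_bits(uint8_t imm8)
    {
        uint32_t sign = (imm8 >> 7) & 1;
        uint32_t b6   = (imm8 >> 6) & 1;
        /* exponent = NOT(b6) : b6 b6 b6 b6 b6 : imm8[5:4] */
        uint32_t exp  = (b6 ? 0x7c : 0x80) | ((imm8 >> 4) & 3);
        /* fraction = imm8[3:0] followed by 19 zero bits */
        uint32_t frac = (uint32_t)(imm8 & 0xf) << 19;
        return (sign << 31) | (exp << 23) | frac;
    }
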
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-vfp.inc.c
18
+++ b/target/arm/translate-vfp.inc.c
19
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
20
uint32_t delta_d = 0;
21
int veclen = s->vec_len;
22
TCGv_i32 fd;
23
- uint32_t n, i, vd;
24
+ uint32_t vd;
25
26
vd = a->vd;
27
28
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
29
}
30
}
31
32
- n = (a->imm4h << 28) & 0x80000000;
33
- i = ((a->imm4h << 4) & 0x70) | a->imm4l;
34
- if (i & 0x40) {
35
- i |= 0x780;
36
- } else {
37
- i |= 0x800;
38
- }
39
- n |= i << 19;
40
-
41
- fd = tcg_temp_new_i32();
42
- tcg_gen_movi_i32(fd, n);
43
+ fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
44
45
for (;;) {
46
neon_store_reg32(fd, vd);
47
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
48
uint32_t delta_d = 0;
49
int veclen = s->vec_len;
50
TCGv_i64 fd;
51
- uint32_t n, i, vd;
52
+ uint32_t vd;
53
54
vd = a->vd;
55
56
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
57
}
58
}
59
60
- n = (a->imm4h << 28) & 0x80000000;
61
- i = ((a->imm4h << 4) & 0x70) | a->imm4l;
62
- if (i & 0x40) {
63
- i |= 0x3f80;
64
- } else {
65
- i |= 0x4000;
66
- }
67
- n |= i << 16;
68
-
69
- fd = tcg_temp_new_i64();
70
- tcg_gen_movi_i64(fd, ((uint64_t)n) << 32);
71
+ fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
72
73
for (;;) {
74
neon_store_reg64(fd, vd);
75
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
76
index XXXXXXX..XXXXXXX 100644
77
--- a/target/arm/vfp.decode
78
+++ b/target/arm/vfp.decode
79
@@ -XXX,XX +XXX,XX @@
80
%vmov_idx_b 21:1 5:2
81
%vmov_idx_h 21:1 6:1
82
83
+%vmov_imm 16:4 0:4
84
+
85
# VMOV scalar to general-purpose register; note that this does
86
# include some Neon cases.
87
VMOV_to_gp ---- 1110 u:1 1. 1 .... rt:4 1011 ... 1 0000 \
88
@@ -XXX,XX +XXX,XX @@ VFM_sp ---- 1110 1.10 .... .... 1010 . o2:1 . 0 .... \
89
VFM_dp ---- 1110 1.10 .... .... 1011 . o2:1 . 0 .... \
90
vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=2
91
92
-VMOV_imm_sp ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \
93
- vd=%vd_sp
94
-VMOV_imm_dp ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \
95
- vd=%vd_dp
96
+VMOV_imm_sp ---- 1110 1.11 .... .... 1010 0000 .... \
97
+ vd=%vd_sp imm=%vmov_imm
98
+VMOV_imm_dp ---- 1110 1.11 .... .... 1011 0000 .... \
99
+ vd=%vd_dp imm=%vmov_imm
100
101
VMOV_reg_sp ---- 1110 1.11 0000 .... 1010 01.0 .... \
102
vd=%vd_sp vm=%vm_sp
103
--
104
2.20.1
105
106
Deleted patch

Where Neon instructions are floating point operations, we
mostly use the old VFP utility functions like gen_vfp_abs()
which work on the TCG globals cpu_F0s and cpu_F1s. The
Neon for-each-element loop conditionally loads the inputs
into either a plain old TCG temporary for most operations
or into cpu_F0s for float operations, and similarly stores
back either cpu_F0s or the temporary.

Switch NEON_2RM_VABS_F away from using cpu_F0s, and
update neon_2rm_is_float_op() accordingly.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-4-peter.maydell@linaro.org
---
 target/arm/translate.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)
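The loop shape in question, condensed from the pre-existing
disas_neon_data_insn() elementwise loop that this series is gradually
converting (see the removed lines in the later patches below):

    for (pass = 0; pass < (q ? 4 : 2); pass++) {
        if (neon_2rm_is_float_op(op)) {
            /* float-to-float ops historically go via the cpu_F0s global */
            tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
            tmp = NULL;
        } else {
            tmp = neon_load_reg(rm, pass);
        }
        /* ... per-op case code ... */
        if (neon_2rm_is_float_op(op)) {
            tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
        } else {
            neon_store_reg(rd, pass, tmp);
        }
    }
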
diff --git a/target/arm/translate.c b/target/arm/translate.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/translate.c
23
+++ b/target/arm/translate.c
24
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
25
return statusptr;
26
}
27
28
-static inline void gen_vfp_abs(int dp)
29
-{
30
- if (dp)
31
- gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
32
- else
33
- gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
34
-}
35
-
36
static inline void gen_vfp_neg(int dp)
37
{
38
if (dp)
39
@@ -XXX,XX +XXX,XX @@ static const uint8_t neon_3r_sizes[] = {
40
41
static int neon_2rm_is_float_op(int op)
42
{
43
- /* Return true if this neon 2reg-misc op is float-to-float */
44
- return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
45
+ /*
46
+ * Return true if this neon 2reg-misc op is float-to-float.
47
+ * This is not a property of the operation but of our code --
48
+ * what we are asking here is "does the code for this case in
49
+ * the Neon for-each-pass loop use cpu_F0s?".
50
+ */
51
+ return (op == NEON_2RM_VNEG_F ||
52
(op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
53
op == NEON_2RM_VRINTM ||
54
(op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
55
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
56
break;
57
}
58
case NEON_2RM_VABS_F:
59
- gen_vfp_abs(0);
60
+ gen_helper_vfp_abss(tmp, tmp);
61
break;
62
case NEON_2RM_VNEG_F:
63
gen_vfp_neg(0);
64
--
65
2.20.1
66
67
Deleted patch

Switch NEON_2RM_VNEG_F away from using cpu_F0s.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-5-peter.maydell@linaro.org
---
 target/arm/translate.c | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
16
return statusptr;
17
}
18
19
-static inline void gen_vfp_neg(int dp)
20
-{
21
- if (dp)
22
- gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
23
- else
24
- gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
25
-}
26
-
27
#define VFP_GEN_ITOF(name) \
28
static inline void gen_vfp_##name(int dp, int neon) \
29
{ \
30
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
31
* what we are asking here is "does the code for this case in
32
* the Neon for-each-pass loop use cpu_F0s?".
33
*/
34
- return (op == NEON_2RM_VNEG_F ||
35
- (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
36
+ return ((op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
37
op == NEON_2RM_VRINTM ||
38
(op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
39
op >= NEON_2RM_VRECPE_F);
40
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
41
gen_helper_vfp_abss(tmp, tmp);
42
break;
43
case NEON_2RM_VNEG_F:
44
- gen_vfp_neg(0);
45
+ gen_helper_vfp_negs(tmp, tmp);
46
break;
47
case NEON_2RM_VSWP:
48
tmp2 = neon_load_reg(rd, pass);
49
--
50
2.20.1
51
52
Deleted patch

Switch NEON_2RM_VRINT* away from using cpu_F0s.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-6-peter.maydell@linaro.org
---
 target/arm/translate.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
16
* what we are asking here is "does the code for this case in
17
* the Neon for-each-pass loop use cpu_F0s?".
18
*/
19
- return ((op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
20
- op == NEON_2RM_VRINTM ||
21
- (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
22
+ return ((op >= NEON_2RM_VCVTAU && op <= NEON_2RM_VCVTMS) ||
23
op >= NEON_2RM_VRECPE_F);
24
}
25
26
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
27
tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
28
gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
29
cpu_env);
30
- gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
31
+ gen_helper_rints(tmp, tmp, fpstatus);
32
gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
33
cpu_env);
34
tcg_temp_free_ptr(fpstatus);
35
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
36
case NEON_2RM_VRINTX:
37
{
38
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
39
- gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
40
+ gen_helper_rints_exact(tmp, tmp, fpstatus);
41
tcg_temp_free_ptr(fpstatus);
42
break;
43
}
44
--
45
2.20.1
46
47
Deleted patch

Stop using cpu_F0s for the NEON_2RM_VCVT[ANPM][US] ops.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-7-peter.maydell@linaro.org
---
 target/arm/translate.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
16
* what we are asking here is "does the code for this case in
17
* the Neon for-each-pass loop use cpu_F0s?".
18
*/
19
- return ((op >= NEON_2RM_VCVTAU && op <= NEON_2RM_VCVTMS) ||
20
- op >= NEON_2RM_VRECPE_F);
21
+ return op >= NEON_2RM_VRECPE_F;
22
}
23
24
static bool neon_2rm_is_v8_op(int op)
25
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
26
cpu_env);
27
28
if (is_signed) {
29
- gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
30
+ gen_helper_vfp_tosls(tmp, tmp,
31
tcg_shift, fpst);
32
} else {
33
- gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
34
+ gen_helper_vfp_touls(tmp, tmp,
35
tcg_shift, fpst);
36
}
37
38
--
39
2.20.1
40
41
Deleted patch

Stop using cpu_F0s for NEON_2RM_VRECPE_F and NEON_2RM_VRSQRTE_F.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-8-peter.maydell@linaro.org
---
 target/arm/translate.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
16
* what we are asking here is "does the code for this case in
17
* the Neon for-each-pass loop use cpu_F0s?".
18
*/
19
- return op >= NEON_2RM_VRECPE_F;
20
+ return op >= NEON_2RM_VCVT_FS;
21
}
22
23
static bool neon_2rm_is_v8_op(int op)
24
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
25
case NEON_2RM_VRECPE_F:
26
{
27
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
28
- gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
29
+ gen_helper_recpe_f32(tmp, tmp, fpstatus);
30
tcg_temp_free_ptr(fpstatus);
31
break;
32
}
33
case NEON_2RM_VRSQRTE_F:
34
{
35
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
36
- gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
37
+ gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
38
tcg_temp_free_ptr(fpstatus);
39
break;
40
}
41
--
42
2.20.1
43
44
Deleted patch

Stop using cpu_F0s for the Neon f32/s32 VCVT operations.
Since this is the last user of cpu_F0s in the Neon 2rm-op
loop, we can remove the handling code for it too.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-9-peter.maydell@linaro.org
---
 target/arm/translate.c | 82 ++++++++++++------------------------
 1 file changed, 22 insertions(+), 60 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate.c
16
+++ b/target/arm/translate.c
17
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
18
return statusptr;
19
}
20
21
-#define VFP_GEN_ITOF(name) \
22
-static inline void gen_vfp_##name(int dp, int neon) \
23
-{ \
24
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
25
- if (dp) { \
26
- gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
27
- } else { \
28
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
29
- } \
30
- tcg_temp_free_ptr(statusptr); \
31
-}
32
-
33
-VFP_GEN_ITOF(uito)
34
-VFP_GEN_ITOF(sito)
35
-#undef VFP_GEN_ITOF
36
-
37
-#define VFP_GEN_FTOI(name) \
38
-static inline void gen_vfp_##name(int dp, int neon) \
39
-{ \
40
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
41
- if (dp) { \
42
- gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
43
- } else { \
44
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
45
- } \
46
- tcg_temp_free_ptr(statusptr); \
47
-}
48
-
49
-VFP_GEN_FTOI(touiz)
50
-VFP_GEN_FTOI(tosiz)
51
-#undef VFP_GEN_FTOI
52
-
53
#define VFP_GEN_FIX(name, round) \
54
static inline void gen_vfp_##name(int dp, int shift, int neon) \
55
{ \
56
@@ -XXX,XX +XXX,XX @@ static const uint8_t neon_3r_sizes[] = {
57
#define NEON_2RM_VCVT_SF 62
58
#define NEON_2RM_VCVT_UF 63
59
60
-static int neon_2rm_is_float_op(int op)
61
-{
62
- /*
63
- * Return true if this neon 2reg-misc op is float-to-float.
64
- * This is not a property of the operation but of our code --
65
- * what we are asking here is "does the code for this case in
66
- * the Neon for-each-pass loop use cpu_F0s?".
67
- */
68
- return op >= NEON_2RM_VCVT_FS;
69
-}
70
-
71
static bool neon_2rm_is_v8_op(int op)
72
{
73
/* Return true if this neon 2reg-misc op is ARMv8 and up */
74
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
75
default:
76
elementwise:
77
for (pass = 0; pass < (q ? 4 : 2); pass++) {
78
- if (neon_2rm_is_float_op(op)) {
79
- tcg_gen_ld_f32(cpu_F0s, cpu_env,
80
- neon_reg_offset(rm, pass));
81
- tmp = NULL;
82
- } else {
83
- tmp = neon_load_reg(rm, pass);
84
- }
85
+ tmp = neon_load_reg(rm, pass);
86
switch (op) {
87
case NEON_2RM_VREV32:
88
switch (size) {
89
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
90
break;
91
}
92
case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
93
- gen_vfp_sito(0, 1);
94
+ {
95
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
96
+ gen_helper_vfp_sitos(tmp, tmp, fpstatus);
97
+ tcg_temp_free_ptr(fpstatus);
98
break;
99
+ }
100
case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
101
- gen_vfp_uito(0, 1);
102
+ {
103
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
104
+ gen_helper_vfp_uitos(tmp, tmp, fpstatus);
105
+ tcg_temp_free_ptr(fpstatus);
106
break;
107
+ }
108
case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
109
- gen_vfp_tosiz(0, 1);
110
+ {
111
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
112
+ gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
113
+ tcg_temp_free_ptr(fpstatus);
114
break;
115
+ }
116
case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
117
- gen_vfp_touiz(0, 1);
118
+ {
119
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
120
+ gen_helper_vfp_touizs(tmp, tmp, fpstatus);
121
+ tcg_temp_free_ptr(fpstatus);
122
break;
123
+ }
124
default:
125
/* Reserved op values were caught by the
126
* neon_2rm_sizes[] check earlier.
127
*/
128
abort();
129
}
130
- if (neon_2rm_is_float_op(op)) {
131
- tcg_gen_st_f32(cpu_F0s, cpu_env,
132
- neon_reg_offset(rd, pass));
133
- } else {
134
- neon_store_reg(rd, pass, tmp);
135
- }
136
+ neon_store_reg(rd, pass, tmp);
137
}
138
break;
139
}
140
--
141
2.20.1
142
143
Deleted patch

Stop using cpu_F0s in the Neon VCVT fixed-point operations.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-10-peter.maydell@linaro.org
---
 target/arm/translate.c | 62 +++++++++++++++++++-----------------------
 1 file changed, 28 insertions(+), 34 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static const char * const regnames[] =
16
/* Function prototypes for gen_ functions calling Neon helpers. */
17
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
18
TCGv_i32, TCGv_i32);
19
+/* Function prototypes for gen_ functions for fix point conversions */
20
+typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
21
22
/* initialize TCG globals. */
23
void arm_translate_init(void)
24
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
25
return statusptr;
26
}
27
28
-#define VFP_GEN_FIX(name, round) \
29
-static inline void gen_vfp_##name(int dp, int shift, int neon) \
30
-{ \
31
- TCGv_i32 tmp_shift = tcg_const_i32(shift); \
32
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
33
- if (dp) { \
34
- gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
35
- statusptr); \
36
- } else { \
37
- gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
38
- statusptr); \
39
- } \
40
- tcg_temp_free_i32(tmp_shift); \
41
- tcg_temp_free_ptr(statusptr); \
42
-}
43
-VFP_GEN_FIX(tosl, _round_to_zero)
44
-VFP_GEN_FIX(toul, _round_to_zero)
45
-VFP_GEN_FIX(slto, )
46
-VFP_GEN_FIX(ulto, )
47
-#undef VFP_GEN_FIX
48
-
49
static inline long vfp_reg_offset(bool dp, unsigned reg)
50
{
51
if (dp) {
52
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
53
}
54
} else if (op >= 14) {
55
/* VCVT fixed-point. */
56
+ TCGv_ptr fpst;
57
+ TCGv_i32 shiftv;
58
+ VFPGenFixPointFn *fn;
59
+
60
if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
61
return 1;
62
}
63
+
64
+ if (!(op & 1)) {
65
+ if (u) {
66
+ fn = gen_helper_vfp_ultos;
67
+ } else {
68
+ fn = gen_helper_vfp_sltos;
69
+ }
70
+ } else {
71
+ if (u) {
72
+ fn = gen_helper_vfp_touls_round_to_zero;
73
+ } else {
74
+ fn = gen_helper_vfp_tosls_round_to_zero;
75
+ }
76
+ }
77
+
78
/* We have already masked out the must-be-1 top bit of imm6,
79
* hence this 32-shift where the ARM ARM has 64-imm6.
80
*/
81
shift = 32 - shift;
82
+ fpst = get_fpstatus_ptr(1);
83
+ shiftv = tcg_const_i32(shift);
84
for (pass = 0; pass < (q ? 4 : 2); pass++) {
85
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
86
- if (!(op & 1)) {
87
- if (u)
88
- gen_vfp_ulto(0, shift, 1);
89
- else
90
- gen_vfp_slto(0, shift, 1);
91
- } else {
92
- if (u)
93
- gen_vfp_toul(0, shift, 1);
94
- else
95
- gen_vfp_tosl(0, shift, 1);
96
- }
97
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
98
+ TCGv_i32 tmpf = neon_load_reg(rm, pass);
99
+ fn(tmpf, tmpf, shiftv, fpst);
100
+ neon_store_reg(rd, pass, tmpf);
101
}
102
+ tcg_temp_free_ptr(fpst);
103
+ tcg_temp_free_i32(shiftv);
104
} else {
105
return 1;
106
}
107
--
108
2.20.1
109
110
Deleted patch

Remove some old constructs from NEON_2RM_VCVT_F16_F32 code:
 * don't use cpu_F0s
 * don't use tcg_gen_ld_f32

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190613163917.28589-11-peter.maydell@linaro.org
---
 target/arm/translate.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate.c
16
+++ b/target/arm/translate.c
17
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
18
return ret;
19
}
20
21
-#define tcg_gen_ld_f32 tcg_gen_ld_i32
22
#define tcg_gen_st_f32 tcg_gen_st_i32
23
24
#define ARM_CP_RW_BIT (1 << 20)
25
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
26
q || (rm & 1)) {
27
return 1;
28
}
29
- tmp = tcg_temp_new_i32();
30
- tmp2 = tcg_temp_new_i32();
31
fpst = get_fpstatus_ptr(true);
32
ahp = get_ahp_flag();
33
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
34
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
35
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
36
- gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
37
+ tmp = neon_load_reg(rm, 0);
38
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
39
+ tmp2 = neon_load_reg(rm, 1);
40
+ gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
41
tcg_gen_shli_i32(tmp2, tmp2, 16);
42
tcg_gen_or_i32(tmp2, tmp2, tmp);
43
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
44
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
45
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
46
+ tcg_temp_free_i32(tmp);
47
+ tmp = neon_load_reg(rm, 2);
48
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
49
+ tmp3 = neon_load_reg(rm, 3);
50
neon_store_reg(rd, 0, tmp2);
51
- tmp2 = tcg_temp_new_i32();
52
- gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
53
- tcg_gen_shli_i32(tmp2, tmp2, 16);
54
- tcg_gen_or_i32(tmp2, tmp2, tmp);
55
- neon_store_reg(rd, 1, tmp2);
56
+ gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
57
+ tcg_gen_shli_i32(tmp3, tmp3, 16);
58
+ tcg_gen_or_i32(tmp3, tmp3, tmp);
59
+ neon_store_reg(rd, 1, tmp3);
60
tcg_temp_free_i32(tmp);
61
tcg_temp_free_i32(ahp);
62
tcg_temp_free_ptr(fpst);
63
--
64
2.20.1
65
66
In several places cut and paste errors meant we were using the wrong
type for the 'arg' struct in trans_ functions called by the
decodetree decoder, because we were using the _sp version of the
struct in the _dp function. These were harmless, because the two
structs were identical and so decodetree made them typedefs of the
same underlying structure (and we'd have had a compile error if they
were not harmless), but we should clean them up anyway.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20190614104457.24703-2-peter.maydell@linaro.org
---
 target/arm/translate-vfp.inc.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

From: Tong Ho <tong.ho@amd.com>

Add a check in the bit-set operation to write the backstore
only if the affected bit is 0 before.

With this in place, there will be no need for callers to
do the checking in order to avoid unnecessary writes.

Signed-off-by: Tong Ho <tong.ho@amd.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/nvram/xlnx-efuse.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
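With the check folded into xlnx_efuse_set_bit(), a caller does not need
its own test-before-set; a hypothetical caller (sketch only, "efuse" and
"bit" are placeholder variables) can simply do:

    if (!xlnx_efuse_set_bit(efuse, bit)) {
        qemu_log_mask(LOG_GUEST_ERROR, "efuse bit %u is write-protected\n", bit);
    }
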
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
18
diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c
17
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate-vfp.inc.c
20
--- a/hw/nvram/xlnx-efuse.c
19
+++ b/target/arm/translate-vfp.inc.c
21
+++ b/hw/nvram/xlnx-efuse.c
20
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
22
@@ -XXX,XX +XXX,XX @@ static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k)
23
24
bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
25
{
26
+ uint32_t set, *row;
27
+
28
if (efuse_ro_bits_find(s, bit)) {
29
g_autofree char *path = object_get_canonical_path(OBJECT(s));
30
31
@@ -XXX,XX +XXX,XX @@ bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
32
return false;
33
}
34
35
- s->fuse32[bit / 32] |= 1 << (bit % 32);
36
- efuse_bdrv_sync(s, bit);
37
+ /* Avoid back-end write unless there is a real update */
38
+ row = &s->fuse32[bit / 32];
39
+ set = 1 << (bit % 32);
40
+ if (!(set & *row)) {
41
+ *row |= set;
42
+ efuse_bdrv_sync(s, bit);
43
+ }
21
return true;
44
return true;
22
}
45
}
23
46
24
-static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)
25
+static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
26
{
27
TCGv_i32 tmp;
28
29
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
30
return true;
31
}
32
33
-static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
34
+static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
35
{
36
uint32_t offset;
37
TCGv_i32 addr;
38
@@ -XXX,XX +XXX,XX @@ static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
39
tcg_temp_free_i64(tmp);
40
}
41
42
-static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_sp *a)
43
+static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
44
{
45
return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
46
}
47
@@ -XXX,XX +XXX,XX @@ static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
48
tcg_temp_free_i64(tmp);
49
}
50
51
-static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_sp *a)
52
+static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
53
{
54
return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
55
}
56
@@ -XXX,XX +XXX,XX @@ static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
57
tcg_temp_free_i64(tmp);
58
}
59
60
-static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_sp *a)
61
+static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
62
{
63
return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
64
}
65
@@ -XXX,XX +XXX,XX @@ static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
66
tcg_temp_free_i64(tmp);
67
}
68
69
-static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_sp *a)
70
+static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
71
{
72
return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
73
}
74
@@ -XXX,XX +XXX,XX @@ static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
75
return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
76
}
77
78
-static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_sp *a)
79
+static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
80
{
81
return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
82
}
83
@@ -XXX,XX +XXX,XX @@ static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
84
gen_helper_vfp_negd(vd, vd);
85
}
86
87
-static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_sp *a)
88
+static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
89
{
90
return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
91
}
92
@@ -XXX,XX +XXX,XX @@ static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
93
return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
94
}
95
96
-static bool trans_VADD_dp(DisasContext *s, arg_VADD_sp *a)
97
+static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
98
{
99
return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
100
}
101
@@ -XXX,XX +XXX,XX @@ static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
102
return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
103
}
104
105
-static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_sp *a)
106
+static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
107
{
108
return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
109
}
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
111
return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
112
}
113
114
-static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_sp *a)
115
+static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
116
{
117
return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
118
}
119
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
120
return true;
121
}
122
123
-static bool trans_VFM_dp(DisasContext *s, arg_VFM_sp *a)
124
+static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
125
{
126
/*
127
* VFNMA : fd = muladd(-fd, fn, fm)
128
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
129
return true;
130
}
131
132
-static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_sp *a)
133
+static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
134
{
135
TCGv_ptr fpst;
136
TCGv_i64 tmp;
137
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
138
return true;
139
}
140
141
-static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_sp *a)
142
+static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
143
{
144
TCGv_ptr fpst;
145
TCGv_i64 tmp;
146
--
47
--
147
2.20.1
48
2.34.1
148
49
149
50