Latest arm queue, half minor code cleanups and half minor
bug fixes.

-- PMM

The following changes since commit 5d0e5694470d2952b4f257bc985cac8c89b4fd92:

  Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging (2019-06-17 11:55:14 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20190617

for you to fetch changes up to 1120827fa182f0e76226df7ffe7a86598d1df54f:

  target/arm: Only implement doubles if the FPU supports them (2019-06-17 15:15:06 +0100)

----------------------------------------------------------------
target-arm queue:
 * support large kernel images in bootloader (by avoiding
   putting the initrd over the top of them)
 * correctly disable FPU/DSP in the CPU for the mps2-an521, musca-a boards
 * arm_gicv3: Fix decoding of ID register range
 * arm_gicv3: GICD_TYPER.SecurityExtn is RAZ if GICD_CTLR.DS == 1
 * some code cleanups following on from the VFP decodetree conversion
 * Only implement doubles if the FPU supports them
   (so we now correctly model Cortex-M4, -M33 as single precision only)

----------------------------------------------------------------
Peter Maydell (24):
      hw/arm/boot: Don't assume RAM starts at address zero
      hw/arm/boot: Diagnose layouts that put initrd or DTB off the end of RAM
      hw/arm/boot: Avoid placing the initrd on top of the kernel
      hw/arm/boot: Honour image size field in AArch64 Image format kernels
      target/arm: Allow VFP and Neon to be disabled via a CPU property
      target/arm: Allow M-profile CPUs to disable the DSP extension via CPU property
      hw/arm/armv7m: Forward "vfp" and "dsp" properties to CPU
      hw/arm: Correctly disable FPU/DSP for some ARMSSE-based boards
      hw/intc/arm_gicv3: Fix decoding of ID register range
      hw/intc/arm_gicv3: GICD_TYPER.SecurityExtn is RAZ if GICD_CTLR.DS == 1
      target/arm: Move vfp_expand_imm() to translate.[ch]
      target/arm: Use vfp_expand_imm() for AArch32 VFP VMOV_imm
      target/arm: Stop using cpu_F0s for NEON_2RM_VABS_F
      target/arm: Stop using cpu_F0s for NEON_2RM_VNEG_F
      target/arm: Stop using cpu_F0s for NEON_2RM_VRINT*
      target/arm: Stop using cpu_F0s for NEON_2RM_VCVT[ANPM][US]
      target/arm: Stop using cpu_F0s for NEON_2RM_VRECPE_F and NEON_2RM_VRSQRTE_F
      target/arm: Stop using cpu_F0s for Neon f32/s32 VCVT
      target/arm: Stop using cpu_F0s in Neon VCVT fixed-point ops
      target/arm: stop using deprecated functions in NEON_2RM_VCVT_F16_F32
      target/arm: Stop using deprecated functions in NEON_2RM_VCVT_F32_F16
      target/arm: Remove unused cpu_F0s, cpu_F0d, cpu_F1s, cpu_F1d
      target/arm: Fix typos in trans function prototypes
      target/arm: Only implement doubles if the FPU supports them

 include/hw/arm/armsse.h        |   7 ++
 include/hw/arm/armv7m.h        |   4 +
 target/arm/cpu.h               |  12 +++
 target/arm/translate-a64.h     |   1 -
 target/arm/translate.h         |   7 ++
 hw/arm/armsse.c                |  58 +++++++---
 hw/arm/armv7m.c                |  18 ++++
 hw/arm/boot.c                  |  83 ++++++++++----
 hw/arm/musca.c                 |   8 ++
 hw/intc/arm_gicv3_dist.c       |  12 ++-
 hw/intc/arm_gicv3_redist.c     |   4 +-
 target/arm/cpu.c               | 179 ++++++++++++++++++++++++++++--
 target/arm/translate-a64.c     |  32 ------
 target/arm/translate-vfp.inc.c | 173 ++++++++++++++++++++++-------
 target/arm/translate.c         | 240 ++++++++++++++---------------------------
 target/arm/vfp.decode          |  10 +-
 16 files changed, 572 insertions(+), 276 deletions(-)
In the Arm kernel/initrd loading code, in some places we make the
incorrect assumption that info->ram_size can be treated as the
address of the end of RAM, as for instance when we calculate the
available space for the initrd using "info->ram_size - info->initrd_start".
This is wrong, because many Arm boards (including "virt") specify
a non-zero info->loader_start to indicate that their RAM area
starts at a non-zero physical address.

Correct the places which make this incorrect assumption.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Message-id: 20190516144733.32399-2-peter.maydell@linaro.org
---
 hw/arm/boot.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
     int elf_machine;
     hwaddr entry;
     static const ARMInsnFixup *primary_loader;
+    uint64_t ram_end = info->loader_start + info->ram_size;

     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
         primary_loader = bootloader_aarch64;
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
         /* 32-bit ARM */
         entry = info->loader_start + KERNEL_LOAD_ADDR;
         kernel_size = load_image_targphys_as(info->kernel_filename, entry,
-                                             info->ram_size - KERNEL_LOAD_ADDR,
-                                             as);
+                                             ram_end - KERNEL_LOAD_ADDR, as);
         is_linux = 1;
     }
     if (kernel_size < 0) {
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
     if (info->initrd_filename) {
         initrd_size = load_ramdisk_as(info->initrd_filename,
                                       info->initrd_start,
-                                      info->ram_size - info->initrd_start,
-                                      as);
+                                      ram_end - info->initrd_start, as);
         if (initrd_size < 0) {
             initrd_size = load_image_targphys_as(info->initrd_filename,
                                                  info->initrd_start,
-                                                 info->ram_size -
+                                                 ram_end -
                                                  info->initrd_start,
                                                  as);
         }
--
2.20.1
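As a standalone illustration of the arithmetic being corrected (example figures only, not QEMU code; 0x40000000 happens to be the RAM base the "virt" board uses): when loader_start is non-zero, "ram_size - initrd_start" under-reports the space left in RAM by exactly loader_start bytes.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Made-up example values: RAM starts at 0x40000000 and is 2GB big;
     * the initrd has been placed 128MB into RAM.
     */
    uint64_t loader_start = 0x40000000;
    uint64_t ram_size     = 2ULL * 1024 * 1024 * 1024;
    uint64_t initrd_start = loader_start + 128 * 1024 * 1024;

    uint64_t ram_end = loader_start + ram_size;

    /* Old, incorrect idea: ram_size used as if it were the end of RAM. */
    uint64_t wrong_space = ram_size - initrd_start;
    /* Corrected: space really available between initrd_start and end of RAM. */
    uint64_t right_space = ram_end - initrd_start;

    printf("wrong: 0x%" PRIx64 " bytes\n", wrong_space);   /* 0x38000000 */
    printf("right: 0x%" PRIx64 " bytes\n", right_space);   /* 0x78000000 */
    return 0;
}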
We calculate the locations in memory where we want to put the
initrd and the DTB based on the size of the kernel, since they
come after it. Add some explicit checks that these aren't off the
end of RAM entirely.

(At the moment the way we calculate the initrd_start means that
it can't ever be off the end of RAM, but that will change with
the next commit.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Message-id: 20190516144733.32399-3-peter.maydell@linaro.org
---
 hw/arm/boot.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
         error_report("could not load kernel '%s'", info->kernel_filename);
         exit(1);
     }
+
+    if (kernel_size > info->ram_size) {
+        error_report("kernel '%s' is too large to fit in RAM "
+                     "(kernel size %d, RAM size %" PRId64 ")",
+                     info->kernel_filename, kernel_size, info->ram_size);
+        exit(1);
+    }
+
     info->entry = entry;
     if (is_linux) {
         uint32_t fixupcontext[FIXUP_MAX];

         if (info->initrd_filename) {
+
+            if (info->initrd_start >= ram_end) {
+                error_report("not enough space after kernel to load initrd");
+                exit(1);
+            }
+
             initrd_size = load_ramdisk_as(info->initrd_filename,
                                           info->initrd_start,
                                           ram_end - info->initrd_start, as);
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
                              info->initrd_filename);
                 exit(1);
             }
+            if (info->initrd_start + initrd_size > info->ram_size) {
+                error_report("could not load initrd '%s': "
+                             "too big to fit into RAM after the kernel",
+                             info->initrd_filename);
+            }
         } else {
             initrd_size = 0;
         }
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
         /* Place the DTB after the initrd in memory with alignment. */
         info->dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size,
                                         align);
+        if (info->dtb_start >= ram_end) {
+            error_report("Not enough space for DTB after kernel/initrd");
+            exit(1);
+        }
         fixupcontext[FIXUP_ARGPTR_LO] = info->dtb_start;
         fixupcontext[FIXUP_ARGPTR_HI] = info->dtb_start >> 32;
     } else {
--
2.20.1
We currently put the initrd at the smaller of:
 * 128MB into RAM
 * halfway into the RAM
(with the dtb following it).

However for large kernels this might mean that the kernel
overlaps the initrd. For some kinds of kernel (self-decompressing
32-bit kernels, and ELF images with a BSS section at the end)
we don't know the exact size, but even there we have a
minimum size. Put the initrd at least further into RAM than
that. For image formats that can give us an exact kernel size, this
will mean that we definitely avoid overlaying kernel and initrd.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Message-id: 20190516144733.32399-4-peter.maydell@linaro.org
---
 hw/arm/boot.c | 34 ++++++++++++++++++++--------------
 1 file changed, 20 insertions(+), 14 deletions(-)

diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
     if (info->nb_cpus == 0)
         info->nb_cpus = 1;

-    /*
-     * We want to put the initrd far enough into RAM that when the
-     * kernel is uncompressed it will not clobber the initrd. However
-     * on boards without much RAM we must ensure that we still leave
-     * enough room for a decent sized initrd, and on boards with large
-     * amounts of RAM we must avoid the initrd being so far up in RAM
-     * that it is outside lowmem and inaccessible to the kernel.
-     * So for boards with less than 256MB of RAM we put the initrd
-     * halfway into RAM, and for boards with 256MB of RAM or more we put
-     * the initrd at 128MB.
-     */
-    info->initrd_start = info->loader_start +
-        MIN(info->ram_size / 2, 128 * 1024 * 1024);
-
     /* Assume that raw images are linux kernels, and ELF images are not. */
     kernel_size = arm_load_elf(info, &elf_entry, &elf_low_addr,
                                &elf_high_addr, elf_machine, as);
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
     }

     info->entry = entry;
+
+    /*
+     * We want to put the initrd far enough into RAM that when the
+     * kernel is uncompressed it will not clobber the initrd. However
+     * on boards without much RAM we must ensure that we still leave
+     * enough room for a decent sized initrd, and on boards with large
+     * amounts of RAM we must avoid the initrd being so far up in RAM
+     * that it is outside lowmem and inaccessible to the kernel.
+     * So for boards with less than 256MB of RAM we put the initrd
+     * halfway into RAM, and for boards with 256MB of RAM or more we put
+     * the initrd at 128MB.
+     * We also refuse to put the initrd somewhere that will definitely
+     * overlay the kernel we just loaded, though for kernel formats which
+     * don't tell us their exact size (eg self-decompressing 32-bit kernels)
+     * we might still make a bad choice here.
+     */
+    info->initrd_start = info->loader_start +
+        MAX(MIN(info->ram_size / 2, 128 * 1024 * 1024), kernel_size);
+    info->initrd_start = TARGET_PAGE_ALIGN(info->initrd_start);
+
     if (is_linux) {
         uint32_t fixupcontext[FIXUP_MAX];

--
2.20.1
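A worked example of the new placement rule (standalone sketch with made-up sizes; the MIN/MAX macros and page alignment below stand in for QEMU's own): with 512MB of RAM the old rule always picked the 128MB point, which a 200MB kernel overlaps, while taking the MAX with kernel_size pushes the initrd past the end of the kernel.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Stand-in for QEMU's TARGET_PAGE_ALIGN(): round up to a 4K page. */
static uint64_t page_align(uint64_t addr)
{
    return (addr + 0xfff) & ~(uint64_t)0xfff;
}

int main(void)
{
    /* Example values only. */
    uint64_t loader_start = 0x40000000;                 /* base of RAM  */
    uint64_t ram_size     = 512ULL * 1024 * 1024;       /* 512MB of RAM */
    uint64_t kernel_size  = 200ULL * 1024 * 1024;       /* 200MB kernel */

    /* Old rule: 128MB in (or halfway into small RAM); the kernel overlaps it. */
    uint64_t old_start = loader_start + MIN(ram_size / 2, 128 * 1024 * 1024);

    /* New rule: never closer to the start of RAM than the kernel's end. */
    uint64_t new_start = loader_start +
        MAX(MIN(ram_size / 2, 128 * 1024 * 1024), kernel_size);
    new_start = page_align(new_start);

    printf("old initrd_start: 0x%" PRIx64 "\n", old_start);  /* 0x48000000 */
    printf("new initrd_start: 0x%" PRIx64 "\n", new_start);  /* 0x4c800000 */
    return 0;
}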
Since Linux v3.17, the kernel's Image header includes a field image_size,
which gives the total size of the kernel (including unpopulated data
sections such as the BSS). If this is present, then return it from
load_aarch64_image() as the true size of the kernel rather than
just using the size of the Image file itself. This allows the code
which calculates where to put the initrd to avoid putting it in
the kernel's BSS area.

This means that we should be able to reliably load kernel images
which are larger than 128MB without accidentally putting the
initrd or dtb in locations that clash with the kernel itself.

Fixes: https://bugs.launchpad.net/qemu/+bug/1823998
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Message-id: 20190516144733.32399-5-peter.maydell@linaro.org
---
 hw/arm/boot.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
                                    hwaddr *entry, AddressSpace *as)
 {
     hwaddr kernel_load_offset = KERNEL64_LOAD_ADDR;
+    uint64_t kernel_size = 0;
     uint8_t *buffer;
     int size;

@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
     * is only valid if the image_size is non-zero.
     */
    memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals));
-    if (hdrvals[1] != 0) {
+
+    kernel_size = le64_to_cpu(hdrvals[1]);
+
+    if (kernel_size != 0) {
        kernel_load_offset = le64_to_cpu(hdrvals[0]);

        /*
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
        }
    }

+    /*
+     * Kernels before v3.17 don't populate the image_size field, and
+     * raw images have no header. For those our best guess at the size
+     * is the size of the Image file itself.
+     */
+    if (kernel_size == 0) {
+        kernel_size = size;
+    }
+
    *entry = mem_base + kernel_load_offset;
    rom_add_blob_fixed_as(filename, buffer, size, *entry, as);

    g_free(buffer);

-    return size;
+    return kernel_size;
 }

 static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
--
2.20.1
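For reference, a minimal sketch of the header fields this patch starts honouring. It assumes the AArch64 Image header layout described in the kernel's arm64 booting documentation (little-endian 64-bit text_offset at byte 8 and image_size at byte 16, which is what QEMU's ARM64_TEXT_OFFSET_OFFSET/hdrvals correspond to); the header bytes and sizes below are made up for the example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Read a little-endian 64-bit value from a byte buffer, host-endian safe. */
static uint64_t le64(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 7; i >= 0; i--) {
        v = (v << 8) | p[i];
    }
    return v;
}

int main(void)
{
    /* Fake 64-byte Image header: only the two fields we care about are set. */
    uint8_t header[64] = { 0 };
    header[10] = 0x08;   /* text_offset = 0x00080000 (LE64 field at byte 8)  */
    header[19] = 0x0a;   /* image_size  = 0x0a000000 (LE64 field at byte 16) */

    uint64_t file_size = sizeof(header);   /* stand-in for the Image file size */

    uint64_t text_offset = le64(header + 8);
    uint64_t image_size  = le64(header + 16);

    /* Mirrors the patch: trust image_size only when it is non-zero,
     * otherwise fall back to the size of the file itself.
     */
    uint64_t kernel_size = image_size ? image_size : file_size;

    printf("text_offset 0x%" PRIx64 ", kernel_size 0x%" PRIx64 "\n",
           text_offset, kernel_size);
    return 0;
}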
New patch
1
1
Allow VFP and neon to be disabled via a CPU property. As with
2
the "pmu" property, we only allow these features to be removed
3
from CPUs which have it by default, not added to CPUs which
4
don't have it.
5
6
The primary motivation here is to be able to optionally
7
create Cortex-M33 CPUs with no FPU, but we provide switches
8
for both VFP and Neon because the two interact:
9
* AArch64 can't have one without the other
10
* Some ID register fields only change if both are disabled
11
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
14
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
15
Message-id: 20190517174046.11146-2-peter.maydell@linaro.org
16
---
17
target/arm/cpu.h | 4 ++
18
target/arm/cpu.c | 150 +++++++++++++++++++++++++++++++++++++++++++++--
19
2 files changed, 148 insertions(+), 6 deletions(-)
20
21
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/arm/cpu.h
24
+++ b/target/arm/cpu.h
25
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
26
bool has_el3;
27
/* CPU has PMU (Performance Monitor Unit) */
28
bool has_pmu;
29
+ /* CPU has VFP */
30
+ bool has_vfp;
31
+ /* CPU has Neon */
32
+ bool has_neon;
33
34
/* CPU has memory protection unit */
35
bool has_mpu;
36
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/cpu.c
39
+++ b/target/arm/cpu.c
40
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_cfgend_property =
41
static Property arm_cpu_has_pmu_property =
42
DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
43
44
+static Property arm_cpu_has_vfp_property =
45
+ DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);
46
+
47
+static Property arm_cpu_has_neon_property =
48
+ DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);
49
+
50
static Property arm_cpu_has_mpu_property =
51
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
52
53
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
54
if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
55
set_feature(&cpu->env, ARM_FEATURE_PMSA);
56
}
57
+ /* Similarly for the VFP feature bits */
58
+ if (arm_feature(&cpu->env, ARM_FEATURE_VFP4)) {
59
+ set_feature(&cpu->env, ARM_FEATURE_VFP3);
60
+ }
61
+ if (arm_feature(&cpu->env, ARM_FEATURE_VFP3)) {
62
+ set_feature(&cpu->env, ARM_FEATURE_VFP);
63
+ }
64
65
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
66
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
67
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
68
&error_abort);
69
}
70
71
+ /*
72
+ * Allow user to turn off VFP and Neon support, but only for TCG --
73
+ * KVM does not currently allow us to lie to the guest about its
74
+ * ID/feature registers, so the guest always sees what the host has.
75
+ */
76
+ if (arm_feature(&cpu->env, ARM_FEATURE_VFP)) {
77
+ cpu->has_vfp = true;
78
+ if (!kvm_enabled()) {
79
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_vfp_property,
80
+ &error_abort);
81
+ }
82
+ }
83
+
84
+ if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
85
+ cpu->has_neon = true;
86
+ if (!kvm_enabled()) {
87
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property,
88
+ &error_abort);
89
+ }
90
+ }
91
+
92
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
93
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
94
&error_abort);
95
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
96
return;
97
}
98
99
+ if (arm_feature(env, ARM_FEATURE_AARCH64) &&
100
+ cpu->has_vfp != cpu->has_neon) {
101
+ /*
102
+ * This is an architectural requirement for AArch64; AArch32 is
103
+ * more flexible and permits VFP-no-Neon and Neon-no-VFP.
104
+ */
105
+ error_setg(errp,
106
+ "AArch64 CPUs must have both VFP and Neon or neither");
107
+ return;
108
+ }
109
+
110
+ if (!cpu->has_vfp) {
111
+ uint64_t t;
112
+ uint32_t u;
113
+
114
+ unset_feature(env, ARM_FEATURE_VFP);
115
+ unset_feature(env, ARM_FEATURE_VFP3);
116
+ unset_feature(env, ARM_FEATURE_VFP4);
117
+
118
+ t = cpu->isar.id_aa64isar1;
119
+ t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
120
+ cpu->isar.id_aa64isar1 = t;
121
+
122
+ t = cpu->isar.id_aa64pfr0;
123
+ t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
124
+ cpu->isar.id_aa64pfr0 = t;
125
+
126
+ u = cpu->isar.id_isar6;
127
+ u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
128
+ cpu->isar.id_isar6 = u;
129
+
130
+ u = cpu->isar.mvfr0;
131
+ u = FIELD_DP32(u, MVFR0, FPSP, 0);
132
+ u = FIELD_DP32(u, MVFR0, FPDP, 0);
133
+ u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
134
+ u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
135
+ u = FIELD_DP32(u, MVFR0, FPSQRT, 0);
136
+ u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
137
+ u = FIELD_DP32(u, MVFR0, FPROUND, 0);
138
+ cpu->isar.mvfr0 = u;
139
+
140
+ u = cpu->isar.mvfr1;
141
+ u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
142
+ u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
143
+ u = FIELD_DP32(u, MVFR1, FPHP, 0);
144
+ cpu->isar.mvfr1 = u;
145
+
146
+ u = cpu->isar.mvfr2;
147
+ u = FIELD_DP32(u, MVFR2, FPMISC, 0);
148
+ cpu->isar.mvfr2 = u;
149
+ }
150
+
151
+ if (!cpu->has_neon) {
152
+ uint64_t t;
153
+ uint32_t u;
154
+
155
+ unset_feature(env, ARM_FEATURE_NEON);
156
+
157
+ t = cpu->isar.id_aa64isar0;
158
+ t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
159
+ cpu->isar.id_aa64isar0 = t;
160
+
161
+ t = cpu->isar.id_aa64isar1;
162
+ t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
163
+ cpu->isar.id_aa64isar1 = t;
164
+
165
+ t = cpu->isar.id_aa64pfr0;
166
+ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
167
+ cpu->isar.id_aa64pfr0 = t;
168
+
169
+ u = cpu->isar.id_isar5;
170
+ u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
171
+ u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
172
+ cpu->isar.id_isar5 = u;
173
+
174
+ u = cpu->isar.id_isar6;
175
+ u = FIELD_DP32(u, ID_ISAR6, DP, 0);
176
+ u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
177
+ cpu->isar.id_isar6 = u;
178
+
179
+ u = cpu->isar.mvfr1;
180
+ u = FIELD_DP32(u, MVFR1, SIMDLS, 0);
181
+ u = FIELD_DP32(u, MVFR1, SIMDINT, 0);
182
+ u = FIELD_DP32(u, MVFR1, SIMDSP, 0);
183
+ u = FIELD_DP32(u, MVFR1, SIMDHP, 0);
184
+ u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0);
185
+ cpu->isar.mvfr1 = u;
186
+
187
+ u = cpu->isar.mvfr2;
188
+ u = FIELD_DP32(u, MVFR2, SIMDMISC, 0);
189
+ cpu->isar.mvfr2 = u;
190
+ }
191
+
192
+ if (!cpu->has_neon && !cpu->has_vfp) {
193
+ uint64_t t;
194
+ uint32_t u;
195
+
196
+ t = cpu->isar.id_aa64isar0;
197
+ t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
198
+ cpu->isar.id_aa64isar0 = t;
199
+
200
+ t = cpu->isar.id_aa64isar1;
201
+ t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
202
+ cpu->isar.id_aa64isar1 = t;
203
+
204
+ u = cpu->isar.mvfr0;
205
+ u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
206
+ cpu->isar.mvfr0 = u;
207
+ }
208
+
209
/* Some features automatically imply others: */
210
if (arm_feature(env, ARM_FEATURE_V8)) {
211
if (arm_feature(env, ARM_FEATURE_M)) {
212
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
213
if (arm_feature(env, ARM_FEATURE_V5)) {
214
set_feature(env, ARM_FEATURE_V4T);
215
}
216
- if (arm_feature(env, ARM_FEATURE_VFP4)) {
217
- set_feature(env, ARM_FEATURE_VFP3);
218
- }
219
- if (arm_feature(env, ARM_FEATURE_VFP3)) {
220
- set_feature(env, ARM_FEATURE_VFP);
221
- }
222
if (arm_feature(env, ARM_FEATURE_LPAE)) {
223
set_feature(env, ARM_FEATURE_V7MP);
224
set_feature(env, ARM_FEATURE_PXN);
225
--
226
2.20.1
227
228
diff view generated by jsdifflib
New patch
1
Allow the DSP extension to be disabled via a CPU property for
2
M-profile CPUs. (A and R-profile CPUs don't have this extension
3
as a defined separate optional architecture extension, so
4
they don't need the property.)
1
5
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Message-id: 20190517174046.11146-3-peter.maydell@linaro.org
10
---
11
target/arm/cpu.h | 2 ++
12
target/arm/cpu.c | 29 +++++++++++++++++++++++++++++
13
2 files changed, 31 insertions(+)
14
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
18
+++ b/target/arm/cpu.h
19
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
20
bool has_vfp;
21
/* CPU has Neon */
22
bool has_neon;
23
+ /* CPU has M-profile DSP extension */
24
+ bool has_dsp;
25
26
/* CPU has memory protection unit */
27
bool has_mpu;
28
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/cpu.c
31
+++ b/target/arm/cpu.c
32
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_has_vfp_property =
33
static Property arm_cpu_has_neon_property =
34
DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);
35
36
+static Property arm_cpu_has_dsp_property =
37
+ DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);
38
+
39
static Property arm_cpu_has_mpu_property =
40
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
41
42
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
43
}
44
}
45
46
+ if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
47
+ arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
48
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property,
49
+ &error_abort);
50
+ }
51
+
52
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
53
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
54
&error_abort);
55
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
56
cpu->isar.mvfr0 = u;
57
}
58
59
+ if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) {
60
+ uint32_t u;
61
+
62
+ unset_feature(env, ARM_FEATURE_THUMB_DSP);
63
+
64
+ u = cpu->isar.id_isar1;
65
+ u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
66
+ cpu->isar.id_isar1 = u;
67
+
68
+ u = cpu->isar.id_isar2;
69
+ u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
70
+ u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
71
+ cpu->isar.id_isar2 = u;
72
+
73
+ u = cpu->isar.id_isar3;
74
+ u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
75
+ u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
76
+ cpu->isar.id_isar3 = u;
77
+ }
78
+
79
/* Some features automatically imply others: */
80
if (arm_feature(env, ARM_FEATURE_V8)) {
81
if (arm_feature(env, ARM_FEATURE_M)) {
82
--
83
2.20.1
84
85
diff view generated by jsdifflib
New patch
1
Create "vfp" and "dsp" properties on the armv7m container object
2
which will be forwarded to its CPU object, so that SoCs can
3
configure whether the CPU has these features.
1
4
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Message-id: 20190517174046.11146-4-peter.maydell@linaro.org
9
---
10
include/hw/arm/armv7m.h | 4 ++++
11
hw/arm/armv7m.c | 18 ++++++++++++++++++
12
2 files changed, 22 insertions(+)
13
14
diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/hw/arm/armv7m.h
17
+++ b/include/hw/arm/armv7m.h
18
@@ -XXX,XX +XXX,XX @@ typedef struct {
19
* devices will be automatically layered on top of this view.)
20
* + Property "idau": IDAU interface (forwarded to CPU object)
21
* + Property "init-svtor": secure VTOR reset value (forwarded to CPU object)
22
+ * + Property "vfp": enable VFP (forwarded to CPU object)
23
+ * + Property "dsp": enable DSP (forwarded to CPU object)
24
* + Property "enable-bitband": expose bitbanded IO
25
*/
26
typedef struct ARMv7MState {
27
@@ -XXX,XX +XXX,XX @@ typedef struct ARMv7MState {
28
uint32_t init_svtor;
29
bool enable_bitband;
30
bool start_powered_off;
31
+ bool vfp;
32
+ bool dsp;
33
} ARMv7MState;
34
35
#endif
36
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/hw/arm/armv7m.c
39
+++ b/hw/arm/armv7m.c
40
@@ -XXX,XX +XXX,XX @@ static void armv7m_realize(DeviceState *dev, Error **errp)
41
return;
42
}
43
}
44
+ if (object_property_find(OBJECT(s->cpu), "vfp", NULL)) {
45
+ object_property_set_bool(OBJECT(s->cpu), s->vfp,
46
+ "vfp", &err);
47
+ if (err != NULL) {
48
+ error_propagate(errp, err);
49
+ return;
50
+ }
51
+ }
52
+ if (object_property_find(OBJECT(s->cpu), "dsp", NULL)) {
53
+ object_property_set_bool(OBJECT(s->cpu), s->dsp,
54
+ "dsp", &err);
55
+ if (err != NULL) {
56
+ error_propagate(errp, err);
57
+ return;
58
+ }
59
+ }
60
61
/*
62
* Tell the CPU where the NVIC is; it will fail realize if it doesn't
63
@@ -XXX,XX +XXX,XX @@ static Property armv7m_properties[] = {
64
DEFINE_PROP_BOOL("enable-bitband", ARMv7MState, enable_bitband, false),
65
DEFINE_PROP_BOOL("start-powered-off", ARMv7MState, start_powered_off,
66
false),
67
+ DEFINE_PROP_BOOL("vfp", ARMv7MState, vfp, true),
68
+ DEFINE_PROP_BOOL("dsp", ARMv7MState, dsp, true),
69
DEFINE_PROP_END_OF_LIST(),
70
};
71
72
--
73
2.20.1
74
75
diff view generated by jsdifflib
New patch
1
The SSE-200 hardware has configurable integration settings which
2
determine whether its two CPUs have the FPU and DSP:
3
* CPU0_FPU (default 0)
4
* CPU0_DSP (default 0)
5
* CPU1_FPU (default 1)
6
* CPU1_DSP (default 1)
1
7
8
Similarly, the IoTKit has settings for its single CPU:
9
* CPU0_FPU (default 1)
10
* CPU0_DSP (default 1)
11
12
Of our four boards that use either the IoTKit or the SSE-200:
13
* mps2-an505, mps2-an521 and musca-a use the default settings
14
* musca-b1 enables FPU and DSP on both CPUs
15
16
Currently QEMU models all these boards using CPUs with
17
both FPU and DSP enabled. This means that we are incorrect
18
for mps2-an521 and musca-a, which should not have FPU or DSP
19
on CPU0.
20
21
Create QOM properties on the ARMSSE devices corresponding to the
22
default h/w integration settings, and make the Musca-B1 board
23
enable FPU and DSP on both CPUs. This fixes the mps2-an521
24
and musca-a behaviour, and leaves the musca-b1 and mps2-an505
25
behaviour unchanged.
26
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
29
Message-id: 20190517174046.11146-5-peter.maydell@linaro.org
30
---
31
include/hw/arm/armsse.h | 7 +++++
32
hw/arm/armsse.c | 58 ++++++++++++++++++++++++++++++++---------
33
hw/arm/musca.c | 8 ++++++
34
3 files changed, 61 insertions(+), 12 deletions(-)
35
36
diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h
37
index XXXXXXX..XXXXXXX 100644
38
--- a/include/hw/arm/armsse.h
39
+++ b/include/hw/arm/armsse.h
40
@@ -XXX,XX +XXX,XX @@
41
* address of each SRAM bank (and thus the total amount of internal SRAM)
42
* + QOM property "init-svtor" sets the initial value of the CPU SVTOR register
43
* (where it expects to load the PC and SP from the vector table on reset)
44
+ * + QOM properties "CPU0_FPU", "CPU0_DSP", "CPU1_FPU" and "CPU1_DSP" which
45
+ * set whether the CPUs have the FPU and DSP features present. The default
46
+ * (matching the hardware) is that for CPU0 in an IoTKit and CPU1 in an
47
+ * SSE-200 both are present; CPU0 in an SSE-200 has neither.
48
+ * Since the IoTKit has only one CPU, it does not have the CPU1_* properties.
49
* + Named GPIO inputs "EXP_IRQ" 0..n are the expansion interrupts for CPU 0,
50
* which are wired to its NVIC lines 32 .. n+32
51
* + Named GPIO inputs "EXP_CPU1_IRQ" 0..n are the expansion interrupts for
52
@@ -XXX,XX +XXX,XX @@ typedef struct ARMSSE {
53
uint32_t mainclk_frq;
54
uint32_t sram_addr_width;
55
uint32_t init_svtor;
56
+ bool cpu_fpu[SSE_MAX_CPUS];
57
+ bool cpu_dsp[SSE_MAX_CPUS];
58
} ARMSSE;
59
60
typedef struct ARMSSEInfo ARMSSEInfo;
61
diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
62
index XXXXXXX..XXXXXXX 100644
63
--- a/hw/arm/armsse.c
64
+++ b/hw/arm/armsse.c
65
@@ -XXX,XX +XXX,XX @@ struct ARMSSEInfo {
66
bool has_cachectrl;
67
bool has_cpusecctrl;
68
bool has_cpuid;
69
+ Property *props;
70
+};
71
+
72
+static Property iotkit_properties[] = {
73
+ DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
74
+ MemoryRegion *),
75
+ DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
76
+ DEFINE_PROP_UINT32("MAINCLK", ARMSSE, mainclk_frq, 0),
77
+ DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
78
+ DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
79
+ DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true),
80
+ DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true),
81
+ DEFINE_PROP_END_OF_LIST()
82
+};
83
+
84
+static Property armsse_properties[] = {
85
+ DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
86
+ MemoryRegion *),
87
+ DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
88
+ DEFINE_PROP_UINT32("MAINCLK", ARMSSE, mainclk_frq, 0),
89
+ DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
90
+ DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
91
+ DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], false),
92
+ DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], false),
93
+ DEFINE_PROP_BOOL("CPU1_FPU", ARMSSE, cpu_fpu[1], true),
94
+ DEFINE_PROP_BOOL("CPU1_DSP", ARMSSE, cpu_dsp[1], true),
95
+ DEFINE_PROP_END_OF_LIST()
96
};
97
98
static const ARMSSEInfo armsse_variants[] = {
99
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
100
.has_cachectrl = false,
101
.has_cpusecctrl = false,
102
.has_cpuid = false,
103
+ .props = iotkit_properties,
104
},
105
{
106
.name = TYPE_SSE200,
107
@@ -XXX,XX +XXX,XX @@ static const ARMSSEInfo armsse_variants[] = {
108
.has_cachectrl = true,
109
.has_cpusecctrl = true,
110
.has_cpuid = true,
111
+ .props = armsse_properties,
112
},
113
};
114
115
@@ -XXX,XX +XXX,XX @@ static void armsse_realize(DeviceState *dev, Error **errp)
116
return;
117
}
118
}
119
+ if (!s->cpu_fpu[i]) {
120
+ object_property_set_bool(cpuobj, false, "vfp", &err);
121
+ if (err) {
122
+ error_propagate(errp, err);
123
+ return;
124
+ }
125
+ }
126
+ if (!s->cpu_dsp[i]) {
127
+ object_property_set_bool(cpuobj, false, "dsp", &err);
128
+ if (err) {
129
+ error_propagate(errp, err);
130
+ return;
131
+ }
132
+ }
133
134
if (i > 0) {
135
memory_region_add_subregion_overlap(&s->cpu_container[i], 0,
136
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription armsse_vmstate = {
137
}
138
};
139
140
-static Property armsse_properties[] = {
141
- DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
142
- MemoryRegion *),
143
- DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
144
- DEFINE_PROP_UINT32("MAINCLK", ARMSSE, mainclk_frq, 0),
145
- DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
146
- DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
147
- DEFINE_PROP_END_OF_LIST()
148
-};
149
-
150
static void armsse_reset(DeviceState *dev)
151
{
152
ARMSSE *s = ARMSSE(dev);
153
@@ -XXX,XX +XXX,XX @@ static void armsse_class_init(ObjectClass *klass, void *data)
154
DeviceClass *dc = DEVICE_CLASS(klass);
155
IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(klass);
156
ARMSSEClass *asc = ARMSSE_CLASS(klass);
157
+ const ARMSSEInfo *info = data;
158
159
dc->realize = armsse_realize;
160
dc->vmsd = &armsse_vmstate;
161
- dc->props = armsse_properties;
162
+ dc->props = info->props;
163
dc->reset = armsse_reset;
164
iic->check = armsse_idau_check;
165
- asc->info = data;
166
+ asc->info = info;
167
}
168
169
static const TypeInfo armsse_info = {
170
diff --git a/hw/arm/musca.c b/hw/arm/musca.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/hw/arm/musca.c
173
+++ b/hw/arm/musca.c
174
@@ -XXX,XX +XXX,XX @@ static void musca_init(MachineState *machine)
175
qdev_prop_set_uint32(ssedev, "init-svtor", mmc->init_svtor);
176
qdev_prop_set_uint32(ssedev, "SRAM_ADDR_WIDTH", mmc->sram_addr_width);
177
qdev_prop_set_uint32(ssedev, "MAINCLK", SYSCLK_FRQ);
178
+ /*
179
+ * Musca-A takes the default SSE-200 FPU/DSP settings (ie no for
180
+ * CPU0 and yes for CPU1); Musca-B1 explicitly enables them for CPU0.
181
+ */
182
+ if (mmc->type == MUSCA_B1) {
183
+ qdev_prop_set_bit(ssedev, "CPU0_FPU", true);
184
+ qdev_prop_set_bit(ssedev, "CPU0_DSP", true);
185
+ }
186
object_property_set_bool(OBJECT(&mms->sse), true, "realized",
187
&error_fatal);
188
189
--
190
2.20.1
191
192
diff view generated by jsdifflib
New patch
1
The GIC ID registers cover an area 0x30 bytes in size
2
(12 registers, 4 bytes each). We were incorrectly decoding
3
only the first 0x20 bytes.
1
4
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Message-id: 20190524124248.28394-2-peter.maydell@linaro.org
8
---
9
hw/intc/arm_gicv3_dist.c | 4 ++--
10
hw/intc/arm_gicv3_redist.c | 4 ++--
11
2 files changed, 4 insertions(+), 4 deletions(-)
12
13
diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/hw/intc/arm_gicv3_dist.c
16
+++ b/hw/intc/arm_gicv3_dist.c
17
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset,
18
}
19
return MEMTX_OK;
20
}
21
- case GICD_IDREGS ... GICD_IDREGS + 0x1f:
22
+ case GICD_IDREGS ... GICD_IDREGS + 0x2f:
23
/* ID registers */
24
*data = gicv3_idreg(offset - GICD_IDREGS);
25
return MEMTX_OK;
26
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicd_writel(GICv3State *s, hwaddr offset,
27
gicd_write_irouter(s, attrs, irq, r);
28
return MEMTX_OK;
29
}
30
- case GICD_IDREGS ... GICD_IDREGS + 0x1f:
31
+ case GICD_IDREGS ... GICD_IDREGS + 0x2f:
32
case GICD_TYPER:
33
case GICD_IIDR:
34
/* RO registers, ignore the write */
35
diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/hw/intc/arm_gicv3_redist.c
38
+++ b/hw/intc/arm_gicv3_redist.c
39
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
40
}
41
*data = cs->gicr_nsacr;
42
return MEMTX_OK;
43
- case GICR_IDREGS ... GICR_IDREGS + 0x1f:
44
+ case GICR_IDREGS ... GICR_IDREGS + 0x2f:
45
*data = gicv3_idreg(offset - GICR_IDREGS);
46
return MEMTX_OK;
47
default:
48
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
49
return MEMTX_OK;
50
case GICR_IIDR:
51
case GICR_TYPER:
52
- case GICR_IDREGS ... GICR_IDREGS + 0x1f:
53
+ case GICR_IDREGS ... GICR_IDREGS + 0x2f:
54
/* RO registers, ignore the write */
55
qemu_log_mask(LOG_GUEST_ERROR,
56
"%s: invalid guest write to RO register at offset "
57
--
58
2.20.1
59
60
diff view generated by jsdifflib
New patch
1
The GICv3 specification says that the GICD_TYPER.SecurityExtn bit
2
is RAZ if GICD_CTLR.DS is 1. We were incorrectly making it RAZ
3
if the security extension is unsupported. "Security extension
4
unsupported" always implies GICD_CTLR.DS == 1, but the guest can
5
also set DS on a GIC which does support the security extension.
6
Fix the condition to correctly check the GICD_CTLR.DS bit.
1
7
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Message-id: 20190524124248.28394-3-peter.maydell@linaro.org
10
---
11
hw/intc/arm_gicv3_dist.c | 8 +++++++-
12
1 file changed, 7 insertions(+), 1 deletion(-)
13
14
diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/intc/arm_gicv3_dist.c
17
+++ b/hw/intc/arm_gicv3_dist.c
18
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicd_readl(GICv3State *s, hwaddr offset,
19
* ITLinesNumber == (num external irqs / 32) - 1
20
*/
21
int itlinesnumber = ((s->num_irq - GIC_INTERNAL) / 32) - 1;
22
+ /*
23
+ * SecurityExtn must be RAZ if GICD_CTLR.DS == 1, and
24
+ * "security extensions not supported" always implies DS == 1,
25
+ * so we only need to check the DS bit.
26
+ */
27
+ bool sec_extn = !(s->gicd_ctlr & GICD_CTLR_DS);
28
29
- *data = (1 << 25) | (1 << 24) | (s->security_extn << 10) |
30
+ *data = (1 << 25) | (1 << 24) | (sec_extn << 10) |
31
(0xf << 19) | itlinesnumber;
32
return MEMTX_OK;
33
}
34
--
35
2.20.1
36
37
diff view generated by jsdifflib
New patch
1
We want to use vfp_expand_imm() in the AArch32 VFP decode;
2
move it from the a64-only header/source file to the
3
AArch32 one (which is always compiled even for AArch64).
1
4
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Message-id: 20190613163917.28589-2-peter.maydell@linaro.org
9
---
10
target/arm/translate-a64.h | 1 -
11
target/arm/translate.h | 7 +++++++
12
target/arm/translate-a64.c | 32 --------------------------------
13
target/arm/translate-vfp.inc.c | 33 +++++++++++++++++++++++++++++++++
14
4 files changed, 40 insertions(+), 33 deletions(-)
15
16
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate-a64.h
19
+++ b/target/arm/translate-a64.h
20
@@ -XXX,XX +XXX,XX @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
21
TCGv_ptr get_fpstatus_ptr(bool);
22
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
23
unsigned int imms, unsigned int immr);
24
-uint64_t vfp_expand_imm(int size, uint8_t imm8);
25
bool sve_access_check(DisasContext *s);
26
27
/* We should have at some point before trying to access an FP register
28
diff --git a/target/arm/translate.h b/target/arm/translate.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/translate.h
31
+++ b/target/arm/translate.h
32
@@ -XXX,XX +XXX,XX @@ static inline void gen_ss_advance(DisasContext *s)
33
}
34
}
35
36
+/*
37
+ * Given a VFP floating point constant encoded into an 8 bit immediate in an
38
+ * instruction, expand it to the actual constant value of the specified
39
+ * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
40
+ */
41
+uint64_t vfp_expand_imm(int size, uint8_t imm8);
42
+
43
/* Vector operations shared between ARM and AArch64. */
44
extern const GVecGen3 mla_op[4];
45
extern const GVecGen3 mls_op[4];
46
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/translate-a64.c
49
+++ b/target/arm/translate-a64.c
50
@@ -XXX,XX +XXX,XX @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
51
}
52
}
53
54
-/* The imm8 encodes the sign bit, enough bits to represent an exponent in
55
- * the range 01....1xx to 10....0xx, and the most significant 4 bits of
56
- * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
57
- */
58
-uint64_t vfp_expand_imm(int size, uint8_t imm8)
59
-{
60
- uint64_t imm;
61
-
62
- switch (size) {
63
- case MO_64:
64
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
65
- (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
66
- extract32(imm8, 0, 6);
67
- imm <<= 48;
68
- break;
69
- case MO_32:
70
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
71
- (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
72
- (extract32(imm8, 0, 6) << 3);
73
- imm <<= 16;
74
- break;
75
- case MO_16:
76
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
77
- (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
78
- (extract32(imm8, 0, 6) << 6);
79
- break;
80
- default:
81
- g_assert_not_reached();
82
- }
83
- return imm;
84
-}
85
-
86
/* Floating point immediate
87
* 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
88
* +---+---+---+-----------+------+---+------------+-------+------+------+
89
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/target/arm/translate-vfp.inc.c
92
+++ b/target/arm/translate-vfp.inc.c
93
@@ -XXX,XX +XXX,XX @@
94
#include "decode-vfp.inc.c"
95
#include "decode-vfp-uncond.inc.c"
96
97
+/*
98
+ * The imm8 encodes the sign bit, enough bits to represent an exponent in
99
+ * the range 01....1xx to 10....0xx, and the most significant 4 bits of
100
+ * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
101
+ */
102
+uint64_t vfp_expand_imm(int size, uint8_t imm8)
103
+{
104
+ uint64_t imm;
105
+
106
+ switch (size) {
107
+ case MO_64:
108
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
109
+ (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
110
+ extract32(imm8, 0, 6);
111
+ imm <<= 48;
112
+ break;
113
+ case MO_32:
114
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
115
+ (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
116
+ (extract32(imm8, 0, 6) << 3);
117
+ imm <<= 16;
118
+ break;
119
+ case MO_16:
120
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
121
+ (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
122
+ (extract32(imm8, 0, 6) << 6);
123
+ break;
124
+ default:
125
+ g_assert_not_reached();
126
+ }
127
+ return imm;
128
+}
129
+
130
/*
131
* Return the offset of a 16-bit half of the specified VFP single-precision
132
* register. If top is true, returns the top 16 bits; otherwise the bottom
133
--
134
2.20.1
135
136
diff view generated by jsdifflib
New patch
1
The AArch32 VMOV (immediate) instruction uses the same VFP encoded
2
immediate format we already handle in vfp_expand_imm(). Use that
3
function rather than hand-decoding it.
1
4
5
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Message-id: 20190613163917.28589-3-peter.maydell@linaro.org
10
---
11
target/arm/translate-vfp.inc.c | 28 ++++------------------------
12
target/arm/vfp.decode | 10 ++++++----
13
2 files changed, 10 insertions(+), 28 deletions(-)
14
15
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-vfp.inc.c
18
+++ b/target/arm/translate-vfp.inc.c
19
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
20
uint32_t delta_d = 0;
21
int veclen = s->vec_len;
22
TCGv_i32 fd;
23
- uint32_t n, i, vd;
24
+ uint32_t vd;
25
26
vd = a->vd;
27
28
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
29
}
30
}
31
32
- n = (a->imm4h << 28) & 0x80000000;
33
- i = ((a->imm4h << 4) & 0x70) | a->imm4l;
34
- if (i & 0x40) {
35
- i |= 0x780;
36
- } else {
37
- i |= 0x800;
38
- }
39
- n |= i << 19;
40
-
41
- fd = tcg_temp_new_i32();
42
- tcg_gen_movi_i32(fd, n);
43
+ fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
44
45
for (;;) {
46
neon_store_reg32(fd, vd);
47
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
48
uint32_t delta_d = 0;
49
int veclen = s->vec_len;
50
TCGv_i64 fd;
51
- uint32_t n, i, vd;
52
+ uint32_t vd;
53
54
vd = a->vd;
55
56
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
57
}
58
}
59
60
- n = (a->imm4h << 28) & 0x80000000;
61
- i = ((a->imm4h << 4) & 0x70) | a->imm4l;
62
- if (i & 0x40) {
63
- i |= 0x3f80;
64
- } else {
65
- i |= 0x4000;
66
- }
67
- n |= i << 16;
68
-
69
- fd = tcg_temp_new_i64();
70
- tcg_gen_movi_i64(fd, ((uint64_t)n) << 32);
71
+ fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
72
73
for (;;) {
74
neon_store_reg64(fd, vd);
75
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
76
index XXXXXXX..XXXXXXX 100644
77
--- a/target/arm/vfp.decode
78
+++ b/target/arm/vfp.decode
79
@@ -XXX,XX +XXX,XX @@
80
%vmov_idx_b 21:1 5:2
81
%vmov_idx_h 21:1 6:1
82
83
+%vmov_imm 16:4 0:4
84
+
85
# VMOV scalar to general-purpose register; note that this does
86
# include some Neon cases.
87
VMOV_to_gp ---- 1110 u:1 1. 1 .... rt:4 1011 ... 1 0000 \
88
@@ -XXX,XX +XXX,XX @@ VFM_sp ---- 1110 1.10 .... .... 1010 . o2:1 . 0 .... \
89
VFM_dp ---- 1110 1.10 .... .... 1011 . o2:1 . 0 .... \
90
vm=%vm_dp vn=%vn_dp vd=%vd_dp o1=2
91
92
-VMOV_imm_sp ---- 1110 1.11 imm4h:4 .... 1010 0000 imm4l:4 \
93
- vd=%vd_sp
94
-VMOV_imm_dp ---- 1110 1.11 imm4h:4 .... 1011 0000 imm4l:4 \
95
- vd=%vd_dp
96
+VMOV_imm_sp ---- 1110 1.11 .... .... 1010 0000 .... \
97
+ vd=%vd_sp imm=%vmov_imm
98
+VMOV_imm_dp ---- 1110 1.11 .... .... 1011 0000 .... \
99
+ vd=%vd_dp imm=%vmov_imm
100
101
VMOV_reg_sp ---- 1110 1.11 0000 .... 1010 01.0 .... \
102
vd=%vd_sp vm=%vm_sp
103
--
104
2.20.1
105
106
diff view generated by jsdifflib
New patch
1
Where Neon instructions are floating point operations, we
2
mostly use the old VFP utility functions like gen_vfp_abs()
3
which work on the TCG globals cpu_F0s and cpu_F1s. The
4
Neon for-each-element loop conditionally loads the inputs
5
into either a plain old TCG temporary for most operations
6
or into cpu_F0s for float operations, and similarly stores
7
back either cpu_F0s or the temporary.
1
8
9
Switch NEON_2RM_VABS_F away from using cpu_F0s, and
10
update neon_2rm_is_float_op() accordingly.
11
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
15
Message-id: 20190613163917.28589-4-peter.maydell@linaro.org
16
---
17
target/arm/translate.c | 19 ++++++++-----------
18
1 file changed, 8 insertions(+), 11 deletions(-)
19
20
diff --git a/target/arm/translate.c b/target/arm/translate.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/translate.c
23
+++ b/target/arm/translate.c
24
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
25
return statusptr;
26
}
27
28
-static inline void gen_vfp_abs(int dp)
29
-{
30
- if (dp)
31
- gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
32
- else
33
- gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
34
-}
35
-
36
static inline void gen_vfp_neg(int dp)
37
{
38
if (dp)
39
@@ -XXX,XX +XXX,XX @@ static const uint8_t neon_3r_sizes[] = {
40
41
static int neon_2rm_is_float_op(int op)
42
{
43
- /* Return true if this neon 2reg-misc op is float-to-float */
44
- return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
45
+ /*
46
+ * Return true if this neon 2reg-misc op is float-to-float.
47
+ * This is not a property of the operation but of our code --
48
+ * what we are asking here is "does the code for this case in
49
+ * the Neon for-each-pass loop use cpu_F0s?".
50
+ */
51
+ return (op == NEON_2RM_VNEG_F ||
52
(op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
53
op == NEON_2RM_VRINTM ||
54
(op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
55
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
56
break;
57
}
58
case NEON_2RM_VABS_F:
59
- gen_vfp_abs(0);
60
+ gen_helper_vfp_abss(tmp, tmp);
61
break;
62
case NEON_2RM_VNEG_F:
63
gen_vfp_neg(0);
64
--
65
2.20.1
66
67
diff view generated by jsdifflib
New patch
1
Switch NEON_2RM_VABS_F away from using cpu_F0s.
1
2
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Message-id: 20190613163917.28589-5-peter.maydell@linaro.org
7
---
8
target/arm/translate.c | 13 ++-----------
9
1 file changed, 2 insertions(+), 11 deletions(-)
10
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
16
return statusptr;
17
}
18
19
-static inline void gen_vfp_neg(int dp)
20
-{
21
- if (dp)
22
- gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
23
- else
24
- gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
25
-}
26
-
27
#define VFP_GEN_ITOF(name) \
28
static inline void gen_vfp_##name(int dp, int neon) \
29
{ \
30
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
31
* what we are asking here is "does the code for this case in
32
* the Neon for-each-pass loop use cpu_F0s?".
33
*/
34
- return (op == NEON_2RM_VNEG_F ||
35
- (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
36
+ return ((op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
37
op == NEON_2RM_VRINTM ||
38
(op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
39
op >= NEON_2RM_VRECPE_F);
40
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
41
gen_helper_vfp_abss(tmp, tmp);
42
break;
43
case NEON_2RM_VNEG_F:
44
- gen_vfp_neg(0);
45
+ gen_helper_vfp_negs(tmp, tmp);
46
break;
47
case NEON_2RM_VSWP:
48
tmp2 = neon_load_reg(rd, pass);
49
--
50
2.20.1
51
52
diff view generated by jsdifflib
1
From: Thomas Huth <thuth@redhat.com>
1
Switch NEON_2RM_VRINT* away from using cpu_F0s.
2
2
3
QEMU currently crashes when the user tries to instantiate the fsl,imx31
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
device manually:
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Message-id: 20190613163917.28589-6-peter.maydell@linaro.org
7
---
8
target/arm/translate.c | 8 +++-----
9
1 file changed, 3 insertions(+), 5 deletions(-)
5
10
6
$ aarch64-softmmu/qemu-system-aarch64 -M kzm -device fsl,,imx31
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
7
**
8
ERROR:/home/thuth/devel/qemu/tcg/tcg.c:538:tcg_register_thread:
9
assertion failed: (n < max_cpus)
10
Aborted (core dumped)
11
12
The kzm board (which is the one that uses this CPU type) only supports
13
one CPU, and the realize function of the "fsl,imx31" device also uses
14
serial_hds[] directly, so this device clearly can not be instantiated
15
twice and thus we should mark it with user_creatable = false.
16
17
Signed-off-by: Thomas Huth <thuth@redhat.com>
18
Message-id: 1509519537-6964-4-git-send-email-thuth@redhat.com
19
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
---
22
hw/arm/fsl-imx31.c | 6 +++++-
23
1 file changed, 5 insertions(+), 1 deletion(-)
24
25
diff --git a/hw/arm/fsl-imx31.c b/hw/arm/fsl-imx31.c
26
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
27
--- a/hw/arm/fsl-imx31.c
13
--- a/target/arm/translate.c
28
+++ b/hw/arm/fsl-imx31.c
14
+++ b/target/arm/translate.c
29
@@ -XXX,XX +XXX,XX @@ static void fsl_imx31_class_init(ObjectClass *oc, void *data)
15
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
30
DeviceClass *dc = DEVICE_CLASS(oc);
16
* what we are asking here is "does the code for this case in
31
17
* the Neon for-each-pass loop use cpu_F0s?".
32
dc->realize = fsl_imx31_realize;
18
*/
33
-
19
- return ((op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
34
dc->desc = "i.MX31 SOC";
20
- op == NEON_2RM_VRINTM ||
35
+ /*
21
- (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
36
+ * Reason: uses serial_hds in realize and the kzm board does not
22
+ return ((op >= NEON_2RM_VCVTAU && op <= NEON_2RM_VCVTMS) ||
37
+ * support multiple CPUs
23
op >= NEON_2RM_VRECPE_F);
38
+ */
39
+ dc->user_creatable = false;
40
}
24
}
41
25
42
static const TypeInfo fsl_imx31_type_info = {
26
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
27
tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
28
gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
29
cpu_env);
30
- gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
31
+ gen_helper_rints(tmp, tmp, fpstatus);
32
gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
33
cpu_env);
34
tcg_temp_free_ptr(fpstatus);
35
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
36
case NEON_2RM_VRINTX:
37
{
38
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
39
- gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
40
+ gen_helper_rints_exact(tmp, tmp, fpstatus);
41
tcg_temp_free_ptr(fpstatus);
42
break;
43
}
43
--
44
--
44
2.7.4
45
2.20.1
45
46
46
47
diff view generated by jsdifflib
1
For AArch32 LDREXD and STREXD, architecturally the 32-bit word at the
1
Stop using cpu_F0s for the NEON_2RM_VCVT[ANPM][US] ops.
2
lowest address is always Rt and the one at addr+4 is Rt2, even if the
3
CPU is big-endian. Our implementation does these with a single
4
64-bit store, so if we're big-endian then we need to put the two
5
32-bit halves together in the opposite order to little-endian,
6
so that they end up in the right places. We were trying to do
7
this with the gen_aa32_frob64() function, but that is not correct
8
for the usermode emulator, where there is a distinction
9
between "load a 64 bit value" (which does a BE 64-bit access
10
and doesn't need swapping) and "load two 32 bit values as one
11
64 bit access" (where we still need to do the swapping, like
12
system mode BE32).
13
2
14
Fixes: https://bugs.launchpad.net/qemu/+bug/1725267
15
Cc: qemu-stable@nongnu.org
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
18
Message-id: 1509622400-13351-1-git-send-email-peter.maydell@linaro.org
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Message-id: 20190613163917.28589-7-peter.maydell@linaro.org
19
---
7
---
20
target/arm/translate.c | 39 ++++++++++++++++++++++++++++++++++-----
8
target/arm/translate.c | 7 +++----
21
1 file changed, 34 insertions(+), 5 deletions(-)
9
1 file changed, 3 insertions(+), 4 deletions(-)
22
10
23
diff --git a/target/arm/translate.c b/target/arm/translate.c
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
24
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/translate.c
13
--- a/target/arm/translate.c
26
+++ b/target/arm/translate.c
14
+++ b/target/arm/translate.c
27
@@ -XXX,XX +XXX,XX @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
15
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
28
TCGv_i32 tmp2 = tcg_temp_new_i32();
16
* what we are asking here is "does the code for this case in
29
TCGv_i64 t64 = tcg_temp_new_i64();
17
* the Neon for-each-pass loop use cpu_F0s?".
30
18
*/
31
- gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
19
- return ((op >= NEON_2RM_VCVTAU && op <= NEON_2RM_VCVTMS) ||
32
+ /* For AArch32, architecturally the 32-bit word at the lowest
20
- op >= NEON_2RM_VRECPE_F);
33
+ * address is always Rt and the one at addr+4 is Rt2, even if
21
+ return op >= NEON_2RM_VRECPE_F;
34
+ * the CPU is big-endian. That means we don't want to do a
22
}
35
+ * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
23
36
+ * for an architecturally 64-bit access, but instead do a
24
static bool neon_2rm_is_v8_op(int op)
37
+ * 64-bit access using MO_BE if appropriate and then split
25
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
38
+ * the two halves.
26
cpu_env);
39
+ * This only makes a difference for BE32 user-mode, where
27
40
+ * frob64() must not flip the two halves of the 64-bit data
28
if (is_signed) {
41
+ * but this code must treat BE32 user-mode like BE32 system.
29
- gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
42
+ */
30
+ gen_helper_vfp_tosls(tmp, tmp,
43
+ TCGv taddr = gen_aa32_addr(s, addr, opc);
31
tcg_shift, fpst);
44
+
32
} else {
45
+ tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
33
- gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
46
+ tcg_temp_free(taddr);
34
+ gen_helper_vfp_touls(tmp, tmp,
47
tcg_gen_mov_i64(cpu_exclusive_val, t64);
35
tcg_shift, fpst);
48
- tcg_gen_extr_i64_i32(tmp, tmp2, t64);
36
}
49
+ if (s->be_data == MO_BE) {
50
+ tcg_gen_extr_i64_i32(tmp2, tmp, t64);
51
+ } else {
52
+ tcg_gen_extr_i64_i32(tmp, tmp2, t64);
53
+ }
54
tcg_temp_free_i64(t64);
55
56
store_reg(s, rt2, tmp2);
57
@@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
58
TCGv_i64 n64 = tcg_temp_new_i64();
59
60
t2 = load_reg(s, rt2);
61
- tcg_gen_concat_i32_i64(n64, t1, t2);
62
+ /* For AArch32, architecturally the 32-bit word at the lowest
63
+ * address is always Rt and the one at addr+4 is Rt2, even if
64
+ * the CPU is big-endian. Since we're going to treat this as a
65
+ * single 64-bit BE store, we need to put the two halves in the
66
+ * opposite order for BE to LE, so that they end up in the right
67
+ * places.
68
+ * We don't want gen_aa32_frob64() because that does the wrong
69
+ * thing for BE32 usermode.
70
+ */
71
+ if (s->be_data == MO_BE) {
72
+ tcg_gen_concat_i32_i64(n64, t2, t1);
73
+ } else {
74
+ tcg_gen_concat_i32_i64(n64, t1, t2);
75
+ }
76
tcg_temp_free_i32(t2);
77
- gen_aa32_frob64(s, n64);
78
79
tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
80
get_mem_index(s), opc);
81
tcg_temp_free_i64(n64);
82
83
- gen_aa32_frob64(s, o64);
84
tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
85
tcg_gen_extrl_i64_i32(t0, o64);
86
37
87
--
38
--
88
2.7.4
39
2.20.1
89
40
90
41
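The ordering logic in the LDREXD/STREXD patch above is easiest to see in isolation. The following is only a condensed extract of the gen_store_exclusive() hunk, not additional code: tcg_gen_concat_i32_i64(dest, lo, hi) builds a 64-bit value from a low and a high 32-bit half, so

    /* Pack Rt (t1) and Rt2 (t2) for the single 64-bit exclusive store.
     * Architecturally Rt belongs at the lower address even on a
     * big-endian CPU.
     */
    if (s->be_data == MO_BE) {
        /* a BE 64-bit store puts the high half at the lower address,
         * so Rt goes in the high half
         */
        tcg_gen_concat_i32_i64(n64, t2, t1);
    } else {
        /* an LE 64-bit store puts the low half at the lower address,
         * so Rt goes in the low half
         */
        tcg_gen_concat_i32_i64(n64, t1, t2);
    }

The load side in gen_load_exclusive() mirrors this with tcg_gen_extr_i64_i32(), swapping which destination temp receives the low and high halves in the same way.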
1
From: Thomas Huth <thuth@redhat.com>
1
Stop using cpu_F0s for NEON_2RM_VRECPE_F and NEON_2RM_VRSQRTE_F.
2
2
3
QEMU currently crashes when the user tries to instantiate the fsl,imx25
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
device manually:
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Message-id: 20190613163917.28589-8-peter.maydell@linaro.org
7
---
8
target/arm/translate.c | 6 +++---
9
1 file changed, 3 insertions(+), 3 deletions(-)
5
10
6
$ aarch64-softmmu/qemu-system-aarch64 -S -M imx25-pdk -device fsl,,imx25
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
7
**
8
ERROR:/home/thuth/devel/qemu/tcg/tcg.c:538:tcg_register_thread:
9
assertion failed: (n < max_cpus)
10
11
The imx25-pdk board (which is the one that uses this CPU type) only
12
supports one CPU, and the realize function of the "fsl,imx25" device
13
also uses serial_hds[] directly, so this device clearly cannot be
14
instantiated twice and thus we should mark it with user_creatable = false.
15
16
Signed-off-by: Thomas Huth <thuth@redhat.com>
17
Message-id: 1509519537-6964-3-git-send-email-thuth@redhat.com
18
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
---
21
hw/arm/fsl-imx25.c | 6 +++++-
22
1 file changed, 5 insertions(+), 1 deletion(-)
23
24
diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c
25
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
26
--- a/hw/arm/fsl-imx25.c
13
--- a/target/arm/translate.c
27
+++ b/hw/arm/fsl-imx25.c
14
+++ b/target/arm/translate.c
28
@@ -XXX,XX +XXX,XX @@ static void fsl_imx25_class_init(ObjectClass *oc, void *data)
15
@@ -XXX,XX +XXX,XX @@ static int neon_2rm_is_float_op(int op)
29
DeviceClass *dc = DEVICE_CLASS(oc);
16
* what we are asking here is "does the code for this case in
30
17
* the Neon for-each-pass loop use cpu_F0s?".
31
dc->realize = fsl_imx25_realize;
18
*/
32
-
19
- return op >= NEON_2RM_VRECPE_F;
33
dc->desc = "i.MX25 SOC";
20
+ return op >= NEON_2RM_VCVT_FS;
34
+ /*
35
+ * Reason: uses serial_hds in realize and the imx25 board does not
36
+ * support multiple CPUs
37
+ */
38
+ dc->user_creatable = false;
39
}
21
}
40
22
41
static const TypeInfo fsl_imx25_type_info = {
23
static bool neon_2rm_is_v8_op(int op)
24
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
25
case NEON_2RM_VRECPE_F:
26
{
27
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
28
- gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
29
+ gen_helper_recpe_f32(tmp, tmp, fpstatus);
30
tcg_temp_free_ptr(fpstatus);
31
break;
32
}
33
case NEON_2RM_VRSQRTE_F:
34
{
35
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
36
- gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
37
+ gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
38
tcg_temp_free_ptr(fpstatus);
39
break;
40
}
42
--
41
--
43
2.7.4
42
2.20.1
44
43
45
44
1
Stop using cpu_F0s for the Neon f32/s32 VCVT operations.
2
Since this is the last user of cpu_F0s in the Neon 2rm-op
3
loop, we can remove the handling code for it too.
1
4
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Message-id: 20190613163917.28589-9-peter.maydell@linaro.org
9
---
10
target/arm/translate.c | 82 ++++++++++++------------------------------
11
1 file changed, 22 insertions(+), 60 deletions(-)
12
13
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate.c
16
+++ b/target/arm/translate.c
17
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
18
return statusptr;
19
}
20
21
-#define VFP_GEN_ITOF(name) \
22
-static inline void gen_vfp_##name(int dp, int neon) \
23
-{ \
24
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
25
- if (dp) { \
26
- gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
27
- } else { \
28
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
29
- } \
30
- tcg_temp_free_ptr(statusptr); \
31
-}
32
-
33
-VFP_GEN_ITOF(uito)
34
-VFP_GEN_ITOF(sito)
35
-#undef VFP_GEN_ITOF
36
-
37
-#define VFP_GEN_FTOI(name) \
38
-static inline void gen_vfp_##name(int dp, int neon) \
39
-{ \
40
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
41
- if (dp) { \
42
- gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
43
- } else { \
44
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
45
- } \
46
- tcg_temp_free_ptr(statusptr); \
47
-}
48
-
49
-VFP_GEN_FTOI(touiz)
50
-VFP_GEN_FTOI(tosiz)
51
-#undef VFP_GEN_FTOI
52
-
53
#define VFP_GEN_FIX(name, round) \
54
static inline void gen_vfp_##name(int dp, int shift, int neon) \
55
{ \
56
@@ -XXX,XX +XXX,XX @@ static const uint8_t neon_3r_sizes[] = {
57
#define NEON_2RM_VCVT_SF 62
58
#define NEON_2RM_VCVT_UF 63
59
60
-static int neon_2rm_is_float_op(int op)
61
-{
62
- /*
63
- * Return true if this neon 2reg-misc op is float-to-float.
64
- * This is not a property of the operation but of our code --
65
- * what we are asking here is "does the code for this case in
66
- * the Neon for-each-pass loop use cpu_F0s?".
67
- */
68
- return op >= NEON_2RM_VCVT_FS;
69
-}
70
-
71
static bool neon_2rm_is_v8_op(int op)
72
{
73
/* Return true if this neon 2reg-misc op is ARMv8 and up */
74
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
75
default:
76
elementwise:
77
for (pass = 0; pass < (q ? 4 : 2); pass++) {
78
- if (neon_2rm_is_float_op(op)) {
79
- tcg_gen_ld_f32(cpu_F0s, cpu_env,
80
- neon_reg_offset(rm, pass));
81
- tmp = NULL;
82
- } else {
83
- tmp = neon_load_reg(rm, pass);
84
- }
85
+ tmp = neon_load_reg(rm, pass);
86
switch (op) {
87
case NEON_2RM_VREV32:
88
switch (size) {
89
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
90
break;
91
}
92
case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
93
- gen_vfp_sito(0, 1);
94
+ {
95
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
96
+ gen_helper_vfp_sitos(tmp, tmp, fpstatus);
97
+ tcg_temp_free_ptr(fpstatus);
98
break;
99
+ }
100
case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
101
- gen_vfp_uito(0, 1);
102
+ {
103
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
104
+ gen_helper_vfp_uitos(tmp, tmp, fpstatus);
105
+ tcg_temp_free_ptr(fpstatus);
106
break;
107
+ }
108
case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
109
- gen_vfp_tosiz(0, 1);
110
+ {
111
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
112
+ gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
113
+ tcg_temp_free_ptr(fpstatus);
114
break;
115
+ }
116
case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
117
- gen_vfp_touiz(0, 1);
118
+ {
119
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
120
+ gen_helper_vfp_touizs(tmp, tmp, fpstatus);
121
+ tcg_temp_free_ptr(fpstatus);
122
break;
123
+ }
124
default:
125
/* Reserved op values were caught by the
126
* neon_2rm_sizes[] check earlier.
127
*/
128
abort();
129
}
130
- if (neon_2rm_is_float_op(op)) {
131
- tcg_gen_st_f32(cpu_F0s, cpu_env,
132
- neon_reg_offset(rd, pass));
133
- } else {
134
- neon_store_reg(rd, pass, tmp);
135
- }
136
+ neon_store_reg(rd, pass, tmp);
137
}
138
break;
139
}
140
--
141
2.20.1
142
143
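After this conversion, all of the Neon 2-reg-misc conversions above share the same per-pass shape; the sketch below is a simplified condensation of the hunks above (using the VCVT.F32.S32 case as the example, other ops just call a different helper), not a further change:

    for (pass = 0; pass < (q ? 4 : 2); pass++) {
        TCGv_i32 tmp = neon_load_reg(rm, pass);    /* element in an ordinary temp */
        TCGv_ptr fpstatus = get_fpstatus_ptr(1);   /* standard-FPSCR flavour used by Neon */
        gen_helper_vfp_sitos(tmp, tmp, fpstatus);  /* convert in place */
        tcg_temp_free_ptr(fpstatus);
        neon_store_reg(rd, pass, tmp);             /* neon_store_reg also frees the temp */
    }

cpu_F0s no longer appears anywhere in the loop, which is what allows neon_2rm_is_float_op() and the special-case load/store paths to be deleted.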
1
Stop using cpu_F0s in the Neon VCVT fixed-point operations.
1
2
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Message-id: 20190613163917.28589-10-peter.maydell@linaro.org
7
---
8
target/arm/translate.c | 62 +++++++++++++++++++-----------------------
9
1 file changed, 28 insertions(+), 34 deletions(-)
10
11
diff --git a/target/arm/translate.c b/target/arm/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate.c
14
+++ b/target/arm/translate.c
15
@@ -XXX,XX +XXX,XX @@ static const char * const regnames[] =
16
/* Function prototypes for gen_ functions calling Neon helpers. */
17
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
18
TCGv_i32, TCGv_i32);
19
+/* Function prototypes for gen_ functions for fix point conversions */
20
+typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
21
22
/* initialize TCG globals. */
23
void arm_translate_init(void)
24
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_fpstatus_ptr(int neon)
25
return statusptr;
26
}
27
28
-#define VFP_GEN_FIX(name, round) \
29
-static inline void gen_vfp_##name(int dp, int shift, int neon) \
30
-{ \
31
- TCGv_i32 tmp_shift = tcg_const_i32(shift); \
32
- TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
33
- if (dp) { \
34
- gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
35
- statusptr); \
36
- } else { \
37
- gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
38
- statusptr); \
39
- } \
40
- tcg_temp_free_i32(tmp_shift); \
41
- tcg_temp_free_ptr(statusptr); \
42
-}
43
-VFP_GEN_FIX(tosl, _round_to_zero)
44
-VFP_GEN_FIX(toul, _round_to_zero)
45
-VFP_GEN_FIX(slto, )
46
-VFP_GEN_FIX(ulto, )
47
-#undef VFP_GEN_FIX
48
-
49
static inline long vfp_reg_offset(bool dp, unsigned reg)
50
{
51
if (dp) {
52
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
53
}
54
} else if (op >= 14) {
55
/* VCVT fixed-point. */
56
+ TCGv_ptr fpst;
57
+ TCGv_i32 shiftv;
58
+ VFPGenFixPointFn *fn;
59
+
60
if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
61
return 1;
62
}
63
+
64
+ if (!(op & 1)) {
65
+ if (u) {
66
+ fn = gen_helper_vfp_ultos;
67
+ } else {
68
+ fn = gen_helper_vfp_sltos;
69
+ }
70
+ } else {
71
+ if (u) {
72
+ fn = gen_helper_vfp_touls_round_to_zero;
73
+ } else {
74
+ fn = gen_helper_vfp_tosls_round_to_zero;
75
+ }
76
+ }
77
+
78
/* We have already masked out the must-be-1 top bit of imm6,
79
* hence this 32-shift where the ARM ARM has 64-imm6.
80
*/
81
shift = 32 - shift;
82
+ fpst = get_fpstatus_ptr(1);
83
+ shiftv = tcg_const_i32(shift);
84
for (pass = 0; pass < (q ? 4 : 2); pass++) {
85
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
86
- if (!(op & 1)) {
87
- if (u)
88
- gen_vfp_ulto(0, shift, 1);
89
- else
90
- gen_vfp_slto(0, shift, 1);
91
- } else {
92
- if (u)
93
- gen_vfp_toul(0, shift, 1);
94
- else
95
- gen_vfp_tosl(0, shift, 1);
96
- }
97
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
98
+ TCGv_i32 tmpf = neon_load_reg(rm, pass);
99
+ fn(tmpf, tmpf, shiftv, fpst);
100
+ neon_store_reg(rd, pass, tmpf);
101
}
102
+ tcg_temp_free_ptr(fpst);
103
+ tcg_temp_free_i32(shiftv);
104
} else {
105
return 1;
106
}
107
--
108
2.20.1
109
110
1
From: Thomas Huth <thuth@redhat.com>
1
Remove some old constructs from NEON_2RM_VCVT_F16_F32 code:
2
* don't use cpu_F0s
3
* don't use tcg_gen_ld_f32
2
4
3
This device causes QEMU to abort if the user tries to instantiate it:
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Message-id: 20190613163917.28589-11-peter.maydell@linaro.org
9
---
10
target/arm/translate.c | 27 ++++++++++++---------------
11
1 file changed, 12 insertions(+), 15 deletions(-)
4
12
5
$ qemu-system-aarch64 -M sabrelite -smp 1,maxcpus=2 -device fsl,,imx6
13
diff --git a/target/arm/translate.c b/target/arm/translate.c
6
Unexpected error in qemu_chr_fe_init() at chardev/char-fe.c:222:
7
qemu-system-aarch64: -device fsl,,imx6: Device 'serial0' is in use
8
Aborted (core dumped)
9
10
The device uses serial_hds[] directly in its realize function, so it
11
cannot be instantiated again by the user.
12
13
Signed-off-by: Thomas Huth <thuth@redhat.com>
14
Message-id: 1509519537-6964-2-git-send-email-thuth@redhat.com
15
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
18
hw/arm/fsl-imx6.c | 3 ++-
19
1 file changed, 2 insertions(+), 1 deletion(-)
20
21
diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c
22
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
23
--- a/hw/arm/fsl-imx6.c
15
--- a/target/arm/translate.c
24
+++ b/hw/arm/fsl-imx6.c
16
+++ b/target/arm/translate.c
25
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6_class_init(ObjectClass *oc, void *data)
17
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
26
DeviceClass *dc = DEVICE_CLASS(oc);
18
return ret;
27
28
dc->realize = fsl_imx6_realize;
29
-
30
dc->desc = "i.MX6 SOC";
31
+ /* Reason: Uses serial_hds[] in the realize() function */
32
+ dc->user_creatable = false;
33
}
19
}
34
20
35
static const TypeInfo fsl_imx6_type_info = {
21
-#define tcg_gen_ld_f32 tcg_gen_ld_i32
22
#define tcg_gen_st_f32 tcg_gen_st_i32
23
24
#define ARM_CP_RW_BIT (1 << 20)
25
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
26
q || (rm & 1)) {
27
return 1;
28
}
29
- tmp = tcg_temp_new_i32();
30
- tmp2 = tcg_temp_new_i32();
31
fpst = get_fpstatus_ptr(true);
32
ahp = get_ahp_flag();
33
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
34
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
35
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
36
- gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
37
+ tmp = neon_load_reg(rm, 0);
38
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
39
+ tmp2 = neon_load_reg(rm, 1);
40
+ gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
41
tcg_gen_shli_i32(tmp2, tmp2, 16);
42
tcg_gen_or_i32(tmp2, tmp2, tmp);
43
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
44
- gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
45
- tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
46
+ tcg_temp_free_i32(tmp);
47
+ tmp = neon_load_reg(rm, 2);
48
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
49
+ tmp3 = neon_load_reg(rm, 3);
50
neon_store_reg(rd, 0, tmp2);
51
- tmp2 = tcg_temp_new_i32();
52
- gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
53
- tcg_gen_shli_i32(tmp2, tmp2, 16);
54
- tcg_gen_or_i32(tmp2, tmp2, tmp);
55
- neon_store_reg(rd, 1, tmp2);
56
+ gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
57
+ tcg_gen_shli_i32(tmp3, tmp3, 16);
58
+ tcg_gen_or_i32(tmp3, tmp3, tmp);
59
+ neon_store_reg(rd, 1, tmp3);
60
tcg_temp_free_i32(tmp);
61
tcg_temp_free_i32(ahp);
62
tcg_temp_free_ptr(fpst);
36
--
63
--
37
2.7.4
64
2.20.1
38
65
39
66
1
Remove some old constructs from NEON_2RM_VCVT_F16_F32 code:
2
* don't use cpu_F0s
3
* don't use tcg_gen_st_f32
1
4
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Message-id: 20190613163917.28589-12-peter.maydell@linaro.org
9
---
10
target/arm/translate.c | 26 +++++++++++---------------
11
1 file changed, 11 insertions(+), 15 deletions(-)
12
13
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate.c
16
+++ b/target/arm/translate.c
17
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
18
return ret;
19
}
20
21
-#define tcg_gen_st_f32 tcg_gen_st_i32
22
-
23
#define ARM_CP_RW_BIT (1 << 20)
24
25
/* Include the VFP decoder */
26
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
27
tmp = neon_load_reg(rm, 0);
28
tmp2 = neon_load_reg(rm, 1);
29
tcg_gen_ext16u_i32(tmp3, tmp);
30
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
31
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
32
- tcg_gen_shri_i32(tmp3, tmp, 16);
33
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
34
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
35
- tcg_temp_free_i32(tmp);
36
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
37
+ neon_store_reg(rd, 0, tmp3);
38
+ tcg_gen_shri_i32(tmp, tmp, 16);
39
+ gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
40
+ neon_store_reg(rd, 1, tmp);
41
+ tmp3 = tcg_temp_new_i32();
42
tcg_gen_ext16u_i32(tmp3, tmp2);
43
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
44
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
45
- tcg_gen_shri_i32(tmp3, tmp2, 16);
46
- gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
47
- tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
48
- tcg_temp_free_i32(tmp2);
49
- tcg_temp_free_i32(tmp3);
50
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
51
+ neon_store_reg(rd, 2, tmp3);
52
+ tcg_gen_shri_i32(tmp2, tmp2, 16);
53
+ gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
54
+ neon_store_reg(rd, 3, tmp2);
55
tcg_temp_free_i32(ahp);
56
tcg_temp_free_ptr(fpst);
57
break;
58
--
59
2.20.1
60
61
1
From: Shanker Donthineni <shankerd@codeaurora.org>
1
Remove the now unused TCG globals cpu_F0s, cpu_F0d, cpu_F1s, cpu_F1d.
2
2
3
The commit cddafd8f353d ("hw/intc/arm_gicv3_its: Implement state save
3
cpu_M0 is still used by the iwmmxt code, and cpu_V0 and
4
/restore") breaks the backward compatibility with the older kernels
4
cpu_V1 are used by both iwmmxt and Neon.
5
where vITS save/restore support is not available. The vmstate function
6
vm_change_state_handler() should not be registered if the running kernel
7
doesn't support the ITS save/restore feature. Otherwise the VM instance will be
8
killed whenever the vmstate callback function is invoked.
9
5
10
Observed a virtual machine shutdown with QEMU-2.10+linux-4.11 when testing
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
the reboot command "virsh reboot <domain> --mode acpi" instead of reboot.
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Message-id: 20190613163917.28589-13-peter.maydell@linaro.org
10
---
11
target/arm/translate.c | 12 ++----------
12
1 file changed, 2 insertions(+), 10 deletions(-)
12
13
13
KVM Error: 'KVM_SET_DEVICE_ATTR failed: Group 4 attr 0x00000000000001'
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
15
Signed-off-by: Shanker Donthineni <shankerd@codeaurora.org>
16
Reviewed-by: Eric Auger <eric.auger@redhat.com>
17
Message-id: 1509712671-16299-1-git-send-email-shankerd@codeaurora.org
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
---
20
hw/intc/arm_gicv3_its_kvm.c | 4 ++--
21
1 file changed, 2 insertions(+), 2 deletions(-)
22
23
diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c
24
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
25
--- a/hw/intc/arm_gicv3_its_kvm.c
16
--- a/target/arm/translate.c
26
+++ b/hw/intc/arm_gicv3_its_kvm.c
17
+++ b/target/arm/translate.c
27
@@ -XXX,XX +XXX,XX @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
18
@@ -XXX,XX +XXX,XX @@ TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
28
error_free(s->migration_blocker);
19
TCGv_i64 cpu_exclusive_addr;
29
return;
20
TCGv_i64 cpu_exclusive_val;
30
}
21
31
+ } else {
22
-/* FIXME: These should be removed. */
32
+ qemu_add_vm_change_state_handler(vm_change_state_handler, s);
23
-static TCGv_i32 cpu_F0s, cpu_F1s;
24
-static TCGv_i64 cpu_F0d, cpu_F1d;
25
-
26
#include "exec/gen-icount.h"
27
28
static const char * const regnames[] =
29
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
30
dc->base.max_insns = MIN(dc->base.max_insns, bound);
33
}
31
}
34
32
35
kvm_msi_use_devid = true;
33
- cpu_F0s = tcg_temp_new_i32();
36
kvm_gsi_direct_mapping = false;
34
- cpu_F1s = tcg_temp_new_i32();
37
kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
35
- cpu_F0d = tcg_temp_new_i64();
38
-
36
- cpu_F1d = tcg_temp_new_i64();
39
- qemu_add_vm_change_state_handler(vm_change_state_handler, s);
37
- cpu_V0 = cpu_F0d;
38
- cpu_V1 = cpu_F1d;
39
+ cpu_V0 = tcg_temp_new_i64();
40
+ cpu_V1 = tcg_temp_new_i64();
41
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
42
cpu_M0 = tcg_temp_new_i64();
40
}
43
}
41
42
/**
43
--
44
--
44
2.7.4
45
2.20.1
45
46
46
47
1
From: Andrew Baumann <Andrew.Baumann@microsoft.com>
1
In several places, cut-and-paste errors meant we were using the wrong
2
type for the 'arg' struct in trans_ functions called by the
3
decodetree decoder, because we were using the _sp version of the
4
struct in the _dp function. These were harmless, because the two
5
structs were identical and so decodetree made them typedefs of the
6
same underlying structure (and we'd have had a compile error if they
7
were not harmless), but we should clean them up anyway.
2
8
3
On a successful address translation instruction, PAR is supposed to
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
contain cacheability and shareability attributes determined by the
10
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
translation. We previously returned 0 for these bits (in line with the
11
Message-id: 20190614104457.24703-2-peter.maydell@linaro.org
6
general strategy of ignoring caches and memory attributes), but some
12
---
7
guest OSes may depend on them.
13
target/arm/translate-vfp.inc.c | 28 ++++++++++++++--------------
14
1 file changed, 14 insertions(+), 14 deletions(-)
8
15
9
This patch collects the attribute bits in the page-table walk, and
16
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
10
updates PAR with the correct attributes for all LPAE translations.
11
Short descriptor formats still return 0 for these bits, as in the
12
prior implementation.
13
14
Signed-off-by: Andrew Baumann <Andrew.Baumann@microsoft.com>
15
Message-id: 20171031223830.4608-1-Andrew.Baumann@microsoft.com
16
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
19
target/arm/helper.c | 178 +++++++++++++++++++++++++++++++++++++++++++++++-----
20
1 file changed, 164 insertions(+), 14 deletions(-)
21
22
diff --git a/target/arm/helper.c b/target/arm/helper.c
23
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
24
--- a/target/arm/helper.c
18
--- a/target/arm/translate-vfp.inc.c
25
+++ b/target/arm/helper.c
19
+++ b/target/arm/translate-vfp.inc.c
26
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
27
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
28
29
#ifndef CONFIG_USER_ONLY
30
+/* Cacheability and shareability attributes for a memory access */
31
+typedef struct ARMCacheAttrs {
32
+ unsigned int attrs:8; /* as in the MAIR register encoding */
33
+ unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
34
+} ARMCacheAttrs;
35
+
36
static bool get_phys_addr(CPUARMState *env, target_ulong address,
37
MMUAccessType access_type, ARMMMUIdx mmu_idx,
38
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
39
target_ulong *page_size, uint32_t *fsr,
40
- ARMMMUFaultInfo *fi);
41
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
42
43
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
44
MMUAccessType access_type, ARMMMUIdx mmu_idx,
45
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
46
target_ulong *page_size_ptr, uint32_t *fsr,
47
- ARMMMUFaultInfo *fi);
48
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
49
50
/* Security attributes for an address, as returned by v8m_security_lookup. */
51
typedef struct V8M_SAttributes {
52
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
53
uint64_t par64;
54
MemTxAttrs attrs = {};
55
ARMMMUFaultInfo fi = {};
56
+ ARMCacheAttrs cacheattrs = {};
57
58
- ret = get_phys_addr(env, value, access_type, mmu_idx,
59
- &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
60
+ ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
61
+ &prot, &page_size, &fsr, &fi, &cacheattrs);
62
if (extended_addresses_enabled(env)) {
63
/* fsr is a DFSR/IFSR value for the long descriptor
64
* translation table format, but with WnR always clear.
65
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
66
if (!attrs.secure) {
67
par64 |= (1 << 9); /* NS */
68
}
69
- /* We don't set the ATTR or SH fields in the PAR. */
70
+ par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
71
+ par64 |= cacheattrs.shareability << 7; /* SH */
72
} else {
73
par64 |= 1; /* F */
74
par64 |= (fsr & 0x3f) << 1; /* FS */
75
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
76
return false;
77
}
78
if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
79
- &physaddr, &attrs, &prot, &page_size, &fsr, &fi)) {
80
+ &physaddr, &attrs, &prot, &page_size, &fsr, &fi, NULL)) {
81
/* the MPU lookup failed */
82
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
83
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
84
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
85
int ret;
86
87
ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
88
- &txattrs, &s2prot, &s2size, fsr, fi);
89
+ &txattrs, &s2prot, &s2size, fsr, fi, NULL);
90
if (ret) {
91
fi->s2addr = addr;
92
fi->stage2 = true;
93
@@ -XXX,XX +XXX,XX @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
94
return true;
21
return true;
95
}
22
}
96
23
97
+/* Translate from the 4-bit stage 2 representation of
24
-static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)
98
+ * memory attributes (without cache-allocation hints) to
25
+static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
99
+ * the 8-bit representation of the stage 1 MAIR registers
100
+ * (which includes allocation hints).
101
+ *
102
+ * ref: shared/translation/attrs/S2AttrDecode()
103
+ * .../S2ConvertAttrsHints()
104
+ */
105
+static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
106
+{
107
+ uint8_t hiattr = extract32(s2attrs, 2, 2);
108
+ uint8_t loattr = extract32(s2attrs, 0, 2);
109
+ uint8_t hihint = 0, lohint = 0;
110
+
111
+ if (hiattr != 0) { /* normal memory */
112
+ if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
113
+ hiattr = loattr = 1; /* non-cacheable */
114
+ } else {
115
+ if (hiattr != 1) { /* Write-through or write-back */
116
+ hihint = 3; /* RW allocate */
117
+ }
118
+ if (loattr != 1) { /* Write-through or write-back */
119
+ lohint = 3; /* RW allocate */
120
+ }
121
+ }
122
+ }
123
+
124
+ return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
125
+}
126
+
127
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
128
MMUAccessType access_type, ARMMMUIdx mmu_idx,
129
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
130
target_ulong *page_size_ptr, uint32_t *fsr,
131
- ARMMMUFaultInfo *fi)
132
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
133
{
26
{
134
ARMCPU *cpu = arm_env_get_cpu(env);
27
TCGv_i32 tmp;
135
CPUState *cs = CPU(cpu);
28
136
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
29
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
137
*/
30
return true;
138
txattrs->secure = false;
139
}
140
+
141
+ if (cacheattrs != NULL) {
142
+ if (mmu_idx == ARMMMUIdx_S2NS) {
143
+ cacheattrs->attrs = convert_stage2_attrs(env,
144
+ extract32(attrs, 0, 4));
145
+ } else {
146
+ /* Index into MAIR registers for cache attributes */
147
+ uint8_t attrindx = extract32(attrs, 0, 3);
148
+ uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
149
+ assert(attrindx <= 7);
150
+ cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
151
+ }
152
+ cacheattrs->shareability = extract32(attrs, 6, 2);
153
+ }
154
+
155
*phys_ptr = descaddr;
156
*page_size_ptr = page_size;
157
return false;
158
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
159
return false;
160
}
31
}
161
32
162
+/* Combine either inner or outer cacheability attributes for normal
33
-static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
163
+ * memory, according to table D4-42 and pseudocode procedure
34
+static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
164
+ * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
165
+ *
166
+ * NB: only stage 1 includes allocation hints (RW bits), leading to
167
+ * some asymmetry.
168
+ */
169
+static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
170
+{
171
+ if (s1 == 4 || s2 == 4) {
172
+ /* non-cacheable has precedence */
173
+ return 4;
174
+ } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
175
+ /* stage 1 write-through takes precedence */
176
+ return s1;
177
+ } else if (extract32(s2, 2, 2) == 2) {
178
+ /* stage 2 write-through takes precedence, but the allocation hint
179
+ * is still taken from stage 1
180
+ */
181
+ return (2 << 2) | extract32(s1, 0, 2);
182
+ } else { /* write-back */
183
+ return s1;
184
+ }
185
+}
186
+
187
+/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
188
+ * and CombineS1S2Desc()
189
+ *
190
+ * @s1: Attributes from stage 1 walk
191
+ * @s2: Attributes from stage 2 walk
192
+ */
193
+static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
194
+{
195
+ uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
196
+ uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
197
+ ARMCacheAttrs ret;
198
+
199
+ /* Combine shareability attributes (table D4-43) */
200
+ if (s1.shareability == 2 || s2.shareability == 2) {
201
+ /* if either are outer-shareable, the result is outer-shareable */
202
+ ret.shareability = 2;
203
+ } else if (s1.shareability == 3 || s2.shareability == 3) {
204
+ /* if either are inner-shareable, the result is inner-shareable */
205
+ ret.shareability = 3;
206
+ } else {
207
+ /* both non-shareable */
208
+ ret.shareability = 0;
209
+ }
210
+
211
+ /* Combine memory type and cacheability attributes */
212
+ if (s1hi == 0 || s2hi == 0) {
213
+ /* Device has precedence over normal */
214
+ if (s1lo == 0 || s2lo == 0) {
215
+ /* nGnRnE has precedence over anything */
216
+ ret.attrs = 0;
217
+ } else if (s1lo == 4 || s2lo == 4) {
218
+ /* non-Reordering has precedence over Reordering */
219
+ ret.attrs = 4; /* nGnRE */
220
+ } else if (s1lo == 8 || s2lo == 8) {
221
+ /* non-Gathering has precedence over Gathering */
222
+ ret.attrs = 8; /* nGRE */
223
+ } else {
224
+ ret.attrs = 0xc; /* GRE */
225
+ }
226
+
227
+ /* Any location for which the resultant memory type is any
228
+ * type of Device memory is always treated as Outer Shareable.
229
+ */
230
+ ret.shareability = 2;
231
+ } else { /* Normal memory */
232
+ /* Outer/inner cacheability combine independently */
233
+ ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
234
+ | combine_cacheattr_nibble(s1lo, s2lo);
235
+
236
+ if (ret.attrs == 0x44) {
237
+ /* Any location for which the resultant memory type is Normal
238
+ * Inner Non-cacheable, Outer Non-cacheable is always treated
239
+ * as Outer Shareable.
240
+ */
241
+ ret.shareability = 2;
242
+ }
243
+ }
244
+
245
+ return ret;
246
+}
247
+
248
+
249
/* get_phys_addr - get the physical address for this virtual address
250
*
251
* Find the physical address corresponding to the given virtual address,
252
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
253
* @prot: set to the permissions for the page containing phys_ptr
254
* @page_size: set to the size of the page containing phys_ptr
255
* @fsr: set to the DFSR/IFSR value on failure
256
+ * @fi: set to fault info if the translation fails
257
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
258
*/
259
static bool get_phys_addr(CPUARMState *env, target_ulong address,
260
MMUAccessType access_type, ARMMMUIdx mmu_idx,
261
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
262
target_ulong *page_size, uint32_t *fsr,
263
- ARMMMUFaultInfo *fi)
264
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
265
{
35
{
266
if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
36
uint32_t offset;
267
/* Call ourselves recursively to do the stage 1 and then stage 2
37
TCGv_i32 addr;
268
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
38
@@ -XXX,XX +XXX,XX @@ static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
269
hwaddr ipa;
39
tcg_temp_free_i64(tmp);
270
int s2_prot;
40
}
271
int ret;
41
272
+ ARMCacheAttrs cacheattrs2 = {};
42
-static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_sp *a)
273
43
+static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
274
ret = get_phys_addr(env, address, access_type,
44
{
275
stage_1_mmu_idx(mmu_idx), &ipa, attrs,
45
return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
276
- prot, page_size, fsr, fi);
46
}
277
+ prot, page_size, fsr, fi, cacheattrs);
47
@@ -XXX,XX +XXX,XX @@ static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
278
48
tcg_temp_free_i64(tmp);
279
/* If S1 fails or S2 is disabled, return early. */
49
}
280
if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
50
281
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
51
-static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_sp *a)
282
/* S1 is done. Now do S2 translation. */
52
+static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
283
ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
53
{
284
phys_ptr, attrs, &s2_prot,
54
return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
285
- page_size, fsr, fi);
55
}
286
+ page_size, fsr, fi,
56
@@ -XXX,XX +XXX,XX @@ static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
287
+ cacheattrs != NULL ? &cacheattrs2 : NULL);
57
tcg_temp_free_i64(tmp);
288
fi->s2addr = ipa;
58
}
289
/* Combine the S1 and S2 perms. */
59
290
*prot &= s2_prot;
60
-static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_sp *a)
291
+
61
+static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
292
+ /* Combine the S1 and S2 cache attributes, if needed */
62
{
293
+ if (!ret && cacheattrs != NULL) {
63
return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
294
+ *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
64
}
295
+ }
65
@@ -XXX,XX +XXX,XX @@ static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
296
+
66
tcg_temp_free_i64(tmp);
297
return ret;
67
}
298
} else {
68
299
/*
69
-static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_sp *a)
300
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
70
+static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
301
71
{
302
if (regime_using_lpae_format(env, mmu_idx)) {
72
return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
303
return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
73
}
304
- attrs, prot, page_size, fsr, fi);
74
@@ -XXX,XX +XXX,XX @@ static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
305
+ attrs, prot, page_size, fsr, fi, cacheattrs);
75
return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
306
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
76
}
307
return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
77
308
attrs, prot, page_size, fsr, fi);
78
-static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_sp *a)
309
@@ -XXX,XX +XXX,XX @@ bool arm_tlb_fill(CPUState *cs, vaddr address,
79
+static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
310
80
{
311
ret = get_phys_addr(env, address, access_type,
81
return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
312
core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
82
}
313
- &attrs, &prot, &page_size, fsr, fi);
83
@@ -XXX,XX +XXX,XX @@ static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
314
+ &attrs, &prot, &page_size, fsr, fi, NULL);
84
gen_helper_vfp_negd(vd, vd);
315
if (!ret) {
85
}
316
/* Map a single [sub]page. */
86
317
phys_addr &= TARGET_PAGE_MASK;
87
-static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_sp *a)
318
@@ -XXX,XX +XXX,XX @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
88
+static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
319
*attrs = (MemTxAttrs) {};
89
{
320
90
return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
321
ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
91
}
322
- attrs, &prot, &page_size, &fsr, &fi);
92
@@ -XXX,XX +XXX,XX @@ static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
323
+ attrs, &prot, &page_size, &fsr, &fi, NULL);
93
return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
324
94
}
325
if (ret) {
95
326
return -1;
96
-static bool trans_VADD_dp(DisasContext *s, arg_VADD_sp *a)
97
+static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
98
{
99
return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
100
}
101
@@ -XXX,XX +XXX,XX @@ static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
102
return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
103
}
104
105
-static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_sp *a)
106
+static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
107
{
108
return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
109
}
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
111
return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
112
}
113
114
-static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_sp *a)
115
+static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
116
{
117
return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
118
}
119
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
120
return true;
121
}
122
123
-static bool trans_VFM_dp(DisasContext *s, arg_VFM_sp *a)
124
+static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
125
{
126
/*
127
* VFNMA : fd = muladd(-fd, fn, fm)
128
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
129
return true;
130
}
131
132
-static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_sp *a)
133
+static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
134
{
135
TCGv_ptr fpst;
136
TCGv_i64 tmp;
137
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
138
return true;
139
}
140
141
-static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_sp *a)
142
+static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
143
{
144
TCGv_ptr fpst;
145
TCGv_i64 tmp;
327
--
146
--
328
2.7.4
147
2.20.1
329
148
330
149
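As a worked illustration of the combining rules in the PAR cache-attribute patch above (worked by hand from the code in that patch, so treat it as an example rather than an architectural test vector): suppose stage 1 maps a page as Normal, Inner/Outer Write-Back RW-allocate (MAIR attr 0xff) while stage 2 maps it Non-cacheable (attr 0x44 after convert_stage2_attrs()). Neither attribute is Device memory, so the inner and outer nibbles combine independently; combine_cacheattr_nibble() returns 4 for each half because Non-cacheable takes precedence, giving combined attrs of 0x44, and combine_cacheattrs() then forces the shareability to 2 (Outer Shareable). Those are the values that do_ats_write() ORs into the ATTR and SH fields of PAR.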
1
1
The architecture permits FPUs which have only single-precision
2
support, not double-precision; Cortex-M4 and Cortex-M33 are
3
both like that. Add the necessary checks on the MVFR0 FPDP
4
field so that we UNDEF any double-precision instructions on
5
CPUs like this.
6
7
Note that even if FPDP==0, insns like VMOV-to/from-gpreg,
8
VLDM/VSTM and VLDR/VSTR, which take double-precision registers,
9
still exist.
10
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20190614104457.24703-3-peter.maydell@linaro.org
14
---
15
target/arm/cpu.h | 6 +++
16
target/arm/translate-vfp.inc.c | 84 ++++++++++++++++++++++++++++++++++
17
2 files changed, 90 insertions(+)
18
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
23
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
24
return FIELD_EX64(id->mvfr0, MVFR0, FPSHVEC) > 0;
25
}
26
27
+static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id)
28
+{
29
+ /* Return true if CPU supports double precision floating point */
30
+ return FIELD_EX64(id->mvfr0, MVFR0, FPDP) > 0;
31
+}
32
+
33
/*
34
* We always set the FP and SIMD FP16 fields to indicate identical
35
* levels of support (assuming SIMD is implemented at all), so
36
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/translate-vfp.inc.c
39
+++ b/target/arm/translate-vfp.inc.c
40
@@ -XXX,XX +XXX,XX @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
41
((a->vm | a->vn | a->vd) & 0x10)) {
42
return false;
43
}
44
+
45
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
46
+ return false;
47
+ }
48
+
49
rd = a->vd;
50
rn = a->vn;
51
rm = a->vm;
52
@@ -XXX,XX +XXX,XX @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
53
((a->vm | a->vn | a->vd) & 0x10)) {
54
return false;
55
}
56
+
57
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
58
+ return false;
59
+ }
60
+
61
rd = a->vd;
62
rn = a->vn;
63
rm = a->vm;
64
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
65
((a->vm | a->vd) & 0x10)) {
66
return false;
67
}
68
+
69
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
70
+ return false;
71
+ }
72
+
73
rd = a->vd;
74
rm = a->vm;
75
76
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
77
if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
78
return false;
79
}
80
+
81
+ if (dp && !dc_isar_feature(aa32_fpdp, s)) {
82
+ return false;
83
+ }
84
+
85
rd = a->vd;
86
rm = a->vm;
87
88
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
89
return false;
90
}
91
92
+ if (!dc_isar_feature(aa32_fpdp, s)) {
93
+ return false;
94
+ }
95
+
96
if (!dc_isar_feature(aa32_fpshvec, s) &&
97
(veclen != 0 || s->vec_stride != 0)) {
98
return false;
99
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
100
return false;
101
}
102
103
+ if (!dc_isar_feature(aa32_fpdp, s)) {
104
+ return false;
105
+ }
106
+
107
if (!dc_isar_feature(aa32_fpshvec, s) &&
108
(veclen != 0 || s->vec_stride != 0)) {
109
return false;
110
@@ -XXX,XX +XXX,XX @@ static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
111
return false;
112
}
113
114
+ if (!dc_isar_feature(aa32_fpdp, s)) {
115
+ return false;
116
+ }
117
+
118
if (!vfp_access_check(s)) {
119
return true;
120
}
121
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
122
return false;
123
}
124
125
+ if (!dc_isar_feature(aa32_fpdp, s)) {
126
+ return false;
127
+ }
128
+
129
if (!dc_isar_feature(aa32_fpshvec, s) &&
130
(veclen != 0 || s->vec_stride != 0)) {
131
return false;
132
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
133
return false;
134
}
135
136
+ if (!dc_isar_feature(aa32_fpdp, s)) {
137
+ return false;
138
+ }
139
+
140
if (!vfp_access_check(s)) {
141
return true;
142
}
143
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
144
return false;
145
}
146
147
+ if (!dc_isar_feature(aa32_fpdp, s)) {
148
+ return false;
149
+ }
150
+
151
if (!vfp_access_check(s)) {
152
return true;
153
}
154
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
155
return false;
156
}
157
158
+ if (!dc_isar_feature(aa32_fpdp, s)) {
159
+ return false;
160
+ }
161
+
162
if (!vfp_access_check(s)) {
163
return true;
164
}
165
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
166
return false;
167
}
168
169
+ if (!dc_isar_feature(aa32_fpdp, s)) {
170
+ return false;
171
+ }
172
+
173
if (!vfp_access_check(s)) {
174
return true;
175
}
176
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
177
return false;
178
}
179
180
+ if (!dc_isar_feature(aa32_fpdp, s)) {
181
+ return false;
182
+ }
183
+
184
if (!vfp_access_check(s)) {
185
return true;
186
}
187
@@ -XXX,XX +XXX,XX @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
188
return false;
189
}
190
191
+ if (!dc_isar_feature(aa32_fpdp, s)) {
192
+ return false;
193
+ }
194
+
195
if (!vfp_access_check(s)) {
196
return true;
197
}
198
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
199
return false;
200
}
201
202
+ if (!dc_isar_feature(aa32_fpdp, s)) {
203
+ return false;
204
+ }
205
+
206
if (!vfp_access_check(s)) {
207
return true;
208
}
209
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
210
return false;
211
}
212
213
+ if (!dc_isar_feature(aa32_fpdp, s)) {
214
+ return false;
215
+ }
216
+
217
if (!vfp_access_check(s)) {
218
return true;
219
}
220
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
221
return false;
222
}
223
224
+ if (!dc_isar_feature(aa32_fpdp, s)) {
225
+ return false;
226
+ }
227
+
228
if (!vfp_access_check(s)) {
229
return true;
230
}
231
@@ -XXX,XX +XXX,XX @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
232
return false;
233
}
234
235
+ if (!dc_isar_feature(aa32_fpdp, s)) {
236
+ return false;
237
+ }
238
+
239
if (!vfp_access_check(s)) {
240
return true;
241
}
242
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
243
return false;
244
}
245
246
+ if (!dc_isar_feature(aa32_fpdp, s)) {
247
+ return false;
248
+ }
249
+
250
if (!vfp_access_check(s)) {
251
return true;
252
}
253
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
254
return false;
255
}
256
257
+ if (!dc_isar_feature(aa32_fpdp, s)) {
258
+ return false;
259
+ }
260
+
261
if (!vfp_access_check(s)) {
262
return true;
263
}
264
--
265
2.20.1
266
267
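The new check in the double-precision (FPDP) patch above slots into every affected trans_ function in the same way; the following is a condensed sketch of that pattern (the other per-insn UNDEF checks, which vary from insn to insn, are elided here), not additional code:

    static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
    {
        /* ... other per-insn UNDEF checks (D32 register bank, etc.) ... */

        if (!dc_isar_feature(aa32_fpdp, s)) {
            return false;   /* single-precision-only FPU: the insn UNDEFs */
        }

        if (!vfp_access_check(s)) {
            return true;    /* FP access trapped; the exception is already generated */
        }

        /* ... emit the actual operation ... */
        return true;
    }

Returning false from a trans_ function tells the decodetree-generated decoder that the pattern is not handled, which is what makes the instruction UNDEF on CPUs without double-precision support.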