1
Arm pullreq for the 2.12 codefreeze...
1
arm queue: big stuff here is my MVE codegen optimisation,
2
and Alex's Apple Silicon hvf support.
2
3
3
thanks
4
-- PMM
4
-- PMM
5
5
6
The following changes since commit b39b61e410022f96ceb53d4381d25cba5126ac44:
6
The following changes since commit 7adb961995a3744f51396502b33ad04a56a317c3:
7
7
8
memory: fix flatview_access_valid RCU read lock/unlock imbalance (2018-03-09 15:55:20 +0000)
8
Merge remote-tracking branch 'remotes/dgilbert-gitlab/tags/pull-virtiofs-20210916' into staging (2021-09-19 18:53:29 +0100)
9
9
10
are available in the Git repository at:
10
are available in the Git repository at:
11
11
12
git://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20180309
12
https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210920
13
13
14
for you to fetch changes up to 076a0fc32a73a9b960e0f73f04a531bc1bd94308:
14
for you to fetch changes up to 1dc5a60bfe406bc1122d68cbdefda38d23134b27:
15
15
16
MAINTAINERS: Add entries for SD (SDHCI, SDBus, SDCard) (2018-03-09 17:09:45 +0000)
16
target/arm: Optimize MVE 1op-immediate insns (2021-09-20 14:18:01 +0100)
17
17
18
----------------------------------------------------------------
18
----------------------------------------------------------------
19
target-arm queue:
19
target-arm queue:
20
* i.MX: Add i.MX7 SOC implementation and i.MX7 Sabre board
20
* Optimize codegen for MVE when predication not active
21
* Report the correct core count in A53 L2CTLR on the ZynqMP board
21
* hvf: Add Apple Silicon support
22
* linux-user: preliminary SVE support work (signal handling)
22
* hw/intc: Set GIC maintenance interrupt level to only 0 or 1
23
* hw/arm/boot: fix memory leak in case of error loading ELF file
23
* Fix mishandling of MVE FPSCR.LTPSIZE reset for usermode emulator
24
* hw/arm/boot: avoid reading off end of buffer if passed very
24
* elf2dmp: Fix coverity nits
25
small image file
26
* hw/arm: Use more CONFIG switches for the object files
27
* target/arm: Add "-cpu max" support
28
* hw/arm/virt: Support -machine gic-version=max
29
* hw/sd: improve debug tracing
30
* hw/sd: sdcard: Add the Tuning Command (CMD 19)
31
* MAINTAINERS: add Philippe as odd-fixes maintainer for SD
32
25
33
----------------------------------------------------------------
26
----------------------------------------------------------------
34
Alistair Francis (2):
27
Alexander Graf (7):
35
target/arm: Add a core count property
28
arm: Move PMC register definitions to internals.h
36
hw/arm: Set the core count for Xilinx's ZynqMP
29
hvf: Add execute to dirty log permission bitmap
30
hvf: Introduce hvf_arch_init() callback
31
hvf: Add Apple Silicon support
32
hvf: arm: Implement PSCI handling
33
arm: Add Hypervisor.framework build target
34
hvf: arm: Add rudimentary PMC support
37
35
38
Andrey Smirnov (3):
36
Peter Collingbourne (1):
39
pci: Add support for Designware IP block
37
arm/hvf: Add a WFI handler
40
i.MX: Add i.MX7 SOC implementation.
41
Implement support for i.MX7 Sabre board
42
38
43
Marc-André Lureau (2):
39
Peter Maydell (18):
44
arm: fix load ELF error leak
40
elf2dmp: Check curl_easy_setopt() return value
45
arm: avoid heap-buffer-overflow in load_aarch64_image
41
elf2dmp: Fail cleanly if PDB file specifies zero block_size
42
target/arm: Don't skip M-profile reset entirely in user mode
43
target/arm: Always clear exclusive monitor on reset
44
target/arm: Consolidate ifdef blocks in reset
45
hvf: arm: Implement -cpu host
46
target/arm: Avoid goto_tb if we're trying to exit to the main loop
47
target/arm: Enforce that FPDSCR.LTPSIZE is 4 on inbound migration
48
target/arm: Add TB flag for "MVE insns not predicated"
49
target/arm: Optimize MVE logic ops
50
target/arm: Optimize MVE arithmetic ops
51
target/arm: Optimize MVE VNEG, VABS
52
target/arm: Optimize MVE VDUP
53
target/arm: Optimize MVE VMVN
54
target/arm: Optimize MVE VSHL, VSHR immediate forms
55
target/arm: Optimize MVE VSHLL and VMOVL
56
target/arm: Optimize MVE VSLI and VSRI
57
target/arm: Optimize MVE 1op-immediate insns
46
58
47
Peter Maydell (6):
59
Shashi Mallela (1):
48
target/arm: Query host CPU features on-demand at instance init
60
hw/intc: Set GIC maintenance interrupt level to only 0 or 1
49
target/arm: Move definition of 'host' cpu type into cpu.c
50
target/arm: Add "-cpu max" support
51
target/arm: Make 'any' CPU just an alias for 'max'
52
hw/arm/virt: Add "max" to the list of CPU types "virt" supports
53
hw/arm/virt: Support -machine gic-version=max
54
61
55
Philippe Mathieu-Daudé (6):
62
meson.build | 8 +
56
sdcard: Do not trace CMD55, except when we already expect an ACMD
63
include/sysemu/hvf_int.h | 12 +-
57
sdcard: Display command name when tracing CMD/ACMD
64
target/arm/cpu.h | 6 +-
58
sdcard: Display which protocol is used when tracing (SD or SPI)
65
target/arm/hvf_arm.h | 18 +
59
sdcard: Add the Tuning Command (CMD19)
66
target/arm/internals.h | 44 ++
60
sdhci: Fix a typo in comment
67
target/arm/kvm_arm.h | 2 -
61
MAINTAINERS: Add entries for SD (SDHCI, SDBus, SDCard)
68
target/arm/translate.h | 2 +
69
accel/hvf/hvf-accel-ops.c | 21 +-
70
contrib/elf2dmp/download.c | 22 +-
71
contrib/elf2dmp/pdb.c | 4 +
72
hw/intc/arm_gicv3_cpuif.c | 5 +-
73
target/arm/cpu.c | 56 +-
74
target/arm/helper.c | 77 ++-
75
target/arm/hvf/hvf.c | 1278 +++++++++++++++++++++++++++++++++++++++++
76
target/arm/machine.c | 13 +
77
target/arm/translate-m-nocp.c | 8 +-
78
target/arm/translate-mve.c | 310 +++++++---
79
target/arm/translate-vfp.c | 33 +-
80
target/arm/translate.c | 42 +-
81
target/i386/hvf/hvf.c | 10 +
82
MAINTAINERS | 5 +
83
target/arm/hvf/meson.build | 3 +
84
target/arm/hvf/trace-events | 11 +
85
target/arm/meson.build | 2 +
86
24 files changed, 1824 insertions(+), 168 deletions(-)
87
create mode 100644 target/arm/hvf_arm.h
88
create mode 100644 target/arm/hvf/hvf.c
89
create mode 100644 target/arm/hvf/meson.build
90
create mode 100644 target/arm/hvf/trace-events
62
91
63
Richard Henderson (5):
64
linux-user: Implement aarch64 PR_SVE_SET/GET_VL
65
aarch64-linux-user: Split out helpers for guest signal handling
66
aarch64-linux-user: Remove struct target_aux_context
67
aarch64-linux-user: Add support for EXTRA signal frame records
68
aarch64-linux-user: Add support for SVE signal frame records
69
70
Thomas Huth (1):
71
hw/arm: Use more CONFIG switches for the object files
72
73
hw/arm/Makefile.objs | 31 +-
74
hw/pci-host/Makefile.objs | 2 +
75
hw/sd/Makefile.objs | 2 +-
76
hw/sd/sdmmc-internal.h | 24 ++
77
include/hw/arm/fsl-imx7.h | 222 +++++++++++
78
include/hw/pci-host/designware.h | 102 +++++
79
include/hw/pci/pci_ids.h | 2 +
80
linux-user/aarch64/target_syscall.h | 3 +
81
target/arm/cpu-qom.h | 2 +
82
target/arm/cpu.h | 11 +
83
target/arm/kvm_arm.h | 35 +-
84
hw/arm/boot.c | 4 +-
85
hw/arm/fsl-imx7.c | 582 ++++++++++++++++++++++++++++
86
hw/arm/mcimx7d-sabre.c | 90 +++++
87
hw/arm/virt.c | 30 +-
88
hw/arm/xlnx-zynqmp.c | 2 +
89
hw/pci-host/designware.c | 754 ++++++++++++++++++++++++++++++++++++
90
hw/sd/sd.c | 55 ++-
91
hw/sd/sdhci.c | 4 +-
92
hw/sd/sdmmc-internal.c | 72 ++++
93
linux-user/signal.c | 415 ++++++++++++++++----
94
linux-user/syscall.c | 27 ++
95
target/arm/cpu.c | 103 ++++-
96
target/arm/cpu64.c | 113 ++++--
97
target/arm/kvm.c | 53 +--
98
target/arm/kvm32.c | 8 +-
99
target/arm/kvm64.c | 8 +-
100
MAINTAINERS | 8 +
101
default-configs/arm-softmmu.mak | 9 +
102
hw/sd/trace-events | 8 +-
103
30 files changed, 2583 insertions(+), 198 deletions(-)
104
create mode 100644 include/hw/arm/fsl-imx7.h
105
create mode 100644 include/hw/pci-host/designware.h
106
create mode 100644 hw/arm/fsl-imx7.c
107
create mode 100644 hw/arm/mcimx7d-sabre.c
108
create mode 100644 hw/pci-host/designware.c
109
create mode 100644 hw/sd/sdmmc-internal.c
110
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
Coverity points out that we aren't checking the return value
2
from curl_easy_setopt().
2
3
3
After spending months studying all the different SD Specifications
4
Fixes: Coverity CID 1458895
4
from the SD Association, voluntarily add myself as maintainer
5
Inspired-by: Peter Maydell <peter.maydell@linaro.org>
5
for the SD code.
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
7
Reviewed-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
7
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Tested-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Message-id: 20210910170656.366592-2-philmd@redhat.com
9
Message-id: 20180309153654.13518-9-f4bug@amsat.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
11
---
12
MAINTAINERS | 8 ++++++++
12
contrib/elf2dmp/download.c | 22 ++++++++++------------
13
1 file changed, 8 insertions(+)
13
1 file changed, 10 insertions(+), 12 deletions(-)
14
14
15
diff --git a/MAINTAINERS b/MAINTAINERS
15
diff --git a/contrib/elf2dmp/download.c b/contrib/elf2dmp/download.c
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/MAINTAINERS
17
--- a/contrib/elf2dmp/download.c
18
+++ b/MAINTAINERS
18
+++ b/contrib/elf2dmp/download.c
19
@@ -XXX,XX +XXX,XX @@ M: Peter Crosthwaite <crosthwaite.peter@gmail.com>
19
@@ -XXX,XX +XXX,XX @@ int download_url(const char *name, const char *url)
20
S: Maintained
20
goto out_curl;
21
F: hw/ssi/xilinx_*
21
}
22
22
23
+SD (Secure Card)
23
- curl_easy_setopt(curl, CURLOPT_URL, url);
24
+M: Philippe Mathieu-Daudé <f4bug@amsat.org>
24
- curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL);
25
+S: Odd Fixes
25
- curl_easy_setopt(curl, CURLOPT_WRITEDATA, file);
26
+F: include/hw/sd/sd*
26
- curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
27
+F: hw/sd/core.c
27
- curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0);
28
+F: hw/sd/sd*
28
-
29
+F: tests/sd*
29
- if (curl_easy_perform(curl) != CURLE_OK) {
30
+
30
- err = 1;
31
USB
31
- fclose(file);
32
M: Gerd Hoffmann <kraxel@redhat.com>
32
+ if (curl_easy_setopt(curl, CURLOPT_URL, url) != CURLE_OK
33
S: Maintained
33
+ || curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL) != CURLE_OK
34
+ || curl_easy_setopt(curl, CURLOPT_WRITEDATA, file) != CURLE_OK
35
+ || curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1) != CURLE_OK
36
+ || curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK
37
+ || curl_easy_perform(curl) != CURLE_OK) {
38
unlink(name);
39
- goto out_curl;
40
+ fclose(file);
41
+ err = 1;
42
+ } else {
43
+ err = fclose(file);
44
}
45
46
- err = fclose(file);
47
-
48
out_curl:
49
curl_easy_cleanup(curl);
50
34
--
51
--
35
2.16.2
52
2.20.1
36
53
37
54
diff view generated by jsdifflib
New patch
1
Coverity points out that if the PDB file we're trying to read
2
has a header specifying a block_size of zero then we will
3
end up trying to divide by zero in pdb_ds_read_file().
4
Check for this and fail cleanly instead.
1
5
6
Fixes: Coverity CID 1458869
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
9
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Tested-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
11
Message-id: 20210910170656.366592-3-philmd@redhat.com
12
Message-Id: <20210901143910.17112-3-peter.maydell@linaro.org>
13
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
14
---
15
contrib/elf2dmp/pdb.c | 4 ++++
16
1 file changed, 4 insertions(+)
17
18
diff --git a/contrib/elf2dmp/pdb.c b/contrib/elf2dmp/pdb.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/contrib/elf2dmp/pdb.c
21
+++ b/contrib/elf2dmp/pdb.c
22
@@ -XXX,XX +XXX,XX @@ out_symbols:
23
24
static int pdb_reader_ds_init(struct pdb_reader *r, PDB_DS_HEADER *hdr)
25
{
26
+ if (hdr->block_size == 0) {
27
+ return 1;
28
+ }
29
+
30
memset(r->file_used, 0, sizeof(r->file_used));
31
r->ds.header = hdr;
32
r->ds.toc = pdb_ds_read(hdr, (uint32_t *)((uint8_t *)hdr +
33
--
34
2.20.1
35
36
diff view generated by jsdifflib
1
Now we have a working '-cpu max', the linux-user-only
1
Currently all of the M-profile specific code in arm_cpu_reset() is
2
'any' CPU is pretty much the same thing, so implement it
2
inside a !defined(CONFIG_USER_ONLY) ifdef block. This is
3
that way.
3
unintentional: it happened because originally the only
4
M-profile-specific handling was the setup of the initial SP and PC
5
from the vector table, which is system-emulation only. But then we
6
added a lot of other M-profile setup to the same "if (ARM_FEATURE_M)"
7
code block without noticing that it was all inside a not-user-mode
8
ifdef. This has generally been harmless, but with the addition of
9
v8.1M low-overhead-loop support we ran into a problem: the reset of
10
FPSCR.LTPSIZE to 4 was only being done for system emulation mode, so
11
if a user-mode guest tried to execute the LE instruction it would
12
incorrectly take a UsageFault.
4
13
5
For the moment we don't add any of the extra feature bits
14
Adjust the ifdefs so only the really system-emulation specific parts
6
to the system-emulation "max", because we don't set the
15
are covered. Because this means we now run some reset code that sets
7
ID register bits we would need to to advertise those
16
up initial values in the FPCCR and similar FPU related registers,
8
features as present.
17
explicitly set up the registers controlling FPU context handling in
18
user-emulation mode so that the FPU works by design and not by
19
chance.
9
20
21
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/613
22
Cc: qemu-stable@nongnu.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
23
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Message-id: 20180308130626.12393-5-peter.maydell@linaro.org
24
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
25
Message-id: 20210914120725.24992-2-peter.maydell@linaro.org
13
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
14
---
26
---
15
target/arm/cpu.c | 52 +++++++++++++++++++++++++----------------------
27
target/arm/cpu.c | 19 +++++++++++++++++++
16
target/arm/cpu64.c | 59 ++++++++++++++++++++++++++----------------------------
28
1 file changed, 19 insertions(+)
17
2 files changed, 56 insertions(+), 55 deletions(-)
18
29
19
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
30
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
20
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.c
32
--- a/target/arm/cpu.c
22
+++ b/target/arm/cpu.c
33
+++ b/target/arm/cpu.c
23
@@ -XXX,XX +XXX,XX @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
34
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
24
ObjectClass *oc;
35
env->uncached_cpsr = ARM_CPU_MODE_SVC;
25
char *typename;
36
}
26
char **cpuname;
37
env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
27
+ const char *cpunamestr;
28
29
cpuname = g_strsplit(cpu_model, ",", 1);
30
- typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpuname[0]);
31
+ cpunamestr = cpuname[0];
32
+#ifdef CONFIG_USER_ONLY
33
+ /* For backwards compatibility usermode emulation allows "-cpu any",
34
+ * which has the same semantics as "-cpu max".
35
+ */
36
+ if (!strcmp(cpunamestr, "any")) {
37
+ cpunamestr = "max";
38
+ }
39
+#endif
38
+#endif
40
+ typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
39
41
oc = object_class_by_name(typename);
40
if (arm_feature(env, ARM_FEATURE_M)) {
42
g_strfreev(cpuname);
41
+#ifndef CONFIG_USER_ONLY
43
g_free(typename);
42
uint32_t initial_msp; /* Loaded from 0x0 */
44
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
43
uint32_t initial_pc; /* Loaded from 0x4 */
45
kvm_arm_set_cpu_features_from_host(cpu);
44
uint8_t *rom;
46
} else {
45
uint32_t vecbase;
47
cortex_a15_initfn(obj);
46
+#endif
48
- /* In future we might add feature bits here even if the
47
49
- * real-world A15 doesn't implement them.
48
if (cpu_isar_feature(aa32_lob, cpu)) {
50
- */
49
/*
51
- }
50
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
52
-}
51
env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
53
-#endif
52
R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
54
-
53
}
55
#ifdef CONFIG_USER_ONLY
54
+
56
-static void arm_any_initfn(Object *obj)
55
+#ifndef CONFIG_USER_ONLY
57
-{
56
/* Unlike A/R profile, M profile defines the reset LR value */
58
- ARMCPU *cpu = ARM_CPU(obj);
57
env->regs[14] = 0xffffffff;
59
- set_feature(&cpu->env, ARM_FEATURE_V8);
58
60
- set_feature(&cpu->env, ARM_FEATURE_VFP4);
59
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
61
- set_feature(&cpu->env, ARM_FEATURE_NEON);
60
env->regs[13] = initial_msp & 0xFFFFFFFC;
62
- set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
61
env->regs[15] = initial_pc & ~1;
63
- set_feature(&cpu->env, ARM_FEATURE_V8_AES);
62
env->thumb = initial_pc & 1;
64
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
63
+#else
65
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
64
+ /*
66
- set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
65
+ * For user mode we run non-secure and with access to the FPU.
67
- set_feature(&cpu->env, ARM_FEATURE_CRC);
66
+ * The FPU context is active (ie does not need further setup)
68
- set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
67
+ * and is owned by non-secure.
69
- set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
70
- cpu->midr = 0xffffffff;
71
+ /* We don't set these in system emulation mode for the moment,
72
+ * since we don't correctly set the ID registers to advertise them,
73
+ */
68
+ */
74
+ set_feature(&cpu->env, ARM_FEATURE_V8);
69
+ env->v7m.secure = false;
75
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
70
+ env->v7m.nsacr = 0xcff;
76
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
71
+ env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
77
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
72
+ env->v7m.fpccr[M_REG_S] &=
78
+ set_feature(&cpu->env, ARM_FEATURE_V8_AES);
73
+ ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
79
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
74
+ env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
80
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
81
+ set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
82
+ set_feature(&cpu->env, ARM_FEATURE_CRC);
83
+ set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
84
+ set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
85
+#endif
86
+ }
87
}
88
#endif
89
90
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo arm_cpus[] = {
91
{ .name = "max", .initfn = arm_max_initfn },
92
#endif
93
#ifdef CONFIG_USER_ONLY
94
- { .name = "any", .initfn = arm_any_initfn },
95
+ { .name = "any", .initfn = arm_max_initfn },
96
#endif
97
#endif
98
{ .name = NULL }
99
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
100
index XXXXXXX..XXXXXXX 100644
101
--- a/target/arm/cpu64.c
102
+++ b/target/arm/cpu64.c
103
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
104
kvm_arm_set_cpu_features_from_host(cpu);
105
} else {
106
aarch64_a57_initfn(obj);
107
- /* In future we might add feature bits here even if the
108
- * real-world A57 doesn't implement them.
109
+#ifdef CONFIG_USER_ONLY
110
+ /* We don't set these in system emulation mode for the moment,
111
+ * since we don't correctly set the ID registers to advertise them,
112
+ * and in some cases they're only available in AArch64 and not AArch32,
113
+ * whereas the architecture requires them to be present in both if
114
+ * present in either.
115
*/
116
+ set_feature(&cpu->env, ARM_FEATURE_V8);
117
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
118
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
119
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
120
+ set_feature(&cpu->env, ARM_FEATURE_V8_AES);
121
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
122
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
123
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA512);
124
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA3);
125
+ set_feature(&cpu->env, ARM_FEATURE_V8_SM3);
126
+ set_feature(&cpu->env, ARM_FEATURE_V8_SM4);
127
+ set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
128
+ set_feature(&cpu->env, ARM_FEATURE_CRC);
129
+ set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
130
+ set_feature(&cpu->env, ARM_FEATURE_V8_FP16);
131
+ set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
132
+ /* For usermode -cpu max we can use a larger and more efficient DCZ
133
+ * blocksize since we don't have to follow what the hardware does.
134
+ */
135
+ cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
136
+ cpu->dcz_blocksize = 7; /* 512 bytes */
137
+#endif
75
+#endif
138
}
76
}
139
}
77
140
78
+#ifndef CONFIG_USER_ONLY
141
-#ifdef CONFIG_USER_ONLY
79
/* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
142
-static void aarch64_any_initfn(Object *obj)
80
* executing as AArch32 then check if highvecs are enabled and
143
-{
81
* adjust the PC accordingly.
144
- ARMCPU *cpu = ARM_CPU(obj);
145
-
146
- set_feature(&cpu->env, ARM_FEATURE_V8);
147
- set_feature(&cpu->env, ARM_FEATURE_VFP4);
148
- set_feature(&cpu->env, ARM_FEATURE_NEON);
149
- set_feature(&cpu->env, ARM_FEATURE_AARCH64);
150
- set_feature(&cpu->env, ARM_FEATURE_V8_AES);
151
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
152
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
153
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA512);
154
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA3);
155
- set_feature(&cpu->env, ARM_FEATURE_V8_SM3);
156
- set_feature(&cpu->env, ARM_FEATURE_V8_SM4);
157
- set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
158
- set_feature(&cpu->env, ARM_FEATURE_CRC);
159
- set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
160
- set_feature(&cpu->env, ARM_FEATURE_V8_FP16);
161
- set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
162
- cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
163
- cpu->dcz_blocksize = 7; /* 512 bytes */
164
-}
165
-#endif
166
-
167
typedef struct ARMCPUInfo {
168
const char *name;
169
void (*initfn)(Object *obj);
170
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo aarch64_cpus[] = {
171
{ .name = "cortex-a57", .initfn = aarch64_a57_initfn },
172
{ .name = "cortex-a53", .initfn = aarch64_a53_initfn },
173
{ .name = "max", .initfn = aarch64_max_initfn },
174
-#ifdef CONFIG_USER_ONLY
175
- { .name = "any", .initfn = aarch64_any_initfn },
176
-#endif
177
{ .name = NULL }
178
};
179
180
--
82
--
181
2.16.2
83
2.20.1
182
84
183
85
diff view generated by jsdifflib
1
Add support for "-cpu max" for ARM guests. This CPU type behaves
1
There's no particular reason why the exclusive monitor should
2
like "-cpu host" when KVM is enabled, and like a system CPU with
2
be only cleared on reset in system emulation mode. It doesn't
3
the maximum possible feature set otherwise. (Note that this means
3
hurt if it isn't cleared in user mode, but we might as well
4
it won't be migratable across versions, as we will likely add
4
reduce the amount of code we have that's inside an ifdef.
5
features to it in future.)
6
5
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20180308130626.12393-4-peter.maydell@linaro.org
8
Message-id: 20210914120725.24992-3-peter.maydell@linaro.org
10
---
9
---
11
target/arm/cpu-qom.h | 2 ++
10
target/arm/cpu.c | 6 +++---
12
target/arm/cpu.c | 24 ++++++++++++++++++++++++
11
1 file changed, 3 insertions(+), 3 deletions(-)
13
target/arm/cpu64.c | 21 +++++++++++++++++++++
14
3 files changed, 47 insertions(+)
15
12
16
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu-qom.h
19
+++ b/target/arm/cpu-qom.h
20
@@ -XXX,XX +XXX,XX @@ struct arm_boot_info;
21
#define ARM_CPU_GET_CLASS(obj) \
22
OBJECT_GET_CLASS(ARMCPUClass, (obj), TYPE_ARM_CPU)
23
24
+#define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU
25
+
26
/**
27
* ARMCPUClass:
28
* @parent_realize: The parent class' realize handler.
29
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
13
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
30
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/cpu.c
15
--- a/target/arm/cpu.c
32
+++ b/target/arm/cpu.c
16
+++ b/target/arm/cpu.c
33
@@ -XXX,XX +XXX,XX @@ static void pxa270c5_initfn(Object *obj)
17
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
34
cpu->reset_sctlr = 0x00000078;
18
env->regs[15] = 0xFFFF0000;
35
}
19
}
36
20
37
+#ifndef TARGET_AARCH64
21
+ env->vfp.xregs[ARM_VFP_FPEXC] = 0;
38
+/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
39
+ * otherwise, a CPU with as many features enabled as our emulation supports.
40
+ * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c;
41
+ * this only needs to handle 32 bits.
42
+ */
43
+static void arm_max_initfn(Object *obj)
44
+{
45
+ ARMCPU *cpu = ARM_CPU(obj);
46
+
47
+ if (kvm_enabled()) {
48
+ kvm_arm_set_cpu_features_from_host(cpu);
49
+ } else {
50
+ cortex_a15_initfn(obj);
51
+ /* In future we might add feature bits here even if the
52
+ * real-world A15 doesn't implement them.
53
+ */
54
+ }
55
+}
56
+#endif
22
+#endif
57
+
23
+
58
#ifdef CONFIG_USER_ONLY
24
/* M profile requires that reset clears the exclusive monitor;
59
static void arm_any_initfn(Object *obj)
25
* A profile does not, but clearing it makes more sense than having it
60
{
26
* set with an exclusive access on address zero.
61
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo arm_cpus[] = {
27
*/
62
{ .name = "pxa270-b1", .initfn = pxa270b1_initfn },
28
arm_clear_exclusive(env);
63
{ .name = "pxa270-c0", .initfn = pxa270c0_initfn },
29
64
{ .name = "pxa270-c5", .initfn = pxa270c5_initfn },
30
- env->vfp.xregs[ARM_VFP_FPEXC] = 0;
65
+#ifndef TARGET_AARCH64
31
-#endif
66
+ { .name = "max", .initfn = arm_max_initfn },
32
-
67
+#endif
33
if (arm_feature(env, ARM_FEATURE_PMSA)) {
68
#ifdef CONFIG_USER_ONLY
34
if (cpu->pmsav7_dregion > 0) {
69
{ .name = "any", .initfn = arm_any_initfn },
35
if (arm_feature(env, ARM_FEATURE_V8)) {
70
#endif
71
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
72
index XXXXXXX..XXXXXXX 100644
73
--- a/target/arm/cpu64.c
74
+++ b/target/arm/cpu64.c
75
@@ -XXX,XX +XXX,XX @@
76
#include "hw/arm/arm.h"
77
#include "sysemu/sysemu.h"
78
#include "sysemu/kvm.h"
79
+#include "kvm_arm.h"
80
81
static inline void set_feature(CPUARMState *env, int feature)
82
{
83
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
84
define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
85
}
86
87
+/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
88
+ * otherwise, a CPU with as many features enabled as our emulation supports.
89
+ * The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
90
+ * this only needs to handle 64 bits.
91
+ */
92
+static void aarch64_max_initfn(Object *obj)
93
+{
94
+ ARMCPU *cpu = ARM_CPU(obj);
95
+
96
+ if (kvm_enabled()) {
97
+ kvm_arm_set_cpu_features_from_host(cpu);
98
+ } else {
99
+ aarch64_a57_initfn(obj);
100
+ /* In future we might add feature bits here even if the
101
+ * real-world A57 doesn't implement them.
102
+ */
103
+ }
104
+}
105
+
106
#ifdef CONFIG_USER_ONLY
107
static void aarch64_any_initfn(Object *obj)
108
{
109
@@ -XXX,XX +XXX,XX @@ typedef struct ARMCPUInfo {
110
static const ARMCPUInfo aarch64_cpus[] = {
111
{ .name = "cortex-a57", .initfn = aarch64_a57_initfn },
112
{ .name = "cortex-a53", .initfn = aarch64_a53_initfn },
113
+ { .name = "max", .initfn = aarch64_max_initfn },
114
#ifdef CONFIG_USER_ONLY
115
{ .name = "any", .initfn = aarch64_any_initfn },
116
#endif
117
--
36
--
118
2.16.2
37
2.20.1
119
38
120
39
diff view generated by jsdifflib
1
From: Alistair Francis <alistair.francis@xilinx.com>
1
Move an ifndef CONFIG_USER_ONLY code block up in arm_cpu_reset() so
2
it can be merged with another earlier one.
2
3
3
The cortex A53 TRM specifies that bits 24 and 25 of the L2CTLR register
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
specify the number of cores in the processor, not the total number of
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
cores in the system. To report this correctly on machines with multiple
6
Message-id: 20210914120725.24992-4-peter.maydell@linaro.org
6
CPU clusters (ARM's big.LITTLE or Xilinx's ZynqMP) we need to allow
7
---
7
the machine to overwrite this value. To do this let's add an optional
8
target/arm/cpu.c | 22 ++++++++++------------
8
property.
9
1 file changed, 10 insertions(+), 12 deletions(-)
9
10
10
Signed-off-by: Alistair Francis <alistair.francis@xilinx.com>
11
Message-id: ef01d95c0759e88f47f22d11b14c91512a658b4f.1520018138.git.alistair.francis@xilinx.com
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
15
target/arm/cpu.h | 5 +++++
16
target/arm/cpu.c | 6 ++++++
17
target/arm/cpu64.c | 6 ++++--
18
3 files changed, 15 insertions(+), 2 deletions(-)
19
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
23
+++ b/target/arm/cpu.h
24
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
25
/* Uniprocessor system with MP extensions */
26
bool mp_is_up;
27
28
+ /* Specify the number of cores in this CPU cluster. Used for the L2CTLR
29
+ * register.
30
+ */
31
+ int32_t core_count;
32
+
33
/* The instance init functions for implementation-specific subclasses
34
* set these fields to specify the implementation-dependent values of
35
* various constant registers and reset values of non-constant
36
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
11
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
37
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/cpu.c
13
--- a/target/arm/cpu.c
39
+++ b/target/arm/cpu.c
14
+++ b/target/arm/cpu.c
40
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
15
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
41
cs->num_ases = 1;
16
env->uncached_cpsr = ARM_CPU_MODE_SVC;
42
}
17
}
43
cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);
18
env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
44
+
19
+
45
+ /* No core_count specified, default to smp_cpus. */
20
+ /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
46
+ if (cpu->core_count == -1) {
21
+ * executing as AArch32 then check if highvecs are enabled and
47
+ cpu->core_count = smp_cpus;
22
+ * adjust the PC accordingly.
23
+ */
24
+ if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
25
+ env->regs[15] = 0xFFFF0000;
48
+ }
26
+ }
27
+
28
+ env->vfp.xregs[ARM_VFP_FPEXC] = 0;
49
#endif
29
#endif
50
30
51
qemu_init_vcpu(cs);
31
if (arm_feature(env, ARM_FEATURE_M)) {
52
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_properties[] = {
32
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
53
DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
54
mp_affinity, ARM64_AFFINITY_INVALID),
55
DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
56
+ DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
57
DEFINE_PROP_END_OF_LIST()
58
};
59
60
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/arm/cpu64.c
63
+++ b/target/arm/cpu64.c
64
@@ -XXX,XX +XXX,XX @@ static inline void unset_feature(CPUARMState *env, int feature)
65
#ifndef CONFIG_USER_ONLY
66
static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
67
{
68
- /* Number of processors is in [25:24]; otherwise we RAZ */
69
- return (smp_cpus - 1) << 24;
70
+ ARMCPU *cpu = arm_env_get_cpu(env);
71
+
72
+ /* Number of cores is in [25:24]; otherwise we RAZ */
73
+ return (cpu->core_count - 1) << 24;
74
}
75
#endif
33
#endif
76
34
}
35
36
-#ifndef CONFIG_USER_ONLY
37
- /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
38
- * executing as AArch32 then check if highvecs are enabled and
39
- * adjust the PC accordingly.
40
- */
41
- if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
42
- env->regs[15] = 0xFFFF0000;
43
- }
44
-
45
- env->vfp.xregs[ARM_VFP_FPEXC] = 0;
46
-#endif
47
-
48
/* M profile requires that reset clears the exclusive monitor;
49
* A profile does not, but clearing it makes more sense than having it
50
* set with an exclusive access on address zero.
77
--
51
--
78
2.16.2
52
2.20.1
79
53
80
54
diff view generated by jsdifflib
1
From: Marc-André Lureau <marcandre.lureau@redhat.com>
1
From: Shashi Mallela <shashi.mallela@linaro.org>
2
2
3
Spotted by ASAN:
3
During sbsa acs level 3 testing, it is seen that the GIC maintenance
4
interrupts are not triggered and the related test cases fail. This
5
is because we were incorrectly passing the value of the MISR register
6
(from maintenance_interrupt_state()) to qemu_set_irq() as the level
7
argument, whereas the device on the other end of this irq line
8
expects a 0/1 value.
4
9
5
elmarco@boraha:~/src/qemu/build (master *%)$ QTEST_QEMU_BINARY=aarch64-softmmu/qemu-system-aarch64 tests/boot-serial-test
10
Fix the logic to pass a 0/1 level indication, rather than a
6
/aarch64/boot-serial/virt: ** (process:19740): DEBUG: 18:39:30.275: foo /tmp/qtest-boot-serial-cXaS94D
11
0/not-0 value.
7
=================================================================
8
==19740==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x603000069648 at pc 0x7f1d2201cc54 bp 0x7fff331f6a40 sp 0x7fff331f61e8
9
READ of size 4 at 0x603000069648 thread T0
10
#0 0x7f1d2201cc53 (/lib64/libasan.so.4+0xafc53)
11
#1 0x55bc86685ee3 in load_aarch64_image /home/elmarco/src/qemu/hw/arm/boot.c:894
12
#2 0x55bc86687217 in arm_load_kernel_notify /home/elmarco/src/qemu/hw/arm/boot.c:1047
13
#3 0x55bc877363b5 in notifier_list_notify /home/elmarco/src/qemu/util/notify.c:40
14
#4 0x55bc869331ea in qemu_run_machine_init_done_notifiers /home/elmarco/src/qemu/vl.c:2716
15
#5 0x55bc8693bc39 in main /home/elmarco/src/qemu/vl.c:4679
16
#6 0x7f1d1652c009 in __libc_start_main (/lib64/libc.so.6+0x21009)
17
#7 0x55bc86255cc9 in _start (/home/elmarco/src/qemu/build/aarch64-softmmu/qemu-system-aarch64+0x1ae5cc9)
18
12
19
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
13
Fixes: c5fc89b36c0 ("hw/intc/arm_gicv3: Implement gicv3_cpuif_virt_update()")
14
Signed-off-by: Shashi Mallela <shashi.mallela@linaro.org>
15
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
16
Message-id: 20210915205809.59068-1-shashi.mallela@linaro.org
17
[PMM: tweaked commit message; collapsed nested if()s into one]
20
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
22
---
20
---
23
hw/arm/boot.c | 3 ++-
21
hw/intc/arm_gicv3_cpuif.c | 5 +++--
24
1 file changed, 2 insertions(+), 1 deletion(-)
22
1 file changed, 3 insertions(+), 2 deletions(-)
25
23
26
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
24
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
27
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
28
--- a/hw/arm/boot.c
26
--- a/hw/intc/arm_gicv3_cpuif.c
29
+++ b/hw/arm/boot.c
27
+++ b/hw/intc/arm_gicv3_cpuif.c
30
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
28
@@ -XXX,XX +XXX,XX @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
29
}
31
}
30
}
32
31
33
/* check the arm64 magic header value -- very old kernels may not have it */
32
- if (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) {
34
- if (memcmp(buffer + ARM64_MAGIC_OFFSET, "ARM\x64", 4) == 0) {
33
- maintlevel = maintenance_interrupt_state(cs);
35
+ if (size > ARM64_MAGIC_OFFSET + 4 &&
34
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
36
+ memcmp(buffer + ARM64_MAGIC_OFFSET, "ARM\x64", 4) == 0) {
35
+ maintenance_interrupt_state(cs) != 0) {
37
uint64_t hdrvals[2];
36
+ maintlevel = 1;
38
37
}
39
/* The arm64 Image header has text_offset and image_size fields at 8 and
38
39
trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
40
--
40
--
41
2.16.2
41
2.20.1
42
42
43
43
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Alexander Graf <agraf@csgraf.de>
2
2
3
Split out helpers from target_setup_frame and target_restore_sigframe
3
We will need PMC register definitions in accel specific code later.
4
for dealing with general registers, fpsimd registers, and the end record.
4
Move all constant definitions to common arm headers so we can reuse
5
them.
5
6
6
When we add support for sve registers, the relative positions of
7
Signed-off-by: Alexander Graf <agraf@csgraf.de>
7
these will change.
8
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210916155404.86958-2-agraf@csgraf.de
11
Message-id: 20180303143823.27055-3-richard.henderson@linaro.org
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
11
---
15
linux-user/signal.c | 120 ++++++++++++++++++++++++++++++----------------------
12
target/arm/internals.h | 44 ++++++++++++++++++++++++++++++++++++++++++
16
1 file changed, 69 insertions(+), 51 deletions(-)
13
target/arm/helper.c | 44 ------------------------------------------
14
2 files changed, 44 insertions(+), 44 deletions(-)
17
15
18
diff --git a/linux-user/signal.c b/linux-user/signal.c
16
diff --git a/target/arm/internals.h b/target/arm/internals.h
19
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
20
--- a/linux-user/signal.c
18
--- a/target/arm/internals.h
21
+++ b/linux-user/signal.c
19
+++ b/target/arm/internals.h
22
@@ -XXX,XX +XXX,XX @@ struct target_rt_sigframe {
20
@@ -XXX,XX +XXX,XX @@ enum MVEECIState {
23
uint32_t tramp[2];
21
/* All other values reserved */
24
};
22
};
25
23
26
-static int target_setup_sigframe(struct target_rt_sigframe *sf,
24
+/* Definitions for the PMU registers */
27
- CPUARMState *env, target_sigset_t *set)
25
+#define PMCRN_MASK 0xf800
28
+static void target_setup_general_frame(struct target_rt_sigframe *sf,
26
+#define PMCRN_SHIFT 11
29
+ CPUARMState *env, target_sigset_t *set)
27
+#define PMCRLC 0x40
30
{
28
+#define PMCRDP 0x20
31
int i;
29
+#define PMCRX 0x10
32
- struct target_aux_context *aux =
30
+#define PMCRD 0x8
33
- (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
31
+#define PMCRC 0x4
34
32
+#define PMCRP 0x2
35
- /* set up the stack frame for unwinding */
33
+#define PMCRE 0x1
36
- __put_user(env->xregs[29], &sf->fp);
34
+/*
37
- __put_user(env->xregs[30], &sf->lr);
35
+ * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
38
+ __put_user(0, &sf->uc.tuc_flags);
36
+ * which can be written as 1 to trigger behaviour but which stay RAZ).
39
+ __put_user(0, &sf->uc.tuc_link);
37
+ */
38
+#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
40
+
39
+
41
+ __put_user(target_sigaltstack_used.ss_sp, &sf->uc.tuc_stack.ss_sp);
40
+#define PMXEVTYPER_P 0x80000000
42
+ __put_user(sas_ss_flags(env->xregs[31]), &sf->uc.tuc_stack.ss_flags);
41
+#define PMXEVTYPER_U 0x40000000
43
+ __put_user(target_sigaltstack_used.ss_size, &sf->uc.tuc_stack.ss_size);
42
+#define PMXEVTYPER_NSK 0x20000000
44
43
+#define PMXEVTYPER_NSU 0x10000000
45
for (i = 0; i < 31; i++) {
44
+#define PMXEVTYPER_NSH 0x08000000
46
__put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
45
+#define PMXEVTYPER_M 0x04000000
47
@@ -XXX,XX +XXX,XX @@ static int target_setup_sigframe(struct target_rt_sigframe *sf,
46
+#define PMXEVTYPER_MT 0x02000000
48
for (i = 0; i < TARGET_NSIG_WORDS; i++) {
47
+#define PMXEVTYPER_EVTCOUNT 0x0000ffff
49
__put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
48
+#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
50
}
49
+ PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
50
+ PMXEVTYPER_M | PMXEVTYPER_MT | \
51
+ PMXEVTYPER_EVTCOUNT)
52
+
53
+#define PMCCFILTR 0xf8000000
54
+#define PMCCFILTR_M PMXEVTYPER_M
55
+#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
56
+
57
+static inline uint32_t pmu_num_counters(CPUARMState *env)
58
+{
59
+ return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
51
+}
60
+}
52
+
61
+
53
+static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
62
+/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
54
+ CPUARMState *env)
63
+static inline uint64_t pmu_counter_mask(CPUARMState *env)
55
+{
64
+{
56
+ int i;
65
+ return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
57
+
58
+ __put_user(TARGET_FPSIMD_MAGIC, &fpsimd->head.magic);
59
+ __put_user(sizeof(struct target_fpsimd_context), &fpsimd->head.size);
60
+ __put_user(vfp_get_fpsr(env), &fpsimd->fpsr);
61
+ __put_user(vfp_get_fpcr(env), &fpsimd->fpcr);
62
63
for (i = 0; i < 32; i++) {
64
uint64_t *q = aa64_vfp_qreg(env, i);
65
#ifdef TARGET_WORDS_BIGENDIAN
66
- __put_user(q[0], &aux->fpsimd.vregs[i * 2 + 1]);
67
- __put_user(q[1], &aux->fpsimd.vregs[i * 2]);
68
+ __put_user(q[0], &fpsimd->vregs[i * 2 + 1]);
69
+ __put_user(q[1], &fpsimd->vregs[i * 2]);
70
#else
71
- __put_user(q[0], &aux->fpsimd.vregs[i * 2]);
72
- __put_user(q[1], &aux->fpsimd.vregs[i * 2 + 1]);
73
+ __put_user(q[0], &fpsimd->vregs[i * 2]);
74
+ __put_user(q[1], &fpsimd->vregs[i * 2 + 1]);
75
#endif
76
}
77
- __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
78
- __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
79
- __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
80
- __put_user(sizeof(struct target_fpsimd_context),
81
- &aux->fpsimd.head.size);
82
-
83
- /* set the "end" magic */
84
- __put_user(0, &aux->end.magic);
85
- __put_user(0, &aux->end.size);
86
-
87
- return 0;
88
}
89
90
-static int target_restore_sigframe(CPUARMState *env,
91
- struct target_rt_sigframe *sf)
92
+static void target_setup_end_record(struct target_aarch64_ctx *end)
93
+{
94
+ __put_user(0, &end->magic);
95
+ __put_user(0, &end->size);
96
+}
66
+}
97
+
67
+
98
+static void target_restore_general_frame(CPUARMState *env,
99
+ struct target_rt_sigframe *sf)
100
{
101
sigset_t set;
102
- int i;
103
- struct target_aux_context *aux =
104
- (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
105
- uint32_t magic, size, fpsr, fpcr;
106
uint64_t pstate;
107
+ int i;
108
109
target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
110
set_sigmask(&set);
111
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
112
__get_user(env->pc, &sf->uc.tuc_mcontext.pc);
113
__get_user(pstate, &sf->uc.tuc_mcontext.pstate);
114
pstate_write(env, pstate);
115
+}
116
117
- __get_user(magic, &aux->fpsimd.head.magic);
118
- __get_user(size, &aux->fpsimd.head.size);
119
+static void target_restore_fpsimd_record(CPUARMState *env,
120
+ struct target_fpsimd_context *fpsimd)
121
+{
122
+ uint32_t fpsr, fpcr;
123
+ int i;
124
125
- if (magic != TARGET_FPSIMD_MAGIC
126
- || size != sizeof(struct target_fpsimd_context)) {
127
- return 1;
128
- }
129
+ __get_user(fpsr, &fpsimd->fpsr);
130
+ vfp_set_fpsr(env, fpsr);
131
+ __get_user(fpcr, &fpsimd->fpcr);
132
+ vfp_set_fpcr(env, fpcr);
133
134
for (i = 0; i < 32; i++) {
135
uint64_t *q = aa64_vfp_qreg(env, i);
136
#ifdef TARGET_WORDS_BIGENDIAN
137
- __get_user(q[0], &aux->fpsimd.vregs[i * 2 + 1]);
138
- __get_user(q[1], &aux->fpsimd.vregs[i * 2]);
139
+ __get_user(q[0], &fpsimd->vregs[i * 2 + 1]);
140
+ __get_user(q[1], &fpsimd->vregs[i * 2]);
141
#else
142
- __get_user(q[0], &aux->fpsimd.vregs[i * 2]);
143
- __get_user(q[1], &aux->fpsimd.vregs[i * 2 + 1]);
144
+ __get_user(q[0], &fpsimd->vregs[i * 2]);
145
+ __get_user(q[1], &fpsimd->vregs[i * 2 + 1]);
146
#endif
68
#endif
147
}
69
diff --git a/target/arm/helper.c b/target/arm/helper.c
148
- __get_user(fpsr, &aux->fpsimd.fpsr);
70
index XXXXXXX..XXXXXXX 100644
149
- vfp_set_fpsr(env, fpsr);
71
--- a/target/arm/helper.c
150
- __get_user(fpcr, &aux->fpsimd.fpcr);
72
+++ b/target/arm/helper.c
151
- vfp_set_fpcr(env, fpcr);
73
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
152
+}
74
REGINFO_SENTINEL
153
75
};
154
+static int target_restore_sigframe(CPUARMState *env,
76
155
+ struct target_rt_sigframe *sf)
77
-/* Definitions for the PMU registers */
156
+{
78
-#define PMCRN_MASK 0xf800
157
+ struct target_aux_context *aux
79
-#define PMCRN_SHIFT 11
158
+ = (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
80
-#define PMCRLC 0x40
159
+ uint32_t magic, size;
81
-#define PMCRDP 0x20
160
+
82
-#define PMCRX 0x10
161
+ target_restore_general_frame(env, sf);
83
-#define PMCRD 0x8
162
+
84
-#define PMCRC 0x4
163
+ __get_user(magic, &aux->fpsimd.head.magic);
85
-#define PMCRP 0x2
164
+ __get_user(size, &aux->fpsimd.head.size);
86
-#define PMCRE 0x1
165
+ if (magic == TARGET_FPSIMD_MAGIC
87
-/*
166
+ && size == sizeof(struct target_fpsimd_context)) {
88
- * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
167
+ target_restore_fpsimd_record(env, &aux->fpsimd);
89
- * which can be written as 1 to trigger behaviour but which stay RAZ).
168
+ } else {
90
- */
169
+ return 1;
91
-#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
170
+ }
92
-
171
return 0;
93
-#define PMXEVTYPER_P 0x80000000
172
}
94
-#define PMXEVTYPER_U 0x40000000
173
95
-#define PMXEVTYPER_NSK 0x20000000
174
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
96
-#define PMXEVTYPER_NSU 0x10000000
175
CPUARMState *env)
97
-#define PMXEVTYPER_NSH 0x08000000
176
{
98
-#define PMXEVTYPER_M 0x04000000
177
struct target_rt_sigframe *frame;
99
-#define PMXEVTYPER_MT 0x02000000
178
+ struct target_aux_context *aux;
100
-#define PMXEVTYPER_EVTCOUNT 0x0000ffff
179
abi_ulong frame_addr, return_addr;
101
-#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
180
102
- PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
181
frame_addr = get_sigframe(ka, env);
103
- PMXEVTYPER_M | PMXEVTYPER_MT | \
182
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
104
- PMXEVTYPER_EVTCOUNT)
183
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
105
-
184
goto give_sigsegv;
106
-#define PMCCFILTR 0xf8000000
185
}
107
-#define PMCCFILTR_M PMXEVTYPER_M
186
+ aux = (struct target_aux_context *)frame->uc.tuc_mcontext.__reserved;
108
-#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
187
109
-
188
- __put_user(0, &frame->uc.tuc_flags);
110
-static inline uint32_t pmu_num_counters(CPUARMState *env)
189
- __put_user(0, &frame->uc.tuc_link);
111
-{
190
+ target_setup_general_frame(frame, env, set);
112
- return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
191
+ target_setup_fpsimd_record(&aux->fpsimd, env);
113
-}
192
+ target_setup_end_record(&aux->end);
114
-
193
115
-/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
194
- __put_user(target_sigaltstack_used.ss_sp,
116
-static inline uint64_t pmu_counter_mask(CPUARMState *env)
195
- &frame->uc.tuc_stack.ss_sp);
117
-{
196
- __put_user(sas_ss_flags(env->xregs[31]),
118
- return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
197
- &frame->uc.tuc_stack.ss_flags);
119
-}
198
- __put_user(target_sigaltstack_used.ss_size,
120
-
199
- &frame->uc.tuc_stack.ss_size);
121
typedef struct pm_event {
200
- target_setup_sigframe(frame, env, set);
122
uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
201
if (ka->sa_flags & TARGET_SA_RESTORER) {
123
/* If the event is supported on this CPU (used to generate PMCEID[01]) */
202
return_addr = ka->sa_restorer;
203
} else {
204
--
124
--
205
2.16.2
125
2.20.1
206
126
207
127
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Alexander Graf <agraf@csgraf.de>
2
2
3
From the "Physical Layer Simplified Specification Version 3.01":
3
Hvf's permission bitmap during and after dirty logging does not include
4
the HV_MEMORY_EXEC permission. At least on Apple Silicon, this leads to
5
instruction faults once dirty logging was enabled.
4
6
5
A known data block ("Tuning block") can be used to tune sampling
7
Add the bit to make it work properly.
6
point for tuning required hosts. [...]
7
This procedure gives the system optimal timing for each specific
8
host and card combination and compensates for static delays in
9
the timing budget including process, voltage and different PCB
10
loads and skews. [...]
11
Data block, carried by DAT[3:0], contains a pattern for tuning
12
sampling position to receive data on the CMD and DAT[3:0] line.
13
8
14
[based on a patch from Alistair Francis <alistair.francis@xilinx.com>
9
Signed-off-by: Alexander Graf <agraf@csgraf.de>
15
from qemu/xilinx tag xilinx-v2015.2]
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Message-id: 20210916155404.86958-3-agraf@csgraf.de
17
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
18
Message-id: 20180309153654.13518-5-f4bug@amsat.org
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
---
13
---
21
hw/sd/sd.c | 29 +++++++++++++++++++++++++++++
14
accel/hvf/hvf-accel-ops.c | 4 ++--
22
1 file changed, 29 insertions(+)
15
1 file changed, 2 insertions(+), 2 deletions(-)
23
16
24
diff --git a/hw/sd/sd.c b/hw/sd/sd.c
17
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
25
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
26
--- a/hw/sd/sd.c
19
--- a/accel/hvf/hvf-accel-ops.c
27
+++ b/hw/sd/sd.c
20
+++ b/accel/hvf/hvf-accel-ops.c
28
@@ -XXX,XX +XXX,XX @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
21
@@ -XXX,XX +XXX,XX @@ static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
29
}
22
if (on) {
30
break;
23
slot->flags |= HVF_SLOT_LOG;
31
24
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
32
+ case 19: /* CMD19: SEND_TUNING_BLOCK (SD) */
25
- HV_MEMORY_READ);
33
+ if (sd->state == sd_transfer_state) {
26
+ HV_MEMORY_READ | HV_MEMORY_EXEC);
34
+ sd->state = sd_sendingdata_state;
27
/* stop tracking region*/
35
+ sd->data_offset = 0;
28
} else {
36
+ return sd_r1;
29
slot->flags &= ~HVF_SLOT_LOG;
37
+ }
30
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
38
+ break;
31
- HV_MEMORY_READ | HV_MEMORY_WRITE);
39
+
32
+ HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
40
case 23: /* CMD23: SET_BLOCK_COUNT */
41
switch (sd->state) {
42
case sd_transfer_state:
43
@@ -XXX,XX +XXX,XX @@ void sd_write_data(SDState *sd, uint8_t value)
44
}
33
}
45
}
34
}
46
35
47
+#define SD_TUNING_BLOCK_SIZE 64
48
+
49
+static const uint8_t sd_tuning_block_pattern[SD_TUNING_BLOCK_SIZE] = {
50
+ /* See: Physical Layer Simplified Specification Version 3.01, Table 4-2 */
51
+ 0xff, 0x0f, 0xff, 0x00, 0x0f, 0xfc, 0xc3, 0xcc,
52
+ 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
53
+ 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
54
+ 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
55
+ 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
56
+ 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
57
+ 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
58
+ 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
59
+};
60
+
61
uint8_t sd_read_data(SDState *sd)
62
{
63
/* TODO: Append CRCs */
64
@@ -XXX,XX +XXX,XX @@ uint8_t sd_read_data(SDState *sd)
65
}
66
break;
67
68
+ case 19: /* CMD19: SEND_TUNING_BLOCK (SD) */
69
+ if (sd->data_offset >= SD_TUNING_BLOCK_SIZE - 1) {
70
+ sd->state = sd_transfer_state;
71
+ }
72
+ ret = sd_tuning_block_pattern[sd->data_offset++];
73
+ break;
74
+
75
case 22:    /* ACMD22: SEND_NUM_WR_BLOCKS */
76
ret = sd->data[sd->data_offset ++];
77
78
--
36
--
79
2.16.2
37
2.20.1
80
38
81
39
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Alexander Graf <agraf@csgraf.de>
2
2
3
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
We will need to install a migration helper for the ARM hvf backend.
4
Acked-by: Alistair Francis <alistair.francis@xilinx.com>
4
Let's introduce an arch callback for the overall hvf init chain to
5
Message-id: 20180309153654.13518-2-f4bug@amsat.org
5
do so.
6
7
Signed-off-by: Alexander Graf <agraf@csgraf.de>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Message-id: 20210916155404.86958-4-agraf@csgraf.de
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
---
11
---
9
hw/sd/sd.c | 11 ++++++++---
12
include/sysemu/hvf_int.h | 1 +
10
1 file changed, 8 insertions(+), 3 deletions(-)
13
accel/hvf/hvf-accel-ops.c | 3 ++-
14
target/i386/hvf/hvf.c | 5 +++++
15
3 files changed, 8 insertions(+), 1 deletion(-)
11
16
12
diff --git a/hw/sd/sd.c b/hw/sd/sd.c
17
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
13
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
14
--- a/hw/sd/sd.c
19
--- a/include/sysemu/hvf_int.h
15
+++ b/hw/sd/sd.c
20
+++ b/include/sysemu/hvf_int.h
16
@@ -XXX,XX +XXX,XX @@ static void sd_lock_command(SDState *sd)
21
@@ -XXX,XX +XXX,XX @@ struct hvf_vcpu_state {
17
sd->card_status &= ~CARD_IS_LOCKED;
22
};
23
24
void assert_hvf_ok(hv_return_t ret);
25
+int hvf_arch_init(void);
26
int hvf_arch_init_vcpu(CPUState *cpu);
27
void hvf_arch_vcpu_destroy(CPUState *cpu);
28
int hvf_vcpu_exec(CPUState *);
29
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/accel/hvf/hvf-accel-ops.c
32
+++ b/accel/hvf/hvf-accel-ops.c
33
@@ -XXX,XX +XXX,XX @@ static int hvf_accel_init(MachineState *ms)
34
35
hvf_state = s;
36
memory_listener_register(&hvf_memory_listener, &address_space_memory);
37
- return 0;
38
+
39
+ return hvf_arch_init();
18
}
40
}
19
41
20
-static sd_rsp_type_t sd_normal_command(SDState *sd,
42
static void hvf_accel_class_init(ObjectClass *oc, void *data)
21
- SDRequest req)
43
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
22
+static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/i386/hvf/hvf.c
46
+++ b/target/i386/hvf/hvf.c
47
@@ -XXX,XX +XXX,XX @@ static inline bool apic_bus_freq_is_known(CPUX86State *env)
48
return env->apic_bus_freq != 0;
49
}
50
51
+int hvf_arch_init(void)
52
+{
53
+ return 0;
54
+}
55
+
56
int hvf_arch_init_vcpu(CPUState *cpu)
23
{
57
{
24
uint32_t rca = 0x0000;
58
X86CPU *x86cpu = X86_CPU(cpu);
25
uint64_t addr = (sd->ocr & (1 << 30)) ? (uint64_t) req.arg << 9 : req.arg;
26
27
- trace_sdcard_normal_command(req.cmd, req.arg, sd_state_name(sd->state));
28
+ /* CMD55 precedes an ACMD, so we are not interested in tracing it.
29
+ * However there is no ACMD55, so we want to trace this particular case.
30
+ */
31
+ if (req.cmd != 55 || sd->expecting_acmd) {
32
+ trace_sdcard_normal_command(req.cmd, req.arg,
33
+ sd_state_name(sd->state));
34
+ }
35
36
/* Not interpreting this as an app command */
37
sd->card_status &= ~APP_CMD;
38
--
59
--
39
2.16.2
60
2.20.1
40
61
41
62
diff view generated by jsdifflib
1
From: Andrey Smirnov <andrew.smirnov@gmail.com>
1
From: Alexander Graf <agraf@csgraf.de>
2
2
3
The following interfaces are partially or fully emulated:
3
With Apple Silicon available to the masses, it's a good time to add support
4
for driving its virtualization extensions from QEMU.
4
5
5
* up to 2 Cortex A9 cores (SMP works with PSCI)
6
This patch adds all necessary architecture specific code to get basic VMs
6
* A7 MPCORE (identical to A15 MPCORE)
7
working, including save/restore.
7
* 4 GPTs modules
8
* 7 GPIO controllers
9
* 2 IOMUXC controllers
10
* 1 CCM module
11
* 1 SVNS module
12
* 1 SRC module
13
* 1 GPCv2 controller
14
* 4 eCSPI controllers
15
* 4 I2C controllers
16
* 7 i.MX UART controllers
17
* 2 FlexCAN controllers
18
* 2 Ethernet controllers (FEC)
19
* 3 SD controllers (USDHC)
20
* 4 WDT modules
21
* 1 SDMA module
22
* 1 GPR module
23
* 2 USBMISC modules
24
* 2 ADC modules
25
* 1 PCIe controller
26
8
27
Tested to boot and work with upstream Linux (4.13+) guest.
9
Known limitations:
28
10
11
- WFI handling is missing (follows in later patch)
12
- No watchpoint/breakpoint support
13
14
Signed-off-by: Alexander Graf <agraf@csgraf.de>
15
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
16
Reviewed-by: Sergio Lopez <slp@redhat.com>
29
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
17
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
30
Signed-off-by: Andrey Smirnov <andrew.smirnov@gmail.com>
18
Message-id: 20210916155404.86958-5-agraf@csgraf.de
31
[PMM: folded a couple of long lines]
32
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
33
---
20
---
34
hw/arm/Makefile.objs | 1 +
21
meson.build | 1 +
35
include/hw/arm/fsl-imx7.h | 222 +++++++++++++++
22
include/sysemu/hvf_int.h | 10 +-
36
hw/arm/fsl-imx7.c | 582 ++++++++++++++++++++++++++++++++++++++++
23
accel/hvf/hvf-accel-ops.c | 9 +
37
default-configs/arm-softmmu.mak | 1 +
24
target/arm/hvf/hvf.c | 794 ++++++++++++++++++++++++++++++++++++
38
4 files changed, 806 insertions(+)
25
target/i386/hvf/hvf.c | 5 +
39
create mode 100644 include/hw/arm/fsl-imx7.h
26
MAINTAINERS | 5 +
40
create mode 100644 hw/arm/fsl-imx7.c
27
target/arm/hvf/trace-events | 10 +
28
7 files changed, 833 insertions(+), 1 deletion(-)
29
create mode 100644 target/arm/hvf/hvf.c
30
create mode 100644 target/arm/hvf/trace-events
41
31
42
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
32
diff --git a/meson.build b/meson.build
43
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
44
--- a/hw/arm/Makefile.objs
34
--- a/meson.build
45
+++ b/hw/arm/Makefile.objs
35
+++ b/meson.build
46
@@ -XXX,XX +XXX,XX @@ obj-$(CONFIG_MPS2) += mps2.o
36
@@ -XXX,XX +XXX,XX @@ if have_system or have_user
47
obj-$(CONFIG_MPS2) += mps2-tz.o
37
'accel/tcg',
48
obj-$(CONFIG_MSF2) += msf2-soc.o msf2-som.o
38
'hw/core',
49
obj-$(CONFIG_IOTKIT) += iotkit.o
39
'target/arm',
50
+obj-$(CONFIG_FSL_IMX7) += fsl-imx7.o
40
+ 'target/arm/hvf',
51
diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h
41
'target/hppa',
42
'target/i386',
43
'target/i386/kvm',
44
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
45
index XXXXXXX..XXXXXXX 100644
46
--- a/include/sysemu/hvf_int.h
47
+++ b/include/sysemu/hvf_int.h
48
@@ -XXX,XX +XXX,XX @@
49
#ifndef HVF_INT_H
50
#define HVF_INT_H
51
52
+#ifdef __aarch64__
53
+#include <Hypervisor/Hypervisor.h>
54
+#else
55
#include <Hypervisor/hv.h>
56
+#endif
57
58
/* hvf_slot flags */
59
#define HVF_SLOT_LOG (1 << 0)
60
@@ -XXX,XX +XXX,XX @@ struct HVFState {
61
int num_slots;
62
63
hvf_vcpu_caps *hvf_caps;
64
+ uint64_t vtimer_offset;
65
};
66
extern HVFState *hvf_state;
67
68
struct hvf_vcpu_state {
69
- int fd;
70
+ uint64_t fd;
71
+ void *exit;
72
+ bool vtimer_masked;
73
};
74
75
void assert_hvf_ok(hv_return_t ret);
76
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *);
77
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
78
int hvf_put_registers(CPUState *);
79
int hvf_get_registers(CPUState *);
80
+void hvf_kick_vcpu_thread(CPUState *cpu);
81
82
#endif
83
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
84
index XXXXXXX..XXXXXXX 100644
85
--- a/accel/hvf/hvf-accel-ops.c
86
+++ b/accel/hvf/hvf-accel-ops.c
87
@@ -XXX,XX +XXX,XX @@
88
89
HVFState *hvf_state;
90
91
+#ifdef __aarch64__
92
+#define HV_VM_DEFAULT NULL
93
+#endif
94
+
95
/* Memory slots */
96
97
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
98
@@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu)
99
pthread_sigmask(SIG_BLOCK, NULL, &set);
100
sigdelset(&set, SIG_IPI);
101
102
+#ifdef __aarch64__
103
+ r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
104
+#else
105
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
106
+#endif
107
cpu->vcpu_dirty = 1;
108
assert_hvf_ok(r);
109
110
@@ -XXX,XX +XXX,XX @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
111
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
112
113
ops->create_vcpu_thread = hvf_start_vcpu_thread;
114
+ ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
115
116
ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
117
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
118
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
52
new file mode 100644
119
new file mode 100644
53
index XXXXXXX..XXXXXXX
120
index XXXXXXX..XXXXXXX
54
--- /dev/null
121
--- /dev/null
55
+++ b/include/hw/arm/fsl-imx7.h
122
+++ b/target/arm/hvf/hvf.c
56
@@ -XXX,XX +XXX,XX @@
123
@@ -XXX,XX +XXX,XX @@
57
+/*
124
+/*
58
+ * Copyright (c) 2018, Impinj, Inc.
125
+ * QEMU Hypervisor.framework support for Apple Silicon
126
+
127
+ * Copyright 2020 Alexander Graf <agraf@csgraf.de>
59
+ *
128
+ *
60
+ * i.MX7 SoC definitions
129
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
130
+ * See the COPYING file in the top-level directory.
61
+ *
131
+ *
62
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
63
+ *
64
+ * This program is free software; you can redistribute it and/or modify
65
+ * it under the terms of the GNU General Public License as published by
66
+ * the Free Software Foundation; either version 2 of the License, or
67
+ * (at your option) any later version.
68
+ *
69
+ * This program is distributed in the hope that it will be useful,
70
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
71
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
72
+ * GNU General Public License for more details.
73
+ */
132
+ */
74
+
133
+
75
+#ifndef FSL_IMX7_H
134
+#include "qemu/osdep.h"
76
+#define FSL_IMX7_H
135
+#include "qemu-common.h"
77
+
136
+#include "qemu/error-report.h"
78
+#include "hw/arm/arm.h"
137
+
79
+#include "hw/cpu/a15mpcore.h"
138
+#include "sysemu/runstate.h"
80
+#include "hw/intc/imx_gpcv2.h"
139
+#include "sysemu/hvf.h"
81
+#include "hw/misc/imx7_ccm.h"
140
+#include "sysemu/hvf_int.h"
82
+#include "hw/misc/imx7_snvs.h"
141
+#include "sysemu/hw_accel.h"
83
+#include "hw/misc/imx7_gpr.h"
142
+
84
+#include "hw/misc/imx6_src.h"
143
+#include <mach/mach_time.h>
85
+#include "hw/misc/imx2_wdt.h"
144
+
86
+#include "hw/gpio/imx_gpio.h"
145
+#include "exec/address-spaces.h"
87
+#include "hw/char/imx_serial.h"
146
+#include "hw/irq.h"
88
+#include "hw/timer/imx_gpt.h"
147
+#include "qemu/main-loop.h"
89
+#include "hw/timer/imx_epit.h"
148
+#include "sysemu/cpus.h"
90
+#include "hw/i2c/imx_i2c.h"
149
+#include "target/arm/cpu.h"
91
+#include "hw/gpio/imx_gpio.h"
150
+#include "target/arm/internals.h"
92
+#include "hw/sd/sdhci.h"
151
+#include "trace/trace-target_arm_hvf.h"
93
+#include "hw/ssi/imx_spi.h"
152
+#include "migration/vmstate.h"
94
+#include "hw/net/imx_fec.h"
153
+
95
+#include "hw/pci-host/designware.h"
154
+#define HVF_SYSREG(crn, crm, op0, op1, op2) \
96
+#include "hw/usb/chipidea.h"
155
+ ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
97
+#include "exec/memory.h"
156
+#define PL1_WRITE_MASK 0x4
98
+#include "cpu.h"
157
+
99
+
158
+#define SYSREG(op0, op1, crn, crm, op2) \
100
+#define TYPE_FSL_IMX7 "fsl,imx7"
159
+ ((op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (crm << 1))
101
+#define FSL_IMX7(obj) OBJECT_CHECK(FslIMX7State, (obj), TYPE_FSL_IMX7)
160
+#define SYSREG_MASK SYSREG(0x3, 0x7, 0xf, 0xf, 0x7)
102
+
161
+#define SYSREG_OSLAR_EL1 SYSREG(2, 0, 1, 0, 4)
103
+enum FslIMX7Configuration {
162
+#define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4)
104
+ FSL_IMX7_NUM_CPUS = 2,
163
+#define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4)
105
+ FSL_IMX7_NUM_UARTS = 7,
164
+#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1)
106
+ FSL_IMX7_NUM_ETHS = 2,
165
+
107
+ FSL_IMX7_ETH_NUM_TX_RINGS = 3,
166
+#define WFX_IS_WFE (1 << 0)
108
+ FSL_IMX7_NUM_USDHCS = 3,
167
+
109
+ FSL_IMX7_NUM_WDTS = 4,
168
+#define TMR_CTL_ENABLE (1 << 0)
110
+ FSL_IMX7_NUM_GPTS = 4,
169
+#define TMR_CTL_IMASK (1 << 1)
111
+ FSL_IMX7_NUM_IOMUXCS = 2,
170
+#define TMR_CTL_ISTATUS (1 << 2)
112
+ FSL_IMX7_NUM_GPIOS = 7,
171
+
113
+ FSL_IMX7_NUM_I2CS = 4,
172
+typedef struct HVFVTimer {
114
+ FSL_IMX7_NUM_ECSPIS = 4,
173
+ /* Vtimer value during migration and paused state */
115
+ FSL_IMX7_NUM_USBS = 3,
174
+ uint64_t vtimer_val;
116
+ FSL_IMX7_NUM_ADCS = 2,
175
+} HVFVTimer;
176
+
177
+static HVFVTimer vtimer;
178
+
179
+struct hvf_reg_match {
180
+ int reg;
181
+ uint64_t offset;
117
+};
182
+};
118
+
183
+
119
+typedef struct FslIMX7State {
184
+static const struct hvf_reg_match hvf_reg_match[] = {
120
+ /*< private >*/
185
+ { HV_REG_X0, offsetof(CPUARMState, xregs[0]) },
121
+ DeviceState parent_obj;
186
+ { HV_REG_X1, offsetof(CPUARMState, xregs[1]) },
122
+
187
+ { HV_REG_X2, offsetof(CPUARMState, xregs[2]) },
123
+ /*< public >*/
188
+ { HV_REG_X3, offsetof(CPUARMState, xregs[3]) },
124
+ ARMCPU cpu[FSL_IMX7_NUM_CPUS];
189
+ { HV_REG_X4, offsetof(CPUARMState, xregs[4]) },
125
+ A15MPPrivState a7mpcore;
190
+ { HV_REG_X5, offsetof(CPUARMState, xregs[5]) },
126
+ IMXGPTState gpt[FSL_IMX7_NUM_GPTS];
191
+ { HV_REG_X6, offsetof(CPUARMState, xregs[6]) },
127
+ IMXGPIOState gpio[FSL_IMX7_NUM_GPIOS];
192
+ { HV_REG_X7, offsetof(CPUARMState, xregs[7]) },
128
+ IMX7CCMState ccm;
193
+ { HV_REG_X8, offsetof(CPUARMState, xregs[8]) },
129
+ IMX7AnalogState analog;
194
+ { HV_REG_X9, offsetof(CPUARMState, xregs[9]) },
130
+ IMX7SNVSState snvs;
195
+ { HV_REG_X10, offsetof(CPUARMState, xregs[10]) },
131
+ IMXGPCv2State gpcv2;
196
+ { HV_REG_X11, offsetof(CPUARMState, xregs[11]) },
132
+ IMXSPIState spi[FSL_IMX7_NUM_ECSPIS];
197
+ { HV_REG_X12, offsetof(CPUARMState, xregs[12]) },
133
+ IMXI2CState i2c[FSL_IMX7_NUM_I2CS];
198
+ { HV_REG_X13, offsetof(CPUARMState, xregs[13]) },
134
+ IMXSerialState uart[FSL_IMX7_NUM_UARTS];
199
+ { HV_REG_X14, offsetof(CPUARMState, xregs[14]) },
135
+ IMXFECState eth[FSL_IMX7_NUM_ETHS];
200
+ { HV_REG_X15, offsetof(CPUARMState, xregs[15]) },
136
+ SDHCIState usdhc[FSL_IMX7_NUM_USDHCS];
201
+ { HV_REG_X16, offsetof(CPUARMState, xregs[16]) },
137
+ IMX2WdtState wdt[FSL_IMX7_NUM_WDTS];
202
+ { HV_REG_X17, offsetof(CPUARMState, xregs[17]) },
138
+ IMX7GPRState gpr;
203
+ { HV_REG_X18, offsetof(CPUARMState, xregs[18]) },
139
+ ChipideaState usb[FSL_IMX7_NUM_USBS];
204
+ { HV_REG_X19, offsetof(CPUARMState, xregs[19]) },
140
+ DesignwarePCIEHost pcie;
205
+ { HV_REG_X20, offsetof(CPUARMState, xregs[20]) },
141
+} FslIMX7State;
206
+ { HV_REG_X21, offsetof(CPUARMState, xregs[21]) },
142
+
207
+ { HV_REG_X22, offsetof(CPUARMState, xregs[22]) },
143
+enum FslIMX7MemoryMap {
208
+ { HV_REG_X23, offsetof(CPUARMState, xregs[23]) },
144
+ FSL_IMX7_MMDC_ADDR = 0x80000000,
209
+ { HV_REG_X24, offsetof(CPUARMState, xregs[24]) },
145
+ FSL_IMX7_MMDC_SIZE = 2 * 1024 * 1024 * 1024UL,
210
+ { HV_REG_X25, offsetof(CPUARMState, xregs[25]) },
146
+
211
+ { HV_REG_X26, offsetof(CPUARMState, xregs[26]) },
147
+ FSL_IMX7_GPIO1_ADDR = 0x30200000,
212
+ { HV_REG_X27, offsetof(CPUARMState, xregs[27]) },
148
+ FSL_IMX7_GPIO2_ADDR = 0x30210000,
213
+ { HV_REG_X28, offsetof(CPUARMState, xregs[28]) },
149
+ FSL_IMX7_GPIO3_ADDR = 0x30220000,
214
+ { HV_REG_X29, offsetof(CPUARMState, xregs[29]) },
150
+ FSL_IMX7_GPIO4_ADDR = 0x30230000,
215
+ { HV_REG_X30, offsetof(CPUARMState, xregs[30]) },
151
+ FSL_IMX7_GPIO5_ADDR = 0x30240000,
216
+ { HV_REG_PC, offsetof(CPUARMState, pc) },
152
+ FSL_IMX7_GPIO6_ADDR = 0x30250000,
217
+};
153
+ FSL_IMX7_GPIO7_ADDR = 0x30260000,
218
+
154
+
219
+static const struct hvf_reg_match hvf_fpreg_match[] = {
155
+ FSL_IMX7_IOMUXC_LPSR_GPR_ADDR = 0x30270000,
220
+ { HV_SIMD_FP_REG_Q0, offsetof(CPUARMState, vfp.zregs[0]) },
156
+
221
+ { HV_SIMD_FP_REG_Q1, offsetof(CPUARMState, vfp.zregs[1]) },
157
+ FSL_IMX7_WDOG1_ADDR = 0x30280000,
222
+ { HV_SIMD_FP_REG_Q2, offsetof(CPUARMState, vfp.zregs[2]) },
158
+ FSL_IMX7_WDOG2_ADDR = 0x30290000,
223
+ { HV_SIMD_FP_REG_Q3, offsetof(CPUARMState, vfp.zregs[3]) },
159
+ FSL_IMX7_WDOG3_ADDR = 0x302A0000,
224
+ { HV_SIMD_FP_REG_Q4, offsetof(CPUARMState, vfp.zregs[4]) },
160
+ FSL_IMX7_WDOG4_ADDR = 0x302B0000,
225
+ { HV_SIMD_FP_REG_Q5, offsetof(CPUARMState, vfp.zregs[5]) },
161
+
226
+ { HV_SIMD_FP_REG_Q6, offsetof(CPUARMState, vfp.zregs[6]) },
162
+ FSL_IMX7_IOMUXC_LPSR_ADDR = 0x302C0000,
227
+ { HV_SIMD_FP_REG_Q7, offsetof(CPUARMState, vfp.zregs[7]) },
163
+
228
+ { HV_SIMD_FP_REG_Q8, offsetof(CPUARMState, vfp.zregs[8]) },
164
+ FSL_IMX7_GPT1_ADDR = 0x302D0000,
229
+ { HV_SIMD_FP_REG_Q9, offsetof(CPUARMState, vfp.zregs[9]) },
165
+ FSL_IMX7_GPT2_ADDR = 0x302E0000,
230
+ { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
166
+ FSL_IMX7_GPT3_ADDR = 0x302F0000,
231
+ { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
167
+ FSL_IMX7_GPT4_ADDR = 0x30300000,
232
+ { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
168
+
233
+ { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
169
+ FSL_IMX7_IOMUXC_ADDR = 0x30330000,
234
+ { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
170
+ FSL_IMX7_IOMUXC_GPR_ADDR = 0x30340000,
235
+ { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
171
+ FSL_IMX7_IOMUXCn_SIZE = 0x1000,
236
+ { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
172
+
237
+ { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
173
+ FSL_IMX7_ANALOG_ADDR = 0x30360000,
238
+ { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
174
+ FSL_IMX7_SNVS_ADDR = 0x30370000,
239
+ { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
175
+ FSL_IMX7_CCM_ADDR = 0x30380000,
240
+ { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
176
+
241
+ { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
177
+ FSL_IMX7_SRC_ADDR = 0x30390000,
242
+ { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
178
+ FSL_IMX7_SRC_SIZE = 0x1000,
243
+ { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
179
+
244
+ { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
180
+ FSL_IMX7_ADC1_ADDR = 0x30610000,
245
+ { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
181
+ FSL_IMX7_ADC2_ADDR = 0x30620000,
246
+ { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
182
+ FSL_IMX7_ADCn_SIZE = 0x1000,
247
+ { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
183
+
248
+ { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
184
+ FSL_IMX7_GPC_ADDR = 0x303A0000,
249
+ { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
185
+
250
+ { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
186
+ FSL_IMX7_I2C1_ADDR = 0x30A20000,
251
+ { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
187
+ FSL_IMX7_I2C2_ADDR = 0x30A30000,
252
+};
188
+ FSL_IMX7_I2C3_ADDR = 0x30A40000,
253
+
189
+ FSL_IMX7_I2C4_ADDR = 0x30A50000,
254
+struct hvf_sreg_match {
190
+
255
+ int reg;
191
+ FSL_IMX7_ECSPI1_ADDR = 0x30820000,
256
+ uint32_t key;
192
+ FSL_IMX7_ECSPI2_ADDR = 0x30830000,
257
+ uint32_t cp_idx;
193
+ FSL_IMX7_ECSPI3_ADDR = 0x30840000,
258
+};
194
+ FSL_IMX7_ECSPI4_ADDR = 0x30630000,
259
+
195
+
260
+static struct hvf_sreg_match hvf_sreg_match[] = {
196
+ FSL_IMX7_LCDIF_ADDR = 0x30730000,
261
+ { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) },
197
+ FSL_IMX7_LCDIF_SIZE = 0x1000,
262
+ { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) },
198
+
263
+ { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) },
199
+ FSL_IMX7_UART1_ADDR = 0x30860000,
264
+ { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) },
265
+
266
+ { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) },
267
+ { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) },
268
+ { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) },
269
+ { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) },
270
+
271
+ { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) },
272
+ { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) },
273
+ { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) },
274
+ { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) },
275
+
276
+ { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) },
277
+ { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) },
278
+ { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) },
279
+ { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) },
280
+
281
+ { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) },
282
+ { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) },
283
+ { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) },
284
+ { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) },
285
+
286
+ { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) },
287
+ { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) },
288
+ { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) },
289
+ { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) },
290
+
291
+ { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) },
292
+ { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) },
293
+ { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) },
294
+ { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) },
295
+
296
+ { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 4) },
297
+ { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) },
298
+ { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) },
299
+ { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) },
300
+
301
+ { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) },
302
+ { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) },
303
+ { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) },
304
+ { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) },
305
+
306
+ { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) },
307
+ { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) },
308
+ { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) },
309
+ { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) },
310
+
311
+ { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) },
312
+ { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) },
313
+ { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) },
314
+ { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) },
315
+
316
+ { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) },
317
+ { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) },
318
+ { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) },
319
+ { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) },
320
+
321
+ { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) },
322
+ { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) },
323
+ { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) },
324
+ { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) },
325
+
326
+ { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) },
327
+ { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) },
328
+ { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) },
329
+ { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) },
330
+
331
+ { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) },
332
+ { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) },
333
+ { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) },
334
+ { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) },
335
+
336
+ { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) },
337
+ { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) },
338
+ { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) },
339
+ { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) },
340
+
341
+#ifdef SYNC_NO_RAW_REGS
200
+ /*
342
+ /*
201
+ * Some versions of the reference manual claim that UART2 is @
343
+ * The registers below are manually synced on init because they are
202
+ * 0x30870000, but experiments with HW + DT files in upstream
344
+ * marked as NO_RAW. We still list them to make number space sync easier.
203
+ * Linux kernel show that not to be true and that block is
204
+ * acutally located @ 0x30890000
205
+ */
345
+ */
206
+ FSL_IMX7_UART2_ADDR = 0x30890000,
346
+ { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
207
+ FSL_IMX7_UART3_ADDR = 0x30880000,
347
+ { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
208
+ FSL_IMX7_UART4_ADDR = 0x30A60000,
348
+ { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
209
+ FSL_IMX7_UART5_ADDR = 0x30A70000,
349
+ { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
210
+ FSL_IMX7_UART6_ADDR = 0x30A80000,
350
+#endif
211
+ FSL_IMX7_UART7_ADDR = 0x30A90000,
351
+ { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) },
212
+
352
+ { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
213
+ FSL_IMX7_ENET1_ADDR = 0x30BE0000,
353
+ { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
214
+ FSL_IMX7_ENET2_ADDR = 0x30BF0000,
354
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
215
+
355
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
216
+ FSL_IMX7_USB1_ADDR = 0x30B10000,
356
+#ifdef SYNC_NO_MMFR0
217
+ FSL_IMX7_USBMISC1_ADDR = 0x30B10200,
357
+ /* We keep the hardware MMFR0 around. HW limits are there anyway */
218
+ FSL_IMX7_USB2_ADDR = 0x30B20000,
358
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
219
+ FSL_IMX7_USBMISC2_ADDR = 0x30B20200,
359
+#endif
220
+ FSL_IMX7_USB3_ADDR = 0x30B30000,
360
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
221
+ FSL_IMX7_USBMISC3_ADDR = 0x30B30200,
361
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
222
+ FSL_IMX7_USBMISCn_SIZE = 0x200,
362
+
223
+
363
+ { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
224
+ FSL_IMX7_USDHC1_ADDR = 0x30B40000,
364
+ { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
225
+ FSL_IMX7_USDHC2_ADDR = 0x30B50000,
365
+ { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
226
+ FSL_IMX7_USDHC3_ADDR = 0x30B60000,
366
+ { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
227
+
367
+ { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
228
+ FSL_IMX7_SDMA_ADDR = 0x30BD0000,
368
+ { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },
229
+ FSL_IMX7_SDMA_SIZE = 0x1000,
369
+
230
+
370
+ { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
231
+ FSL_IMX7_A7MPCORE_ADDR = 0x31000000,
371
+ { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
232
+ FSL_IMX7_A7MPCORE_DAP_ADDR = 0x30000000,
372
+ { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
233
+
373
+ { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
234
+ FSL_IMX7_PCIE_REG_ADDR = 0x33800000,
374
+ { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
235
+ FSL_IMX7_PCIE_REG_SIZE = 16 * 1024,
375
+ { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
236
+
376
+ { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
237
+ FSL_IMX7_GPR_ADDR = 0x30340000,
377
+ { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
378
+ { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
379
+ { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },
380
+
381
+ { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
382
+ { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
383
+ { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
384
+ { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
385
+ { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
386
+ { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
387
+ { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
388
+ { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
389
+ { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
390
+ { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
391
+ { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
392
+ { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
393
+ { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
394
+ { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
395
+ { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
396
+ { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
397
+ { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
398
+ { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
399
+ { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
400
+ { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
238
+};
401
+};
239
+
402
+
240
+enum FslIMX7IRQs {
403
+int hvf_get_registers(CPUState *cpu)
241
+ FSL_IMX7_USDHC1_IRQ = 22,
404
+{
242
+ FSL_IMX7_USDHC2_IRQ = 23,
405
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
243
+ FSL_IMX7_USDHC3_IRQ = 24,
406
+ CPUARMState *env = &arm_cpu->env;
244
+
407
+ hv_return_t ret;
245
+ FSL_IMX7_UART1_IRQ = 26,
408
+ uint64_t val;
246
+ FSL_IMX7_UART2_IRQ = 27,
409
+ hv_simd_fp_uchar16_t fpval;
247
+ FSL_IMX7_UART3_IRQ = 28,
410
+ int i;
248
+ FSL_IMX7_UART4_IRQ = 29,
411
+
249
+ FSL_IMX7_UART5_IRQ = 30,
412
+ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
250
+ FSL_IMX7_UART6_IRQ = 16,
413
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
251
+
414
+ *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
252
+ FSL_IMX7_ECSPI1_IRQ = 31,
415
+ assert_hvf_ok(ret);
253
+ FSL_IMX7_ECSPI2_IRQ = 32,
416
+ }
254
+ FSL_IMX7_ECSPI3_IRQ = 33,
417
+
255
+ FSL_IMX7_ECSPI4_IRQ = 34,
418
+ for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
256
+
419
+ ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
257
+ FSL_IMX7_I2C1_IRQ = 35,
420
+ &fpval);
258
+ FSL_IMX7_I2C2_IRQ = 36,
421
+ memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
259
+ FSL_IMX7_I2C3_IRQ = 37,
422
+ assert_hvf_ok(ret);
260
+ FSL_IMX7_I2C4_IRQ = 38,
423
+ }
261
+
424
+
262
+ FSL_IMX7_USB1_IRQ = 43,
425
+ val = 0;
263
+ FSL_IMX7_USB2_IRQ = 42,
426
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
264
+ FSL_IMX7_USB3_IRQ = 40,
427
+ assert_hvf_ok(ret);
265
+
428
+ vfp_set_fpcr(env, val);
266
+ FSL_IMX7_PCI_INTA_IRQ = 122,
429
+
267
+ FSL_IMX7_PCI_INTB_IRQ = 123,
430
+ val = 0;
268
+ FSL_IMX7_PCI_INTC_IRQ = 124,
431
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
269
+ FSL_IMX7_PCI_INTD_IRQ = 125,
432
+ assert_hvf_ok(ret);
270
+
433
+ vfp_set_fpsr(env, val);
271
+ FSL_IMX7_UART7_IRQ = 126,
434
+
272
+
435
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
273
+#define FSL_IMX7_ENET_IRQ(i, n) ((n) + ((i) ? 100 : 118))
436
+ assert_hvf_ok(ret);
274
+
437
+ pstate_write(env, val);
275
+ FSL_IMX7_MAX_IRQ = 128,
438
+
439
+ for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
440
+ if (hvf_sreg_match[i].cp_idx == -1) {
441
+ continue;
442
+ }
443
+
444
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
445
+ assert_hvf_ok(ret);
446
+
447
+ arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
448
+ }
449
+ assert(write_list_to_cpustate(arm_cpu));
450
+
451
+ aarch64_restore_sp(env, arm_current_el(env));
452
+
453
+ return 0;
454
+}
455
+
456
+int hvf_put_registers(CPUState *cpu)
457
+{
458
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
459
+ CPUARMState *env = &arm_cpu->env;
460
+ hv_return_t ret;
461
+ uint64_t val;
462
+ hv_simd_fp_uchar16_t fpval;
463
+ int i;
464
+
465
+ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
466
+ val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
467
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
468
+ assert_hvf_ok(ret);
469
+ }
470
+
471
+ for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
472
+ memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
473
+ ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
474
+ fpval);
475
+ assert_hvf_ok(ret);
476
+ }
477
+
478
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
479
+ assert_hvf_ok(ret);
480
+
481
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
482
+ assert_hvf_ok(ret);
483
+
484
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
485
+ assert_hvf_ok(ret);
486
+
487
+ aarch64_save_sp(env, arm_current_el(env));
488
+
489
+ assert(write_cpustate_to_list(arm_cpu, false));
490
+ for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
491
+ if (hvf_sreg_match[i].cp_idx == -1) {
492
+ continue;
493
+ }
494
+
495
+ val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
496
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
497
+ assert_hvf_ok(ret);
498
+ }
499
+
500
+ ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset);
501
+ assert_hvf_ok(ret);
502
+
503
+ return 0;
504
+}
505
+
506
+static void flush_cpu_state(CPUState *cpu)
507
+{
508
+ if (cpu->vcpu_dirty) {
509
+ hvf_put_registers(cpu);
510
+ cpu->vcpu_dirty = false;
511
+ }
512
+}
513
+
514
+static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
515
+{
516
+ hv_return_t r;
517
+
518
+ flush_cpu_state(cpu);
519
+
520
+ if (rt < 31) {
521
+ r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
522
+ assert_hvf_ok(r);
523
+ }
524
+}
525
+
526
+static uint64_t hvf_get_reg(CPUState *cpu, int rt)
527
+{
528
+ uint64_t val = 0;
529
+ hv_return_t r;
530
+
531
+ flush_cpu_state(cpu);
532
+
533
+ if (rt < 31) {
534
+ r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
535
+ assert_hvf_ok(r);
536
+ }
537
+
538
+ return val;
539
+}
540
+
541
+void hvf_arch_vcpu_destroy(CPUState *cpu)
542
+{
543
+}
544
+
545
+int hvf_arch_init_vcpu(CPUState *cpu)
546
+{
547
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
548
+ CPUARMState *env = &arm_cpu->env;
549
+ uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
550
+ uint32_t sregs_cnt = 0;
551
+ uint64_t pfr;
552
+ hv_return_t ret;
553
+ int i;
554
+
555
+ env->aarch64 = 1;
556
+ asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));
557
+
558
+ /* Allocate enough space for our sysreg sync */
559
+ arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
560
+ sregs_match_len);
561
+ arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
562
+ sregs_match_len);
563
+ arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
564
+ arm_cpu->cpreg_vmstate_indexes,
565
+ sregs_match_len);
566
+ arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
567
+ arm_cpu->cpreg_vmstate_values,
568
+ sregs_match_len);
569
+
570
+ memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));
571
+
572
+ /* Populate cp list for all known sysregs */
573
+ for (i = 0; i < sregs_match_len; i++) {
574
+ const ARMCPRegInfo *ri;
575
+ uint32_t key = hvf_sreg_match[i].key;
576
+
577
+ ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
578
+ if (ri) {
579
+ assert(!(ri->type & ARM_CP_NO_RAW));
580
+ hvf_sreg_match[i].cp_idx = sregs_cnt;
581
+ arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
582
+ } else {
583
+ hvf_sreg_match[i].cp_idx = -1;
584
+ }
585
+ }
586
+ arm_cpu->cpreg_array_len = sregs_cnt;
587
+ arm_cpu->cpreg_vmstate_array_len = sregs_cnt;
588
+
589
+ assert(write_cpustate_to_list(arm_cpu, false));
590
+
591
+ /* Set CP_NO_RAW system registers on init */
592
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
593
+ arm_cpu->midr);
594
+ assert_hvf_ok(ret);
595
+
596
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
597
+ arm_cpu->mp_affinity);
598
+ assert_hvf_ok(ret);
599
+
600
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
601
+ assert_hvf_ok(ret);
602
+ pfr |= env->gicv3state ? (1 << 24) : 0;
603
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
604
+ assert_hvf_ok(ret);
605
+
606
+ /* We're limited to underlying hardware caps, override internal versions */
607
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
608
+ &arm_cpu->isar.id_aa64mmfr0);
609
+ assert_hvf_ok(ret);
610
+
611
+ return 0;
612
+}
613
+
614
+void hvf_kick_vcpu_thread(CPUState *cpu)
615
+{
616
+ hv_vcpus_exit(&cpu->hvf->fd, 1);
617
+}
618
+
619
+static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
620
+ uint32_t syndrome)
621
+{
622
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
623
+ CPUARMState *env = &arm_cpu->env;
624
+
625
+ cpu->exception_index = excp;
626
+ env->exception.target_el = 1;
627
+ env->exception.syndrome = syndrome;
628
+
629
+ arm_cpu_do_interrupt(cpu);
630
+}
631
+
632
+static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
633
+{
634
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
635
+ CPUARMState *env = &arm_cpu->env;
636
+ uint64_t val = 0;
637
+
638
+ switch (reg) {
639
+ case SYSREG_CNTPCT_EL0:
640
+ val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
641
+ gt_cntfrq_period_ns(arm_cpu);
642
+ break;
643
+ case SYSREG_OSLSR_EL1:
644
+ val = env->cp15.oslsr_el1;
645
+ break;
646
+ case SYSREG_OSDLR_EL1:
647
+ /* Dummy register */
648
+ break;
649
+ default:
650
+ cpu_synchronize_state(cpu);
651
+ trace_hvf_unhandled_sysreg_read(env->pc, reg,
652
+ (reg >> 20) & 0x3,
653
+ (reg >> 14) & 0x7,
654
+ (reg >> 10) & 0xf,
655
+ (reg >> 1) & 0xf,
656
+ (reg >> 17) & 0x7);
657
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
658
+ return 1;
659
+ }
660
+
661
+ trace_hvf_sysreg_read(reg,
662
+ (reg >> 20) & 0x3,
663
+ (reg >> 14) & 0x7,
664
+ (reg >> 10) & 0xf,
665
+ (reg >> 1) & 0xf,
666
+ (reg >> 17) & 0x7,
667
+ val);
668
+ hvf_set_reg(cpu, rt, val);
669
+
670
+ return 0;
671
+}
672
+
673
+static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
674
+{
675
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
676
+ CPUARMState *env = &arm_cpu->env;
677
+
678
+ trace_hvf_sysreg_write(reg,
679
+ (reg >> 20) & 0x3,
680
+ (reg >> 14) & 0x7,
681
+ (reg >> 10) & 0xf,
682
+ (reg >> 1) & 0xf,
683
+ (reg >> 17) & 0x7,
684
+ val);
685
+
686
+ switch (reg) {
687
+ case SYSREG_OSLAR_EL1:
688
+ env->cp15.oslsr_el1 = val & 1;
689
+ break;
690
+ case SYSREG_OSDLR_EL1:
691
+ /* Dummy register */
692
+ break;
693
+ default:
694
+ cpu_synchronize_state(cpu);
695
+ trace_hvf_unhandled_sysreg_write(env->pc, reg,
696
+ (reg >> 20) & 0x3,
697
+ (reg >> 14) & 0x7,
698
+ (reg >> 10) & 0xf,
699
+ (reg >> 1) & 0xf,
700
+ (reg >> 17) & 0x7);
701
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
702
+ return 1;
703
+ }
704
+
705
+ return 0;
706
+}
707
+
708
+static int hvf_inject_interrupts(CPUState *cpu)
709
+{
710
+ if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
711
+ trace_hvf_inject_fiq();
712
+ hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ,
713
+ true);
714
+ }
715
+
716
+ if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
717
+ trace_hvf_inject_irq();
718
+ hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ,
719
+ true);
720
+ }
721
+
722
+ return 0;
723
+}
724
+
725
+static uint64_t hvf_vtimer_val_raw(void)
726
+{
727
+ /*
728
+ * mach_absolute_time() returns the vtimer value without the VM
729
+ * offset that we define. Add our own offset on top.
730
+ */
731
+ return mach_absolute_time() - hvf_state->vtimer_offset;
732
+}
733
+
734
+static void hvf_sync_vtimer(CPUState *cpu)
735
+{
736
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
737
+ hv_return_t r;
738
+ uint64_t ctl;
739
+ bool irq_state;
740
+
741
+ if (!cpu->hvf->vtimer_masked) {
742
+ /* We will get notified on vtimer changes by hvf, nothing to do */
743
+ return;
744
+ }
745
+
746
+ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
747
+ assert_hvf_ok(r);
748
+
749
+ irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
750
+ (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
751
+ qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);
752
+
753
+ if (!irq_state) {
754
+ /* Timer no longer asserting, we can unmask it */
755
+ hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
756
+ cpu->hvf->vtimer_masked = false;
757
+ }
758
+}
759
+
760
+int hvf_vcpu_exec(CPUState *cpu)
761
+{
762
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
763
+ CPUARMState *env = &arm_cpu->env;
764
+ hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
765
+ hv_return_t r;
766
+ bool advance_pc = false;
767
+
768
+ if (hvf_inject_interrupts(cpu)) {
769
+ return EXCP_INTERRUPT;
770
+ }
771
+
772
+ if (cpu->halted) {
773
+ return EXCP_HLT;
774
+ }
775
+
776
+ flush_cpu_state(cpu);
777
+
778
+ qemu_mutex_unlock_iothread();
779
+ assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));
780
+
781
+ /* handle VMEXIT */
782
+ uint64_t exit_reason = hvf_exit->reason;
783
+ uint64_t syndrome = hvf_exit->exception.syndrome;
784
+ uint32_t ec = syn_get_ec(syndrome);
785
+
786
+ qemu_mutex_lock_iothread();
787
+ switch (exit_reason) {
788
+ case HV_EXIT_REASON_EXCEPTION:
789
+ /* This is the main one, handle below. */
790
+ break;
791
+ case HV_EXIT_REASON_VTIMER_ACTIVATED:
792
+ qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
793
+ cpu->hvf->vtimer_masked = true;
794
+ return 0;
795
+ case HV_EXIT_REASON_CANCELED:
796
+ /* we got kicked, no exit to process */
797
+ return 0;
798
+ default:
799
+ assert(0);
800
+ }
801
+
802
+ hvf_sync_vtimer(cpu);
803
+
804
+ switch (ec) {
805
+ case EC_DATAABORT: {
806
+ bool isv = syndrome & ARM_EL_ISV;
807
+ bool iswrite = (syndrome >> 6) & 1;
808
+ bool s1ptw = (syndrome >> 7) & 1;
809
+ uint32_t sas = (syndrome >> 22) & 3;
810
+ uint32_t len = 1 << sas;
811
+ uint32_t srt = (syndrome >> 16) & 0x1f;
812
+ uint64_t val = 0;
813
+
814
+ trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
815
+ hvf_exit->exception.physical_address, isv,
816
+ iswrite, s1ptw, len, srt);
817
+
818
+ assert(isv);
819
+
820
+ if (iswrite) {
821
+ val = hvf_get_reg(cpu, srt);
822
+ address_space_write(&address_space_memory,
823
+ hvf_exit->exception.physical_address,
824
+ MEMTXATTRS_UNSPECIFIED, &val, len);
825
+ } else {
826
+ address_space_read(&address_space_memory,
827
+ hvf_exit->exception.physical_address,
828
+ MEMTXATTRS_UNSPECIFIED, &val, len);
829
+ hvf_set_reg(cpu, srt, val);
830
+ }
831
+
832
+ advance_pc = true;
833
+ break;
834
+ }
835
+ case EC_SYSTEMREGISTERTRAP: {
836
+ bool isread = (syndrome >> 0) & 1;
837
+ uint32_t rt = (syndrome >> 5) & 0x1f;
838
+ uint32_t reg = syndrome & SYSREG_MASK;
839
+ uint64_t val;
840
+ int ret = 0;
841
+
842
+ if (isread) {
843
+ ret = hvf_sysreg_read(cpu, reg, rt);
844
+ } else {
845
+ val = hvf_get_reg(cpu, rt);
846
+ ret = hvf_sysreg_write(cpu, reg, val);
847
+ }
848
+
849
+ advance_pc = !ret;
850
+ break;
851
+ }
852
+ case EC_WFX_TRAP:
853
+ advance_pc = true;
854
+ break;
855
+ case EC_AA64_HVC:
856
+ cpu_synchronize_state(cpu);
857
+ trace_hvf_unknown_hvc(env->xregs[0]);
858
+ /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
859
+ env->xregs[0] = -1;
860
+ break;
861
+ case EC_AA64_SMC:
862
+ cpu_synchronize_state(cpu);
863
+ trace_hvf_unknown_smc(env->xregs[0]);
864
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
865
+ break;
866
+ default:
867
+ cpu_synchronize_state(cpu);
868
+ trace_hvf_exit(syndrome, ec, env->pc);
869
+ error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
870
+ }
871
+
872
+ if (advance_pc) {
873
+ uint64_t pc;
874
+
875
+ flush_cpu_state(cpu);
876
+
877
+ r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
878
+ assert_hvf_ok(r);
879
+ pc += 4;
880
+ r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
881
+ assert_hvf_ok(r);
882
+ }
883
+
884
+ return 0;
885
+}
886
+
887
+static const VMStateDescription vmstate_hvf_vtimer = {
888
+ .name = "hvf-vtimer",
889
+ .version_id = 1,
890
+ .minimum_version_id = 1,
891
+ .fields = (VMStateField[]) {
892
+ VMSTATE_UINT64(vtimer_val, HVFVTimer),
893
+ VMSTATE_END_OF_LIST()
894
+ },
276
+};
895
+};
277
+
896
+
278
+#endif /* FSL_IMX7_H */
897
+static void hvf_vm_state_change(void *opaque, bool running, RunState state)
279
diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c
898
+{
899
+ HVFVTimer *s = opaque;
900
+
901
+ if (running) {
902
+ /* Update vtimer offset on all CPUs */
903
+ hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
904
+ cpu_synchronize_all_states();
905
+ } else {
906
+ /* Remember vtimer value on every pause */
907
+ s->vtimer_val = hvf_vtimer_val_raw();
908
+ }
909
+}
910
+
911
+int hvf_arch_init(void)
912
+{
913
+ hvf_state->vtimer_offset = mach_absolute_time();
914
+ vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
915
+ qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);
916
+ return 0;
917
+}
918
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
919
index XXXXXXX..XXXXXXX 100644
920
--- a/target/i386/hvf/hvf.c
921
+++ b/target/i386/hvf/hvf.c
922
@@ -XXX,XX +XXX,XX @@ static inline bool apic_bus_freq_is_known(CPUX86State *env)
923
return env->apic_bus_freq != 0;
924
}
925
926
+void hvf_kick_vcpu_thread(CPUState *cpu)
927
+{
928
+ cpus_kick_thread(cpu);
929
+}
930
+
931
int hvf_arch_init(void)
932
{
933
return 0;
934
diff --git a/MAINTAINERS b/MAINTAINERS
935
index XXXXXXX..XXXXXXX 100644
936
--- a/MAINTAINERS
937
+++ b/MAINTAINERS
938
@@ -XXX,XX +XXX,XX @@ F: accel/accel-*.c
939
F: accel/Makefile.objs
940
F: accel/stubs/Makefile.objs
941
942
+Apple Silicon HVF CPUs
943
+M: Alexander Graf <agraf@csgraf.de>
944
+S: Maintained
945
+F: target/arm/hvf/
946
+
947
X86 HVF CPUs
948
M: Cameron Esfahani <dirty@apple.com>
949
M: Roman Bolshakov <r.bolshakov@yadro.com>
950
diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events
280
new file mode 100644
951
new file mode 100644
281
index XXXXXXX..XXXXXXX
952
index XXXXXXX..XXXXXXX
282
--- /dev/null
953
--- /dev/null
283
+++ b/hw/arm/fsl-imx7.c
954
+++ b/target/arm/hvf/trace-events
284
@@ -XXX,XX +XXX,XX @@
955
@@ -XXX,XX +XXX,XX @@
285
+/*
956
+hvf_unhandled_sysreg_read(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg read at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)"
286
+ * Copyright (c) 2018, Impinj, Inc.
957
+hvf_unhandled_sysreg_write(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg write at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)"
287
+ *
958
+hvf_inject_fiq(void) "injecting FIQ"
288
+ * i.MX7 SoC definitions
959
+hvf_inject_irq(void) "injecting IRQ"
289
+ *
960
+hvf_data_abort(uint64_t pc, uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [pc=0x%"PRIx64" va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]"
290
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
961
+hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64
291
+ *
962
+hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d, val=0x%016"PRIx64")"
292
+ * Based on hw/arm/fsl-imx6.c
963
+hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64
293
+ *
964
+hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64
294
+ * This program is free software; you can redistribute it and/or modify
965
+hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]"
295
+ * it under the terms of the GNU General Public License as published by
296
+ * the Free Software Foundation; either version 2 of the License, or
297
+ * (at your option) any later version.
298
+ *
299
+ * This program is distributed in the hope that it will be useful,
300
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
301
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
302
+ * GNU General Public License for more details.
303
+ */
304
+
305
+#include "qemu/osdep.h"
306
+#include "qapi/error.h"
307
+#include "qemu-common.h"
308
+#include "hw/arm/fsl-imx7.h"
309
+#include "hw/misc/unimp.h"
310
+#include "sysemu/sysemu.h"
311
+#include "qemu/error-report.h"
312
+
313
+#define NAME_SIZE 20
314
+
315
+static void fsl_imx7_init(Object *obj)
316
+{
317
+ BusState *sysbus = sysbus_get_default();
318
+ FslIMX7State *s = FSL_IMX7(obj);
319
+ char name[NAME_SIZE];
320
+ int i;
321
+
322
+ if (smp_cpus > FSL_IMX7_NUM_CPUS) {
323
+ error_report("%s: Only %d CPUs are supported (%d requested)",
324
+ TYPE_FSL_IMX7, FSL_IMX7_NUM_CPUS, smp_cpus);
325
+ exit(1);
326
+ }
327
+
328
+ for (i = 0; i < smp_cpus; i++) {
329
+ object_initialize(&s->cpu[i], sizeof(s->cpu[i]),
330
+ ARM_CPU_TYPE_NAME("cortex-a7"));
331
+ snprintf(name, NAME_SIZE, "cpu%d", i);
332
+ object_property_add_child(obj, name, OBJECT(&s->cpu[i]),
333
+ &error_fatal);
334
+ }
335
+
336
+ /*
337
+ * A7MPCORE
338
+ */
339
+ object_initialize(&s->a7mpcore, sizeof(s->a7mpcore), TYPE_A15MPCORE_PRIV);
340
+ qdev_set_parent_bus(DEVICE(&s->a7mpcore), sysbus);
341
+ object_property_add_child(obj, "a7mpcore",
342
+ OBJECT(&s->a7mpcore), &error_fatal);
343
+
344
+ /*
345
+ * GPIOs 1 to 7
346
+ */
347
+ for (i = 0; i < FSL_IMX7_NUM_GPIOS; i++) {
348
+ object_initialize(&s->gpio[i], sizeof(s->gpio[i]),
349
+ TYPE_IMX_GPIO);
350
+ qdev_set_parent_bus(DEVICE(&s->gpio[i]), sysbus);
351
+ snprintf(name, NAME_SIZE, "gpio%d", i);
352
+ object_property_add_child(obj, name,
353
+ OBJECT(&s->gpio[i]), &error_fatal);
354
+ }
355
+
356
+ /*
357
+ * GPT1, 2, 3, 4
358
+ */
359
+ for (i = 0; i < FSL_IMX7_NUM_GPTS; i++) {
360
+ object_initialize(&s->gpt[i], sizeof(s->gpt[i]), TYPE_IMX7_GPT);
361
+ qdev_set_parent_bus(DEVICE(&s->gpt[i]), sysbus);
362
+ snprintf(name, NAME_SIZE, "gpt%d", i);
363
+ object_property_add_child(obj, name, OBJECT(&s->gpt[i]),
364
+ &error_fatal);
365
+ }
366
+
367
+ /*
368
+ * CCM
369
+ */
370
+ object_initialize(&s->ccm, sizeof(s->ccm), TYPE_IMX7_CCM);
371
+ qdev_set_parent_bus(DEVICE(&s->ccm), sysbus);
372
+ object_property_add_child(obj, "ccm", OBJECT(&s->ccm), &error_fatal);
373
+
374
+ /*
375
+ * Analog
376
+ */
377
+ object_initialize(&s->analog, sizeof(s->analog), TYPE_IMX7_ANALOG);
378
+ qdev_set_parent_bus(DEVICE(&s->analog), sysbus);
379
+ object_property_add_child(obj, "analog", OBJECT(&s->analog), &error_fatal);
380
+
381
+ /*
382
+ * GPCv2
383
+ */
384
+ object_initialize(&s->gpcv2, sizeof(s->gpcv2), TYPE_IMX_GPCV2);
385
+ qdev_set_parent_bus(DEVICE(&s->gpcv2), sysbus);
386
+ object_property_add_child(obj, "gpcv2", OBJECT(&s->gpcv2), &error_fatal);
387
+
388
+ for (i = 0; i < FSL_IMX7_NUM_ECSPIS; i++) {
389
+ object_initialize(&s->spi[i], sizeof(s->spi[i]), TYPE_IMX_SPI);
390
+ qdev_set_parent_bus(DEVICE(&s->spi[i]), sysbus_get_default());
391
+ snprintf(name, NAME_SIZE, "spi%d", i + 1);
392
+ object_property_add_child(obj, name, OBJECT(&s->spi[i]), NULL);
393
+ }
394
+
395
+
396
+ for (i = 0; i < FSL_IMX7_NUM_I2CS; i++) {
397
+ object_initialize(&s->i2c[i], sizeof(s->i2c[i]), TYPE_IMX_I2C);
398
+ qdev_set_parent_bus(DEVICE(&s->i2c[i]), sysbus_get_default());
399
+ snprintf(name, NAME_SIZE, "i2c%d", i + 1);
400
+ object_property_add_child(obj, name, OBJECT(&s->i2c[i]), NULL);
401
+ }
402
+
403
+ /*
404
+ * UART
405
+ */
406
+ for (i = 0; i < FSL_IMX7_NUM_UARTS; i++) {
407
+ object_initialize(&s->uart[i], sizeof(s->uart[i]), TYPE_IMX_SERIAL);
408
+ qdev_set_parent_bus(DEVICE(&s->uart[i]), sysbus);
409
+ snprintf(name, NAME_SIZE, "uart%d", i);
410
+ object_property_add_child(obj, name, OBJECT(&s->uart[i]),
411
+ &error_fatal);
412
+ }
413
+
414
+ /*
415
+ * Ethernet
416
+ */
417
+ for (i = 0; i < FSL_IMX7_NUM_ETHS; i++) {
418
+ object_initialize(&s->eth[i], sizeof(s->eth[i]), TYPE_IMX_ENET);
419
+ qdev_set_parent_bus(DEVICE(&s->eth[i]), sysbus);
420
+ snprintf(name, NAME_SIZE, "eth%d", i);
421
+ object_property_add_child(obj, name, OBJECT(&s->eth[i]),
422
+ &error_fatal);
423
+ }
424
+
425
+ /*
426
+ * SDHCI
427
+ */
428
+ for (i = 0; i < FSL_IMX7_NUM_USDHCS; i++) {
429
+ object_initialize(&s->usdhc[i], sizeof(s->usdhc[i]),
430
+ TYPE_IMX_USDHC);
431
+ qdev_set_parent_bus(DEVICE(&s->usdhc[i]), sysbus);
432
+ snprintf(name, NAME_SIZE, "usdhc%d", i);
433
+ object_property_add_child(obj, name, OBJECT(&s->usdhc[i]),
434
+ &error_fatal);
435
+ }
436
+
437
+ /*
438
+ * SNVS
439
+ */
440
+ object_initialize(&s->snvs, sizeof(s->snvs), TYPE_IMX7_SNVS);
441
+ qdev_set_parent_bus(DEVICE(&s->snvs), sysbus);
442
+ object_property_add_child(obj, "snvs", OBJECT(&s->snvs), &error_fatal);
443
+
444
+ /*
445
+ * Watchdog
446
+ */
447
+ for (i = 0; i < FSL_IMX7_NUM_WDTS; i++) {
448
+ object_initialize(&s->wdt[i], sizeof(s->wdt[i]), TYPE_IMX2_WDT);
449
+ qdev_set_parent_bus(DEVICE(&s->wdt[i]), sysbus);
450
+ snprintf(name, NAME_SIZE, "wdt%d", i);
451
+ object_property_add_child(obj, name, OBJECT(&s->wdt[i]),
452
+ &error_fatal);
453
+ }
454
+
455
+ /*
456
+ * GPR
457
+ */
458
+ object_initialize(&s->gpr, sizeof(s->gpr), TYPE_IMX7_GPR);
459
+ qdev_set_parent_bus(DEVICE(&s->gpr), sysbus);
460
+ object_property_add_child(obj, "gpr", OBJECT(&s->gpr), &error_fatal);
461
+
462
+ object_initialize(&s->pcie, sizeof(s->pcie), TYPE_DESIGNWARE_PCIE_HOST);
463
+ qdev_set_parent_bus(DEVICE(&s->pcie), sysbus);
464
+ object_property_add_child(obj, "pcie", OBJECT(&s->pcie), &error_fatal);
465
+
466
+ for (i = 0; i < FSL_IMX7_NUM_USBS; i++) {
467
+ object_initialize(&s->usb[i],
468
+ sizeof(s->usb[i]), TYPE_CHIPIDEA);
469
+ qdev_set_parent_bus(DEVICE(&s->usb[i]), sysbus);
470
+ snprintf(name, NAME_SIZE, "usb%d", i);
471
+ object_property_add_child(obj, name,
472
+ OBJECT(&s->usb[i]), &error_fatal);
473
+ }
474
+}
475
+
476
+static void fsl_imx7_realize(DeviceState *dev, Error **errp)
477
+{
478
+ FslIMX7State *s = FSL_IMX7(dev);
479
+ Object *o;
480
+ int i;
481
+ qemu_irq irq;
482
+ char name[NAME_SIZE];
483
+
484
+ for (i = 0; i < smp_cpus; i++) {
485
+ o = OBJECT(&s->cpu[i]);
486
+
487
+ object_property_set_int(o, QEMU_PSCI_CONDUIT_SMC,
488
+ "psci-conduit", &error_abort);
489
+
490
+ /* On uniprocessor, the CBAR is set to 0 */
491
+ if (smp_cpus > 1) {
492
+ object_property_set_int(o, FSL_IMX7_A7MPCORE_ADDR,
493
+ "reset-cbar", &error_abort);
494
+ }
495
+
496
+ if (i) {
497
+ /* Secondary CPUs start in PSCI powered-down state */
498
+ object_property_set_bool(o, true,
499
+ "start-powered-off", &error_abort);
500
+ }
501
+
502
+ object_property_set_bool(o, true, "realized", &error_abort);
503
+ }
504
+
505
+ /*
506
+ * A7MPCORE
507
+ */
508
+ object_property_set_int(OBJECT(&s->a7mpcore), smp_cpus, "num-cpu",
509
+ &error_abort);
510
+ object_property_set_int(OBJECT(&s->a7mpcore),
511
+ FSL_IMX7_MAX_IRQ + GIC_INTERNAL,
512
+ "num-irq", &error_abort);
513
+
514
+ object_property_set_bool(OBJECT(&s->a7mpcore), true, "realized",
515
+ &error_abort);
516
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, FSL_IMX7_A7MPCORE_ADDR);
517
+
518
+ for (i = 0; i < smp_cpus; i++) {
519
+ SysBusDevice *sbd = SYS_BUS_DEVICE(&s->a7mpcore);
520
+ DeviceState *d = DEVICE(qemu_get_cpu(i));
521
+
522
+ irq = qdev_get_gpio_in(d, ARM_CPU_IRQ);
523
+ sysbus_connect_irq(sbd, i, irq);
524
+ irq = qdev_get_gpio_in(d, ARM_CPU_FIQ);
525
+ sysbus_connect_irq(sbd, i + smp_cpus, irq);
526
+ }
527
+
528
+ /*
529
+ * A7MPCORE DAP
530
+ */
531
+ create_unimplemented_device("a7mpcore-dap", FSL_IMX7_A7MPCORE_DAP_ADDR,
532
+ 0x100000);
533
+
534
+ /*
535
+ * GPT1, 2, 3, 4
536
+ */
537
+ for (i = 0; i < FSL_IMX7_NUM_GPTS; i++) {
538
+ static const hwaddr FSL_IMX7_GPTn_ADDR[FSL_IMX7_NUM_GPTS] = {
539
+ FSL_IMX7_GPT1_ADDR,
540
+ FSL_IMX7_GPT2_ADDR,
541
+ FSL_IMX7_GPT3_ADDR,
542
+ FSL_IMX7_GPT4_ADDR,
543
+ };
544
+
545
+ s->gpt[i].ccm = IMX_CCM(&s->ccm);
546
+ object_property_set_bool(OBJECT(&s->gpt[i]), true, "realized",
547
+ &error_abort);
548
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt[i]), 0, FSL_IMX7_GPTn_ADDR[i]);
549
+ }
550
+
551
+ for (i = 0; i < FSL_IMX7_NUM_GPIOS; i++) {
552
+ static const hwaddr FSL_IMX7_GPIOn_ADDR[FSL_IMX7_NUM_GPIOS] = {
553
+ FSL_IMX7_GPIO1_ADDR,
554
+ FSL_IMX7_GPIO2_ADDR,
555
+ FSL_IMX7_GPIO3_ADDR,
556
+ FSL_IMX7_GPIO4_ADDR,
557
+ FSL_IMX7_GPIO5_ADDR,
558
+ FSL_IMX7_GPIO6_ADDR,
559
+ FSL_IMX7_GPIO7_ADDR,
560
+ };
561
+
562
+ object_property_set_bool(OBJECT(&s->gpio[i]), true, "realized",
563
+ &error_abort);
564
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, FSL_IMX7_GPIOn_ADDR[i]);
565
+ }
566
+
567
+ /*
568
+ * IOMUXC and IOMUXC_LPSR
569
+ */
570
+ for (i = 0; i < FSL_IMX7_NUM_IOMUXCS; i++) {
571
+ static const hwaddr FSL_IMX7_IOMUXCn_ADDR[FSL_IMX7_NUM_IOMUXCS] = {
572
+ FSL_IMX7_IOMUXC_ADDR,
573
+ FSL_IMX7_IOMUXC_LPSR_ADDR,
574
+ };
575
+
576
+ snprintf(name, NAME_SIZE, "iomuxc%d", i);
577
+ create_unimplemented_device(name, FSL_IMX7_IOMUXCn_ADDR[i],
578
+ FSL_IMX7_IOMUXCn_SIZE);
579
+ }
580
+
581
+ /*
582
+ * CCM
583
+ */
584
+ object_property_set_bool(OBJECT(&s->ccm), true, "realized", &error_abort);
585
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0, FSL_IMX7_CCM_ADDR);
586
+
587
+ /*
588
+ * Analog
589
+ */
590
+ object_property_set_bool(OBJECT(&s->analog), true, "realized",
591
+ &error_abort);
592
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->analog), 0, FSL_IMX7_ANALOG_ADDR);
593
+
594
+ /*
595
+ * GPCv2
596
+ */
597
+ object_property_set_bool(OBJECT(&s->gpcv2), true,
598
+ "realized", &error_abort);
599
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpcv2), 0, FSL_IMX7_GPC_ADDR);
600
+
601
+ /* Initialize all ECSPI */
602
+ for (i = 0; i < FSL_IMX7_NUM_ECSPIS; i++) {
603
+ static const hwaddr FSL_IMX7_SPIn_ADDR[FSL_IMX7_NUM_ECSPIS] = {
604
+ FSL_IMX7_ECSPI1_ADDR,
605
+ FSL_IMX7_ECSPI2_ADDR,
606
+ FSL_IMX7_ECSPI3_ADDR,
607
+ FSL_IMX7_ECSPI4_ADDR,
608
+ };
609
+
610
+ static const hwaddr FSL_IMX7_SPIn_IRQ[FSL_IMX7_NUM_ECSPIS] = {
611
+ FSL_IMX7_ECSPI1_IRQ,
612
+ FSL_IMX7_ECSPI2_IRQ,
613
+ FSL_IMX7_ECSPI3_IRQ,
614
+ FSL_IMX7_ECSPI4_IRQ,
615
+ };
616
+
617
+ /* Initialize the SPI */
618
+ object_property_set_bool(OBJECT(&s->spi[i]), true, "realized",
619
+ &error_abort);
620
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0,
621
+ FSL_IMX7_SPIn_ADDR[i]);
622
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0,
623
+ qdev_get_gpio_in(DEVICE(&s->a7mpcore),
624
+ FSL_IMX7_SPIn_IRQ[i]));
625
+ }
626
+
627
+ for (i = 0; i < FSL_IMX7_NUM_I2CS; i++) {
628
+ static const hwaddr FSL_IMX7_I2Cn_ADDR[FSL_IMX7_NUM_I2CS] = {
629
+ FSL_IMX7_I2C1_ADDR,
630
+ FSL_IMX7_I2C2_ADDR,
631
+ FSL_IMX7_I2C3_ADDR,
632
+ FSL_IMX7_I2C4_ADDR,
633
+ };
634
+
635
+ static const hwaddr FSL_IMX7_I2Cn_IRQ[FSL_IMX7_NUM_I2CS] = {
636
+ FSL_IMX7_I2C1_IRQ,
637
+ FSL_IMX7_I2C2_IRQ,
638
+ FSL_IMX7_I2C3_IRQ,
639
+ FSL_IMX7_I2C4_IRQ,
640
+ };
641
+
642
+ object_property_set_bool(OBJECT(&s->i2c[i]), true, "realized",
643
+ &error_abort);
644
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, FSL_IMX7_I2Cn_ADDR[i]);
645
+
646
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0,
647
+ qdev_get_gpio_in(DEVICE(&s->a7mpcore),
648
+ FSL_IMX7_I2Cn_IRQ[i]));
649
+ }
650
+
651
+ /*
652
+ * UART
653
+ */
654
+ for (i = 0; i < FSL_IMX7_NUM_UARTS; i++) {
655
+ static const hwaddr FSL_IMX7_UARTn_ADDR[FSL_IMX7_NUM_UARTS] = {
656
+ FSL_IMX7_UART1_ADDR,
657
+ FSL_IMX7_UART2_ADDR,
658
+ FSL_IMX7_UART3_ADDR,
659
+ FSL_IMX7_UART4_ADDR,
660
+ FSL_IMX7_UART5_ADDR,
661
+ FSL_IMX7_UART6_ADDR,
662
+ FSL_IMX7_UART7_ADDR,
663
+ };
664
+
665
+ static const int FSL_IMX7_UARTn_IRQ[FSL_IMX7_NUM_UARTS] = {
666
+ FSL_IMX7_UART1_IRQ,
667
+ FSL_IMX7_UART2_IRQ,
668
+ FSL_IMX7_UART3_IRQ,
669
+ FSL_IMX7_UART4_IRQ,
670
+ FSL_IMX7_UART5_IRQ,
671
+ FSL_IMX7_UART6_IRQ,
672
+ FSL_IMX7_UART7_IRQ,
673
+ };
674
+
675
+
676
+ if (i < MAX_SERIAL_PORTS) {
677
+ qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", serial_hds[i]);
678
+ }
679
+
680
+ object_property_set_bool(OBJECT(&s->uart[i]), true, "realized",
681
+ &error_abort);
682
+
683
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, FSL_IMX7_UARTn_ADDR[i]);
684
+
685
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_UARTn_IRQ[i]);
686
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, irq);
687
+ }
688
+
689
+ /*
690
+ * Ethernet
691
+ */
692
+ for (i = 0; i < FSL_IMX7_NUM_ETHS; i++) {
693
+ static const hwaddr FSL_IMX7_ENETn_ADDR[FSL_IMX7_NUM_ETHS] = {
694
+ FSL_IMX7_ENET1_ADDR,
695
+ FSL_IMX7_ENET2_ADDR,
696
+ };
697
+
698
+ object_property_set_uint(OBJECT(&s->eth[i]), FSL_IMX7_ETH_NUM_TX_RINGS,
699
+ "tx-ring-num", &error_abort);
700
+ qdev_set_nic_properties(DEVICE(&s->eth[i]), &nd_table[i]);
701
+ object_property_set_bool(OBJECT(&s->eth[i]), true, "realized",
702
+ &error_abort);
703
+
704
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth[i]), 0, FSL_IMX7_ENETn_ADDR[i]);
705
+
706
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_ENET_IRQ(i, 0));
707
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 0, irq);
708
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_ENET_IRQ(i, 3));
709
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 1, irq);
710
+ }
711
+
712
+ /*
713
+ * USDHC
714
+ */
715
+ for (i = 0; i < FSL_IMX7_NUM_USDHCS; i++) {
716
+ static const hwaddr FSL_IMX7_USDHCn_ADDR[FSL_IMX7_NUM_USDHCS] = {
717
+ FSL_IMX7_USDHC1_ADDR,
718
+ FSL_IMX7_USDHC2_ADDR,
719
+ FSL_IMX7_USDHC3_ADDR,
720
+ };
721
+
722
+ static const int FSL_IMX7_USDHCn_IRQ[FSL_IMX7_NUM_USDHCS] = {
723
+ FSL_IMX7_USDHC1_IRQ,
724
+ FSL_IMX7_USDHC2_IRQ,
725
+ FSL_IMX7_USDHC3_IRQ,
726
+ };
727
+
728
+ object_property_set_bool(OBJECT(&s->usdhc[i]), true, "realized",
729
+ &error_abort);
730
+
731
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->usdhc[i]), 0,
732
+ FSL_IMX7_USDHCn_ADDR[i]);
733
+
734
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_USDHCn_IRQ[i]);
735
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->usdhc[i]), 0, irq);
736
+ }
737
+
738
+ /*
739
+ * SNVS
740
+ */
741
+ object_property_set_bool(OBJECT(&s->snvs), true, "realized", &error_abort);
742
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX7_SNVS_ADDR);
743
+
744
+ /*
745
+ * SRC
746
+ */
747
+ create_unimplemented_device("sdma", FSL_IMX7_SRC_ADDR, FSL_IMX7_SRC_SIZE);
748
+
749
+ /*
750
+ * Watchdog
751
+ */
752
+ for (i = 0; i < FSL_IMX7_NUM_WDTS; i++) {
753
+ static const hwaddr FSL_IMX7_WDOGn_ADDR[FSL_IMX7_NUM_WDTS] = {
754
+ FSL_IMX7_WDOG1_ADDR,
755
+ FSL_IMX7_WDOG2_ADDR,
756
+ FSL_IMX7_WDOG3_ADDR,
757
+ FSL_IMX7_WDOG4_ADDR,
758
+ };
759
+
760
+ object_property_set_bool(OBJECT(&s->wdt[i]), true, "realized",
761
+ &error_abort);
762
+
763
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, FSL_IMX7_WDOGn_ADDR[i]);
764
+ }
765
+
766
+ /*
767
+ * SDMA
768
+ */
769
+ create_unimplemented_device("sdma", FSL_IMX7_SDMA_ADDR, FSL_IMX7_SDMA_SIZE);
770
+
771
+
772
+ object_property_set_bool(OBJECT(&s->gpr), true, "realized",
773
+ &error_abort);
774
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpr), 0, FSL_IMX7_GPR_ADDR);
775
+
776
+ object_property_set_bool(OBJECT(&s->pcie), true,
777
+ "realized", &error_abort);
778
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0, FSL_IMX7_PCIE_REG_ADDR);
779
+
780
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTA_IRQ);
781
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 0, irq);
782
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTB_IRQ);
783
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 1, irq);
784
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTC_IRQ);
785
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 2, irq);
786
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTD_IRQ);
787
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3, irq);
788
+
789
+
790
+ for (i = 0; i < FSL_IMX7_NUM_USBS; i++) {
791
+ static const hwaddr FSL_IMX7_USBMISCn_ADDR[FSL_IMX7_NUM_USBS] = {
792
+ FSL_IMX7_USBMISC1_ADDR,
793
+ FSL_IMX7_USBMISC2_ADDR,
794
+ FSL_IMX7_USBMISC3_ADDR,
795
+ };
796
+
797
+ static const hwaddr FSL_IMX7_USBn_ADDR[FSL_IMX7_NUM_USBS] = {
798
+ FSL_IMX7_USB1_ADDR,
799
+ FSL_IMX7_USB2_ADDR,
800
+ FSL_IMX7_USB3_ADDR,
801
+ };
802
+
803
+ static const hwaddr FSL_IMX7_USBn_IRQ[FSL_IMX7_NUM_USBS] = {
804
+ FSL_IMX7_USB1_IRQ,
805
+ FSL_IMX7_USB2_IRQ,
806
+ FSL_IMX7_USB3_IRQ,
807
+ };
808
+
809
+ object_property_set_bool(OBJECT(&s->usb[i]), true, "realized",
810
+ &error_abort);
811
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0,
812
+ FSL_IMX7_USBn_ADDR[i]);
813
+
814
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_USBn_IRQ[i]);
815
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0, irq);
816
+
817
+ snprintf(name, NAME_SIZE, "usbmisc%d", i);
818
+ create_unimplemented_device(name, FSL_IMX7_USBMISCn_ADDR[i],
819
+ FSL_IMX7_USBMISCn_SIZE);
820
+ }
821
+
822
+ /*
823
+ * ADCs
824
+ */
825
+ for (i = 0; i < FSL_IMX7_NUM_ADCS; i++) {
826
+ static const hwaddr FSL_IMX7_ADCn_ADDR[FSL_IMX7_NUM_ADCS] = {
827
+ FSL_IMX7_ADC1_ADDR,
828
+ FSL_IMX7_ADC2_ADDR,
829
+ };
830
+
831
+ snprintf(name, NAME_SIZE, "adc%d", i);
832
+ create_unimplemented_device(name, FSL_IMX7_ADCn_ADDR[i],
833
+ FSL_IMX7_ADCn_SIZE);
834
+ }
835
+
836
+ /*
837
+ * LCD
838
+ */
839
+ create_unimplemented_device("lcdif", FSL_IMX7_LCDIF_ADDR,
840
+ FSL_IMX7_LCDIF_SIZE);
841
+}
842
+
843
+static void fsl_imx7_class_init(ObjectClass *oc, void *data)
844
+{
845
+ DeviceClass *dc = DEVICE_CLASS(oc);
846
+
847
+ dc->realize = fsl_imx7_realize;
848
+
849
+ /* Reason: Uses serial_hds and nd_table in realize() directly */
850
+ dc->user_creatable = false;
851
+ dc->desc = "i.MX7 SOC";
852
+}
853
+
854
+static const TypeInfo fsl_imx7_type_info = {
855
+ .name = TYPE_FSL_IMX7,
856
+ .parent = TYPE_DEVICE,
857
+ .instance_size = sizeof(FslIMX7State),
858
+ .instance_init = fsl_imx7_init,
859
+ .class_init = fsl_imx7_class_init,
860
+};
861
+
862
+static void fsl_imx7_register_types(void)
863
+{
864
+ type_register_static(&fsl_imx7_type_info);
865
+}
866
+type_init(fsl_imx7_register_types)
867
diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
868
index XXXXXXX..XXXXXXX 100644
869
--- a/default-configs/arm-softmmu.mak
870
+++ b/default-configs/arm-softmmu.mak
871
@@ -XXX,XX +XXX,XX @@ CONFIG_ALLWINNER_A10=y
872
CONFIG_FSL_IMX6=y
873
CONFIG_FSL_IMX31=y
874
CONFIG_FSL_IMX25=y
875
+CONFIG_FSL_IMX7=y
876
877
CONFIG_IMX_I2C=y
878
879
--
966
--
880
2.16.2
967
2.20.1
881
968
882
969
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Peter Collingbourne <pcc@google.com>
2
2
3
Depending on the currently selected size of the SVE vector registers,
3
Sleep on WFI until the VTIMER is due but allow ourselves to be woken
4
we can either store the data within the "standard" allocation, or we
4
up on IPI.
5
may beedn to allocate additional space with an EXTRA record.
6
5
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
In this implementation IPI is blocked on the CPU thread at startup and
8
Message-id: 20180303143823.27055-6-richard.henderson@linaro.org
7
pselect() is used to atomically unblock the signal and begin sleeping.
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
The signal is sent unconditionally so there's no need to worry about
9
races between actually sleeping and the "we think we're sleeping"
10
state. It may lead to an extra wakeup but that's better than missing
11
it entirely.
12
13
Signed-off-by: Peter Collingbourne <pcc@google.com>
14
Signed-off-by: Alexander Graf <agraf@csgraf.de>
15
Acked-by: Roman Bolshakov <r.bolshakov@yadro.com>
16
Reviewed-by: Sergio Lopez <slp@redhat.com>
17
Message-id: 20210916155404.86958-6-agraf@csgraf.de
18
[agraf: Remove unused 'set' variable, always advance PC on WFX trap,
19
support vm stop / continue operations and cntv offsets]
20
Signed-off-by: Alexander Graf <agraf@csgraf.de>
21
Acked-by: Roman Bolshakov <r.bolshakov@yadro.com>
22
Reviewed-by: Sergio Lopez <slp@redhat.com>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
23
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
24
---
12
linux-user/signal.c | 210 +++++++++++++++++++++++++++++++++++++++++++++++-----
25
include/sysemu/hvf_int.h | 1 +
13
1 file changed, 192 insertions(+), 18 deletions(-)
26
accel/hvf/hvf-accel-ops.c | 5 +--
27
target/arm/hvf/hvf.c | 79 +++++++++++++++++++++++++++++++++++++++
28
3 files changed, 82 insertions(+), 3 deletions(-)
14
29
15
diff --git a/linux-user/signal.c b/linux-user/signal.c
30
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
16
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
17
--- a/linux-user/signal.c
32
--- a/include/sysemu/hvf_int.h
18
+++ b/linux-user/signal.c
33
+++ b/include/sysemu/hvf_int.h
19
@@ -XXX,XX +XXX,XX @@ struct target_extra_context {
34
@@ -XXX,XX +XXX,XX @@ struct hvf_vcpu_state {
20
uint32_t reserved[3];
35
uint64_t fd;
36
void *exit;
37
bool vtimer_masked;
38
+ sigset_t unblock_ipi_mask;
21
};
39
};
22
40
23
+#define TARGET_SVE_MAGIC 0x53564501
41
void assert_hvf_ok(hv_return_t ret);
42
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/accel/hvf/hvf-accel-ops.c
45
+++ b/accel/hvf/hvf-accel-ops.c
46
@@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu)
47
cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
48
49
/* init cpu signals */
50
- sigset_t set;
51
struct sigaction sigact;
52
53
memset(&sigact, 0, sizeof(sigact));
54
sigact.sa_handler = dummy_signal;
55
sigaction(SIG_IPI, &sigact, NULL);
56
57
- pthread_sigmask(SIG_BLOCK, NULL, &set);
58
- sigdelset(&set, SIG_IPI);
59
+ pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
60
+ sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
61
62
#ifdef __aarch64__
63
r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
64
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/target/arm/hvf/hvf.c
67
+++ b/target/arm/hvf/hvf.c
68
@@ -XXX,XX +XXX,XX @@
69
* QEMU Hypervisor.framework support for Apple Silicon
70
71
* Copyright 2020 Alexander Graf <agraf@csgraf.de>
72
+ * Copyright 2020 Google LLC
73
*
74
* This work is licensed under the terms of the GNU GPL, version 2 or later.
75
* See the COPYING file in the top-level directory.
76
@@ -XXX,XX +XXX,XX @@ int hvf_arch_init_vcpu(CPUState *cpu)
77
78
void hvf_kick_vcpu_thread(CPUState *cpu)
79
{
80
+ cpus_kick_thread(cpu);
81
hv_vcpus_exit(&cpu->hvf->fd, 1);
82
}
83
84
@@ -XXX,XX +XXX,XX @@ static uint64_t hvf_vtimer_val_raw(void)
85
return mach_absolute_time() - hvf_state->vtimer_offset;
86
}
87
88
+static uint64_t hvf_vtimer_val(void)
89
+{
90
+ if (!runstate_is_running()) {
91
+ /* VM is paused, the vtimer value is in vtimer.vtimer_val */
92
+ return vtimer.vtimer_val;
93
+ }
24
+
94
+
25
+struct target_sve_context {
95
+ return hvf_vtimer_val_raw();
26
+ struct target_aarch64_ctx head;
27
+ uint16_t vl;
28
+ uint16_t reserved[3];
29
+ /* The actual SVE data immediately follows. It is layed out
30
+ * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
31
+ * the original struct pointer.
32
+ */
33
+};
34
+
35
+#define TARGET_SVE_VQ_BYTES 16
36
+
37
+#define TARGET_SVE_SIG_ZREG_SIZE(VQ) ((VQ) * TARGET_SVE_VQ_BYTES)
38
+#define TARGET_SVE_SIG_PREG_SIZE(VQ) ((VQ) * (TARGET_SVE_VQ_BYTES / 8))
39
+
40
+#define TARGET_SVE_SIG_REGS_OFFSET \
41
+ QEMU_ALIGN_UP(sizeof(struct target_sve_context), TARGET_SVE_VQ_BYTES)
42
+#define TARGET_SVE_SIG_ZREG_OFFSET(VQ, N) \
43
+ (TARGET_SVE_SIG_REGS_OFFSET + TARGET_SVE_SIG_ZREG_SIZE(VQ) * (N))
44
+#define TARGET_SVE_SIG_PREG_OFFSET(VQ, N) \
45
+ (TARGET_SVE_SIG_ZREG_OFFSET(VQ, 32) + TARGET_SVE_SIG_PREG_SIZE(VQ) * (N))
46
+#define TARGET_SVE_SIG_FFR_OFFSET(VQ) \
47
+ (TARGET_SVE_SIG_PREG_OFFSET(VQ, 16))
48
+#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
49
+ (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))
50
+
51
struct target_rt_sigframe {
52
struct target_siginfo info;
53
struct target_ucontext uc;
54
@@ -XXX,XX +XXX,XX @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
55
__put_user(0, &end->size);
56
}
57
58
+static void target_setup_sve_record(struct target_sve_context *sve,
59
+ CPUARMState *env, int vq, int size)
60
+{
61
+ int i, j;
62
+
63
+ __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
64
+ __put_user(size, &sve->head.size);
65
+ __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);
66
+
67
+ /* Note that SVE regs are stored as a byte stream, with each byte element
68
+ * at a subsequent address. This corresponds to a little-endian store
69
+ * of our 64-bit hunks.
70
+ */
71
+ for (i = 0; i < 32; ++i) {
72
+ uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
73
+ for (j = 0; j < vq * 2; ++j) {
74
+ __put_user_e(env->vfp.zregs[i].d[j], z + j, le);
75
+ }
76
+ }
77
+ for (i = 0; i <= 16; ++i) {
78
+ uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
79
+ for (j = 0; j < vq; ++j) {
80
+ uint64_t r = env->vfp.pregs[i].p[j >> 2];
81
+ __put_user_e(r >> ((j & 3) * 16), p + j, le);
82
+ }
83
+ }
84
+}
96
+}
85
+
97
+
86
static void target_restore_general_frame(CPUARMState *env,
98
+static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
87
struct target_rt_sigframe *sf)
88
{
89
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,
90
}
91
}
92
93
+static void target_restore_sve_record(CPUARMState *env,
94
+ struct target_sve_context *sve, int vq)
95
+{
99
+{
96
+ int i, j;
100
+ /*
97
+
101
+ * Use pselect to sleep so that other threads can IPI us while we're
98
+ /* Note that SVE regs are stored as a byte stream, with each byte element
102
+ * sleeping.
99
+ * at a subsequent address. This corresponds to a little-endian load
100
+ * of our 64-bit hunks.
101
+ */
103
+ */
102
+ for (i = 0; i < 32; ++i) {
104
+ qatomic_mb_set(&cpu->thread_kicked, false);
103
+ uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
105
+ qemu_mutex_unlock_iothread();
104
+ for (j = 0; j < vq * 2; ++j) {
106
+ pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
105
+ __get_user_e(env->vfp.zregs[i].d[j], z + j, le);
107
+ qemu_mutex_lock_iothread();
106
+ }
107
+ }
108
+ for (i = 0; i <= 16; ++i) {
109
+ uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
110
+ for (j = 0; j < vq; ++j) {
111
+ uint16_t r;
112
+ __get_user_e(r, p + j, le);
113
+ if (j & 3) {
114
+ env->vfp.pregs[i].p[j >> 2] |= (uint64_t)r << ((j & 3) * 16);
115
+ } else {
116
+ env->vfp.pregs[i].p[j >> 2] = r;
117
+ }
118
+ }
119
+ }
120
+}
108
+}
121
+
109
+
122
static int target_restore_sigframe(CPUARMState *env,
110
+static void hvf_wfi(CPUState *cpu)
123
struct target_rt_sigframe *sf)
111
+{
124
{
112
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
125
struct target_aarch64_ctx *ctx, *extra = NULL;
113
+ struct timespec ts;
126
struct target_fpsimd_context *fpsimd = NULL;
114
+ hv_return_t r;
127
+ struct target_sve_context *sve = NULL;
115
+ uint64_t ctl;
128
uint64_t extra_datap = 0;
116
+ uint64_t cval;
129
bool used_extra = false;
117
+ int64_t ticks_to_sleep;
130
bool err = false;
118
+ uint64_t seconds;
131
+ int vq = 0, sve_size = 0;
119
+ uint64_t nanos;
132
120
+ uint32_t cntfrq;
133
target_restore_general_frame(env, sf);
134
135
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
136
fpsimd = (struct target_fpsimd_context *)ctx;
137
break;
138
139
+ case TARGET_SVE_MAGIC:
140
+ if (arm_feature(env, ARM_FEATURE_SVE)) {
141
+ vq = (env->vfp.zcr_el[1] & 0xf) + 1;
142
+ sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
143
+ if (!sve && size == sve_size) {
144
+ sve = (struct target_sve_context *)ctx;
145
+ break;
146
+ }
147
+ }
148
+ err = true;
149
+ goto exit;
150
+
121
+
151
case TARGET_EXTRA_MAGIC:
122
+ if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
152
if (extra || size != sizeof(struct target_extra_context)) {
123
+ /* Interrupt pending, no need to wait */
153
err = true;
124
+ return;
154
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
155
err = true;
156
}
157
158
+ /* SVE data, if present, overwrites FPSIMD data. */
159
+ if (sve) {
160
+ target_restore_sve_record(env, sve, vq);
161
+ }
125
+ }
162
+
126
+
163
exit:
127
+ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
164
unlock_user(extra, extra_datap, 0);
128
+ assert_hvf_ok(r);
165
return err;
166
}
167
168
-static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
169
+static abi_ulong get_sigframe(struct target_sigaction *ka,
170
+ CPUARMState *env, int size)
171
{
172
abi_ulong sp;
173
174
@@ -XXX,XX +XXX,XX @@ static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
175
sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
176
}
177
178
- sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
179
+ sp = (sp - size) & ~15;
180
181
return sp;
182
}
183
184
+typedef struct {
185
+ int total_size;
186
+ int extra_base;
187
+ int extra_size;
188
+ int std_end_ofs;
189
+ int extra_ofs;
190
+ int extra_end_ofs;
191
+} target_sigframe_layout;
192
+
129
+
193
+static int alloc_sigframe_space(int this_size, target_sigframe_layout *l)
130
+ if (!(ctl & 1) || (ctl & 2)) {
194
+{
131
+ /* Timer disabled or masked, just wait for an IPI. */
195
+ /* Make sure there will always be space for the end marker. */
132
+ hvf_wait_for_ipi(cpu, NULL);
196
+ const int std_size = sizeof(struct target_rt_sigframe)
133
+ return;
197
+ - sizeof(struct target_aarch64_ctx);
134
+ }
198
+ int this_loc = l->total_size;
199
+
135
+
200
+ if (l->extra_base) {
136
+ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
201
+ /* Once we have begun an extra space, all allocations go there. */
137
+ assert_hvf_ok(r);
202
+ l->extra_size += this_size;
203
+ } else if (this_size + this_loc > std_size) {
204
+ /* This allocation does not fit in the standard space. */
205
+ /* Allocate the extra record. */
206
+ l->extra_ofs = this_loc;
207
+ l->total_size += sizeof(struct target_extra_context);
208
+
138
+
209
+ /* Allocate the standard end record. */
139
+ ticks_to_sleep = cval - hvf_vtimer_val();
210
+ l->std_end_ofs = l->total_size;
140
+ if (ticks_to_sleep < 0) {
211
+ l->total_size += sizeof(struct target_aarch64_ctx);
141
+ return;
142
+ }
212
+
143
+
213
+ /* Allocate the requested record. */
144
+ cntfrq = gt_cntfrq_period_ns(arm_cpu);
214
+ l->extra_base = this_loc = l->total_size;
145
+ seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
215
+ l->extra_size = this_size;
146
+ ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
147
+ nanos = ticks_to_sleep * cntfrq;
148
+
149
+ /*
150
+ * Don't sleep for less than the time a context switch would take,
151
+ * so that we can satisfy fast timer requests on the same CPU.
152
+ * Measurements on M1 show the sweet spot to be ~2ms.
153
+ */
154
+ if (!seconds && nanos < (2 * SCALE_MS)) {
155
+ return;
216
+ }
156
+ }
217
+ l->total_size += this_size;
218
+
157
+
219
+ return this_loc;
158
+ ts = (struct timespec) { seconds, nanos };
159
+ hvf_wait_for_ipi(cpu, &ts);
220
+}
160
+}
221
+
161
+
222
static void target_setup_frame(int usig, struct target_sigaction *ka,
162
static void hvf_sync_vtimer(CPUState *cpu)
223
target_siginfo_t *info, target_sigset_t *set,
224
CPUARMState *env)
225
{
163
{
226
- int size = offsetof(struct target_rt_sigframe, uc.tuc_mcontext.__reserved);
164
ARMCPU *arm_cpu = ARM_CPU(cpu);
227
- int fpsimd_ofs, end1_ofs, fr_ofs, end2_ofs = 0;
165
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
228
- int extra_ofs = 0, extra_base = 0, extra_size = 0;
229
+ target_sigframe_layout layout = {
230
+ /* Begin with the size pointing to the reserved space. */
231
+ .total_size = offsetof(struct target_rt_sigframe,
232
+ uc.tuc_mcontext.__reserved),
233
+ };
234
+ int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
235
struct target_rt_sigframe *frame;
236
struct target_rt_frame_record *fr;
237
abi_ulong frame_addr, return_addr;
238
239
- fpsimd_ofs = size;
240
- size += sizeof(struct target_fpsimd_context);
241
- end1_ofs = size;
242
- size += sizeof(struct target_aarch64_ctx);
243
- fr_ofs = size;
244
- size += sizeof(struct target_rt_frame_record);
245
+ /* FPSIMD record is always in the standard space. */
246
+ fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context),
247
+ &layout);
248
249
- frame_addr = get_sigframe(ka, env);
250
+ /* SVE state needs saving only if it exists. */
251
+ if (arm_feature(env, ARM_FEATURE_SVE)) {
252
+ vq = (env->vfp.zcr_el[1] & 0xf) + 1;
253
+ sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
254
+ sve_ofs = alloc_sigframe_space(sve_size, &layout);
255
+ }
256
+
257
+ if (layout.extra_ofs) {
258
+ /* Reserve space for the extra end marker. The standard end marker
259
+ * will have been allocated when we allocated the extra record.
260
+ */
261
+ layout.extra_end_ofs
262
+ = alloc_sigframe_space(sizeof(struct target_aarch64_ctx), &layout);
263
+ } else {
264
+ /* Reserve space for the standard end marker.
265
+ * Do not use alloc_sigframe_space because we cheat
266
+ * std_size therein to reserve space for this.
267
+ */
268
+ layout.std_end_ofs = layout.total_size;
269
+ layout.total_size += sizeof(struct target_aarch64_ctx);
270
+ }
271
+
272
+ /* Reserve space for the return code. On a real system this would
273
+ * be within the VDSO. So, despite the name this is not a "real"
274
+ * record within the frame.
275
+ */
276
+ fr_ofs = layout.total_size;
277
+ layout.total_size += sizeof(struct target_rt_frame_record);
278
+
279
+ frame_addr = get_sigframe(ka, env, layout.total_size);
280
trace_user_setup_frame(env, frame_addr);
281
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
282
goto give_sigsegv;
283
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
284
285
target_setup_general_frame(frame, env, set);
286
target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
287
- if (extra_ofs) {
288
- target_setup_extra_record((void *)frame + extra_ofs,
289
- frame_addr + extra_base, extra_size);
290
+ target_setup_end_record((void *)frame + layout.std_end_ofs);
291
+ if (layout.extra_ofs) {
292
+ target_setup_extra_record((void *)frame + layout.extra_ofs,
293
+ frame_addr + layout.extra_base,
294
+ layout.extra_size);
295
+ target_setup_end_record((void *)frame + layout.extra_end_ofs);
296
}
166
}
297
- target_setup_end_record((void *)frame + end1_ofs);
167
case EC_WFX_TRAP:
298
- if (end2_ofs) {
168
advance_pc = true;
299
- target_setup_end_record((void *)frame + end2_ofs);
169
+ if (!(syndrome & WFX_IS_WFE)) {
300
+ if (sve_ofs) {
170
+ hvf_wfi(cpu);
301
+ target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
171
+ }
302
}
172
break;
303
173
case EC_AA64_HVC:
304
/* Set up the stack frame for unwinding. */
174
cpu_synchronize_state(cpu);
305
--
175
--
306
2.16.2
176
2.20.1
307
177
308
178
diff view generated by jsdifflib
1
Currently we query the host CPU features in the class init function
1
Now that we have working system register sync, we push more target CPU
2
for the TYPE_ARM_HOST_CPU class, so that we can later copy them
2
properties into the virtual machine. That might be useful in some
3
from the class object into the instance object in the object
3
situations, but is not the typical case that users want.
4
instance init function. This is awkward for implementing "-cpu max",
4
5
which should work like "-cpu host" for KVM but like "cpu with all
5
So let's add a -cpu host option that allows them to explicitly pass all
6
implemented features" for TCG.
6
CPU capabilities of their host CPU into the guest.
7
7
8
Move the place where we store the information about the host CPU from
8
Signed-off-by: Alexander Graf <agraf@csgraf.de>
9
a class object to static variables in kvm.c, and then in the instance
9
Acked-by: Roman Bolshakov <r.bolshakov@yadro.com>
10
init function call a new kvm_arm_set_cpu_features_from_host()
10
Reviewed-by: Sergio Lopez <slp@redhat.com>
11
function which will query the host kernel if necessary and then
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
fill in the CPU instance fields.
12
Message-id: 20210916155404.86958-7-agraf@csgraf.de
13
13
[PMM: drop unnecessary #include line from .h file]
14
This allows us to drop the special class struct and class init
15
function for TYPE_ARM_HOST_CPU entirely.
16
17
We can't delay the probe until realize, because the ARM
18
instance_post_init hook needs to look at the feature bits we
19
set, so we need to do it in the initfn. This is safe because
20
the probing doesn't affect the actual VM state (it creates a
21
separate scratch VM to do its testing), but the probe might fail.
22
Because we can't report errors in retrieving the host features
23
in the initfn, we check this belatedly in the realize function
24
(the intervening code will be able to cope with the relevant
25
fields in the CPU structure being zero).
26
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
29
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
30
Message-id: 20180308130626.12393-2-peter.maydell@linaro.org
31
---
15
---
32
target/arm/cpu.h | 5 +++++
16
target/arm/cpu.h | 2 +
33
target/arm/kvm_arm.h | 35 ++++++++++++++++++++++++-----------
17
target/arm/hvf_arm.h | 18 +++++++++
34
target/arm/cpu.c | 13 +++++++++++++
18
target/arm/kvm_arm.h | 2 -
35
target/arm/kvm.c | 36 +++++++++++++++++++-----------------
19
target/arm/cpu.c | 13 ++++--
36
target/arm/kvm32.c | 8 ++++----
20
target/arm/hvf/hvf.c | 95 ++++++++++++++++++++++++++++++++++++++++++++
37
target/arm/kvm64.c | 8 ++++----
21
5 files changed, 124 insertions(+), 6 deletions(-)
38
6 files changed, 69 insertions(+), 36 deletions(-)
22
create mode 100644 target/arm/hvf_arm.h
39
23
40
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
24
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
41
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/cpu.h
26
--- a/target/arm/cpu.h
43
+++ b/target/arm/cpu.h
27
+++ b/target/arm/cpu.h
44
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
28
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
45
/* Uniprocessor system with MP extensions */
29
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
46
bool mp_is_up;
30
#define CPU_RESOLVING_TYPE TYPE_ARM_CPU
47
31
48
+ /* True if we tried kvm_arm_host_cpu_features() during CPU instance_init
32
+#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
49
+ * and the probe failed (so we need to report the error in realize)
33
+
50
+ */
34
#define cpu_signal_handler cpu_arm_signal_handler
51
+ bool host_cpu_probe_failed;
35
#define cpu_list arm_cpu_list
52
+
36
53
/* Specify the number of cores in this CPU cluster. Used for the L2CTLR
37
diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h
54
* register.
38
new file mode 100644
55
*/
39
index XXXXXXX..XXXXXXX
40
--- /dev/null
41
+++ b/target/arm/hvf_arm.h
42
@@ -XXX,XX +XXX,XX @@
43
+/*
44
+ * QEMU Hypervisor.framework (HVF) support -- ARM specifics
45
+ *
46
+ * Copyright (c) 2021 Alexander Graf
47
+ *
48
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
49
+ * See the COPYING file in the top-level directory.
50
+ *
51
+ */
52
+
53
+#ifndef QEMU_HVF_ARM_H
54
+#define QEMU_HVF_ARM_H
55
+
56
+#include "cpu.h"
57
+
58
+void hvf_arm_set_cpu_features_from_host(struct ARMCPU *cpu);
59
+
60
+#endif
56
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
61
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
57
index XXXXXXX..XXXXXXX 100644
62
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/kvm_arm.h
63
--- a/target/arm/kvm_arm.h
59
+++ b/target/arm/kvm_arm.h
64
+++ b/target/arm/kvm_arm.h
60
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
65
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
66
*/
61
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray);
67
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray);
62
68
63
#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
69
-#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
64
-#define ARM_HOST_CPU_CLASS(klass) \
65
- OBJECT_CLASS_CHECK(ARMHostCPUClass, (klass), TYPE_ARM_HOST_CPU)
66
-#define ARM_HOST_CPU_GET_CLASS(obj) \
67
- OBJECT_GET_CLASS(ARMHostCPUClass, (obj), TYPE_ARM_HOST_CPU)
68
-
70
-
69
-typedef struct ARMHostCPUClass {
70
- /*< private >*/
71
- ARMCPUClass parent_class;
72
- /*< public >*/
73
74
+/**
75
+ * ARMHostCPUFeatures: information about the host CPU (identified
76
+ * by asking the host kernel)
77
+ */
78
+typedef struct ARMHostCPUFeatures {
79
uint64_t features;
80
uint32_t target;
81
const char *dtb_compatible;
82
-} ARMHostCPUClass;
83
+} ARMHostCPUFeatures;
84
85
/**
71
/**
86
* kvm_arm_get_host_cpu_features:
72
* ARMHostCPUFeatures: information about the host CPU (identified
87
@@ -XXX,XX +XXX,XX @@ typedef struct ARMHostCPUClass {
73
* by asking the host kernel)
88
* Probe the capabilities of the host kernel's preferred CPU and fill
89
* in the ARMHostCPUClass struct accordingly.
90
*/
91
-bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc);
92
+bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
93
94
+/**
95
+ * kvm_arm_set_cpu_features_from_host:
96
+ * @cpu: ARMCPU to set the features for
97
+ *
98
+ * Set up the ARMCPU struct fields up to match the information probed
99
+ * from the host CPU.
100
+ */
101
+void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);
102
103
/**
104
* kvm_arm_sync_mpstate_to_kvm
105
@@ -XXX,XX +XXX,XX @@ void kvm_arm_pmu_init(CPUState *cs);
106
107
#else
108
109
+static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
110
+{
111
+ /* This should never actually be called in the "not KVM" case,
112
+ * but set up the fields to indicate an error anyway.
113
+ */
114
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
115
+ cpu->host_cpu_probe_failed = true;
116
+}
117
+
118
static inline int kvm_arm_vgic_probe(void)
119
{
120
return 0;
121
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
74
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
122
index XXXXXXX..XXXXXXX 100644
75
index XXXXXXX..XXXXXXX 100644
123
--- a/target/arm/cpu.c
76
--- a/target/arm/cpu.c
124
+++ b/target/arm/cpu.c
77
+++ b/target/arm/cpu.c
78
@@ -XXX,XX +XXX,XX @@
79
#include "sysemu/tcg.h"
80
#include "sysemu/hw_accel.h"
81
#include "kvm_arm.h"
82
+#include "hvf_arm.h"
83
#include "disas/capstone.h"
84
#include "fpu/softfloat.h"
85
125
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
86
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
126
int pagebits;
87
* this is the first point where we can report it.
127
Error *local_err = NULL;
88
*/
128
89
if (cpu->host_cpu_probe_failed) {
129
+ /* If we needed to query the host kernel for the CPU features
90
- if (!kvm_enabled()) {
130
+ * then it's possible that might have failed in the initfn, but
91
- error_setg(errp, "The 'host' CPU type can only be used with KVM");
131
+ * this is the first point where we can report it.
92
+ if (!kvm_enabled() && !hvf_enabled()) {
93
+ error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF");
94
} else {
95
error_setg(errp, "Failed to retrieve host CPU features");
96
}
97
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
98
#endif /* CONFIG_TCG */
99
}
100
101
-#ifdef CONFIG_KVM
102
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
103
static void arm_host_initfn(Object *obj)
104
{
105
ARMCPU *cpu = ARM_CPU(obj);
106
107
+#ifdef CONFIG_KVM
108
kvm_arm_set_cpu_features_from_host(cpu);
109
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
110
aarch64_add_sve_properties(obj);
111
}
112
+#else
113
+ hvf_arm_set_cpu_features_from_host(cpu);
114
+#endif
115
arm_cpu_post_init(obj);
116
}
117
118
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_register_types(void)
119
{
120
type_register_static(&arm_cpu_type_info);
121
122
-#ifdef CONFIG_KVM
123
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
124
type_register_static(&host_arm_cpu_type_info);
125
#endif
126
}
127
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
128
index XXXXXXX..XXXXXXX 100644
129
--- a/target/arm/hvf/hvf.c
130
+++ b/target/arm/hvf/hvf.c
131
@@ -XXX,XX +XXX,XX @@
132
#include "sysemu/hvf.h"
133
#include "sysemu/hvf_int.h"
134
#include "sysemu/hw_accel.h"
135
+#include "hvf_arm.h"
136
137
#include <mach/mach_time.h>
138
139
@@ -XXX,XX +XXX,XX @@ typedef struct HVFVTimer {
140
141
static HVFVTimer vtimer;
142
143
+typedef struct ARMHostCPUFeatures {
144
+ ARMISARegisters isar;
145
+ uint64_t features;
146
+ uint64_t midr;
147
+ uint32_t reset_sctlr;
148
+ const char *dtb_compatible;
149
+} ARMHostCPUFeatures;
150
+
151
+static ARMHostCPUFeatures arm_host_cpu_features;
152
+
153
struct hvf_reg_match {
154
int reg;
155
uint64_t offset;
156
@@ -XXX,XX +XXX,XX @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
157
return val;
158
}
159
160
+static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
161
+{
162
+ ARMISARegisters host_isar = {};
163
+ const struct isar_regs {
164
+ int reg;
165
+ uint64_t *val;
166
+ } regs[] = {
167
+ { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
168
+ { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
169
+ { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
170
+ { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
171
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
172
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
173
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
174
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
175
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
176
+ };
177
+ hv_vcpu_t fd;
178
+ hv_return_t r = HV_SUCCESS;
179
+ hv_vcpu_exit_t *exit;
180
+ int i;
181
+
182
+ ahcf->dtb_compatible = "arm,arm-v8";
183
+ ahcf->features = (1ULL << ARM_FEATURE_V8) |
184
+ (1ULL << ARM_FEATURE_NEON) |
185
+ (1ULL << ARM_FEATURE_AARCH64) |
186
+ (1ULL << ARM_FEATURE_PMU) |
187
+ (1ULL << ARM_FEATURE_GENERIC_TIMER);
188
+
189
+ /* We set up a small vcpu to extract host registers */
190
+
191
+ if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
192
+ return false;
193
+ }
194
+
195
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
196
+ r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
197
+ }
198
+ r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
199
+ r |= hv_vcpu_destroy(fd);
200
+
201
+ ahcf->isar = host_isar;
202
+
203
+ /*
204
+ * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
205
+ * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
132
+ */
206
+ */
133
+ if (cpu->host_cpu_probe_failed) {
207
+ ahcf->reset_sctlr = 0x30100180;
134
+ if (!kvm_enabled()) {
208
+ /*
135
+ error_setg(errp, "The 'host' CPU type can only be used with KVM");
209
+ * SPAN is disabled by default when SCTLR.SPAN=1. To improve compatibility,
136
+ } else {
210
+ * let's disable it on boot and then allow guest software to turn it on by
137
+ error_setg(errp, "Failed to retrieve host CPU features");
211
+ * setting it to 0.
138
+ }
212
+ */
139
+ return;
213
+ ahcf->reset_sctlr |= 0x00800000;
140
+ }
214
+
141
+
215
+ /* Make sure we don't advertise AArch32 support for EL0/EL1 */
142
cpu_exec_realizefn(cs, &local_err);
216
+ if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
143
if (local_err != NULL) {
217
+ return false;
144
error_propagate(errp, local_err);
218
+ }
145
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
219
+
146
index XXXXXXX..XXXXXXX 100644
220
+ return r == HV_SUCCESS;
147
--- a/target/arm/kvm.c
221
+}
148
+++ b/target/arm/kvm.c
222
+
149
@@ -XXX,XX +XXX,XX @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
223
+void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
150
224
+{
151
static bool cap_has_mp_state;
152
153
+static ARMHostCPUFeatures arm_host_cpu_features;
154
+
155
int kvm_arm_vcpu_init(CPUState *cs)
156
{
157
ARMCPU *cpu = ARM_CPU(cs);
158
@@ -XXX,XX +XXX,XX @@ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
159
}
160
}
161
162
-static void kvm_arm_host_cpu_class_init(ObjectClass *oc, void *data)
163
+void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
164
{
165
- ARMHostCPUClass *ahcc = ARM_HOST_CPU_CLASS(oc);
166
+ CPUARMState *env = &cpu->env;
167
168
- /* All we really need to set up for the 'host' CPU
169
- * is the feature bits -- we rely on the fact that the
170
- * various ID register values in ARMCPU are only used for
171
- * TCG CPUs.
172
- */
173
- if (!kvm_arm_get_host_cpu_features(ahcc)) {
174
- fprintf(stderr, "Failed to retrieve host CPU features!\n");
175
- abort();
176
+ if (!arm_host_cpu_features.dtb_compatible) {
225
+ if (!arm_host_cpu_features.dtb_compatible) {
177
+ if (!kvm_enabled() ||
226
+ if (!hvf_enabled() ||
178
+ !kvm_arm_get_host_cpu_features(&arm_host_cpu_features)) {
227
+ !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
179
+ /* We can't report this error yet, so flag that we need to
228
+ /*
229
+ * We can't report this error yet, so flag that we need to
180
+ * in arm_cpu_realizefn().
230
+ * in arm_cpu_realizefn().
181
+ */
231
+ */
182
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
183
+ cpu->host_cpu_probe_failed = true;
232
+ cpu->host_cpu_probe_failed = true;
184
+ return;
233
+ return;
185
+ }
234
+ }
186
}
235
+ }
187
+
236
+
188
+ cpu->kvm_target = arm_host_cpu_features.target;
189
+ cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
237
+ cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
190
+ env->features = arm_host_cpu_features.features;
238
+ cpu->isar = arm_host_cpu_features.isar;
191
}
239
+ cpu->env.features = arm_host_cpu_features.features;
192
240
+ cpu->midr = arm_host_cpu_features.midr;
193
static void kvm_arm_host_cpu_initfn(Object *obj)
241
+ cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
242
+}
243
+
244
void hvf_arch_vcpu_destroy(CPUState *cpu)
194
{
245
{
195
- ARMHostCPUClass *ahcc = ARM_HOST_CPU_GET_CLASS(obj);
196
ARMCPU *cpu = ARM_CPU(obj);
197
- CPUARMState *env = &cpu->env;
198
199
- cpu->kvm_target = ahcc->target;
200
- cpu->dtb_compatible = ahcc->dtb_compatible;
201
- env->features = ahcc->features;
202
+ kvm_arm_set_cpu_features_from_host(cpu);
203
}
204
205
static const TypeInfo host_arm_cpu_type_info = {
206
@@ -XXX,XX +XXX,XX @@ static const TypeInfo host_arm_cpu_type_info = {
207
.parent = TYPE_ARM_CPU,
208
#endif
209
.instance_init = kvm_arm_host_cpu_initfn,
210
- .class_init = kvm_arm_host_cpu_class_init,
211
- .class_size = sizeof(ARMHostCPUClass),
212
};
213
214
int kvm_arch_init(MachineState *ms, KVMState *s)
215
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
216
index XXXXXXX..XXXXXXX 100644
217
--- a/target/arm/kvm32.c
218
+++ b/target/arm/kvm32.c
219
@@ -XXX,XX +XXX,XX @@ static inline void set_feature(uint64_t *features, int feature)
220
*features |= 1ULL << feature;
221
}
222
223
-bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
224
+bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
225
{
226
/* Identify the feature bits corresponding to the host CPU, and
227
* fill out the ARMHostCPUClass fields accordingly. To do this
228
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
229
return false;
230
}
231
232
- ahcc->target = init.target;
233
+ ahcf->target = init.target;
234
235
/* This is not strictly blessed by the device tree binding docs yet,
236
* but in practice the kernel does not care about this string so
237
* there is no point maintaining an KVM_ARM_TARGET_* -> string table.
238
*/
239
- ahcc->dtb_compatible = "arm,arm-v7";
240
+ ahcf->dtb_compatible = "arm,arm-v7";
241
242
for (i = 0; i < ARRAY_SIZE(idregs); i++) {
243
ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
244
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
245
set_feature(&features, ARM_FEATURE_VFP4);
246
}
247
248
- ahcc->features = features;
249
+ ahcf->features = features;
250
251
return true;
252
}
253
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
254
index XXXXXXX..XXXXXXX 100644
255
--- a/target/arm/kvm64.c
256
+++ b/target/arm/kvm64.c
257
@@ -XXX,XX +XXX,XX @@ static inline void unset_feature(uint64_t *features, int feature)
258
*features &= ~(1ULL << feature);
259
}
260
261
-bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
262
+bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
263
{
264
/* Identify the feature bits corresponding to the host CPU, and
265
* fill out the ARMHostCPUClass fields accordingly. To do this
266
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
267
return false;
268
}
269
270
- ahcc->target = init.target;
271
- ahcc->dtb_compatible = "arm,arm-v8";
272
+ ahcf->target = init.target;
273
+ ahcf->dtb_compatible = "arm,arm-v8";
274
275
kvm_arm_destroy_scratch_host_vcpu(fdarray);
276
277
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
278
set_feature(&features, ARM_FEATURE_AARCH64);
279
set_feature(&features, ARM_FEATURE_PMU);
280
281
- ahcc->features = features;
282
+ ahcf->features = features;
283
284
return true;
285
}
246
}
286
--
247
--
287
2.16.2
248
2.20.1
288
249
289
250
diff view generated by jsdifflib
New patch
1
1
From: Alexander Graf <agraf@csgraf.de>
2
3
We need to handle PSCI calls. Most of the TCG code works for us,
4
but we can simplify it to only handle aa64 mode and we need to
5
handle SUSPEND differently.
6
7
This patch takes the TCG code as template and duplicates it in HVF.
8
9
To tell the guest that we support PSCI 0.2 now, update the check in
10
arm_cpu_initfn() as well.
11
12
Signed-off-by: Alexander Graf <agraf@csgraf.de>
13
Reviewed-by: Sergio Lopez <slp@redhat.com>
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
Message-id: 20210916155404.86958-8-agraf@csgraf.de
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
18
target/arm/cpu.c | 4 +-
19
target/arm/hvf/hvf.c | 141 ++++++++++++++++++++++++++++++++++--
20
target/arm/hvf/trace-events | 1 +
21
3 files changed, 139 insertions(+), 7 deletions(-)
22
23
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/cpu.c
26
+++ b/target/arm/cpu.c
27
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_initfn(Object *obj)
28
cpu->psci_version = 1; /* By default assume PSCI v0.1 */
29
cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
30
31
- if (tcg_enabled()) {
32
- cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
33
+ if (tcg_enabled() || hvf_enabled()) {
34
+ cpu->psci_version = 2; /* TCG and HVF implement PSCI 0.2 */
35
}
36
}
37
38
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/arm/hvf/hvf.c
41
+++ b/target/arm/hvf/hvf.c
42
@@ -XXX,XX +XXX,XX @@
43
#include "hw/irq.h"
44
#include "qemu/main-loop.h"
45
#include "sysemu/cpus.h"
46
+#include "arm-powerctl.h"
47
#include "target/arm/cpu.h"
48
#include "target/arm/internals.h"
49
#include "trace/trace-target_arm_hvf.h"
50
@@ -XXX,XX +XXX,XX @@
51
#define TMR_CTL_IMASK (1 << 1)
52
#define TMR_CTL_ISTATUS (1 << 2)
53
54
+static void hvf_wfi(CPUState *cpu);
55
+
56
typedef struct HVFVTimer {
57
/* Vtimer value during migration and paused state */
58
uint64_t vtimer_val;
59
@@ -XXX,XX +XXX,XX @@ static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
60
arm_cpu_do_interrupt(cpu);
61
}
62
63
+static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
64
+{
65
+ int32_t ret = arm_set_cpu_off(arm_cpu->mp_affinity);
66
+ assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
67
+}
68
+
69
+/*
70
+ * Handle a PSCI call.
71
+ *
72
+ * Returns 0 on success
73
+ * -1 when the PSCI call is unknown,
74
+ */
75
+static bool hvf_handle_psci_call(CPUState *cpu)
76
+{
77
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
78
+ CPUARMState *env = &arm_cpu->env;
79
+ uint64_t param[4] = {
80
+ env->xregs[0],
81
+ env->xregs[1],
82
+ env->xregs[2],
83
+ env->xregs[3]
84
+ };
85
+ uint64_t context_id, mpidr;
86
+ bool target_aarch64 = true;
87
+ CPUState *target_cpu_state;
88
+ ARMCPU *target_cpu;
89
+ target_ulong entry;
90
+ int target_el = 1;
91
+ int32_t ret = 0;
92
+
93
+ trace_hvf_psci_call(param[0], param[1], param[2], param[3],
94
+ arm_cpu->mp_affinity);
95
+
96
+ switch (param[0]) {
97
+ case QEMU_PSCI_0_2_FN_PSCI_VERSION:
98
+ ret = QEMU_PSCI_0_2_RET_VERSION_0_2;
99
+ break;
100
+ case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
101
+ ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
102
+ break;
103
+ case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
104
+ case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
105
+ mpidr = param[1];
106
+
107
+ switch (param[2]) {
108
+ case 0:
109
+ target_cpu_state = arm_get_cpu_by_id(mpidr);
110
+ if (!target_cpu_state) {
111
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
112
+ break;
113
+ }
114
+ target_cpu = ARM_CPU(target_cpu_state);
115
+
116
+ ret = target_cpu->power_state;
117
+ break;
118
+ default:
119
+ /* Everything above affinity level 0 is always on. */
120
+ ret = 0;
121
+ }
122
+ break;
123
+ case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
124
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
125
+ /*
126
+ * QEMU reset and shutdown are async requests, but PSCI
127
+ * mandates that we never return from the reset/shutdown
128
+ * call, so power the CPU off now so it doesn't execute
129
+ * anything further.
130
+ */
131
+ hvf_psci_cpu_off(arm_cpu);
132
+ break;
133
+ case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
134
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
135
+ hvf_psci_cpu_off(arm_cpu);
136
+ break;
137
+ case QEMU_PSCI_0_1_FN_CPU_ON:
138
+ case QEMU_PSCI_0_2_FN_CPU_ON:
139
+ case QEMU_PSCI_0_2_FN64_CPU_ON:
140
+ mpidr = param[1];
141
+ entry = param[2];
142
+ context_id = param[3];
143
+ ret = arm_set_cpu_on(mpidr, entry, context_id,
144
+ target_el, target_aarch64);
145
+ break;
146
+ case QEMU_PSCI_0_1_FN_CPU_OFF:
147
+ case QEMU_PSCI_0_2_FN_CPU_OFF:
148
+ hvf_psci_cpu_off(arm_cpu);
149
+ break;
150
+ case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
151
+ case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
152
+ case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
153
+ /* Affinity levels are not supported in QEMU */
154
+ if (param[1] & 0xfffe0000) {
155
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
156
+ break;
157
+ }
158
+ /* Powerdown is not supported, we always go into WFI */
159
+ env->xregs[0] = 0;
160
+ hvf_wfi(cpu);
161
+ break;
162
+ case QEMU_PSCI_0_1_FN_MIGRATE:
163
+ case QEMU_PSCI_0_2_FN_MIGRATE:
164
+ ret = QEMU_PSCI_RET_NOT_SUPPORTED;
165
+ break;
166
+ default:
167
+ return false;
168
+ }
169
+
170
+ env->xregs[0] = ret;
171
+ return true;
172
+}
173
+
174
static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
175
{
176
ARMCPU *arm_cpu = ARM_CPU(cpu);
177
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
178
break;
179
case EC_AA64_HVC:
180
cpu_synchronize_state(cpu);
181
- trace_hvf_unknown_hvc(env->xregs[0]);
182
- /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
183
- env->xregs[0] = -1;
184
+ if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
185
+ if (!hvf_handle_psci_call(cpu)) {
186
+ trace_hvf_unknown_hvc(env->xregs[0]);
187
+ /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
188
+ env->xregs[0] = -1;
189
+ }
190
+ } else {
191
+ trace_hvf_unknown_hvc(env->xregs[0]);
192
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
193
+ }
194
break;
195
case EC_AA64_SMC:
196
cpu_synchronize_state(cpu);
197
- trace_hvf_unknown_smc(env->xregs[0]);
198
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
199
+ if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
200
+ advance_pc = true;
201
+
202
+ if (!hvf_handle_psci_call(cpu)) {
203
+ trace_hvf_unknown_smc(env->xregs[0]);
204
+ /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
205
+ env->xregs[0] = -1;
206
+ }
207
+ } else {
208
+ trace_hvf_unknown_smc(env->xregs[0]);
209
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
210
+ }
211
break;
212
default:
213
cpu_synchronize_state(cpu);
214
diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events
215
index XXXXXXX..XXXXXXX 100644
216
--- a/target/arm/hvf/trace-events
217
+++ b/target/arm/hvf/trace-events
218
@@ -XXX,XX +XXX,XX @@ hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_
219
hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64
220
hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64
221
hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]"
222
+hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpu=0x%x"
223
--
224
2.20.1
225
226
diff view generated by jsdifflib
1
From: Thomas Huth <thuth@redhat.com>
1
From: Alexander Graf <agraf@csgraf.de>
2
2
3
A lot of ARM object files are linked into the executable unconditionally,
3
Now that we have all logic in place that we need to handle Hypervisor.framework
4
even though we have corresponding CONFIG switches like CONFIG_PXA2XX or
4
on Apple Silicon systems, let's add CONFIG_HVF for aarch64 as well so that we
5
CONFIG_OMAP. We should make sure to use these switches in the Makefile so
5
can build it.
6
that the users can disable certain unwanted boards and devices more easily.
7
While we're at it, also add some new switches for the boards that do not
8
have a CONFIG option yet.
9
6
10
Signed-off-by: Thomas Huth <thuth@redhat.com>
7
Signed-off-by: Alexander Graf <agraf@csgraf.de>
11
Message-id: 1520266949-29817-1-git-send-email-thuth@redhat.com
8
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
9
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com> (x86 only)
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Reviewed-by: Sergio Lopez <slp@redhat.com>
12
Message-id: 20210916155404.86958-9-agraf@csgraf.de
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
14
---
16
hw/arm/Makefile.objs | 30 +++++++++++++++++++++---------
15
meson.build | 7 +++++++
17
default-configs/arm-softmmu.mak | 7 +++++++
16
target/arm/hvf/meson.build | 3 +++
18
2 files changed, 28 insertions(+), 9 deletions(-)
17
target/arm/meson.build | 2 ++
18
3 files changed, 12 insertions(+)
19
create mode 100644 target/arm/hvf/meson.build
19
20
20
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
21
diff --git a/meson.build b/meson.build
21
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/arm/Makefile.objs
23
--- a/meson.build
23
+++ b/hw/arm/Makefile.objs
24
+++ b/meson.build
25
@@ -XXX,XX +XXX,XX @@ else
26
endif
27
28
accelerator_targets = { 'CONFIG_KVM': kvm_targets }
29
+
30
+if cpu in ['aarch64']
31
+ accelerator_targets += {
32
+ 'CONFIG_HVF': ['aarch64-softmmu']
33
+ }
34
+endif
35
+
36
if cpu in ['x86', 'x86_64', 'arm', 'aarch64']
37
# i386 emulator provides xenpv machine type for multiple architectures
38
accelerator_targets += {
39
diff --git a/target/arm/hvf/meson.build b/target/arm/hvf/meson.build
40
new file mode 100644
41
index XXXXXXX..XXXXXXX
42
--- /dev/null
43
+++ b/target/arm/hvf/meson.build
24
@@ -XXX,XX +XXX,XX @@
44
@@ -XXX,XX +XXX,XX @@
25
-obj-y += boot.o collie.o exynos4_boards.o gumstix.o highbank.o
45
+arm_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
26
-obj-$(CONFIG_DIGIC) += digic_boards.o
46
+ 'hvf.c',
27
-obj-y += integratorcp.o mainstone.o musicpal.o nseries.o
47
+))
28
-obj-y += omap_sx1.o palm.o realview.o spitz.o stellaris.o
48
diff --git a/target/arm/meson.build b/target/arm/meson.build
29
-obj-y += tosa.o versatilepb.o vexpress.o virt.o xilinx_zynq.o z2.o
30
+obj-y += boot.o virt.o sysbus-fdt.o
31
obj-$(CONFIG_ACPI) += virt-acpi-build.o
32
-obj-y += netduino2.o
33
-obj-y += sysbus-fdt.o
34
+obj-$(CONFIG_DIGIC) += digic_boards.o
35
+obj-$(CONFIG_EXYNOS4) += exynos4_boards.o
36
+obj-$(CONFIG_HIGHBANK) += highbank.o
37
+obj-$(CONFIG_INTEGRATOR) += integratorcp.o
38
+obj-$(CONFIG_MAINSTONE) += mainstone.o
39
+obj-$(CONFIG_MUSICPAL) += musicpal.o
40
+obj-$(CONFIG_NETDUINO2) += netduino2.o
41
+obj-$(CONFIG_NSERIES) += nseries.o
42
+obj-$(CONFIG_OMAP) += omap_sx1.o palm.o
43
+obj-$(CONFIG_PXA2XX) += gumstix.o spitz.o tosa.o z2.o
44
+obj-$(CONFIG_REALVIEW) += realview.o
45
+obj-$(CONFIG_STELLARIS) += stellaris.o
46
+obj-$(CONFIG_STRONGARM) += collie.o
47
+obj-$(CONFIG_VERSATILE) += vexpress.o versatilepb.o
48
+obj-$(CONFIG_ZYNQ) += xilinx_zynq.o
49
50
-obj-y += armv7m.o exynos4210.o pxa2xx.o pxa2xx_gpio.o pxa2xx_pic.o
51
+obj-$(CONFIG_ARM_V7M) += armv7m.o
52
+obj-$(CONFIG_EXYNOS4) += exynos4210.o
53
+obj-$(CONFIG_PXA2XX) += pxa2xx.o pxa2xx_gpio.o pxa2xx_pic.o
54
obj-$(CONFIG_DIGIC) += digic.o
55
-obj-y += omap1.o omap2.o strongarm.o
56
+obj-$(CONFIG_OMAP) += omap1.o omap2.o
57
+obj-$(CONFIG_STRONGARM) += strongarm.o
58
obj-$(CONFIG_ALLWINNER_A10) += allwinner-a10.o cubieboard.o
59
obj-$(CONFIG_RASPI) += bcm2835_peripherals.o bcm2836.o raspi.o
60
obj-$(CONFIG_STM32F205_SOC) += stm32f205_soc.o
61
diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
62
index XXXXXXX..XXXXXXX 100644
49
index XXXXXXX..XXXXXXX 100644
63
--- a/default-configs/arm-softmmu.mak
50
--- a/target/arm/meson.build
64
+++ b/default-configs/arm-softmmu.mak
51
+++ b/target/arm/meson.build
65
@@ -XXX,XX +XXX,XX @@ CONFIG_A9MPCORE=y
52
@@ -XXX,XX +XXX,XX @@ arm_softmmu_ss.add(files(
66
CONFIG_A15MPCORE=y
53
'psci.c',
67
54
))
68
CONFIG_ARM_V7M=y
55
69
+CONFIG_NETDUINO2=y
56
+subdir('hvf')
70
71
CONFIG_ARM_GIC=y
72
CONFIG_ARM_GIC_KVM=$(CONFIG_KVM)
73
@@ -XXX,XX +XXX,XX @@ CONFIG_TZ_PPC=y
74
CONFIG_IOTKIT=y
75
CONFIG_IOTKIT_SECCTL=y
76
77
+CONFIG_VERSATILE=y
78
CONFIG_VERSATILE_PCI=y
79
CONFIG_VERSATILE_I2C=y
80
81
@@ -XXX,XX +XXX,XX @@ CONFIG_VFIO_XGMAC=y
82
CONFIG_VFIO_AMD_XGBE=y
83
84
CONFIG_SDHCI=y
85
+CONFIG_INTEGRATOR=y
86
CONFIG_INTEGRATOR_DEBUG=y
87
88
CONFIG_ALLWINNER_A10_PIT=y
89
@@ -XXX,XX +XXX,XX @@ CONFIG_MSF2=y
90
CONFIG_FW_CFG_DMA=y
91
CONFIG_XILINX_AXI=y
92
CONFIG_PCI_DESIGNWARE=y
93
+
57
+
94
+CONFIG_STRONGARM=y
58
target_arch += {'arm': arm_ss}
95
+CONFIG_HIGHBANK=y
59
target_softmmu_arch += {'arm': arm_softmmu_ss}
96
+CONFIG_MUSICPAL=y
97
--
60
--
98
2.16.2
61
2.20.1
99
62
100
63
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Alexander Graf <agraf@csgraf.de>
2
2
3
This changes the qemu signal frame layout to be more like the kernel's,
3
We can expose cycle counters on the PMU easily. To be as compatible as
4
in that the various records are dynamically allocated rather than fixed
4
possible, let's do so, but make sure we don't expose any other architectural
5
in place by a structure.
5
counters that we can not model yet.
6
6
7
For now, all of the allocation is out of uc.tuc_mcontext.__reserved,
7
This allows OSs to work that require PMU support.
8
so the allocation is actually trivial. That will change with SVE support.
8
9
9
Signed-off-by: Alexander Graf <agraf@csgraf.de>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20210916155404.86958-10-agraf@csgraf.de
12
Message-id: 20180303143823.27055-4-richard.henderson@linaro.org
13
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
13
---
16
linux-user/signal.c | 89 ++++++++++++++++++++++++++++++++++++-----------------
14
target/arm/hvf/hvf.c | 179 +++++++++++++++++++++++++++++++++++++++++++
17
1 file changed, 61 insertions(+), 28 deletions(-)
15
1 file changed, 179 insertions(+)
18
16
19
diff --git a/linux-user/signal.c b/linux-user/signal.c
17
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
20
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
21
--- a/linux-user/signal.c
19
--- a/target/arm/hvf/hvf.c
22
+++ b/linux-user/signal.c
20
+++ b/target/arm/hvf/hvf.c
23
@@ -XXX,XX +XXX,XX @@ struct target_fpsimd_context {
21
@@ -XXX,XX +XXX,XX @@
24
uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
22
#define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4)
25
};
23
#define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4)
26
24
#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1)
27
-/*
25
+#define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0)
28
- * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
26
+#define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0)
29
- * user space as it will change with the addition of new context. User space
27
+#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
30
- * should check the magic/size information.
28
+#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
31
- */
29
+#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
32
-struct target_aux_context {
30
+#define SYSREG_PMOVSCLR_EL0 SYSREG(3, 3, 9, 12, 3)
33
- struct target_fpsimd_context fpsimd;
31
+#define SYSREG_PMSWINC_EL0 SYSREG(3, 3, 9, 12, 4)
34
- /* additional context to be added before "end" */
32
+#define SYSREG_PMSELR_EL0 SYSREG(3, 3, 9, 12, 5)
35
- struct target_aarch64_ctx end;
33
+#define SYSREG_PMCEID0_EL0 SYSREG(3, 3, 9, 12, 6)
36
-};
34
+#define SYSREG_PMCEID1_EL0 SYSREG(3, 3, 9, 12, 7)
37
-
35
+#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 9, 13, 0)
38
struct target_rt_sigframe {
36
+#define SYSREG_PMCCFILTR_EL0 SYSREG(3, 3, 14, 15, 7)
39
struct target_siginfo info;
37
40
struct target_ucontext uc;
38
#define WFX_IS_WFE (1 << 0)
41
+};
39
42
+
40
@@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
43
+struct target_rt_frame_record {
41
val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
44
uint64_t fp;
42
gt_cntfrq_period_ns(arm_cpu);
45
uint64_t lr;
43
break;
46
uint32_t tramp[2];
44
+ case SYSREG_PMCR_EL0:
47
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,
45
+ val = env->cp15.c9_pmcr;
48
static int target_restore_sigframe(CPUARMState *env,
46
+ break;
49
struct target_rt_sigframe *sf)
47
+ case SYSREG_PMCCNTR_EL0:
50
{
48
+ pmu_op_start(env);
51
- struct target_aux_context *aux
49
+ val = env->cp15.c15_ccnt;
52
- = (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
50
+ pmu_op_finish(env);
53
- uint32_t magic, size;
51
+ break;
54
+ struct target_aarch64_ctx *ctx;
52
+ case SYSREG_PMCNTENCLR_EL0:
55
+ struct target_fpsimd_context *fpsimd = NULL;
53
+ val = env->cp15.c9_pmcnten;
56
54
+ break;
57
target_restore_general_frame(env, sf);
55
+ case SYSREG_PMOVSCLR_EL0:
58
56
+ val = env->cp15.c9_pmovsr;
59
- __get_user(magic, &aux->fpsimd.head.magic);
57
+ break;
60
- __get_user(size, &aux->fpsimd.head.size);
58
+ case SYSREG_PMSELR_EL0:
61
- if (magic == TARGET_FPSIMD_MAGIC
59
+ val = env->cp15.c9_pmselr;
62
- && size == sizeof(struct target_fpsimd_context)) {
60
+ break;
63
- target_restore_fpsimd_record(env, &aux->fpsimd);
61
+ case SYSREG_PMINTENCLR_EL1:
64
- } else {
62
+ val = env->cp15.c9_pminten;
65
+ ctx = (struct target_aarch64_ctx *)sf->uc.tuc_mcontext.__reserved;
63
+ break;
66
+ while (ctx) {
64
+ case SYSREG_PMCCFILTR_EL0:
67
+ uint32_t magic, size;
65
+ val = env->cp15.pmccfiltr_el0;
68
+
66
+ break;
69
+ __get_user(magic, &ctx->magic);
67
+ case SYSREG_PMCNTENSET_EL0:
70
+ __get_user(size, &ctx->size);
68
+ val = env->cp15.c9_pmcnten;
71
+ switch (magic) {
69
+ break;
72
+ case 0:
70
+ case SYSREG_PMUSERENR_EL0:
73
+ if (size != 0) {
71
+ val = env->cp15.c9_pmuserenr;
74
+ return 1;
72
+ break;
75
+ }
73
+ case SYSREG_PMCEID0_EL0:
76
+ ctx = NULL;
74
+ case SYSREG_PMCEID1_EL0:
77
+ continue;
75
+ /* We can't really count anything yet, declare all events invalid */
78
+
76
+ val = 0;
79
+ case TARGET_FPSIMD_MAGIC:
77
+ break;
80
+ if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
78
case SYSREG_OSLSR_EL1:
81
+ return 1;
79
val = env->cp15.oslsr_el1;
82
+ }
80
break;
83
+ fpsimd = (struct target_fpsimd_context *)ctx;
81
@@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
84
+ break;
85
+
86
+ default:
87
+ /* Unknown record -- we certainly didn't generate it.
88
+ * Did we in fact get out of sync?
89
+ */
90
+ return 1;
91
+ }
92
+ ctx = (void *)ctx + size;
93
+ }
94
+
95
+ /* Require FPSIMD always. */
96
+ if (!fpsimd) {
97
return 1;
98
}
99
+ target_restore_fpsimd_record(env, fpsimd);
100
+
101
return 0;
82
return 0;
102
}
83
}
103
84
104
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
85
+static void pmu_update_irq(CPUARMState *env)
105
target_siginfo_t *info, target_sigset_t *set,
86
+{
106
CPUARMState *env)
87
+ ARMCPU *cpu = env_archcpu(env);
88
+ qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
89
+ (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
90
+}
91
+
92
+static bool pmu_event_supported(uint16_t number)
93
+{
94
+ return false;
95
+}
96
+
97
+/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
98
+ * the current EL, security state, and register configuration.
99
+ */
100
+static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
101
+{
102
+ uint64_t filter;
103
+ bool enabled, filtered = true;
104
+ int el = arm_current_el(env);
105
+
106
+ enabled = (env->cp15.c9_pmcr & PMCRE) &&
107
+ (env->cp15.c9_pmcnten & (1 << counter));
108
+
109
+ if (counter == 31) {
110
+ filter = env->cp15.pmccfiltr_el0;
111
+ } else {
112
+ filter = env->cp15.c14_pmevtyper[counter];
113
+ }
114
+
115
+ if (el == 0) {
116
+ filtered = filter & PMXEVTYPER_U;
117
+ } else if (el == 1) {
118
+ filtered = filter & PMXEVTYPER_P;
119
+ }
120
+
121
+ if (counter != 31) {
122
+ /*
123
+ * If not checking PMCCNTR, ensure the counter is setup to an event we
124
+ * support
125
+ */
126
+ uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
127
+ if (!pmu_event_supported(event)) {
128
+ return false;
129
+ }
130
+ }
131
+
132
+ return enabled && !filtered;
133
+}
134
+
135
+static void pmswinc_write(CPUARMState *env, uint64_t value)
136
+{
137
+ unsigned int i;
138
+ for (i = 0; i < pmu_num_counters(env); i++) {
139
+ /* Increment a counter's count iff: */
140
+ if ((value & (1 << i)) && /* counter's bit is set */
141
+ /* counter is enabled and not filtered */
142
+ pmu_counter_enabled(env, i) &&
143
+ /* counter is SW_INCR */
144
+ (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
145
+ /*
146
+ * Detect if this write causes an overflow since we can't predict
147
+ * PMSWINC overflows like we can for other events
148
+ */
149
+ uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
150
+
151
+ if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
152
+ env->cp15.c9_pmovsr |= (1 << i);
153
+ pmu_update_irq(env);
154
+ }
155
+
156
+ env->cp15.c14_pmevcntr[i] = new_pmswinc;
157
+ }
158
+ }
159
+}
160
+
161
static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
107
{
162
{
108
+ int size = offsetof(struct target_rt_sigframe, uc.tuc_mcontext.__reserved);
163
ARMCPU *arm_cpu = ARM_CPU(cpu);
109
+ int fpsimd_ofs, end1_ofs, fr_ofs;
164
@@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
110
struct target_rt_sigframe *frame;
165
val);
111
- struct target_aux_context *aux;
166
112
+ struct target_rt_frame_record *fr;
167
switch (reg) {
113
abi_ulong frame_addr, return_addr;
168
+ case SYSREG_PMCCNTR_EL0:
114
169
+ pmu_op_start(env);
115
+ fpsimd_ofs = size;
170
+ env->cp15.c15_ccnt = val;
116
+ size += sizeof(struct target_fpsimd_context);
171
+ pmu_op_finish(env);
117
+ end1_ofs = size;
172
+ break;
118
+ size += sizeof(struct target_aarch64_ctx);
173
+ case SYSREG_PMCR_EL0:
119
+ fr_ofs = size;
174
+ pmu_op_start(env);
120
+ size += sizeof(struct target_rt_frame_record);
175
+
121
+
176
+ if (val & PMCRC) {
122
frame_addr = get_sigframe(ka, env);
177
+ /* The counter has been reset */
123
trace_user_setup_frame(env, frame_addr);
178
+ env->cp15.c15_ccnt = 0;
124
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
179
+ }
125
goto give_sigsegv;
180
+
126
}
181
+ if (val & PMCRP) {
127
- aux = (struct target_aux_context *)frame->uc.tuc_mcontext.__reserved;
182
+ unsigned int i;
128
183
+ for (i = 0; i < pmu_num_counters(env); i++) {
129
target_setup_general_frame(frame, env, set);
184
+ env->cp15.c14_pmevcntr[i] = 0;
130
- target_setup_fpsimd_record(&aux->fpsimd, env);
185
+ }
131
- target_setup_end_record(&aux->end);
186
+ }
132
+ target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
187
+
133
+ target_setup_end_record((void *)frame + end1_ofs);
188
+ env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
134
+
189
+ env->cp15.c9_pmcr |= (val & PMCR_WRITEABLE_MASK);
135
+ /* Set up the stack frame for unwinding. */
190
+
136
+ fr = (void *)frame + fr_ofs;
191
+ pmu_op_finish(env);
137
+ __put_user(env->xregs[29], &fr->fp);
192
+ break;
138
+ __put_user(env->xregs[30], &fr->lr);
193
+ case SYSREG_PMUSERENR_EL0:
139
194
+ env->cp15.c9_pmuserenr = val & 0xf;
140
if (ka->sa_flags & TARGET_SA_RESTORER) {
195
+ break;
141
return_addr = ka->sa_restorer;
196
+ case SYSREG_PMCNTENSET_EL0:
142
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
197
+ env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
143
* Since these are instructions they need to be put as little-endian
198
+ break;
144
* regardless of target default or current CPU endianness.
199
+ case SYSREG_PMCNTENCLR_EL0:
145
*/
200
+ env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
146
- __put_user_e(0xd2801168, &frame->tramp[0], le);
201
+ break;
147
- __put_user_e(0xd4000001, &frame->tramp[1], le);
202
+ case SYSREG_PMINTENCLR_EL1:
148
- return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
203
+ pmu_op_start(env);
149
+ __put_user_e(0xd2801168, &fr->tramp[0], le);
204
+ env->cp15.c9_pminten |= val;
150
+ __put_user_e(0xd4000001, &fr->tramp[1], le);
205
+ pmu_op_finish(env);
151
+ return_addr = frame_addr + fr_ofs
206
+ break;
152
+ + offsetof(struct target_rt_frame_record, tramp);
207
+ case SYSREG_PMOVSCLR_EL0:
153
}
208
+ pmu_op_start(env);
154
env->xregs[0] = usig;
209
+ env->cp15.c9_pmovsr &= ~val;
155
env->xregs[31] = frame_addr;
210
+ pmu_op_finish(env);
156
- env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
211
+ break;
157
+ env->xregs[29] = frame_addr + fr_ofs;
212
+ case SYSREG_PMSWINC_EL0:
158
env->pc = ka->_sa_handler;
213
+ pmu_op_start(env);
159
env->xregs[30] = return_addr;
214
+ pmswinc_write(env, val);
160
if (info) {
215
+ pmu_op_finish(env);
216
+ break;
217
+ case SYSREG_PMSELR_EL0:
218
+ env->cp15.c9_pmselr = val & 0x1f;
219
+ break;
220
+ case SYSREG_PMCCFILTR_EL0:
221
+ pmu_op_start(env);
222
+ env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
223
+ pmu_op_finish(env);
224
+ break;
225
case SYSREG_OSLAR_EL1:
226
env->cp15.oslsr_el1 = val & 1;
227
break;
161
--
228
--
162
2.16.2
229
2.20.1
163
230
164
231
diff view generated by jsdifflib
1
Add support for passing 'max' to -machine gic-version. By analogy
1
Currently gen_jmp_tb() assumes that if it is called then the jump it
2
with the -cpu max option, this picks the "best available" GIC version
2
is handling is the only reason that we might be trying to end the TB,
3
whether you're using KVM or TCG, so it behaves like 'host' when
3
so it will use goto_tb if it can. This is usually the case: mostly
4
using KVM, and gives you GICv3 when using TCG.
4
"we did something that means we must end the TB" happens on a
5
non-branch instruction. However, there are cases where we decide
6
early in handling an instruction that we need to end the TB and
7
return to the main loop, and then the insn is a complex one that
8
involves gen_jmp_tb(). For instance, for M-profile FP instructions,
9
in gen_preserve_fp_state() which is called from vfp_access_check() we
10
want to force an exit to the main loop if lazy state preservation is
11
active and we are in icount mode.
5
12
6
Also like '-cpu host', using -machine gic-version=max' means there
13
Make gen_jmp_tb() look at the current value of is_jmp, and only use
7
is no guarantee of migration compatibility between QEMU versions;
14
goto_tb if the previous is_jmp was DISAS_NEXT or DISAS_TOO_MANY.
8
in future 'max' might mean '4'.
9
15
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Message-id: 20180308130626.12393-7-peter.maydell@linaro.org
17
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
18
Message-id: 20210913095440.13462-2-peter.maydell@linaro.org
13
---
19
---
14
hw/arm/virt.c | 29 +++++++++++++++++++----------
20
target/arm/translate.c | 34 +++++++++++++++++++++++++++++++++-
15
1 file changed, 19 insertions(+), 10 deletions(-)
21
1 file changed, 33 insertions(+), 1 deletion(-)
16
22
17
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
23
diff --git a/target/arm/translate.c b/target/arm/translate.c
18
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/arm/virt.c
25
--- a/target/arm/translate.c
20
+++ b/hw/arm/virt.c
26
+++ b/target/arm/translate.c
21
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
27
@@ -XXX,XX +XXX,XX @@ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
22
/* We can probe only here because during property set
28
/* An indirect jump so that we still trigger the debug exception. */
23
* KVM is not available yet
29
gen_set_pc_im(s, dest);
24
*/
30
s->base.is_jmp = DISAS_JUMP;
25
- if (!vms->gic_version) {
31
- } else {
26
+ if (vms->gic_version <= 0) {
32
+ return;
27
+ /* "host" or "max" */
33
+ }
28
if (!kvm_enabled()) {
34
+ switch (s->base.is_jmp) {
29
- error_report("gic-version=host requires KVM");
35
+ case DISAS_NEXT:
30
- exit(1);
36
+ case DISAS_TOO_MANY:
31
- }
37
+ case DISAS_NORETURN:
32
-
38
+ /*
33
- vms->gic_version = kvm_arm_vgic_probe();
39
+ * The normal case: just go to the destination TB.
34
- if (!vms->gic_version) {
40
+ * NB: NORETURN happens if we generate code like
35
- error_report("Unable to determine GIC version supported by host");
41
+ * gen_brcondi(l);
36
- exit(1);
42
+ * gen_jmp();
37
+ if (vms->gic_version == 0) {
43
+ * gen_set_label(l);
38
+ error_report("gic-version=host requires KVM");
44
+ * gen_jmp();
39
+ exit(1);
45
+ * on the second call to gen_jmp().
40
+ } else {
46
+ */
41
+ /* "max": currently means 3 for TCG */
47
gen_goto_tb(s, tbno, dest);
42
+ vms->gic_version = 3;
48
+ break;
43
+ }
49
+ case DISAS_UPDATE_NOCHAIN:
44
+ } else {
50
+ case DISAS_UPDATE_EXIT:
45
+ vms->gic_version = kvm_arm_vgic_probe();
51
+ /*
46
+ if (!vms->gic_version) {
52
+ * We already decided we're leaving the TB for some other reason.
47
+ error_report(
53
+ * Avoid using goto_tb so we really do exit back to the main loop
48
+ "Unable to determine GIC version supported by host");
54
+ * and don't chain to another TB.
49
+ exit(1);
55
+ */
50
+ }
56
+ gen_set_pc_im(s, dest);
51
}
57
+ gen_goto_ptr();
52
}
58
+ s->base.is_jmp = DISAS_NORETURN;
53
59
+ break;
54
@@ -XXX,XX +XXX,XX @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp)
60
+ default:
55
vms->gic_version = 2;
61
+ /*
56
} else if (!strcmp(value, "host")) {
62
+ * We shouldn't be emitting code for a jump and also have
57
vms->gic_version = 0; /* Will probe later */
63
+ * is_jmp set to one of the special cases like DISAS_SWI.
58
+ } else if (!strcmp(value, "max")) {
64
+ */
59
+ vms->gic_version = -1; /* Will probe later */
65
+ g_assert_not_reached();
60
} else {
61
error_setg(errp, "Invalid gic-version value");
62
- error_append_hint(errp, "Valid values are 3, 2, host.\n");
63
+ error_append_hint(errp, "Valid values are 3, 2, host, max.\n");
64
}
66
}
65
}
67
}
66
68
67
--
69
--
68
2.16.2
70
2.20.1
69
71
70
72
diff view generated by jsdifflib
1
Allow the virt board to support '-cpu max' in the same way
1
Architecturally, for an M-profile CPU with the LOB feature the
2
it already handles '-cpu host'.
2
LTPSIZE field in FPDSCR is always constant 4. QEMU's implementation
3
enforces this everywhere, except that we don't check that it is true
4
in incoming migration data.
5
6
We're going to add code in gen_update_fp_context() which relies on
7
the "always 4" property. Since this is TCG-only, we don't actually
8
need to be robust to bogus incoming migration data, and the effect of
9
it being wrong would be wrong code generation rather than a QEMU
10
crash; but if it did ever happen somehow it would be very difficult
11
to track down the cause. Add a check so that we fail the inbound
12
migration if the FPDSCR.LTPSIZE value is incorrect.
3
13
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Message-id: 20180308130626.12393-6-peter.maydell@linaro.org
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
16
Message-id: 20210913095440.13462-3-peter.maydell@linaro.org
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
---
17
---
9
hw/arm/virt.c | 1 +
18
target/arm/machine.c | 13 +++++++++++++
10
1 file changed, 1 insertion(+)
19
1 file changed, 13 insertions(+)
11
20
12
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
21
diff --git a/target/arm/machine.c b/target/arm/machine.c
13
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
14
--- a/hw/arm/virt.c
23
--- a/target/arm/machine.c
15
+++ b/hw/arm/virt.c
24
+++ b/target/arm/machine.c
16
@@ -XXX,XX +XXX,XX @@ static const char *valid_cpus[] = {
25
@@ -XXX,XX +XXX,XX @@ static int cpu_post_load(void *opaque, int version_id)
17
ARM_CPU_TYPE_NAME("cortex-a53"),
26
hw_breakpoint_update_all(cpu);
18
ARM_CPU_TYPE_NAME("cortex-a57"),
27
hw_watchpoint_update_all(cpu);
19
ARM_CPU_TYPE_NAME("host"),
28
20
+ ARM_CPU_TYPE_NAME("max"),
29
+ /*
21
};
30
+ * TCG gen_update_fp_context() relies on the invariant that
22
31
+ * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
23
static bool cpu_type_valid(const char *cpu)
32
+ * forbid bogus incoming data with some other value.
33
+ */
34
+ if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
35
+ if (extract32(env->v7m.fpdscr[M_REG_NS],
36
+ FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
37
+ extract32(env->v7m.fpdscr[M_REG_S],
38
+ FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
39
+ return -1;
40
+ }
41
+ }
42
if (!kvm_enabled()) {
43
pmu_op_finish(&cpu->env);
44
}
24
--
45
--
25
2.16.2
46
2.20.1
26
47
27
48
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
Our current codegen for MVE always calls out to helper functions,
2
2
because some byte lanes might be predicated. The common case is that
3
As an implementation choice, widening VL has zeroed the
3
in fact there is no predication active and all lanes should be
4
previously inaccessible portion of the sve registers.
4
updated together, so we can produce better code by detecting that and
5
5
using the TCG generic vector infrastructure.
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Add a TB flag that is set when we can guarantee that there is no
8
Acked-by: Alex Bennée <alex.bennee@linaro.org>
8
active MVE predication, and a bool in the DisasContext. Subsequent
9
Message-id: 20180303143823.27055-2-richard.henderson@linaro.org
9
patches will use this flag to generate improved code for some
10
instructions.
11
12
In most cases when the predication state changes we simply end the TB
13
after that instruction. For the code called from vfp_access_check()
14
that handles lazy state preservation and creating a new FP context,
15
we can usually avoid having to try to end the TB because luckily the
16
new value of the flag following the register changes in those
17
sequences doesn't depend on any runtime decisions. We do have to end
18
the TB if the guest has enabled lazy FP state preservation but not
19
automatic state preservation, but this is an odd corner case that is
20
not going to be common in real-world code.
21
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
22
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
23
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
24
Message-id: 20210913095440.13462-4-peter.maydell@linaro.org
11
---
25
---
12
linux-user/aarch64/target_syscall.h | 3 +++
26
target/arm/cpu.h | 4 +++-
13
target/arm/cpu.h | 1 +
27
target/arm/translate.h | 2 ++
14
linux-user/syscall.c | 27 ++++++++++++++++++++++++
28
target/arm/helper.c | 33 +++++++++++++++++++++++++++++++++
15
target/arm/cpu64.c | 41 +++++++++++++++++++++++++++++++++++++
29
target/arm/translate-m-nocp.c | 8 +++++++-
16
4 files changed, 72 insertions(+)
30
target/arm/translate-mve.c | 13 ++++++++++++-
17
31
target/arm/translate-vfp.c | 33 +++++++++++++++++++++++++++------
18
diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h
32
target/arm/translate.c | 8 ++++++++
19
index XXXXXXX..XXXXXXX 100644
33
7 files changed, 92 insertions(+), 9 deletions(-)
20
--- a/linux-user/aarch64/target_syscall.h
34
21
+++ b/linux-user/aarch64/target_syscall.h
22
@@ -XXX,XX +XXX,XX @@ struct target_pt_regs {
23
#define TARGET_MLOCKALL_MCL_CURRENT 1
24
#define TARGET_MLOCKALL_MCL_FUTURE 2
25
26
+#define TARGET_PR_SVE_SET_VL 50
27
+#define TARGET_PR_SVE_GET_VL 51
28
+
29
#endif /* AARCH64_TARGET_SYSCALL_H */
30
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
35
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
31
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/cpu.h
37
--- a/target/arm/cpu.h
33
+++ b/target/arm/cpu.h
38
+++ b/target/arm/cpu.h
34
@@ -XXX,XX +XXX,XX @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
39
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
35
#ifdef TARGET_AARCH64
40
* | TBFLAG_AM32 | +-----+----------+
36
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
41
* | | |TBFLAG_M32|
37
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
42
* +-------------+----------------+----------+
38
+void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
43
- * 31 23 5 4 0
44
+ * 31 23 6 5 0
45
*
46
* Unless otherwise noted, these bits are cached in env->hflags.
47
*/
48
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_M32, LSPACT, 2, 1) /* Not cached. */
49
FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1) /* Not cached. */
50
/* Set if FPCCR.S does not match current security state */
51
FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1) /* Not cached. */
52
+/* Set if MVE insns are definitely not predicated by VPR or LTPSIZE */
53
+FIELD(TBFLAG_M32, MVE_NO_PRED, 5, 1) /* Not cached. */
54
55
/*
56
* Bit usage when in AArch64 state
57
diff --git a/target/arm/translate.h b/target/arm/translate.h
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/translate.h
60
+++ b/target/arm/translate.h
61
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
62
bool align_mem;
63
/* True if PSTATE.IL is set */
64
bool pstate_il;
65
+ /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
66
+ bool mve_no_pred;
67
/*
68
* >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
69
* < 0, set by the current instruction.
70
diff --git a/target/arm/helper.c b/target/arm/helper.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/target/arm/helper.c
73
+++ b/target/arm/helper.c
74
@@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
39
#endif
75
#endif
40
76
}
41
target_ulong do_arm_semihosting(CPUARMState *env);
77
42
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
78
+static bool mve_no_pred(CPUARMState *env)
43
index XXXXXXX..XXXXXXX 100644
79
+{
44
--- a/linux-user/syscall.c
80
+ /*
45
+++ b/linux-user/syscall.c
81
+ * Return true if there is definitely no predication of MVE
46
@@ -XXX,XX +XXX,XX @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
82
+ * instructions by VPR or LTPSIZE. (Returning false even if there
47
break;
83
+ * isn't any predication is OK; generated code will just be
84
+ * a little worse.)
85
+ * If the CPU does not implement MVE then this TB flag is always 0.
86
+ *
87
+ * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
88
+ * logic in gen_update_fp_context() needs to be updated to match.
89
+ *
90
+ * We do not include the effect of the ECI bits here -- they are
91
+ * tracked in other TB flags. This simplifies the logic for
92
+ * "when did we emit code that changes the MVE_NO_PRED TB flag
93
+ * and thus need to end the TB?".
94
+ */
95
+ if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
96
+ return false;
97
+ }
98
+ if (env->v7m.vpr) {
99
+ return false;
100
+ }
101
+ if (env->v7m.ltpsize < 4) {
102
+ return false;
103
+ }
104
+ return true;
105
+}
106
+
107
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
108
target_ulong *cs_base, uint32_t *pflags)
109
{
110
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
111
if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
112
DP_TBFLAG_M32(flags, LSPACT, 1);
113
}
114
+
115
+ if (mve_no_pred(env)) {
116
+ DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
117
+ }
118
} else {
119
/*
120
* Note that XSCALE_CPAR shares bits with VECSTRIDE.
121
diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/arm/translate-m-nocp.c
124
+++ b/target/arm/translate-m-nocp.c
125
@@ -XXX,XX +XXX,XX @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
126
127
clear_eci_state(s);
128
129
- /* End the TB, because we have updated FP control bits */
130
+ /*
131
+ * End the TB, because we have updated FP control bits,
132
+ * and possibly VPR or LTPSIZE.
133
+ */
134
s->base.is_jmp = DISAS_UPDATE_EXIT;
135
return true;
136
}
137
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
138
store_cpu_field(control, v7m.control[M_REG_S]);
139
tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
140
gen_helper_vfp_set_fpscr(cpu_env, tmp);
141
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
142
tcg_temp_free_i32(tmp);
143
tcg_temp_free_i32(sfpa);
144
break;
145
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
48
}
146
}
49
#endif
147
tmp = loadfn(s, opaque, true);
50
+#ifdef TARGET_AARCH64
148
store_cpu_field(tmp, v7m.vpr);
51
+ case TARGET_PR_SVE_SET_VL:
149
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
52
+ /* We cannot support either PR_SVE_SET_VL_ONEXEC
150
break;
53
+ or PR_SVE_VL_INHERIT. Therefore, anything above
151
case ARM_VFP_P0:
54
+ ARM_MAX_VQ results in EINVAL. */
152
{
55
+ ret = -TARGET_EINVAL;
153
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
56
+ if (arm_feature(cpu_env, ARM_FEATURE_SVE)
154
tcg_gen_deposit_i32(vpr, vpr, tmp,
57
+ && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
155
R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
58
+ CPUARMState *env = cpu_env;
156
store_cpu_field(vpr, v7m.vpr);
59
+ int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
157
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
60
+ int vq = MAX(arg2 / 16, 1);
158
tcg_temp_free_i32(tmp);
61
+
159
break;
62
+ if (vq < old_vq) {
160
}
63
+ aarch64_sve_narrow_vq(env, vq);
161
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
64
+ }
162
index XXXXXXX..XXXXXXX 100644
65
+ env->vfp.zcr_el[1] = vq - 1;
163
--- a/target/arm/translate-mve.c
66
+ ret = vq * 16;
164
+++ b/target/arm/translate-mve.c
67
+ }
165
@@ -XXX,XX +XXX,XX @@ DO_LOGIC(VORR, gen_helper_mve_vorr)
68
+ break;
166
DO_LOGIC(VORN, gen_helper_mve_vorn)
69
+ case TARGET_PR_SVE_GET_VL:
167
DO_LOGIC(VEOR, gen_helper_mve_veor)
70
+ ret = -TARGET_EINVAL;
168
71
+ if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
169
-DO_LOGIC(VPSEL, gen_helper_mve_vpsel)
72
+ CPUARMState *env = cpu_env;
170
+static bool trans_VPSEL(DisasContext *s, arg_2op *a)
73
+ ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
74
+ }
75
+ break;
76
+#endif /* AARCH64 */
77
case PR_GET_SECCOMP:
78
case PR_SET_SECCOMP:
79
/* Disable seccomp to prevent the target disabling syscalls we
80
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/target/arm/cpu64.c
83
+++ b/target/arm/cpu64.c
84
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_register_types(void)
85
}
86
87
type_init(aarch64_cpu_register_types)
88
+
89
+/* The manual says that when SVE is enabled and VQ is widened the
90
+ * implementation is allowed to zero the previously inaccessible
91
+ * portion of the registers. The corollary to that is that when
92
+ * SVE is enabled and VQ is narrowed we are also allowed to zero
93
+ * the now inaccessible portion of the registers.
94
+ *
95
+ * The intent of this is that no predicate bit beyond VQ is ever set.
96
+ * Which means that some operations on predicate registers themselves
97
+ * may operate on full uint64_t or even unrolled across the maximum
98
+ * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
99
+ * may well be cheaper than conditionals to restrict the operation
100
+ * to the relevant portion of a uint16_t[16].
101
+ *
102
+ * TODO: Need to call this for changes to the real system registers
103
+ * and EL state changes.
104
+ */
105
+void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
106
+{
171
+{
107
+ int i, j;
172
+ /* This insn updates predication bits */
108
+ uint64_t pmask;
173
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
109
+
174
+ return do_2op(s, a, gen_helper_mve_vpsel);
110
+ assert(vq >= 1 && vq <= ARM_MAX_VQ);
175
+}
111
+
176
112
+ /* Zap the high bits of the zregs. */
177
#define DO_2OP(INSN, FN) \
113
+ for (i = 0; i < 32; i++) {
178
static bool trans_##INSN(DisasContext *s, arg_2op *a) \
114
+ memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
179
@@ -XXX,XX +XXX,XX @@ static bool trans_VPNOT(DisasContext *s, arg_VPNOT *a)
115
+ }
180
}
116
+
181
117
+ /* Zap the high bits of the pregs and ffr. */
182
gen_helper_mve_vpnot(cpu_env);
118
+ pmask = 0;
183
+ /* This insn updates predication bits */
119
+ if (vq & 3) {
184
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
120
+ pmask = ~(-1ULL << (16 * (vq & 3)));
185
mve_update_eci(s);
121
+ }
186
return true;
122
+ for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
187
}
123
+ for (i = 0; i < 17; ++i) {
188
@@ -XXX,XX +XXX,XX @@ static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn)
124
+ env->vfp.pregs[i].p[j] &= pmask;
189
/* VPT */
190
gen_vpst(s, a->mask);
191
}
192
+ /* This insn updates predication bits */
193
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
194
mve_update_eci(s);
195
return true;
196
}
197
@@ -XXX,XX +XXX,XX @@ static bool do_vcmp_scalar(DisasContext *s, arg_vcmp_scalar *a,
198
/* VPT */
199
gen_vpst(s, a->mask);
200
}
201
+ /* This insn updates predication bits */
202
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
203
mve_update_eci(s);
204
return true;
205
}
206
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
207
index XXXXXXX..XXXXXXX 100644
208
--- a/target/arm/translate-vfp.c
209
+++ b/target/arm/translate-vfp.c
210
@@ -XXX,XX +XXX,XX @@ static inline long vfp_f16_offset(unsigned reg, bool top)
211
* Generate code for M-profile lazy FP state preservation if needed;
212
* this corresponds to the pseudocode PreserveFPState() function.
213
*/
214
-static void gen_preserve_fp_state(DisasContext *s)
215
+static void gen_preserve_fp_state(DisasContext *s, bool skip_context_update)
216
{
217
if (s->v7m_lspact) {
218
/*
219
@@ -XXX,XX +XXX,XX @@ static void gen_preserve_fp_state(DisasContext *s)
220
* any further FP insns in this TB.
221
*/
222
s->v7m_lspact = false;
223
+ /*
224
+ * The helper might have zeroed VPR, so we do not know the
225
+ * correct value for the MVE_NO_PRED TB flag any more.
226
+ * If we're about to create a new fp context then that
227
+ * will precisely determine the MVE_NO_PRED value (see
228
+ * gen_update_fp_context()). Otherwise, we must:
229
+ * - set s->mve_no_pred to false, so this instruction
230
+ * is generated to use helper functions
231
+ * - end the TB now, without chaining to the next TB
232
+ */
233
+ if (skip_context_update || !s->v7m_new_fp_ctxt_needed) {
234
+ s->mve_no_pred = false;
235
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
125
+ }
236
+ }
126
+ pmask = 0;
237
}
127
+ }
238
}
128
+}
239
240
@@ -XXX,XX +XXX,XX @@ static void gen_update_fp_context(DisasContext *s)
241
TCGv_i32 z32 = tcg_const_i32(0);
242
store_cpu_field(z32, v7m.vpr);
243
}
244
-
245
/*
246
- * We don't need to arrange to end the TB, because the only
247
- * parts of FPSCR which we cache in the TB flags are the VECLEN
248
- * and VECSTRIDE, and those don't exist for M-profile.
249
+ * We just updated the FPSCR and VPR. Some of this state is cached
250
+ * in the MVE_NO_PRED TB flag. We want to avoid having to end the
251
+ * TB here, which means we need the new value of the MVE_NO_PRED
252
+ * flag to be exactly known here and the same for all executions.
253
+ * Luckily FPDSCR.LTPSIZE is always constant 4 and the VPR is
254
+ * always set to 0, so the new MVE_NO_PRED flag is always 1
255
+ * if and only if we have MVE.
256
+ *
257
+ * (The other FPSCR state cached in TB flags is VECLEN and VECSTRIDE,
258
+ * but those do not exist for M-profile, so are not relevant here.)
259
*/
260
+ s->mve_no_pred = dc_isar_feature(aa32_mve, s);
261
262
if (s->v8m_secure) {
263
bits |= R_V7M_CONTROL_SFPA_MASK;
264
@@ -XXX,XX +XXX,XX @@ bool vfp_access_check_m(DisasContext *s, bool skip_context_update)
265
/* Handle M-profile lazy FP state mechanics */
266
267
/* Trigger lazy-state preservation if necessary */
268
- gen_preserve_fp_state(s);
269
+ gen_preserve_fp_state(s, skip_context_update);
270
271
if (!skip_context_update) {
272
/* Update ownership of FP context and create new FP context if needed */
273
diff --git a/target/arm/translate.c b/target/arm/translate.c
274
index XXXXXXX..XXXXXXX 100644
275
--- a/target/arm/translate.c
276
+++ b/target/arm/translate.c
277
@@ -XXX,XX +XXX,XX @@ static bool trans_DLS(DisasContext *s, arg_DLS *a)
278
/* DLSTP: set FPSCR.LTPSIZE */
279
tmp = tcg_const_i32(a->size);
280
store_cpu_field(tmp, v7m.ltpsize);
281
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
282
}
283
return true;
284
}
285
@@ -XXX,XX +XXX,XX @@ static bool trans_WLS(DisasContext *s, arg_WLS *a)
286
assert(ok);
287
tmp = tcg_const_i32(a->size);
288
store_cpu_field(tmp, v7m.ltpsize);
289
+ /*
290
+ * LTPSIZE updated, but MVE_NO_PRED will always be the same thing (0)
291
+ * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK.
292
+ */
293
}
294
gen_jmp_tb(s, s->base.pc_next, 1);
295
296
@@ -XXX,XX +XXX,XX @@ static bool trans_VCTP(DisasContext *s, arg_VCTP *a)
297
gen_helper_mve_vctp(cpu_env, masklen);
298
tcg_temp_free_i32(masklen);
299
tcg_temp_free_i32(rn_shifted);
300
+ /* This insn updates predication bits */
301
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
302
mve_update_eci(s);
303
return true;
304
}
305
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
306
dc->v7m_new_fp_ctxt_needed =
307
EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED);
308
dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT);
309
+ dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED);
310
} else {
311
dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);
312
dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B);
129
--
313
--
130
2.16.2
314
2.20.1
131
315
132
316
diff view generated by jsdifflib
1
Move the definition of the 'host' cpu type into cpu.c, where all the
1
When not predicating, implement the MVE bitwise logical insns
2
other CPU types are defined. We can do this now we've decoupled it
2
directly using TCG vector operations.
3
from the KVM-specific host feature probing. This means we now create
4
the type unconditionally (assuming we were built with KVM support at
5
all), but if you try to use it without -enable-kvm this will end
6
up in the "host cpu probe failed and KVM not enabled" path in
7
arm_cpu_realizefn(), for an appropriate error message.
8
3
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20180308130626.12393-3-peter.maydell@linaro.org
7
Message-id: 20210913095440.13462-5-peter.maydell@linaro.org
13
---
8
---
14
target/arm/cpu.c | 24 ++++++++++++++++++++++++
9
target/arm/translate-mve.c | 51 +++++++++++++++++++++++++++-----------
15
target/arm/kvm.c | 19 -------------------
10
1 file changed, 36 insertions(+), 15 deletions(-)
16
2 files changed, 24 insertions(+), 19 deletions(-)
17
11
18
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
12
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
19
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/cpu.c
14
--- a/target/arm/translate-mve.c
21
+++ b/target/arm/cpu.c
15
+++ b/target/arm/translate-mve.c
22
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
16
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr mve_qreg_ptr(unsigned reg)
23
#endif
17
return ret;
24
}
18
}
25
19
26
+#ifdef CONFIG_KVM
20
+static bool mve_no_predication(DisasContext *s)
27
+static void arm_host_initfn(Object *obj)
28
+{
21
+{
29
+ ARMCPU *cpu = ARM_CPU(obj);
22
+ /*
30
+
23
+ * Return true if we are executing the entire MVE instruction
31
+ kvm_arm_set_cpu_features_from_host(cpu);
24
+ * with no predication or partial-execution, and so we can safely
25
+ * use an inline TCG vector implementation.
26
+ */
27
+ return s->eci == 0 && s->mve_no_pred;
32
+}
28
+}
33
+
29
+
34
+static const TypeInfo host_arm_cpu_type_info = {
30
static bool mve_check_qreg_bank(DisasContext *s, int qmask)
35
+ .name = TYPE_ARM_HOST_CPU,
31
{
36
+#ifdef TARGET_AARCH64
32
/*
37
+ .parent = TYPE_AARCH64_CPU,
33
@@ -XXX,XX +XXX,XX @@ static bool trans_VNEG_fp(DisasContext *s, arg_1op *a)
38
+#else
34
return do_1op(s, a, fns[a->size]);
39
+ .parent = TYPE_ARM_CPU,
35
}
40
+#endif
36
41
+ .instance_init = arm_host_initfn,
37
-static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn)
42
+};
38
+static bool do_2op_vec(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn,
39
+ GVecGen3Fn *vecfn)
40
{
41
TCGv_ptr qd, qn, qm;
42
43
@@ -XXX,XX +XXX,XX @@ static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn)
44
return true;
45
}
46
47
- qd = mve_qreg_ptr(a->qd);
48
- qn = mve_qreg_ptr(a->qn);
49
- qm = mve_qreg_ptr(a->qm);
50
- fn(cpu_env, qd, qn, qm);
51
- tcg_temp_free_ptr(qd);
52
- tcg_temp_free_ptr(qn);
53
- tcg_temp_free_ptr(qm);
54
+ if (vecfn && mve_no_predication(s)) {
55
+ vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qn),
56
+ mve_qreg_offset(a->qm), 16, 16);
57
+ } else {
58
+ qd = mve_qreg_ptr(a->qd);
59
+ qn = mve_qreg_ptr(a->qn);
60
+ qm = mve_qreg_ptr(a->qm);
61
+ fn(cpu_env, qd, qn, qm);
62
+ tcg_temp_free_ptr(qd);
63
+ tcg_temp_free_ptr(qn);
64
+ tcg_temp_free_ptr(qm);
65
+ }
66
mve_update_eci(s);
67
return true;
68
}
69
70
-#define DO_LOGIC(INSN, HELPER) \
71
+static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn *fn)
72
+{
73
+ return do_2op_vec(s, a, fn, NULL);
74
+}
43
+
75
+
44
+#endif
76
+#define DO_LOGIC(INSN, HELPER, VECFN) \
45
+
77
static bool trans_##INSN(DisasContext *s, arg_2op *a) \
46
static void cpu_register(const ARMCPUInfo *info)
78
{ \
79
- return do_2op(s, a, HELPER); \
80
+ return do_2op_vec(s, a, HELPER, VECFN); \
81
}
82
83
-DO_LOGIC(VAND, gen_helper_mve_vand)
84
-DO_LOGIC(VBIC, gen_helper_mve_vbic)
85
-DO_LOGIC(VORR, gen_helper_mve_vorr)
86
-DO_LOGIC(VORN, gen_helper_mve_vorn)
87
-DO_LOGIC(VEOR, gen_helper_mve_veor)
88
+DO_LOGIC(VAND, gen_helper_mve_vand, tcg_gen_gvec_and)
89
+DO_LOGIC(VBIC, gen_helper_mve_vbic, tcg_gen_gvec_andc)
90
+DO_LOGIC(VORR, gen_helper_mve_vorr, tcg_gen_gvec_or)
91
+DO_LOGIC(VORN, gen_helper_mve_vorn, tcg_gen_gvec_orc)
92
+DO_LOGIC(VEOR, gen_helper_mve_veor, tcg_gen_gvec_xor)
93
94
static bool trans_VPSEL(DisasContext *s, arg_2op *a)
47
{
95
{
48
TypeInfo type_info = {
49
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_register_types(void)
50
cpu_register(info);
51
info++;
52
}
53
+
54
+#ifdef CONFIG_KVM
55
+ type_register_static(&host_arm_cpu_type_info);
56
+#endif
57
}
58
59
type_init(arm_cpu_register_types)
60
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/arm/kvm.c
63
+++ b/target/arm/kvm.c
64
@@ -XXX,XX +XXX,XX @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
65
env->features = arm_host_cpu_features.features;
66
}
67
68
-static void kvm_arm_host_cpu_initfn(Object *obj)
69
-{
70
- ARMCPU *cpu = ARM_CPU(obj);
71
-
72
- kvm_arm_set_cpu_features_from_host(cpu);
73
-}
74
-
75
-static const TypeInfo host_arm_cpu_type_info = {
76
- .name = TYPE_ARM_HOST_CPU,
77
-#ifdef TARGET_AARCH64
78
- .parent = TYPE_AARCH64_CPU,
79
-#else
80
- .parent = TYPE_ARM_CPU,
81
-#endif
82
- .instance_init = kvm_arm_host_cpu_initfn,
83
-};
84
-
85
int kvm_arch_init(MachineState *ms, KVMState *s)
86
{
87
/* For ARM interrupt delivery is always asynchronous,
88
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init(MachineState *ms, KVMState *s)
89
90
cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
91
92
- type_register_static(&host_arm_cpu_type_info);
93
-
94
return 0;
95
}
96
97
--
96
--
98
2.16.2
97
2.20.1
99
98
100
99
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
Optimize MVE arithmetic ops when we have a TCG
2
vector operation we can use.
2
3
3
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Message-id: 20180309153654.13518-4-f4bug@amsat.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210913095440.13462-6-peter.maydell@linaro.org
7
---
8
---
8
hw/sd/sd.c | 14 ++++++++++----
9
target/arm/translate-mve.c | 20 +++++++++++---------
9
hw/sd/trace-events | 8 ++++----
10
1 file changed, 11 insertions(+), 9 deletions(-)
10
2 files changed, 14 insertions(+), 8 deletions(-)
11
11
12
diff --git a/hw/sd/sd.c b/hw/sd/sd.c
12
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/hw/sd/sd.c
14
--- a/target/arm/translate-mve.c
15
+++ b/hw/sd/sd.c
15
+++ b/target/arm/translate-mve.c
16
@@ -XXX,XX +XXX,XX @@ struct SDState {
16
@@ -XXX,XX +XXX,XX @@ static bool trans_VPSEL(DisasContext *s, arg_2op *a)
17
qemu_irq readonly_cb;
17
return do_2op(s, a, gen_helper_mve_vpsel);
18
qemu_irq inserted_cb;
18
}
19
QEMUTimer *ocr_power_timer;
19
20
+ const char *proto_name;
20
-#define DO_2OP(INSN, FN) \
21
bool enable;
21
+#define DO_2OP_VEC(INSN, FN, VECFN) \
22
uint8_t dat_lines;
22
static bool trans_##INSN(DisasContext *s, arg_2op *a) \
23
bool cmd_line;
23
{ \
24
@@ -XXX,XX +XXX,XX @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
24
static MVEGenTwoOpFn * const fns[] = { \
25
* However there is no ACMD55, so we want to trace this particular case.
25
@@ -XXX,XX +XXX,XX @@ static bool trans_VPSEL(DisasContext *s, arg_2op *a)
26
*/
26
gen_helper_mve_##FN##w, \
27
if (req.cmd != 55 || sd->expecting_acmd) {
27
NULL, \
28
- trace_sdcard_normal_command(sd_cmd_name(req.cmd), req.cmd,
28
}; \
29
+ trace_sdcard_normal_command(sd->proto_name,
29
- return do_2op(s, a, fns[a->size]); \
30
+ sd_cmd_name(req.cmd), req.cmd,
30
+ return do_2op_vec(s, a, fns[a->size], VECFN); \
31
req.arg, sd_state_name(sd->state));
32
}
31
}
33
32
34
@@ -XXX,XX +XXX,XX @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
33
-DO_2OP(VADD, vadd)
35
static sd_rsp_type_t sd_app_command(SDState *sd,
34
-DO_2OP(VSUB, vsub)
36
SDRequest req)
35
-DO_2OP(VMUL, vmul)
37
{
36
+#define DO_2OP(INSN, FN) DO_2OP_VEC(INSN, FN, NULL)
38
- trace_sdcard_app_command(sd_acmd_name(req.cmd),
39
+ trace_sdcard_app_command(sd->proto_name, sd_acmd_name(req.cmd),
40
req.cmd, req.arg, sd_state_name(sd->state));
41
sd->card_status |= APP_CMD;
42
switch (req.cmd) {
43
@@ -XXX,XX +XXX,XX @@ void sd_write_data(SDState *sd, uint8_t value)
44
if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION))
45
return;
46
47
- trace_sdcard_write_data(sd_acmd_name(sd->current_cmd),
48
+ trace_sdcard_write_data(sd->proto_name,
49
+ sd_acmd_name(sd->current_cmd),
50
sd->current_cmd, value);
51
switch (sd->current_cmd) {
52
case 24:    /* CMD24: WRITE_SINGLE_BLOCK */
53
@@ -XXX,XX +XXX,XX @@ uint8_t sd_read_data(SDState *sd)
54
55
io_len = (sd->ocr & (1 << 30)) ? 512 : sd->blk_len;
56
57
- trace_sdcard_read_data(sd_acmd_name(sd->current_cmd),
58
+ trace_sdcard_read_data(sd->proto_name,
59
+ sd_acmd_name(sd->current_cmd),
60
sd->current_cmd, io_len);
61
switch (sd->current_cmd) {
62
case 6:    /* CMD6: SWITCH_FUNCTION */
63
@@ -XXX,XX +XXX,XX @@ static void sd_realize(DeviceState *dev, Error **errp)
64
SDState *sd = SD_CARD(dev);
65
int ret;
66
67
+ sd->proto_name = sd->spi ? "SPI" : "SD";
68
+
37
+
69
if (sd->blk && blk_is_read_only(sd->blk)) {
38
+DO_2OP_VEC(VADD, vadd, tcg_gen_gvec_add)
70
error_setg(errp, "Cannot use read-only drive as SD card");
39
+DO_2OP_VEC(VSUB, vsub, tcg_gen_gvec_sub)
71
return;
40
+DO_2OP_VEC(VMUL, vmul, tcg_gen_gvec_mul)
72
diff --git a/hw/sd/trace-events b/hw/sd/trace-events
41
DO_2OP(VMULH_S, vmulhs)
73
index XXXXXXX..XXXXXXX 100644
42
DO_2OP(VMULH_U, vmulhu)
74
--- a/hw/sd/trace-events
43
DO_2OP(VRMULH_S, vrmulhs)
75
+++ b/hw/sd/trace-events
44
DO_2OP(VRMULH_U, vrmulhu)
76
@@ -XXX,XX +XXX,XX @@ sdhci_write_dataport(uint16_t data_count) "write buffer filled with %u bytes of
45
-DO_2OP(VMAX_S, vmaxs)
77
sdhci_capareg(const char *desc, uint16_t val) "%s: %u"
46
-DO_2OP(VMAX_U, vmaxu)
78
47
-DO_2OP(VMIN_S, vmins)
79
# hw/sd/sd.c
48
-DO_2OP(VMIN_U, vminu)
80
-sdcard_normal_command(const char *cmd_desc, uint8_t cmd, uint32_t arg, const char *state) "%20s/ CMD%02d arg 0x%08x (state %s)"
49
+DO_2OP_VEC(VMAX_S, vmaxs, tcg_gen_gvec_smax)
81
-sdcard_app_command(const char *acmd_desc, uint8_t acmd, uint32_t arg, const char *state) "%23s/ACMD%02d arg 0x%08x (state %s)"
50
+DO_2OP_VEC(VMAX_U, vmaxu, tcg_gen_gvec_umax)
82
+sdcard_normal_command(const char *proto, const char *cmd_desc, uint8_t cmd, uint32_t arg, const char *state) "%s %20s/ CMD%02d arg 0x%08x (state %s)"
51
+DO_2OP_VEC(VMIN_S, vmins, tcg_gen_gvec_smin)
83
+sdcard_app_command(const char *proto, const char *acmd_desc, uint8_t acmd, uint32_t arg, const char *state) "%s %23s/ACMD%02d arg 0x%08x (state %s)"
52
+DO_2OP_VEC(VMIN_U, vminu, tcg_gen_gvec_umin)
84
sdcard_response(const char *rspdesc, int rsplen) "%s (sz:%d)"
53
DO_2OP(VABD_S, vabds)
85
sdcard_powerup(void) ""
54
DO_2OP(VABD_U, vabdu)
86
sdcard_inquiry_cmd41(void) ""
55
DO_2OP(VHADD_S, vhadds)
87
@@ -XXX,XX +XXX,XX @@ sdcard_lock(void) ""
88
sdcard_unlock(void) ""
89
sdcard_read_block(uint64_t addr, uint32_t len) "addr 0x%" PRIx64 " size 0x%x"
90
sdcard_write_block(uint64_t addr, uint32_t len) "addr 0x%" PRIx64 " size 0x%x"
91
-sdcard_write_data(const char *cmd_desc, uint8_t cmd, uint8_t value) "%20s/ CMD%02d value 0x%02x"
92
-sdcard_read_data(const char *cmd_desc, uint8_t cmd, int length) "%20s/ CMD%02d len %d"
93
+sdcard_write_data(const char *proto, const char *cmd_desc, uint8_t cmd, uint8_t value) "%s %20s/ CMD%02d value 0x%02x"
94
+sdcard_read_data(const char *proto, const char *cmd_desc, uint8_t cmd, int length) "%s %20s/ CMD%02d len %d"
95
sdcard_set_voltage(uint16_t millivolts) "%u mV"
96
97
# hw/sd/milkymist-memcard.c
98
--
56
--
99
2.16.2
57
2.20.1
100
58
101
59
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
Optimize the MVE VNEG and VABS insns by using TCG
2
vector ops when possible.
2
3
3
The SDBus will reuse these functions, so we put them in a new source file.
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210913095440.13462-7-peter.maydell@linaro.org
8
---
9
target/arm/translate-mve.c | 32 ++++++++++++++++++++++----------
10
1 file changed, 22 insertions(+), 10 deletions(-)
4
11
5
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
12
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
6
Message-id: 20180309153654.13518-3-f4bug@amsat.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
[PMM: slight wordsmithing of comments, added note that string
9
returned does not need to be freed]
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
hw/sd/Makefile.objs | 2 +-
13
hw/sd/sdmmc-internal.h | 24 +++++++++++++++++
14
hw/sd/sd.c | 13 +++++----
15
hw/sd/sdmmc-internal.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++
16
hw/sd/trace-events | 8 +++---
17
5 files changed, 109 insertions(+), 10 deletions(-)
18
create mode 100644 hw/sd/sdmmc-internal.c
19
20
diff --git a/hw/sd/Makefile.objs b/hw/sd/Makefile.objs
21
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/sd/Makefile.objs
14
--- a/target/arm/translate-mve.c
23
+++ b/hw/sd/Makefile.objs
15
+++ b/target/arm/translate-mve.c
24
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
25
common-obj-$(CONFIG_PL181) += pl181.o
17
return true;
26
common-obj-$(CONFIG_SSI_SD) += ssi-sd.o
18
}
27
-common-obj-$(CONFIG_SD) += sd.o core.o
19
28
+common-obj-$(CONFIG_SD) += sd.o core.o sdmmc-internal.o
20
-static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
29
common-obj-$(CONFIG_SDHCI) += sdhci.o
21
+static bool do_1op_vec(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn,
30
22
+ GVecGen2Fn vecfn)
31
obj-$(CONFIG_MILKYMIST) += milkymist-memcard.o
23
{
32
diff --git a/hw/sd/sdmmc-internal.h b/hw/sd/sdmmc-internal.h
24
TCGv_ptr qd, qm;
33
index XXXXXXX..XXXXXXX 100644
25
34
--- a/hw/sd/sdmmc-internal.h
26
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
35
+++ b/hw/sd/sdmmc-internal.h
27
return true;
36
@@ -XXX,XX +XXX,XX @@
37
38
#define SDMMC_CMD_MAX 64
39
40
+/**
41
+ * sd_cmd_name:
42
+ * @cmd: A SD "normal" command, up to SDMMC_CMD_MAX.
43
+ *
44
+ * Returns a human-readable name describing the command.
45
+ * The return value is always a static string which does not need
46
+ * to be freed after use.
47
+ *
48
+ * Returns: The command name of @cmd or "UNKNOWN_CMD".
49
+ */
50
+const char *sd_cmd_name(uint8_t cmd);
51
+
52
+/**
53
+ * sd_acmd_name:
54
+ * @cmd: A SD "Application-Specific" command, up to SDMMC_CMD_MAX.
55
+ *
56
+ * Returns a human-readable name describing the application command.
57
+ * The return value is always a static string which does not need
58
+ * to be freed after use.
59
+ *
60
+ * Returns: The application command name of @cmd or "UNKNOWN_ACMD".
61
+ */
62
+const char *sd_acmd_name(uint8_t cmd);
63
+
64
#endif
65
diff --git a/hw/sd/sd.c b/hw/sd/sd.c
66
index XXXXXXX..XXXXXXX 100644
67
--- a/hw/sd/sd.c
68
+++ b/hw/sd/sd.c
69
@@ -XXX,XX +XXX,XX @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
70
* However there is no ACMD55, so we want to trace this particular case.
71
*/
72
if (req.cmd != 55 || sd->expecting_acmd) {
73
- trace_sdcard_normal_command(req.cmd, req.arg,
74
- sd_state_name(sd->state));
75
+ trace_sdcard_normal_command(sd_cmd_name(req.cmd), req.cmd,
76
+ req.arg, sd_state_name(sd->state));
77
}
28
}
78
29
79
/* Not interpreting this as an app command */
30
- qd = mve_qreg_ptr(a->qd);
80
@@ -XXX,XX +XXX,XX @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
31
- qm = mve_qreg_ptr(a->qm);
81
static sd_rsp_type_t sd_app_command(SDState *sd,
32
- fn(cpu_env, qd, qm);
82
SDRequest req)
33
- tcg_temp_free_ptr(qd);
83
{
34
- tcg_temp_free_ptr(qm);
84
- trace_sdcard_app_command(req.cmd, req.arg);
35
+ if (vecfn && mve_no_predication(s)) {
85
+ trace_sdcard_app_command(sd_acmd_name(req.cmd),
36
+ vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qm), 16, 16);
86
+ req.cmd, req.arg, sd_state_name(sd->state));
37
+ } else {
87
sd->card_status |= APP_CMD;
38
+ qd = mve_qreg_ptr(a->qd);
88
switch (req.cmd) {
39
+ qm = mve_qreg_ptr(a->qm);
89
case 6:    /* ACMD6: SET_BUS_WIDTH */
40
+ fn(cpu_env, qd, qm);
90
@@ -XXX,XX +XXX,XX @@ void sd_write_data(SDState *sd, uint8_t value)
41
+ tcg_temp_free_ptr(qd);
91
if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION))
42
+ tcg_temp_free_ptr(qm);
92
return;
43
+ }
93
44
mve_update_eci(s);
94
- trace_sdcard_write_data(sd->current_cmd, value);
45
return true;
95
+ trace_sdcard_write_data(sd_acmd_name(sd->current_cmd),
46
}
96
+ sd->current_cmd, value);
47
97
switch (sd->current_cmd) {
48
-#define DO_1OP(INSN, FN) \
98
case 24:    /* CMD24: WRITE_SINGLE_BLOCK */
49
+static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
99
sd->data[sd->data_offset ++] = value;
100
@@ -XXX,XX +XXX,XX @@ uint8_t sd_read_data(SDState *sd)
101
102
io_len = (sd->ocr & (1 << 30)) ? 512 : sd->blk_len;
103
104
- trace_sdcard_read_data(sd->current_cmd, io_len);
105
+ trace_sdcard_read_data(sd_acmd_name(sd->current_cmd),
106
+ sd->current_cmd, io_len);
107
switch (sd->current_cmd) {
108
case 6:    /* CMD6: SWITCH_FUNCTION */
109
ret = sd->data[sd->data_offset ++];
110
diff --git a/hw/sd/sdmmc-internal.c b/hw/sd/sdmmc-internal.c
111
new file mode 100644
112
index XXXXXXX..XXXXXXX
113
--- /dev/null
114
+++ b/hw/sd/sdmmc-internal.c
115
@@ -XXX,XX +XXX,XX @@
116
+/*
117
+ * SD/MMC cards common helpers
118
+ *
119
+ * Copyright (c) 2018 Philippe Mathieu-Daudé <f4bug@amsat.org>
120
+ *
121
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
122
+ * See the COPYING file in the top-level directory.
123
+ * SPDX-License-Identifier: GPL-2.0-or-later
124
+ */
125
+
126
+#include "qemu/osdep.h"
127
+#include "sdmmc-internal.h"
128
+
129
+const char *sd_cmd_name(uint8_t cmd)
130
+{
50
+{
131
+ static const char *cmd_abbrev[SDMMC_CMD_MAX] = {
51
+ return do_1op_vec(s, a, fn, NULL);
132
+ [0] = "GO_IDLE_STATE",
133
+ [2] = "ALL_SEND_CID", [3] = "SEND_RELATIVE_ADDR",
134
+ [4] = "SET_DSR", [5] = "IO_SEND_OP_COND",
135
+ [6] = "SWITCH_FUNC", [7] = "SELECT/DESELECT_CARD",
136
+ [8] = "SEND_IF_COND", [9] = "SEND_CSD",
137
+ [10] = "SEND_CID", [11] = "VOLTAGE_SWITCH",
138
+ [12] = "STOP_TRANSMISSION", [13] = "SEND_STATUS",
139
+ [15] = "GO_INACTIVE_STATE",
140
+ [16] = "SET_BLOCKLEN", [17] = "READ_SINGLE_BLOCK",
141
+ [18] = "READ_MULTIPLE_BLOCK", [19] = "SEND_TUNING_BLOCK",
142
+ [20] = "SPEED_CLASS_CONTROL", [21] = "DPS_spec",
143
+ [23] = "SET_BLOCK_COUNT",
144
+ [24] = "WRITE_BLOCK", [25] = "WRITE_MULTIPLE_BLOCK",
145
+ [26] = "MANUF_RSVD", [27] = "PROGRAM_CSD",
146
+ [28] = "SET_WRITE_PROT", [29] = "CLR_WRITE_PROT",
147
+ [30] = "SEND_WRITE_PROT",
148
+ [32] = "ERASE_WR_BLK_START", [33] = "ERASE_WR_BLK_END",
149
+ [34] = "SW_FUNC_RSVD", [35] = "SW_FUNC_RSVD",
150
+ [36] = "SW_FUNC_RSVD", [37] = "SW_FUNC_RSVD",
151
+ [38] = "ERASE",
152
+ [40] = "DPS_spec",
153
+ [42] = "LOCK_UNLOCK", [43] = "Q_MANAGEMENT",
154
+ [44] = "Q_TASK_INFO_A", [45] = "Q_TASK_INFO_B",
155
+ [46] = "Q_RD_TASK", [47] = "Q_WR_TASK",
156
+ [48] = "READ_EXTR_SINGLE", [49] = "WRITE_EXTR_SINGLE",
157
+ [50] = "SW_FUNC_RSVD",
158
+ [52] = "IO_RW_DIRECT", [53] = "IO_RW_EXTENDED",
159
+ [54] = "SDIO_RSVD", [55] = "APP_CMD",
160
+ [56] = "GEN_CMD", [57] = "SW_FUNC_RSVD",
161
+ [58] = "READ_EXTR_MULTI", [59] = "WRITE_EXTR_MULTI",
162
+ [60] = "MANUF_RSVD", [61] = "MANUF_RSVD",
163
+ [62] = "MANUF_RSVD", [63] = "MANUF_RSVD",
164
+ };
165
+ return cmd_abbrev[cmd] ? cmd_abbrev[cmd] : "UNKNOWN_CMD";
166
+}
52
+}
167
+
53
+
168
+const char *sd_acmd_name(uint8_t cmd)
54
+#define DO_1OP_VEC(INSN, FN, VECFN) \
169
+{
55
static bool trans_##INSN(DisasContext *s, arg_1op *a) \
170
+ static const char *acmd_abbrev[SDMMC_CMD_MAX] = {
56
{ \
171
+ [6] = "SET_BUS_WIDTH",
57
static MVEGenOneOpFn * const fns[] = { \
172
+ [13] = "SD_STATUS",
58
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
173
+ [14] = "DPS_spec", [15] = "DPS_spec",
59
gen_helper_mve_##FN##w, \
174
+ [16] = "DPS_spec",
60
NULL, \
175
+ [18] = "SECU_spec",
61
}; \
176
+ [22] = "SEND_NUM_WR_BLOCKS", [23] = "SET_WR_BLK_ERASE_COUNT",
62
- return do_1op(s, a, fns[a->size]); \
177
+ [41] = "SD_SEND_OP_COND",
63
+ return do_1op_vec(s, a, fns[a->size], VECFN); \
178
+ [42] = "SET_CLR_CARD_DETECT",
64
}
179
+ [51] = "SEND_SCR",
65
180
+ [52] = "SECU_spec", [53] = "SECU_spec",
66
+#define DO_1OP(INSN, FN) DO_1OP_VEC(INSN, FN, NULL)
181
+ [54] = "SECU_spec",
182
+ [56] = "SECU_spec", [57] = "SECU_spec",
183
+ [58] = "SECU_spec", [59] = "SECU_spec",
184
+ };
185
+
67
+
186
+ return acmd_abbrev[cmd] ? acmd_abbrev[cmd] : "UNKNOWN_ACMD";
68
DO_1OP(VCLZ, vclz)
187
+}
69
DO_1OP(VCLS, vcls)
188
diff --git a/hw/sd/trace-events b/hw/sd/trace-events
70
-DO_1OP(VABS, vabs)
189
index XXXXXXX..XXXXXXX 100644
71
-DO_1OP(VNEG, vneg)
190
--- a/hw/sd/trace-events
72
+DO_1OP_VEC(VABS, vabs, tcg_gen_gvec_abs)
191
+++ b/hw/sd/trace-events
73
+DO_1OP_VEC(VNEG, vneg, tcg_gen_gvec_neg)
192
@@ -XXX,XX +XXX,XX @@ sdhci_write_dataport(uint16_t data_count) "write buffer filled with %u bytes of
74
DO_1OP(VQABS, vqabs)
193
sdhci_capareg(const char *desc, uint16_t val) "%s: %u"
75
DO_1OP(VQNEG, vqneg)
194
76
DO_1OP(VMAXA, vmaxa)
195
# hw/sd/sd.c
196
-sdcard_normal_command(uint8_t cmd, uint32_t arg, const char *state) "CMD%d arg 0x%08x (state %s)"
197
-sdcard_app_command(uint8_t acmd, uint32_t arg) "ACMD%d arg 0x%08x"
198
+sdcard_normal_command(const char *cmd_desc, uint8_t cmd, uint32_t arg, const char *state) "%20s/ CMD%02d arg 0x%08x (state %s)"
199
+sdcard_app_command(const char *acmd_desc, uint8_t acmd, uint32_t arg, const char *state) "%23s/ACMD%02d arg 0x%08x (state %s)"
200
sdcard_response(const char *rspdesc, int rsplen) "%s (sz:%d)"
201
sdcard_powerup(void) ""
202
sdcard_inquiry_cmd41(void) ""
203
@@ -XXX,XX +XXX,XX @@ sdcard_lock(void) ""
204
sdcard_unlock(void) ""
205
sdcard_read_block(uint64_t addr, uint32_t len) "addr 0x%" PRIx64 " size 0x%x"
206
sdcard_write_block(uint64_t addr, uint32_t len) "addr 0x%" PRIx64 " size 0x%x"
207
-sdcard_write_data(uint8_t cmd, uint8_t value) "CMD%02d value 0x%02x"
208
-sdcard_read_data(uint8_t cmd, int length) "CMD%02d len %d"
209
+sdcard_write_data(const char *cmd_desc, uint8_t cmd, uint8_t value) "%20s/ CMD%02d value 0x%02x"
210
+sdcard_read_data(const char *cmd_desc, uint8_t cmd, int length) "%20s/ CMD%02d len %d"
211
sdcard_set_voltage(uint16_t millivolts) "%u mV"
212
213
# hw/sd/milkymist-memcard.c
214
--
77
--
215
2.16.2
78
2.20.1
216
79
217
80
diff view generated by jsdifflib
1
From: Marc-André Lureau <marcandre.lureau@redhat.com>
1
Optimize the MVE VDUP insns by using TCG vector ops when possible.
2
2
3
Spotted by ASAN:
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
QTEST_QEMU_BINARY=aarch64-softmmu/qemu-system-aarch64 tests/boot-serial-test
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210913095440.13462-8-peter.maydell@linaro.org
6
---
7
target/arm/translate-mve.c | 12 ++++++++----
8
1 file changed, 8 insertions(+), 4 deletions(-)
5
9
6
Direct leak of 48 byte(s) in 1 object(s) allocated from:
10
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
7
#0 0x7ff8a9b0ca38 in __interceptor_calloc (/lib64/libasan.so.4+0xdea38)
8
#1 0x7ff8a8ea7f75 in g_malloc0 ../glib/gmem.c:124
9
#2 0x55fef3d99129 in error_setv /home/elmarco/src/qemu/util/error.c:59
10
#3 0x55fef3d99738 in error_setg_internal /home/elmarco/src/qemu/util/error.c:95
11
#4 0x55fef323acb2 in load_elf_hdr /home/elmarco/src/qemu/hw/core/loader.c:393
12
#5 0x55fef2d15776 in arm_load_elf /home/elmarco/src/qemu/hw/arm/boot.c:830
13
#6 0x55fef2d16d39 in arm_load_kernel_notify /home/elmarco/src/qemu/hw/arm/boot.c:1022
14
#7 0x55fef3dc634d in notifier_list_notify /home/elmarco/src/qemu/util/notify.c:40
15
#8 0x55fef2fc3182 in qemu_run_machine_init_done_notifiers /home/elmarco/src/qemu/vl.c:2716
16
#9 0x55fef2fcbbd1 in main /home/elmarco/src/qemu/vl.c:4679
17
#10 0x7ff89dfed009 in __libc_start_main (/lib64/libc.so.6+0x21009)
18
19
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
20
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
22
---
23
hw/arm/boot.c | 1 +
24
1 file changed, 1 insertion(+)
25
26
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
27
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
28
--- a/hw/arm/boot.c
12
--- a/target/arm/translate-mve.c
29
+++ b/hw/arm/boot.c
13
+++ b/target/arm/translate-mve.c
30
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry,
14
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
31
15
return true;
32
load_elf_hdr(info->kernel_filename, &elf_header, &elf_is64, &err);
33
if (err) {
34
+ error_free(err);
35
return ret;
36
}
16
}
37
17
18
- qd = mve_qreg_ptr(a->qd);
19
rt = load_reg(s, a->rt);
20
- tcg_gen_dup_i32(a->size, rt, rt);
21
- gen_helper_mve_vdup(cpu_env, qd, rt);
22
- tcg_temp_free_ptr(qd);
23
+ if (mve_no_predication(s)) {
24
+ tcg_gen_gvec_dup_i32(a->size, mve_qreg_offset(a->qd), 16, 16, rt);
25
+ } else {
26
+ qd = mve_qreg_ptr(a->qd);
27
+ tcg_gen_dup_i32(a->size, rt, rt);
28
+ gen_helper_mve_vdup(cpu_env, qd, rt);
29
+ tcg_temp_free_ptr(qd);
30
+ }
31
tcg_temp_free_i32(rt);
32
mve_update_eci(s);
33
return true;
38
--
34
--
39
2.16.2
35
2.20.1
40
36
41
37
diff view generated by jsdifflib
1
From: Alistair Francis <alistair.francis@xilinx.com>
1
Optimize the MVE VMVN insn by using TCG vector ops when possible.
2
2
3
Set the ARM CPU core count property for the A53's attached to the Xilnx
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
ZynqMP machine.
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210913095440.13462-9-peter.maydell@linaro.org
6
---
7
target/arm/translate-mve.c | 2 +-
8
1 file changed, 1 insertion(+), 1 deletion(-)
5
9
6
Signed-off-by: Alistair Francis <alistair.francis@xilinx.com>
10
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Message-id: fe0dd90b85ac73f9fc9548c253bededa70a07006.1520018138.git.alistair.francis@xilinx.com
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
hw/arm/xlnx-zynqmp.c | 2 ++
12
1 file changed, 2 insertions(+)
13
14
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/arm/xlnx-zynqmp.c
12
--- a/target/arm/translate-mve.c
17
+++ b/hw/arm/xlnx-zynqmp.c
13
+++ b/target/arm/translate-mve.c
18
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
14
@@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_1op *a)
19
s->virt, "has_el2", NULL);
15
20
object_property_set_int(OBJECT(&s->apu_cpu[i]), GIC_BASE_ADDR,
16
static bool trans_VMVN(DisasContext *s, arg_1op *a)
21
"reset-cbar", &error_abort);
17
{
22
+ object_property_set_int(OBJECT(&s->apu_cpu[i]), num_apus,
18
- return do_1op(s, a, gen_helper_mve_vmvn);
23
+ "core-count", &error_abort);
19
+ return do_1op_vec(s, a, gen_helper_mve_vmvn, tcg_gen_gvec_not);
24
object_property_set_bool(OBJECT(&s->apu_cpu[i]), true, "realized",
20
}
25
&err);
21
26
if (err) {
22
static bool trans_VABS_fp(DisasContext *s, arg_1op *a)
27
--
23
--
28
2.16.2
24
2.20.1
29
25
30
26
diff view generated by jsdifflib
1
From: Andrey Smirnov <andrew.smirnov@gmail.com>
1
Optimize the MVE VSHL and VSHR immediate forms by using TCG vector
2
ops when possible.
2
3
3
Implement code needed to set up emulation of MCIMX7SABRE board from
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
NXP. For more info about the HW see:
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210913095440.13462-10-peter.maydell@linaro.org
7
---
8
target/arm/translate-mve.c | 83 +++++++++++++++++++++++++++++---------
9
1 file changed, 63 insertions(+), 20 deletions(-)
5
10
6
https://www.nxp.com/support/developer-resources/hardware-development-tools/sabre-development-system/sabre-board-for-smart-devices-based-on-the-i.mx-7dual-applications-processors:MCIMX7SABRE
11
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
7
8
Cc: Peter Maydell <peter.maydell@linaro.org>
9
Cc: Jason Wang <jasowang@redhat.com>
10
Cc: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Cc: Marcel Apfelbaum <marcel.apfelbaum@zoho.com>
12
Cc: Michael S. Tsirkin <mst@redhat.com>
13
Cc: qemu-devel@nongnu.org
14
Cc: qemu-arm@nongnu.org
15
Cc: yurovsky@gmail.com
16
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Andrey Smirnov <andrew.smirnov@gmail.com>
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
---
20
hw/arm/Makefile.objs | 2 +-
21
hw/arm/mcimx7d-sabre.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++
22
2 files changed, 91 insertions(+), 1 deletion(-)
23
create mode 100644 hw/arm/mcimx7d-sabre.c
24
25
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
26
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
27
--- a/hw/arm/Makefile.objs
13
--- a/target/arm/translate-mve.c
28
+++ b/hw/arm/Makefile.objs
14
+++ b/target/arm/translate-mve.c
29
@@ -XXX,XX +XXX,XX @@ obj-$(CONFIG_MPS2) += mps2.o
15
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
30
obj-$(CONFIG_MPS2) += mps2-tz.o
16
return do_1imm(s, a, fn);
31
obj-$(CONFIG_MSF2) += msf2-soc.o msf2-som.o
17
}
32
obj-$(CONFIG_IOTKIT) += iotkit.o
18
33
-obj-$(CONFIG_FSL_IMX7) += fsl-imx7.o
19
-static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
34
+obj-$(CONFIG_FSL_IMX7) += fsl-imx7.o mcimx7d-sabre.o
20
- bool negateshift)
35
diff --git a/hw/arm/mcimx7d-sabre.c b/hw/arm/mcimx7d-sabre.c
21
+static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
36
new file mode 100644
22
+ bool negateshift, GVecGen2iFn vecfn)
37
index XXXXXXX..XXXXXXX
23
{
38
--- /dev/null
24
TCGv_ptr qd, qm;
39
+++ b/hw/arm/mcimx7d-sabre.c
25
int shift = a->shift;
40
@@ -XXX,XX +XXX,XX @@
26
@@ -XXX,XX +XXX,XX @@ static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
41
+/*
27
shift = -shift;
42
+ * Copyright (c) 2018, Impinj, Inc.
28
}
43
+ *
29
44
+ * MCIMX7D_SABRE Board System emulation.
30
- qd = mve_qreg_ptr(a->qd);
45
+ *
31
- qm = mve_qreg_ptr(a->qm);
46
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
32
- fn(cpu_env, qd, qm, tcg_constant_i32(shift));
47
+ *
33
- tcg_temp_free_ptr(qd);
48
+ * This code is licensed under the GPL, version 2 or later.
34
- tcg_temp_free_ptr(qm);
49
+ * See the file `COPYING' in the top level directory.
35
+ if (vecfn && mve_no_predication(s)) {
50
+ *
36
+ vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qm),
51
+ * It (partially) emulates a mcimx7d_sabre board, with a Freescale
37
+ shift, 16, 16);
52
+ * i.MX7 SoC
38
+ } else {
53
+ */
39
+ qd = mve_qreg_ptr(a->qd);
40
+ qm = mve_qreg_ptr(a->qm);
41
+ fn(cpu_env, qd, qm, tcg_constant_i32(shift));
42
+ tcg_temp_free_ptr(qd);
43
+ tcg_temp_free_ptr(qm);
44
+ }
45
mve_update_eci(s);
46
return true;
47
}
48
49
-#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \
50
- static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
51
- { \
52
- static MVEGenTwoOpShiftFn * const fns[] = { \
53
- gen_helper_mve_##FN##b, \
54
- gen_helper_mve_##FN##h, \
55
- gen_helper_mve_##FN##w, \
56
- NULL, \
57
- }; \
58
- return do_2shift(s, a, fns[a->size], NEGATESHIFT); \
59
+static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
60
+ bool negateshift)
61
+{
62
+ return do_2shift_vec(s, a, fn, negateshift, NULL);
63
+}
54
+
64
+
55
+#include "qemu/osdep.h"
65
+#define DO_2SHIFT_VEC(INSN, FN, NEGATESHIFT, VECFN) \
56
+#include "qapi/error.h"
66
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
57
+#include "qemu-common.h"
67
+ { \
58
+#include "hw/arm/fsl-imx7.h"
68
+ static MVEGenTwoOpShiftFn * const fns[] = { \
59
+#include "hw/boards.h"
69
+ gen_helper_mve_##FN##b, \
60
+#include "sysemu/sysemu.h"
70
+ gen_helper_mve_##FN##h, \
61
+#include "sysemu/device_tree.h"
71
+ gen_helper_mve_##FN##w, \
62
+#include "qemu/error-report.h"
72
+ NULL, \
63
+#include "sysemu/qtest.h"
73
+ }; \
64
+#include "net/net.h"
74
+ return do_2shift_vec(s, a, fns[a->size], NEGATESHIFT, VECFN); \
75
}
76
77
-DO_2SHIFT(VSHLI, vshli_u, false)
78
+#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \
79
+ DO_2SHIFT_VEC(INSN, FN, NEGATESHIFT, NULL)
65
+
80
+
66
+typedef struct {
81
+static void do_gvec_shri_s(unsigned vece, uint32_t dofs, uint32_t aofs,
67
+ FslIMX7State soc;
82
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
68
+ MemoryRegion ram;
83
+{
69
+} MCIMX7Sabre;
84
+ /*
85
+ * We get here with a negated shift count, and we must handle
86
+ * shifts by the element size, which tcg_gen_gvec_sari() does not do.
87
+ */
88
+ shift = -shift;
89
+ if (shift == (8 << vece)) {
90
+ shift--;
91
+ }
92
+ tcg_gen_gvec_sari(vece, dofs, aofs, shift, oprsz, maxsz);
93
+}
70
+
94
+
71
+static void mcimx7d_sabre_init(MachineState *machine)
95
+static void do_gvec_shri_u(unsigned vece, uint32_t dofs, uint32_t aofs,
96
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
72
+{
97
+{
73
+ static struct arm_boot_info boot_info;
98
+ /*
74
+ MCIMX7Sabre *s = g_new0(MCIMX7Sabre, 1);
99
+ * We get here with a negated shift count, and we must handle
75
+ Object *soc;
100
+ * shifts by the element size, which tcg_gen_gvec_shri() does not do.
76
+ int i;
101
+ */
77
+
102
+ shift = -shift;
78
+ if (machine->ram_size > FSL_IMX7_MMDC_SIZE) {
103
+ if (shift == (8 << vece)) {
79
+ error_report("RAM size " RAM_ADDR_FMT " above max supported (%08x)",
104
+ tcg_gen_gvec_dup_imm(vece, dofs, oprsz, maxsz, 0);
80
+ machine->ram_size, FSL_IMX7_MMDC_SIZE);
105
+ } else {
81
+ exit(1);
106
+ tcg_gen_gvec_shri(vece, dofs, aofs, shift, oprsz, maxsz);
82
+ }
83
+
84
+ boot_info = (struct arm_boot_info) {
85
+ .loader_start = FSL_IMX7_MMDC_ADDR,
86
+ .board_id = -1,
87
+ .ram_size = machine->ram_size,
88
+ .kernel_filename = machine->kernel_filename,
89
+ .kernel_cmdline = machine->kernel_cmdline,
90
+ .initrd_filename = machine->initrd_filename,
91
+ .nb_cpus = smp_cpus,
92
+ };
93
+
94
+ object_initialize(&s->soc, sizeof(s->soc), TYPE_FSL_IMX7);
95
+ soc = OBJECT(&s->soc);
96
+ object_property_add_child(OBJECT(machine), "soc", soc, &error_fatal);
97
+ object_property_set_bool(soc, true, "realized", &error_fatal);
98
+
99
+ memory_region_allocate_system_memory(&s->ram, NULL, "mcimx7d-sabre.ram",
100
+ machine->ram_size);
101
+ memory_region_add_subregion(get_system_memory(),
102
+ FSL_IMX7_MMDC_ADDR, &s->ram);
103
+
104
+ for (i = 0; i < FSL_IMX7_NUM_USDHCS; i++) {
105
+ BusState *bus;
106
+ DeviceState *carddev;
107
+ DriveInfo *di;
108
+ BlockBackend *blk;
109
+
110
+ di = drive_get_next(IF_SD);
111
+ blk = di ? blk_by_legacy_dinfo(di) : NULL;
112
+ bus = qdev_get_child_bus(DEVICE(&s->soc.usdhc[i]), "sd-bus");
113
+ carddev = qdev_create(bus, TYPE_SD_CARD);
114
+ qdev_prop_set_drive(carddev, "drive", blk, &error_fatal);
115
+ object_property_set_bool(OBJECT(carddev), true,
116
+ "realized", &error_fatal);
117
+ }
118
+
119
+ if (!qtest_enabled()) {
120
+ arm_load_kernel(&s->soc.cpu[0], &boot_info);
121
+ }
107
+ }
122
+}
108
+}
123
+
109
+
124
+static void mcimx7d_sabre_machine_init(MachineClass *mc)
110
+DO_2SHIFT_VEC(VSHLI, vshli_u, false, tcg_gen_gvec_shli)
125
+{
111
DO_2SHIFT(VQSHLI_S, vqshli_s, false)
126
+ mc->desc = "Freescale i.MX7 DUAL SABRE (Cortex A7)";
112
DO_2SHIFT(VQSHLI_U, vqshli_u, false)
127
+ mc->init = mcimx7d_sabre_init;
113
DO_2SHIFT(VQSHLUI, vqshlui_s, false)
128
+ mc->max_cpus = FSL_IMX7_NUM_CPUS;
114
/* These right shifts use a left-shift helper with negated shift count */
129
+}
115
-DO_2SHIFT(VSHRI_S, vshli_s, true)
130
+DEFINE_MACHINE("mcimx7d-sabre", mcimx7d_sabre_machine_init)
116
-DO_2SHIFT(VSHRI_U, vshli_u, true)
117
+DO_2SHIFT_VEC(VSHRI_S, vshli_s, true, do_gvec_shri_s)
118
+DO_2SHIFT_VEC(VSHRI_U, vshli_u, true, do_gvec_shri_u)
119
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
120
DO_2SHIFT(VRSHRI_U, vrshli_u, true)
121
131
--
122
--
132
2.16.2
123
2.20.1
133
124
134
125
diff view generated by jsdifflib
1
From: Andrey Smirnov <andrew.smirnov@gmail.com>
1
Optimize the MVE VSHLL insns by using TCG vector ops when possible.
2
This includes the VMOVL insn, which we handle in mve.decode as "VSHLL
3
with zero shift count".
2
4
3
Add code needed to get a functional PCI subsytem when using in
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
conjunction with upstream Linux guest (4.13+). Tested to work against
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
"e1000e" (network adapter, using MSI interrupts) as well as
7
Message-id: 20210913095440.13462-11-peter.maydell@linaro.org
6
"usb-ehci" (USB controller, using legacy PCI interrupts).
8
---
9
target/arm/translate-mve.c | 67 +++++++++++++++++++++++++++++++++-----
10
1 file changed, 59 insertions(+), 8 deletions(-)
7
11
8
Based on "i.MX6 Applications Processor Reference Manual" (Document
12
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
9
Number: IMX6DQRM Rev. 4) as well as corresponding dirver in Linux
10
kernel (circa 4.13 - 4.16 found in drivers/pci/dwc/*)
11
12
Signed-off-by: Andrey Smirnov <andrew.smirnov@gmail.com>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
16
hw/pci-host/Makefile.objs | 2 +
17
include/hw/pci-host/designware.h | 102 ++++++
18
include/hw/pci/pci_ids.h | 2 +
19
hw/pci-host/designware.c | 754 +++++++++++++++++++++++++++++++++++++++
20
default-configs/arm-softmmu.mak | 1 +
21
5 files changed, 861 insertions(+)
22
create mode 100644 include/hw/pci-host/designware.h
23
create mode 100644 hw/pci-host/designware.c
24
25
diff --git a/hw/pci-host/Makefile.objs b/hw/pci-host/Makefile.objs
26
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
27
--- a/hw/pci-host/Makefile.objs
14
--- a/target/arm/translate-mve.c
28
+++ b/hw/pci-host/Makefile.objs
15
+++ b/target/arm/translate-mve.c
29
@@ -XXX,XX +XXX,XX @@ common-obj-$(CONFIG_PCI_PIIX) += piix.o
16
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SCALAR(VQSHL_U_scalar, vqshli_u)
30
common-obj-$(CONFIG_PCI_Q35) += q35.o
17
DO_2SHIFT_SCALAR(VQRSHL_S_scalar, vqrshli_s)
31
common-obj-$(CONFIG_PCI_GENERIC) += gpex.o
18
DO_2SHIFT_SCALAR(VQRSHL_U_scalar, vqrshli_u)
32
common-obj-$(CONFIG_PCI_XILINX) += xilinx-pcie.o
19
33
+
20
-#define DO_VSHLL(INSN, FN) \
34
+common-obj-$(CONFIG_PCI_DESIGNWARE) += designware.o
21
- static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
35
diff --git a/include/hw/pci-host/designware.h b/include/hw/pci-host/designware.h
22
- { \
36
new file mode 100644
23
- static MVEGenTwoOpShiftFn * const fns[] = { \
37
index XXXXXXX..XXXXXXX
24
- gen_helper_mve_##FN##b, \
38
--- /dev/null
25
- gen_helper_mve_##FN##h, \
39
+++ b/include/hw/pci-host/designware.h
26
- }; \
40
@@ -XXX,XX +XXX,XX @@
27
- return do_2shift(s, a, fns[a->size], false); \
28
+#define DO_VSHLL(INSN, FN) \
29
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
30
+ { \
31
+ static MVEGenTwoOpShiftFn * const fns[] = { \
32
+ gen_helper_mve_##FN##b, \
33
+ gen_helper_mve_##FN##h, \
34
+ }; \
35
+ return do_2shift_vec(s, a, fns[a->size], false, do_gvec_##FN); \
36
}
37
41
+/*
38
+/*
42
+ * Copyright (c) 2017, Impinj, Inc.
39
+ * For the VSHLL vector helpers, the vece is the size of the input
43
+ *
40
+ * (ie MO_8 or MO_16); the helpers want to work in the output size.
44
+ * Designware PCIe IP block emulation
41
+ * The shift count can be 0..<input size>, inclusive. (0 is VMOVL.)
45
+ *
46
+ * This library is free software; you can redistribute it and/or
47
+ * modify it under the terms of the GNU Lesser General Public
48
+ * License as published by the Free Software Foundation; either
49
+ * version 2 of the License, or (at your option) any later version.
50
+ *
51
+ * This library is distributed in the hope that it will be useful,
52
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
53
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
54
+ * Lesser General Public License for more details.
55
+ *
56
+ * You should have received a copy of the GNU Lesser General Public
57
+ * License along with this library; if not, see
58
+ * <http://www.gnu.org/licenses/>.
59
+ */
42
+ */
60
+
43
+static void do_gvec_vshllbs(unsigned vece, uint32_t dofs, uint32_t aofs,
61
+#ifndef DESIGNWARE_H
44
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
62
+#define DESIGNWARE_H
63
+
64
+#include "hw/hw.h"
65
+#include "hw/sysbus.h"
66
+#include "hw/pci/pci.h"
67
+#include "hw/pci/pci_bus.h"
68
+#include "hw/pci/pcie_host.h"
69
+#include "hw/pci/pci_bridge.h"
70
+
71
+#define TYPE_DESIGNWARE_PCIE_HOST "designware-pcie-host"
72
+#define DESIGNWARE_PCIE_HOST(obj) \
73
+ OBJECT_CHECK(DesignwarePCIEHost, (obj), TYPE_DESIGNWARE_PCIE_HOST)
74
+
75
+#define TYPE_DESIGNWARE_PCIE_ROOT "designware-pcie-root"
76
+#define DESIGNWARE_PCIE_ROOT(obj) \
77
+ OBJECT_CHECK(DesignwarePCIERoot, (obj), TYPE_DESIGNWARE_PCIE_ROOT)
78
+
79
+struct DesignwarePCIERoot;
80
+typedef struct DesignwarePCIERoot DesignwarePCIERoot;
81
+
82
+typedef struct DesignwarePCIEViewport {
83
+ DesignwarePCIERoot *root;
84
+
85
+ MemoryRegion cfg;
86
+ MemoryRegion mem;
87
+
88
+ uint64_t base;
89
+ uint64_t target;
90
+ uint32_t limit;
91
+ uint32_t cr[2];
92
+
93
+ bool inbound;
94
+} DesignwarePCIEViewport;
95
+
96
+typedef struct DesignwarePCIEMSIBank {
97
+ uint32_t enable;
98
+ uint32_t mask;
99
+ uint32_t status;
100
+} DesignwarePCIEMSIBank;
101
+
102
+typedef struct DesignwarePCIEMSI {
103
+ uint64_t base;
104
+ MemoryRegion iomem;
105
+
106
+#define DESIGNWARE_PCIE_NUM_MSI_BANKS 1
107
+
108
+ DesignwarePCIEMSIBank intr[DESIGNWARE_PCIE_NUM_MSI_BANKS];
109
+} DesignwarePCIEMSI;
110
+
111
+struct DesignwarePCIERoot {
112
+ PCIBridge parent_obj;
113
+
114
+ uint32_t atu_viewport;
115
+
116
+#define DESIGNWARE_PCIE_VIEWPORT_OUTBOUND 0
117
+#define DESIGNWARE_PCIE_VIEWPORT_INBOUND 1
118
+#define DESIGNWARE_PCIE_NUM_VIEWPORTS 4
119
+
120
+ DesignwarePCIEViewport viewports[2][DESIGNWARE_PCIE_NUM_VIEWPORTS];
121
+ DesignwarePCIEMSI msi;
122
+};
123
+
124
+typedef struct DesignwarePCIEHost {
125
+ PCIHostState parent_obj;
126
+
127
+ DesignwarePCIERoot root;
128
+
129
+ struct {
130
+ AddressSpace address_space;
131
+ MemoryRegion address_space_root;
132
+
133
+ MemoryRegion memory;
134
+ MemoryRegion io;
135
+
136
+ qemu_irq irqs[4];
137
+ } pci;
138
+
139
+ MemoryRegion mmio;
140
+} DesignwarePCIEHost;
141
+
142
+#endif /* DESIGNWARE_H */
143
diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h
144
index XXXXXXX..XXXXXXX 100644
145
--- a/include/hw/pci/pci_ids.h
146
+++ b/include/hw/pci/pci_ids.h
147
@@ -XXX,XX +XXX,XX @@
148
#define PCI_VENDOR_ID_VMWARE 0x15ad
149
#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820
150
151
+#define PCI_VENDOR_ID_SYNOPSYS 0x16C3
152
+
153
#endif
154
diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c
155
new file mode 100644
156
index XXXXXXX..XXXXXXX
157
--- /dev/null
158
+++ b/hw/pci-host/designware.c
159
@@ -XXX,XX +XXX,XX @@
160
+/*
161
+ * Copyright (c) 2018, Impinj, Inc.
162
+ *
163
+ * Designware PCIe IP block emulation
164
+ *
165
+ * This library is free software; you can redistribute it and/or
166
+ * modify it under the terms of the GNU Lesser General Public
167
+ * License as published by the Free Software Foundation; either
168
+ * version 2 of the License, or (at your option) any later version.
169
+ *
170
+ * This library is distributed in the hope that it will be useful,
171
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
172
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
173
+ * Lesser General Public License for more details.
174
+ *
175
+ * You should have received a copy of the GNU Lesser General Public
176
+ * License along with this library; if not, see
177
+ * <http://www.gnu.org/licenses/>.
178
+ */
179
+
180
+#include "qemu/osdep.h"
181
+#include "qapi/error.h"
182
+#include "hw/pci/msi.h"
183
+#include "hw/pci/pci_bridge.h"
184
+#include "hw/pci/pci_host.h"
185
+#include "hw/pci/pcie_port.h"
186
+#include "hw/pci-host/designware.h"
187
+
188
+#define DESIGNWARE_PCIE_PORT_LINK_CONTROL 0x710
189
+#define DESIGNWARE_PCIE_PHY_DEBUG_R1 0x72C
190
+#define DESIGNWARE_PCIE_PHY_DEBUG_R1_XMLH_LINK_UP BIT(4)
191
+#define DESIGNWARE_PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
192
+#define DESIGNWARE_PCIE_PORT_LOGIC_SPEED_CHANGE BIT(17)
193
+#define DESIGNWARE_PCIE_MSI_ADDR_LO 0x820
194
+#define DESIGNWARE_PCIE_MSI_ADDR_HI 0x824
195
+#define DESIGNWARE_PCIE_MSI_INTR0_ENABLE 0x828
196
+#define DESIGNWARE_PCIE_MSI_INTR0_MASK 0x82C
197
+#define DESIGNWARE_PCIE_MSI_INTR0_STATUS 0x830
198
+#define DESIGNWARE_PCIE_ATU_VIEWPORT 0x900
199
+#define DESIGNWARE_PCIE_ATU_REGION_INBOUND BIT(31)
200
+#define DESIGNWARE_PCIE_ATU_CR1 0x904
201
+#define DESIGNWARE_PCIE_ATU_TYPE_MEM (0x0 << 0)
202
+#define DESIGNWARE_PCIE_ATU_CR2 0x908
203
+#define DESIGNWARE_PCIE_ATU_ENABLE BIT(31)
204
+#define DESIGNWARE_PCIE_ATU_LOWER_BASE 0x90C
205
+#define DESIGNWARE_PCIE_ATU_UPPER_BASE 0x910
206
+#define DESIGNWARE_PCIE_ATU_LIMIT 0x914
207
+#define DESIGNWARE_PCIE_ATU_LOWER_TARGET 0x918
208
+#define DESIGNWARE_PCIE_ATU_BUS(x) (((x) >> 24) & 0xff)
209
+#define DESIGNWARE_PCIE_ATU_DEVFN(x) (((x) >> 16) & 0xff)
210
+#define DESIGNWARE_PCIE_ATU_UPPER_TARGET 0x91C
211
+
212
+static DesignwarePCIEHost *
213
+designware_pcie_root_to_host(DesignwarePCIERoot *root)
214
+{
45
+{
215
+ BusState *bus = qdev_get_parent_bus(DEVICE(root));
46
+ unsigned ovece = vece + 1;
216
+ return DESIGNWARE_PCIE_HOST(bus->parent);
47
+ unsigned ibits = vece == MO_8 ? 8 : 16;
48
+ tcg_gen_gvec_shli(ovece, dofs, aofs, ibits, oprsz, maxsz);
49
+ tcg_gen_gvec_sari(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
217
+}
50
+}
218
+
51
+
219
+static void designware_pcie_root_msi_write(void *opaque, hwaddr addr,
52
+static void do_gvec_vshllbu(unsigned vece, uint32_t dofs, uint32_t aofs,
220
+ uint64_t val, unsigned len)
53
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
221
+{
54
+{
222
+ DesignwarePCIERoot *root = DESIGNWARE_PCIE_ROOT(opaque);
55
+ unsigned ovece = vece + 1;
223
+ DesignwarePCIEHost *host = designware_pcie_root_to_host(root);
56
+ tcg_gen_gvec_andi(ovece, dofs, aofs,
57
+ ovece == MO_16 ? 0xff : 0xffff, oprsz, maxsz);
58
+ tcg_gen_gvec_shli(ovece, dofs, dofs, shift, oprsz, maxsz);
59
+}
224
+
60
+
225
+ root->msi.intr[0].status |= BIT(val) & root->msi.intr[0].enable;
61
+static void do_gvec_vshllts(unsigned vece, uint32_t dofs, uint32_t aofs,
226
+
62
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
227
+ if (root->msi.intr[0].status & ~root->msi.intr[0].mask) {
63
+{
228
+ qemu_set_irq(host->pci.irqs[0], 1);
64
+ unsigned ovece = vece + 1;
65
+ unsigned ibits = vece == MO_8 ? 8 : 16;
66
+ if (shift == 0) {
67
+ tcg_gen_gvec_sari(ovece, dofs, aofs, ibits, oprsz, maxsz);
68
+ } else {
69
+ tcg_gen_gvec_andi(ovece, dofs, aofs,
70
+ ovece == MO_16 ? 0xff00 : 0xffff0000, oprsz, maxsz);
71
+ tcg_gen_gvec_sari(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
229
+ }
72
+ }
230
+}
73
+}
231
+
74
+
232
+static const MemoryRegionOps designware_pci_host_msi_ops = {
75
+static void do_gvec_vshlltu(unsigned vece, uint32_t dofs, uint32_t aofs,
233
+ .write = designware_pcie_root_msi_write,
76
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
234
+ .endianness = DEVICE_LITTLE_ENDIAN,
235
+ .valid = {
236
+ .min_access_size = 4,
237
+ .max_access_size = 4,
238
+ },
239
+};
240
+
241
+static void designware_pcie_root_update_msi_mapping(DesignwarePCIERoot *root)
242
+
243
+{
77
+{
244
+ MemoryRegion *mem = &root->msi.iomem;
78
+ unsigned ovece = vece + 1;
245
+ const uint64_t base = root->msi.base;
79
+ unsigned ibits = vece == MO_8 ? 8 : 16;
246
+ const bool enable = root->msi.intr[0].enable;
80
+ if (shift == 0) {
247
+
81
+ tcg_gen_gvec_shri(ovece, dofs, aofs, ibits, oprsz, maxsz);
248
+ memory_region_set_address(mem, base);
249
+ memory_region_set_enabled(mem, enable);
250
+}
251
+
252
+static DesignwarePCIEViewport *
253
+designware_pcie_root_get_current_viewport(DesignwarePCIERoot *root)
254
+{
255
+ const unsigned int idx = root->atu_viewport & 0xF;
256
+ const unsigned int dir =
257
+ !!(root->atu_viewport & DESIGNWARE_PCIE_ATU_REGION_INBOUND);
258
+ return &root->viewports[dir][idx];
259
+}
260
+
261
+static uint32_t
262
+designware_pcie_root_config_read(PCIDevice *d, uint32_t address, int len)
263
+{
264
+ DesignwarePCIERoot *root = DESIGNWARE_PCIE_ROOT(d);
265
+ DesignwarePCIEViewport *viewport =
266
+ designware_pcie_root_get_current_viewport(root);
267
+
268
+ uint32_t val;
269
+
270
+ switch (address) {
271
+ case DESIGNWARE_PCIE_PORT_LINK_CONTROL:
272
+ /*
273
+ * Linux guest uses this register only to configure number of
274
+ * PCIE lane (which in our case is irrelevant) and doesn't
275
+ * really care about the value it reads from this register
276
+ */
277
+ val = 0xDEADBEEF;
278
+ break;
279
+
280
+ case DESIGNWARE_PCIE_LINK_WIDTH_SPEED_CONTROL:
281
+ /*
282
+ * To make sure that any code in guest waiting for speed
283
+ * change does not time out we always report
284
+ * PORT_LOGIC_SPEED_CHANGE as set
285
+ */
286
+ val = DESIGNWARE_PCIE_PORT_LOGIC_SPEED_CHANGE;
287
+ break;
288
+
289
+ case DESIGNWARE_PCIE_MSI_ADDR_LO:
290
+ val = root->msi.base;
291
+ break;
292
+
293
+ case DESIGNWARE_PCIE_MSI_ADDR_HI:
294
+ val = root->msi.base >> 32;
295
+ break;
296
+
297
+ case DESIGNWARE_PCIE_MSI_INTR0_ENABLE:
298
+ val = root->msi.intr[0].enable;
299
+ break;
300
+
301
+ case DESIGNWARE_PCIE_MSI_INTR0_MASK:
302
+ val = root->msi.intr[0].mask;
303
+ break;
304
+
305
+ case DESIGNWARE_PCIE_MSI_INTR0_STATUS:
306
+ val = root->msi.intr[0].status;
307
+ break;
308
+
309
+ case DESIGNWARE_PCIE_PHY_DEBUG_R1:
310
+ val = DESIGNWARE_PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
311
+ break;
312
+
313
+ case DESIGNWARE_PCIE_ATU_VIEWPORT:
314
+ val = root->atu_viewport;
315
+ break;
316
+
317
+ case DESIGNWARE_PCIE_ATU_LOWER_BASE:
318
+ val = viewport->base;
319
+ break;
320
+
321
+ case DESIGNWARE_PCIE_ATU_UPPER_BASE:
322
+ val = viewport->base >> 32;
323
+ break;
324
+
325
+ case DESIGNWARE_PCIE_ATU_LOWER_TARGET:
326
+ val = viewport->target;
327
+ break;
328
+
329
+ case DESIGNWARE_PCIE_ATU_UPPER_TARGET:
330
+ val = viewport->target >> 32;
331
+ break;
332
+
333
+ case DESIGNWARE_PCIE_ATU_LIMIT:
334
+ val = viewport->limit;
335
+ break;
336
+
337
+ case DESIGNWARE_PCIE_ATU_CR1:
338
+ case DESIGNWARE_PCIE_ATU_CR2: /* FALLTHROUGH */
339
+ val = viewport->cr[(address - DESIGNWARE_PCIE_ATU_CR1) /
340
+ sizeof(uint32_t)];
341
+ break;
342
+
343
+ default:
344
+ val = pci_default_read_config(d, address, len);
345
+ break;
346
+ }
347
+
348
+ return val;
349
+}
350
+
351
+static uint64_t designware_pcie_root_data_access(void *opaque, hwaddr addr,
352
+ uint64_t *val, unsigned len)
353
+{
354
+ DesignwarePCIEViewport *viewport = opaque;
355
+ DesignwarePCIERoot *root = viewport->root;
356
+
357
+ const uint8_t busnum = DESIGNWARE_PCIE_ATU_BUS(viewport->target);
358
+ const uint8_t devfn = DESIGNWARE_PCIE_ATU_DEVFN(viewport->target);
359
+ PCIBus *pcibus = pci_get_bus(PCI_DEVICE(root));
360
+ PCIDevice *pcidev = pci_find_device(pcibus, busnum, devfn);
361
+
362
+ if (pcidev) {
363
+ addr &= pci_config_size(pcidev) - 1;
364
+
365
+ if (val) {
366
+ pci_host_config_write_common(pcidev, addr,
367
+ pci_config_size(pcidev),
368
+ *val, len);
369
+ } else {
370
+ return pci_host_config_read_common(pcidev, addr,
371
+ pci_config_size(pcidev),
372
+ len);
373
+ }
374
+ }
375
+
376
+ return UINT64_MAX;
377
+}
378
+
379
+static uint64_t designware_pcie_root_data_read(void *opaque, hwaddr addr,
380
+ unsigned len)
381
+{
382
+ return designware_pcie_root_data_access(opaque, addr, NULL, len);
383
+}
384
+
385
+static void designware_pcie_root_data_write(void *opaque, hwaddr addr,
386
+ uint64_t val, unsigned len)
387
+{
388
+ designware_pcie_root_data_access(opaque, addr, &val, len);
389
+}
390
+
391
+static const MemoryRegionOps designware_pci_host_conf_ops = {
392
+ .read = designware_pcie_root_data_read,
393
+ .write = designware_pcie_root_data_write,
394
+ .endianness = DEVICE_LITTLE_ENDIAN,
395
+ .valid = {
396
+ .min_access_size = 1,
397
+ .max_access_size = 4,
398
+ },
399
+};
400
+
401
+static void designware_pcie_update_viewport(DesignwarePCIERoot *root,
402
+ DesignwarePCIEViewport *viewport)
403
+{
404
+ const uint64_t target = viewport->target;
405
+ const uint64_t base = viewport->base;
406
+ const uint64_t size = (uint64_t)viewport->limit - base + 1;
407
+ const bool enabled = viewport->cr[1] & DESIGNWARE_PCIE_ATU_ENABLE;
408
+
409
+ MemoryRegion *current, *other;
410
+
411
+ if (viewport->cr[0] == DESIGNWARE_PCIE_ATU_TYPE_MEM) {
412
+ current = &viewport->mem;
413
+ other = &viewport->cfg;
414
+ memory_region_set_alias_offset(current, target);
415
+ } else {
82
+ } else {
416
+ current = &viewport->cfg;
83
+ tcg_gen_gvec_andi(ovece, dofs, aofs,
417
+ other = &viewport->mem;
84
+ ovece == MO_16 ? 0xff00 : 0xffff0000, oprsz, maxsz);
418
+ }
85
+ tcg_gen_gvec_shri(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
419
+
420
+ /*
421
+ * An outbound viewport can be reconfigure from being MEM to CFG,
422
+ * to account for that we disable the "other" memory region that
423
+ * becomes unused due to that fact.
424
+ */
425
+ memory_region_set_enabled(other, false);
426
+ if (enabled) {
427
+ memory_region_set_size(current, size);
428
+ memory_region_set_address(current, base);
429
+ }
430
+ memory_region_set_enabled(current, enabled);
431
+}
432
+
433
+static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address,
434
+ uint32_t val, int len)
435
+{
436
+ DesignwarePCIERoot *root = DESIGNWARE_PCIE_ROOT(d);
437
+ DesignwarePCIEHost *host = designware_pcie_root_to_host(root);
438
+ DesignwarePCIEViewport *viewport =
439
+ designware_pcie_root_get_current_viewport(root);
440
+
441
+ switch (address) {
442
+ case DESIGNWARE_PCIE_PORT_LINK_CONTROL:
443
+ case DESIGNWARE_PCIE_LINK_WIDTH_SPEED_CONTROL:
444
+ case DESIGNWARE_PCIE_PHY_DEBUG_R1:
445
+ /* No-op */
446
+ break;
447
+
448
+ case DESIGNWARE_PCIE_MSI_ADDR_LO:
449
+ root->msi.base &= 0xFFFFFFFF00000000ULL;
450
+ root->msi.base |= val;
451
+ break;
452
+
453
+ case DESIGNWARE_PCIE_MSI_ADDR_HI:
454
+ root->msi.base &= 0x00000000FFFFFFFFULL;
455
+ root->msi.base |= (uint64_t)val << 32;
456
+ break;
457
+
458
+ case DESIGNWARE_PCIE_MSI_INTR0_ENABLE: {
459
+ const bool update_msi_mapping = !root->msi.intr[0].enable ^ !!val;
460
+
461
+ root->msi.intr[0].enable = val;
462
+
463
+ if (update_msi_mapping) {
464
+ designware_pcie_root_update_msi_mapping(root);
465
+ }
466
+ break;
467
+ }
468
+
469
+ case DESIGNWARE_PCIE_MSI_INTR0_MASK:
470
+ root->msi.intr[0].mask = val;
471
+ break;
472
+
473
+ case DESIGNWARE_PCIE_MSI_INTR0_STATUS:
474
+ root->msi.intr[0].status ^= val;
475
+ if (!root->msi.intr[0].status) {
476
+ qemu_set_irq(host->pci.irqs[0], 0);
477
+ }
478
+ break;
479
+
480
+ case DESIGNWARE_PCIE_ATU_VIEWPORT:
481
+ root->atu_viewport = val;
482
+ break;
483
+
484
+ case DESIGNWARE_PCIE_ATU_LOWER_BASE:
485
+ viewport->base &= 0xFFFFFFFF00000000ULL;
486
+ viewport->base |= val;
487
+ break;
488
+
489
+ case DESIGNWARE_PCIE_ATU_UPPER_BASE:
490
+ viewport->base &= 0x00000000FFFFFFFFULL;
491
+ viewport->base |= (uint64_t)val << 32;
492
+ break;
493
+
494
+ case DESIGNWARE_PCIE_ATU_LOWER_TARGET:
495
+ viewport->target &= 0xFFFFFFFF00000000ULL;
496
+ viewport->target |= val;
497
+ break;
498
+
499
+ case DESIGNWARE_PCIE_ATU_UPPER_TARGET:
500
+ viewport->target &= 0x00000000FFFFFFFFULL;
501
+ viewport->target |= val;
502
+ break;
503
+
504
+ case DESIGNWARE_PCIE_ATU_LIMIT:
505
+ viewport->limit = val;
506
+ break;
507
+
508
+ case DESIGNWARE_PCIE_ATU_CR1:
509
+ viewport->cr[0] = val;
510
+ break;
511
+ case DESIGNWARE_PCIE_ATU_CR2:
512
+ viewport->cr[1] = val;
513
+ designware_pcie_update_viewport(root, viewport);
514
+ break;
515
+
516
+ default:
517
+ pci_bridge_write_config(d, address, val, len);
518
+ break;
519
+ }
86
+ }
520
+}
87
+}
521
+
88
+
522
+static char *designware_pcie_viewport_name(const char *direction,
89
DO_VSHLL(VSHLL_BS, vshllbs)
523
+ unsigned int i,
90
DO_VSHLL(VSHLL_BU, vshllbu)
524
+ const char *type)
91
DO_VSHLL(VSHLL_TS, vshllts)
525
+{
526
+ return g_strdup_printf("PCI %s Viewport %u [%s]",
527
+ direction, i, type);
528
+}
529
+
530
+static void designware_pcie_root_realize(PCIDevice *dev, Error **errp)
531
+{
532
+ DesignwarePCIERoot *root = DESIGNWARE_PCIE_ROOT(dev);
533
+ DesignwarePCIEHost *host = designware_pcie_root_to_host(root);
534
+ MemoryRegion *address_space = &host->pci.memory;
535
+ PCIBridge *br = PCI_BRIDGE(dev);
536
+ DesignwarePCIEViewport *viewport;
537
+ /*
538
+ * Dummy values used for initial configuration of MemoryRegions
539
+ * that belong to a given viewport
540
+ */
541
+ const hwaddr dummy_offset = 0;
542
+ const uint64_t dummy_size = 4;
543
+ size_t i;
544
+
545
+ br->bus_name = "dw-pcie";
546
+
547
+ pci_set_word(dev->config + PCI_COMMAND,
548
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
549
+
550
+ pci_config_set_interrupt_pin(dev->config, 1);
551
+ pci_bridge_initfn(dev, TYPE_PCIE_BUS);
552
+
553
+ pcie_port_init_reg(dev);
554
+
555
+ pcie_cap_init(dev, 0x70, PCI_EXP_TYPE_ROOT_PORT,
556
+ 0, &error_fatal);
557
+
558
+ msi_nonbroken = true;
559
+ msi_init(dev, 0x50, 32, true, true, &error_fatal);
560
+
561
+ for (i = 0; i < DESIGNWARE_PCIE_NUM_VIEWPORTS; i++) {
562
+ MemoryRegion *source, *destination, *mem;
563
+ const char *direction;
564
+ char *name;
565
+
566
+ viewport = &root->viewports[DESIGNWARE_PCIE_VIEWPORT_INBOUND][i];
567
+ viewport->inbound = true;
568
+ viewport->base = 0x0000000000000000ULL;
569
+ viewport->target = 0x0000000000000000ULL;
570
+ viewport->limit = UINT32_MAX;
571
+ viewport->cr[0] = DESIGNWARE_PCIE_ATU_TYPE_MEM;
572
+
573
+ source = &host->pci.address_space_root;
574
+ destination = get_system_memory();
575
+ direction = "Inbound";
576
+
577
+ /*
578
+ * Configure MemoryRegion implementing PCI -> CPU memory
579
+ * access
580
+ */
581
+ mem = &viewport->mem;
582
+ name = designware_pcie_viewport_name(direction, i, "MEM");
583
+ memory_region_init_alias(mem, OBJECT(root), name, destination,
584
+ dummy_offset, dummy_size);
585
+ memory_region_add_subregion_overlap(source, dummy_offset, mem, -1);
586
+ memory_region_set_enabled(mem, false);
587
+ g_free(name);
588
+
589
+ viewport = &root->viewports[DESIGNWARE_PCIE_VIEWPORT_OUTBOUND][i];
590
+ viewport->root = root;
591
+ viewport->inbound = false;
592
+ viewport->base = 0x0000000000000000ULL;
593
+ viewport->target = 0x0000000000000000ULL;
594
+ viewport->limit = UINT32_MAX;
595
+ viewport->cr[0] = DESIGNWARE_PCIE_ATU_TYPE_MEM;
596
+
597
+ destination = &host->pci.memory;
598
+ direction = "Outbound";
599
+ source = get_system_memory();
600
+
601
+ /*
602
+ * Configure MemoryRegion implementing CPU -> PCI memory
603
+ * access
604
+ */
605
+ mem = &viewport->mem;
606
+ name = designware_pcie_viewport_name(direction, i, "MEM");
607
+ memory_region_init_alias(mem, OBJECT(root), name, destination,
608
+ dummy_offset, dummy_size);
609
+ memory_region_add_subregion(source, dummy_offset, mem);
610
+ memory_region_set_enabled(mem, false);
611
+ g_free(name);
612
+
613
+ /*
614
+ * Configure MemoryRegion implementing access to configuration
615
+ * space
616
+ */
617
+ mem = &viewport->cfg;
618
+ name = designware_pcie_viewport_name(direction, i, "CFG");
619
+ memory_region_init_io(&viewport->cfg, OBJECT(root),
620
+ &designware_pci_host_conf_ops,
621
+ viewport, name, dummy_size);
622
+ memory_region_add_subregion(source, dummy_offset, mem);
623
+ memory_region_set_enabled(mem, false);
624
+ g_free(name);
625
+ }
626
+
627
+ /*
628
+ * If no inbound iATU windows are configured, HW defaults to
629
+ * letting inbound TLPs to pass in. We emulate that by exlicitly
630
+ * configuring first inbound window to cover all of target's
631
+ * address space.
632
+ *
633
+ * NOTE: This will not work correctly for the case when first
634
+ * configured inbound window is window 0
635
+ */
636
+ viewport = &root->viewports[DESIGNWARE_PCIE_VIEWPORT_INBOUND][0];
637
+ viewport->cr[1] = DESIGNWARE_PCIE_ATU_ENABLE;
638
+ designware_pcie_update_viewport(root, viewport);
639
+
640
+ memory_region_init_io(&root->msi.iomem, OBJECT(root),
641
+ &designware_pci_host_msi_ops,
642
+ root, "pcie-msi", 0x4);
643
+ /*
644
+ * We initially place MSI interrupt I/O region a adress 0 and
645
+ * disable it. It'll be later moved to correct offset and enabled
646
+ * in designware_pcie_root_update_msi_mapping() as a part of
647
+ * initialization done by guest OS
648
+ */
649
+ memory_region_add_subregion(address_space, dummy_offset, &root->msi.iomem);
650
+ memory_region_set_enabled(&root->msi.iomem, false);
651
+}
652
+
653
+static void designware_pcie_set_irq(void *opaque, int irq_num, int level)
654
+{
655
+ DesignwarePCIEHost *host = DESIGNWARE_PCIE_HOST(opaque);
656
+
657
+ qemu_set_irq(host->pci.irqs[irq_num], level);
658
+}
659
+
660
+static const char *
661
+designware_pcie_host_root_bus_path(PCIHostState *host_bridge, PCIBus *rootbus)
662
+{
663
+ return "0000:00";
664
+}
665
+
666
+static const VMStateDescription vmstate_designware_pcie_msi_bank = {
667
+ .name = "designware-pcie-msi-bank",
668
+ .version_id = 1,
669
+ .minimum_version_id = 1,
670
+ .fields = (VMStateField[]) {
671
+ VMSTATE_UINT32(enable, DesignwarePCIEMSIBank),
672
+ VMSTATE_UINT32(mask, DesignwarePCIEMSIBank),
673
+ VMSTATE_UINT32(status, DesignwarePCIEMSIBank),
674
+ VMSTATE_END_OF_LIST()
675
+ }
676
+};
677
+
678
+static const VMStateDescription vmstate_designware_pcie_msi = {
679
+ .name = "designware-pcie-msi",
680
+ .version_id = 1,
681
+ .minimum_version_id = 1,
682
+ .fields = (VMStateField[]) {
683
+ VMSTATE_UINT64(base, DesignwarePCIEMSI),
684
+ VMSTATE_STRUCT_ARRAY(intr,
685
+ DesignwarePCIEMSI,
686
+ DESIGNWARE_PCIE_NUM_MSI_BANKS,
687
+ 1,
688
+ vmstate_designware_pcie_msi_bank,
689
+ DesignwarePCIEMSIBank),
690
+ VMSTATE_END_OF_LIST()
691
+ }
692
+};
693
+
694
+static const VMStateDescription vmstate_designware_pcie_viewport = {
695
+ .name = "designware-pcie-viewport",
696
+ .version_id = 1,
697
+ .minimum_version_id = 1,
698
+ .fields = (VMStateField[]) {
699
+ VMSTATE_UINT64(base, DesignwarePCIEViewport),
700
+ VMSTATE_UINT64(target, DesignwarePCIEViewport),
701
+ VMSTATE_UINT32(limit, DesignwarePCIEViewport),
702
+ VMSTATE_UINT32_ARRAY(cr, DesignwarePCIEViewport, 2),
703
+ VMSTATE_END_OF_LIST()
704
+ }
705
+};
706
+
707
+static const VMStateDescription vmstate_designware_pcie_root = {
708
+ .name = "designware-pcie-root",
709
+ .version_id = 1,
710
+ .minimum_version_id = 1,
711
+ .fields = (VMStateField[]) {
712
+ VMSTATE_PCI_DEVICE(parent_obj, PCIBridge),
713
+ VMSTATE_UINT32(atu_viewport, DesignwarePCIERoot),
714
+ VMSTATE_STRUCT_2DARRAY(viewports,
715
+ DesignwarePCIERoot,
716
+ 2,
717
+ DESIGNWARE_PCIE_NUM_VIEWPORTS,
718
+ 1,
719
+ vmstate_designware_pcie_viewport,
720
+ DesignwarePCIEViewport),
721
+ VMSTATE_STRUCT(msi,
722
+ DesignwarePCIERoot,
723
+ 1,
724
+ vmstate_designware_pcie_msi,
725
+ DesignwarePCIEMSI),
726
+ VMSTATE_END_OF_LIST()
727
+ }
728
+};
729
+
730
+static void designware_pcie_root_class_init(ObjectClass *klass, void *data)
731
+{
732
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
733
+ DeviceClass *dc = DEVICE_CLASS(klass);
734
+
735
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
736
+
737
+ k->vendor_id = PCI_VENDOR_ID_SYNOPSYS;
738
+ k->device_id = 0xABCD;
739
+ k->revision = 0;
740
+ k->class_id = PCI_CLASS_BRIDGE_PCI;
741
+ k->is_bridge = true;
742
+ k->exit = pci_bridge_exitfn;
743
+ k->realize = designware_pcie_root_realize;
744
+ k->config_read = designware_pcie_root_config_read;
745
+ k->config_write = designware_pcie_root_config_write;
746
+
747
+ dc->reset = pci_bridge_reset;
748
+ /*
749
+ * PCI-facing part of the host bridge, not usable without the
750
+ * host-facing part, which can't be device_add'ed, yet.
751
+ */
752
+ dc->user_creatable = false;
753
+ dc->vmsd = &vmstate_designware_pcie_root;
754
+}
755
+
756
+static uint64_t designware_pcie_host_mmio_read(void *opaque, hwaddr addr,
757
+ unsigned int size)
758
+{
759
+ PCIHostState *pci = PCI_HOST_BRIDGE(opaque);
760
+ PCIDevice *device = pci_find_device(pci->bus, 0, 0);
761
+
762
+ return pci_host_config_read_common(device,
763
+ addr,
764
+ pci_config_size(device),
765
+ size);
766
+}
767
+
768
+static void designware_pcie_host_mmio_write(void *opaque, hwaddr addr,
769
+ uint64_t val, unsigned int size)
770
+{
771
+ PCIHostState *pci = PCI_HOST_BRIDGE(opaque);
772
+ PCIDevice *device = pci_find_device(pci->bus, 0, 0);
773
+
774
+ return pci_host_config_write_common(device,
775
+ addr,
776
+ pci_config_size(device),
777
+ val, size);
778
+}
779
+
780
+static const MemoryRegionOps designware_pci_mmio_ops = {
781
+ .read = designware_pcie_host_mmio_read,
782
+ .write = designware_pcie_host_mmio_write,
783
+ .endianness = DEVICE_LITTLE_ENDIAN,
784
+ .impl = {
785
+ /*
786
+ * Our device would not work correctly if the guest was doing
787
+ * unaligned access. This might not be a limitation on the real
788
+ * device but in practice there is no reason for a guest to access
789
+ * this device unaligned.
790
+ */
791
+ .min_access_size = 4,
792
+ .max_access_size = 4,
793
+ .unaligned = false,
794
+ },
795
+};
796
+
797
+static AddressSpace *designware_pcie_host_set_iommu(PCIBus *bus, void *opaque,
798
+ int devfn)
799
+{
800
+ DesignwarePCIEHost *s = DESIGNWARE_PCIE_HOST(opaque);
801
+
802
+ return &s->pci.address_space;
803
+}
804
+
805
+static void designware_pcie_host_realize(DeviceState *dev, Error **errp)
806
+{
807
+ PCIHostState *pci = PCI_HOST_BRIDGE(dev);
808
+ DesignwarePCIEHost *s = DESIGNWARE_PCIE_HOST(dev);
809
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
810
+ size_t i;
811
+
812
+ for (i = 0; i < ARRAY_SIZE(s->pci.irqs); i++) {
813
+ sysbus_init_irq(sbd, &s->pci.irqs[i]);
814
+ }
815
+
816
+ memory_region_init_io(&s->mmio,
817
+ OBJECT(s),
818
+ &designware_pci_mmio_ops,
819
+ s,
820
+ "pcie.reg", 4 * 1024);
821
+ sysbus_init_mmio(sbd, &s->mmio);
822
+
823
+ memory_region_init(&s->pci.io, OBJECT(s), "pcie-pio", 16);
824
+ memory_region_init(&s->pci.memory, OBJECT(s),
825
+ "pcie-bus-memory",
826
+ UINT64_MAX);
827
+
828
+ pci->bus = pci_register_root_bus(dev, "pcie",
829
+ designware_pcie_set_irq,
830
+ pci_swizzle_map_irq_fn,
831
+ s,
832
+ &s->pci.memory,
833
+ &s->pci.io,
834
+ 0, 4,
835
+ TYPE_PCIE_BUS);
836
+
837
+ memory_region_init(&s->pci.address_space_root,
838
+ OBJECT(s),
839
+ "pcie-bus-address-space-root",
840
+ UINT64_MAX);
841
+ memory_region_add_subregion(&s->pci.address_space_root,
842
+ 0x0, &s->pci.memory);
843
+ address_space_init(&s->pci.address_space,
844
+ &s->pci.address_space_root,
845
+ "pcie-bus-address-space");
846
+ pci_setup_iommu(pci->bus, designware_pcie_host_set_iommu, s);
847
+
848
+ qdev_set_parent_bus(DEVICE(&s->root), BUS(pci->bus));
849
+ qdev_init_nofail(DEVICE(&s->root));
850
+}
851
+
852
+static const VMStateDescription vmstate_designware_pcie_host = {
853
+ .name = "designware-pcie-host",
854
+ .version_id = 1,
855
+ .minimum_version_id = 1,
856
+ .fields = (VMStateField[]) {
857
+ VMSTATE_STRUCT(root,
858
+ DesignwarePCIEHost,
859
+ 1,
860
+ vmstate_designware_pcie_root,
861
+ DesignwarePCIERoot),
862
+ VMSTATE_END_OF_LIST()
863
+ }
864
+};
865
+
866
+static void designware_pcie_host_class_init(ObjectClass *klass, void *data)
867
+{
868
+ DeviceClass *dc = DEVICE_CLASS(klass);
869
+ PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
870
+
871
+ hc->root_bus_path = designware_pcie_host_root_bus_path;
872
+ dc->realize = designware_pcie_host_realize;
873
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
874
+ dc->fw_name = "pci";
875
+ dc->vmsd = &vmstate_designware_pcie_host;
876
+}
877
+
878
+static void designware_pcie_host_init(Object *obj)
879
+{
880
+ DesignwarePCIEHost *s = DESIGNWARE_PCIE_HOST(obj);
881
+ DesignwarePCIERoot *root = &s->root;
882
+
883
+ object_initialize(root, sizeof(*root), TYPE_DESIGNWARE_PCIE_ROOT);
884
+ object_property_add_child(obj, "root", OBJECT(root), NULL);
885
+ qdev_prop_set_int32(DEVICE(root), "addr", PCI_DEVFN(0, 0));
886
+ qdev_prop_set_bit(DEVICE(root), "multifunction", false);
887
+}
888
+
889
+static const TypeInfo designware_pcie_root_info = {
890
+ .name = TYPE_DESIGNWARE_PCIE_ROOT,
891
+ .parent = TYPE_PCI_BRIDGE,
892
+ .instance_size = sizeof(DesignwarePCIERoot),
893
+ .class_init = designware_pcie_root_class_init,
894
+ .interfaces = (InterfaceInfo[]) {
895
+ { INTERFACE_PCIE_DEVICE },
896
+ { }
897
+ },
898
+};
899
+
900
+static const TypeInfo designware_pcie_host_info = {
901
+ .name = TYPE_DESIGNWARE_PCIE_HOST,
902
+ .parent = TYPE_PCI_HOST_BRIDGE,
903
+ .instance_size = sizeof(DesignwarePCIEHost),
904
+ .instance_init = designware_pcie_host_init,
905
+ .class_init = designware_pcie_host_class_init,
906
+};
907
+
908
+static void designware_pcie_register(void)
909
+{
910
+ type_register_static(&designware_pcie_root_info);
911
+ type_register_static(&designware_pcie_host_info);
912
+}
913
+type_init(designware_pcie_register)
914
diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
915
index XXXXXXX..XXXXXXX 100644
916
--- a/default-configs/arm-softmmu.mak
917
+++ b/default-configs/arm-softmmu.mak
918
@@ -XXX,XX +XXX,XX @@ CONFIG_GPIO_KEY=y
919
CONFIG_MSF2=y
920
CONFIG_FW_CFG_DMA=y
921
CONFIG_XILINX_AXI=y
922
+CONFIG_PCI_DESIGNWARE=y
923
--
92
--
924
2.16.2
93
2.20.1
925
94
926
95
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
Optimize the MVE shift-and-insert insns by using TCG
2
vector ops when possible.
2
3
3
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Message-id: 20180309153654.13518-8-f4bug@amsat.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210913095440.13462-12-peter.maydell@linaro.org
7
---
7
---
8
hw/sd/sdhci.c | 4 ++--
8
target/arm/translate-mve.c | 4 ++--
9
1 file changed, 2 insertions(+), 2 deletions(-)
9
1 file changed, 2 insertions(+), 2 deletions(-)
10
10
11
diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c
11
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/hw/sd/sdhci.c
13
--- a/target/arm/translate-mve.c
14
+++ b/hw/sd/sdhci.c
14
+++ b/target/arm/translate-mve.c
15
@@ -XXX,XX +XXX,XX @@ static void sdhci_read_block_from_card(SDHCIState *s)
15
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_VEC(VSHRI_U, vshli_u, true, do_gvec_shri_u)
16
for (index = 0; index < blk_size; index++) {
16
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
17
data = sdbus_read_data(&s->sdbus);
17
DO_2SHIFT(VRSHRI_U, vrshli_u, true)
18
if (!FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) {
18
19
- /* Device is not in tunning */
19
-DO_2SHIFT(VSRI, vsri, false)
20
+ /* Device is not in tuning */
20
-DO_2SHIFT(VSLI, vsli, false)
21
s->fifo_buffer[index] = data;
21
+DO_2SHIFT_VEC(VSRI, vsri, false, gen_gvec_sri)
22
}
22
+DO_2SHIFT_VEC(VSLI, vsli, false, gen_gvec_sli)
23
}
23
24
24
#define DO_2SHIFT_FP(INSN, FN) \
25
if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) {
25
static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
26
- /* Device is in tunning */
27
+ /* Device is in tuning */
28
s->hostctl2 &= ~R_SDHC_HOSTCTL2_EXECUTE_TUNING_MASK;
29
s->hostctl2 |= R_SDHC_HOSTCTL2_SAMPLING_CLKSEL_MASK;
30
s->prnsts &= ~(SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ |
31
--
26
--
32
2.16.2
27
2.20.1
33
28
34
29
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
Optimize the MVE 1op-immediate insns (VORR, VBIC, VMOV) to
2
use TCG vector ops when possible.
2
3
3
The EXTRA record allows for additional space to be allocated
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
beyon what is currently reserved. Add code to emit and read
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
this record type.
6
Message-id: 20210913095440.13462-13-peter.maydell@linaro.org
7
---
8
target/arm/translate-mve.c | 26 +++++++++++++++++++++-----
9
1 file changed, 21 insertions(+), 5 deletions(-)
6
10
7
Nothing uses extra space yet.
11
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
8
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20180303143823.27055-5-richard.henderson@linaro.org
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
15
linux-user/signal.c | 74 +++++++++++++++++++++++++++++++++++++++++++++--------
16
1 file changed, 63 insertions(+), 11 deletions(-)
17
18
diff --git a/linux-user/signal.c b/linux-user/signal.c
19
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
20
--- a/linux-user/signal.c
13
--- a/target/arm/translate-mve.c
21
+++ b/linux-user/signal.c
14
+++ b/target/arm/translate-mve.c
22
@@ -XXX,XX +XXX,XX @@ struct target_fpsimd_context {
15
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a)
23
uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
16
return true;
24
};
25
26
+#define TARGET_EXTRA_MAGIC 0x45585401
27
+
28
+struct target_extra_context {
29
+ struct target_aarch64_ctx head;
30
+ uint64_t datap; /* 16-byte aligned pointer to extra space cast to __u64 */
31
+ uint32_t size; /* size in bytes of the extra space */
32
+ uint32_t reserved[3];
33
+};
34
+
35
struct target_rt_sigframe {
36
struct target_siginfo info;
37
struct target_ucontext uc;
38
@@ -XXX,XX +XXX,XX @@ static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
39
}
40
}
17
}
41
18
42
+static void target_setup_extra_record(struct target_extra_context *extra,
19
-static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
43
+ uint64_t datap, uint32_t extra_size)
20
+static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn,
21
+ GVecGen2iFn *vecfn)
22
{
23
TCGv_ptr qd;
24
uint64_t imm;
25
@@ -XXX,XX +XXX,XX @@ static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
26
27
imm = asimd_imm_const(a->imm, a->cmode, a->op);
28
29
- qd = mve_qreg_ptr(a->qd);
30
- fn(cpu_env, qd, tcg_constant_i64(imm));
31
- tcg_temp_free_ptr(qd);
32
+ if (vecfn && mve_no_predication(s)) {
33
+ vecfn(MO_64, mve_qreg_offset(a->qd), mve_qreg_offset(a->qd),
34
+ imm, 16, 16);
35
+ } else {
36
+ qd = mve_qreg_ptr(a->qd);
37
+ fn(cpu_env, qd, tcg_constant_i64(imm));
38
+ tcg_temp_free_ptr(qd);
39
+ }
40
mve_update_eci(s);
41
return true;
42
}
43
44
+static void gen_gvec_vmovi(unsigned vece, uint32_t dofs, uint32_t aofs,
45
+ int64_t c, uint32_t oprsz, uint32_t maxsz)
44
+{
46
+{
45
+ __put_user(TARGET_EXTRA_MAGIC, &extra->head.magic);
47
+ tcg_gen_gvec_dup_imm(vece, dofs, oprsz, maxsz, c);
46
+ __put_user(sizeof(struct target_extra_context), &extra->head.size);
47
+ __put_user(datap, &extra->datap);
48
+ __put_user(extra_size, &extra->size);
49
+}
48
+}
50
+
49
+
51
static void target_setup_end_record(struct target_aarch64_ctx *end)
50
static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
52
{
51
{
53
__put_user(0, &end->magic);
52
/* Handle decode of cmode/op here between VORR/VBIC/VMOV */
54
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,
53
MVEGenOneOpImmFn *fn;
55
static int target_restore_sigframe(CPUARMState *env,
54
+ GVecGen2iFn *vecfn;
56
struct target_rt_sigframe *sf)
55
57
{
56
if ((a->cmode & 1) && a->cmode < 12) {
58
- struct target_aarch64_ctx *ctx;
57
if (a->op) {
59
+ struct target_aarch64_ctx *ctx, *extra = NULL;
58
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
60
struct target_fpsimd_context *fpsimd = NULL;
59
* so the VBIC becomes a logical AND operation.
61
+ uint64_t extra_datap = 0;
62
+ bool used_extra = false;
63
+ bool err = false;
64
65
target_restore_general_frame(env, sf);
66
67
ctx = (struct target_aarch64_ctx *)sf->uc.tuc_mcontext.__reserved;
68
while (ctx) {
69
- uint32_t magic, size;
70
+ uint32_t magic, size, extra_size;
71
72
__get_user(magic, &ctx->magic);
73
__get_user(size, &ctx->size);
74
switch (magic) {
75
case 0:
76
if (size != 0) {
77
- return 1;
78
+ err = true;
79
+ goto exit;
80
+ }
81
+ if (used_extra) {
82
+ ctx = NULL;
83
+ } else {
84
+ ctx = extra;
85
+ used_extra = true;
86
}
87
- ctx = NULL;
88
continue;
89
90
case TARGET_FPSIMD_MAGIC:
91
if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
92
- return 1;
93
+ err = true;
94
+ goto exit;
95
}
96
fpsimd = (struct target_fpsimd_context *)ctx;
97
break;
98
99
+ case TARGET_EXTRA_MAGIC:
100
+ if (extra || size != sizeof(struct target_extra_context)) {
101
+ err = true;
102
+ goto exit;
103
+ }
104
+ __get_user(extra_datap,
105
+ &((struct target_extra_context *)ctx)->datap);
106
+ __get_user(extra_size,
107
+ &((struct target_extra_context *)ctx)->size);
108
+ extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
109
+ break;
110
+
111
default:
112
/* Unknown record -- we certainly didn't generate it.
113
* Did we in fact get out of sync?
114
*/
60
*/
115
- return 1;
61
fn = gen_helper_mve_vandi;
116
+ err = true;
62
+ vecfn = tcg_gen_gvec_andi;
117
+ goto exit;
63
} else {
64
fn = gen_helper_mve_vorri;
65
+ vecfn = tcg_gen_gvec_ori;
118
}
66
}
119
ctx = (void *)ctx + size;
67
} else {
68
/* There is one unallocated cmode/op combination in this space */
69
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
70
}
71
/* asimd_imm_const() sorts out VMVNI vs VMOVI for us */
72
fn = gen_helper_mve_vmovi;
73
+ vecfn = gen_gvec_vmovi;
120
}
74
}
121
75
- return do_1imm(s, a, fn);
122
/* Require FPSIMD always. */
76
+ return do_1imm(s, a, fn, vecfn);
123
- if (!fpsimd) {
124
- return 1;
125
+ if (fpsimd) {
126
+ target_restore_fpsimd_record(env, fpsimd);
127
+ } else {
128
+ err = true;
129
}
130
- target_restore_fpsimd_record(env, fpsimd);
131
132
- return 0;
133
+ exit:
134
+ unlock_user(extra, extra_datap, 0);
135
+ return err;
136
}
77
}
137
78
138
static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
79
static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
139
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
140
CPUARMState *env)
141
{
142
int size = offsetof(struct target_rt_sigframe, uc.tuc_mcontext.__reserved);
143
- int fpsimd_ofs, end1_ofs, fr_ofs;
144
+ int fpsimd_ofs, end1_ofs, fr_ofs, end2_ofs = 0;
145
+ int extra_ofs = 0, extra_base = 0, extra_size = 0;
146
struct target_rt_sigframe *frame;
147
struct target_rt_frame_record *fr;
148
abi_ulong frame_addr, return_addr;
149
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
150
151
target_setup_general_frame(frame, env, set);
152
target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
153
+ if (extra_ofs) {
154
+ target_setup_extra_record((void *)frame + extra_ofs,
155
+ frame_addr + extra_base, extra_size);
156
+ }
157
target_setup_end_record((void *)frame + end1_ofs);
158
+ if (end2_ofs) {
159
+ target_setup_end_record((void *)frame + end2_ofs);
160
+ }
161
162
/* Set up the stack frame for unwinding. */
163
fr = (void *)frame + fr_ofs;
164
--
80
--
165
2.16.2
81
2.20.1
166
82
167
83
diff view generated by jsdifflib