Now that Apple Silicon is widely available, people are obviously excited
to try and run virtualized workloads on them, such as Linux and Windows.

This patch set implements a fully functional version to get the ball
going on that. With this applied, I can successfully run both Linux and
Windows as guests. I am not aware of any limitations specific to
Hypervisor.framework apart from:

- Live migration / savevm
- gdbstub debugging (SP register)

Enjoy!

Alex

v1 -> v2:

- New patch: hvf: Actually set SIG_IPI mask
- New patch: hvf: Introduce hvf vcpu struct
- New patch: hvf: arm: Mark CPU as dirty on reset
- Removed patch: hw/arm/virt: Disable highmem when on hypervisor.framework
- Removed patch: arm: Synchronize CPU on PSCI on
- Fix build on 32bit arm
- Merge vcpu kick function patch into ARM enablement
- Implement WFI handling (allows vCPUs to sleep)
- Synchronize system registers (fixes OVMF crashes and reboot)
- Don't always call cpu_synchronize_state()
- Use more fine grained iothread locking
- Populate aa64mmfr0 from hardware
- Make safe to ctrl-C entitlement application

v2 -> v3:

- Removed patch: hvf: Actually set SIG_IPI mask
- New patch: hvf: arm: Add support for GICv3
- New patch: hvf: arm: Implement -cpu host
- Advance PC on SMC
- Use cp list interface for sysreg syncs
- Do not set current_cpu
- Fix sysreg isread mask
- Move sysreg handling to functions
- Remove WFI logic again
- Revert to global iothread locking

v3 -> v4:

- Removed patch: hvf: arm: Mark CPU as dirty on reset
- New patch: hvf: Simplify post reset/init/loadvm hooks
- Remove i386-softmmu target (meson.build for hvf target)
- Combine both if statements (PSCI)
- Use hv.h instead of Hypervisor.h for 10.15 compat
- Remove manual inclusion of Hypervisor.h in common .c files
- No longer include Hypervisor.h in arm hvf .c files
- Remove unused exe_full variable
- Reuse exe_name variable

v4 -> v5:

- Use g_free() on destroy

Alexander Graf (10):
  hvf: Add hypervisor entitlement to output binaries
  hvf: x86: Remove unused definitions
  hvf: Move common code out
  hvf: Introduce hvf vcpu struct
  arm: Set PSCI to 0.2 for HVF
  hvf: Simplify post reset/init/loadvm hooks
  hvf: Add Apple Silicon support
  arm: Add Hypervisor.framework build target
  hvf: arm: Add support for GICv3
  hvf: arm: Implement -cpu host

Peter Collingbourne (1):
  arm/hvf: Add a WFI handler

 MAINTAINERS                  |  14 +-
 accel/hvf/entitlements.plist |   8 +
 accel/hvf/hvf-all.c          |  54 +++
 accel/hvf/hvf-cpus.c         | 466 +++++++++++++++++++
 accel/hvf/meson.build        |   7 +
 accel/meson.build            |   1 +
 include/hw/core/cpu.h        |   3 +-
 include/sysemu/hvf.h         |   2 +
 include/sysemu/hvf_int.h     |  66 +++
 meson.build                  |  40 +-
 scripts/entitlement.sh       |  13 +
 target/arm/cpu.c             |  13 +-
 target/arm/cpu.h             |   2 +
 target/arm/hvf/hvf.c         | 856 +++++++++++++++++++++++++++++++++++
 target/arm/hvf/meson.build   |   3 +
 target/arm/kvm_arm.h         |   2 -
 target/arm/meson.build       |   2 +
 target/i386/hvf/hvf-cpus.c   | 131 ------
 target/i386/hvf/hvf-cpus.h   |  25 -
 target/i386/hvf/hvf-i386.h   |  49 +-
 target/i386/hvf/hvf.c        | 462 +++----------------
 target/i386/hvf/meson.build  |   1 -
 target/i386/hvf/vmx.h        |  24 +-
 target/i386/hvf/x86.c        |  28 +-
 target/i386/hvf/x86_descr.c  |  26 +-
 target/i386/hvf/x86_emu.c    |  62 +--
 target/i386/hvf/x86_mmu.c    |   4 +-
 target/i386/hvf/x86_task.c   |  12 +-
 target/i386/hvf/x86hvf.c     | 224 ++++-----
 target/i386/hvf/x86hvf.h     |   2 -
 30 files changed, 1786 insertions(+), 816 deletions(-)
 create mode 100644 accel/hvf/entitlements.plist
 create mode 100644 accel/hvf/hvf-all.c
 create mode 100644 accel/hvf/hvf-cpus.c
 create mode 100644 accel/hvf/meson.build
 create mode 100644 include/sysemu/hvf_int.h
...
...
In macOS 11, QEMU only gets access to Hypervisor.framework if it has the
respective entitlement. Add an entitlement template and automatically self
sign and apply the entitlement in the build.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com>

---

v1 -> v2:

- Make safe to ctrl-C

v3 -> v4:

- Remove unused exe_full variable
- Reuse exe_name variable
---
 accel/hvf/entitlements.plist |  8 ++++++++
 meson.build                  | 29 +++++++++++++++++++++++++----
 scripts/entitlement.sh       | 13 +++++++++++++
 3 files changed, 46 insertions(+), 4 deletions(-)
 create mode 100644 accel/hvf/entitlements.plist
 create mode 100755 scripts/entitlement.sh

diff --git a/accel/hvf/entitlements.plist b/accel/hvf/entitlements.plist
new file mode 100644
...
...
52
link_args: link_args,
65
link_args: link_args,
53
gui_app: exe['gui'])
66
gui_app: exe['gui'])
54
- }
67
- }
55
+
68
+
56
+ if exe_sign
69
+ if exe_sign
57
+ exe_full = meson.current_build_dir() / exe['name']
58
+ emulators += {exe['name'] : custom_target(exe['name'],
70
+ emulators += {exe['name'] : custom_target(exe['name'],
59
+ install: true,
71
+ install: true,
60
+ install_dir: get_option('bindir'),
72
+ install_dir: get_option('bindir'),
61
+ depends: emulator,
73
+ depends: emulator,
62
+ output: exe['name'],
74
+ output: exe['name'],
63
+ command: [
75
+ command: [
64
+ meson.current_source_dir() / 'scripts/entitlement.sh',
76
+ meson.current_source_dir() / 'scripts/entitlement.sh',
65
+ meson.current_build_dir() / exe['name'] + '-unsigned',
77
+ meson.current_build_dir() / exe_name,
66
+ meson.current_build_dir() / exe['name'],
78
+ meson.current_build_dir() / exe['name'],
67
+ meson.current_source_dir() / 'accel/hvf/entitlements.plist'
79
+ meson.current_source_dir() / 'accel/hvf/entitlements.plist'
68
+ ])
80
+ ])
69
+ }
81
+ }
70
+ else
82
+ else
...
...
+
+SRC="$1"
+DST="$2"
+ENTITLEMENT="$3"
+
+trap 'rm "$DST.tmp"' exit
+cp -af "$SRC" "$DST.tmp"
+codesign --entitlements "$ENTITLEMENT" --force -s - "$DST.tmp"
+mv "$DST.tmp" "$DST"
+trap '' exit
--
2.24.3 (Apple Git-128)
The hvf i386 code has a few struct and cpp definitions that are never
used. Remove them.

Suggested-by: Roman Bolshakov <r.bolshakov@yadro.com>
Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com>
---
 target/i386/hvf/hvf-i386.h | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/target/i386/hvf/hvf-i386.h b/target/i386/hvf/hvf-i386.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/hvf/hvf-i386.h
+++ b/target/i386/hvf/hvf-i386.h
@@ -XXX,XX +XXX,XX @@
 #include "cpu.h"
 #include "x86.h"

-#define HVF_MAX_VCPU 0x10
-
-extern struct hvf_state hvf_global;
-
-struct hvf_vm {
-    int id;
-    struct hvf_vcpu_state *vcpus[HVF_MAX_VCPU];
-};
-
-struct hvf_state {
-    uint32_t version;
-    struct hvf_vm *vm;
-    uint64_t mem_quota;
-};
-
 /* hvf_slot flags */
 #define HVF_SLOT_LOG (1 << 0)

@@ -XXX,XX +XXX,XX @@ hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);

 /* Host specific functions */
 int hvf_inject_interrupt(CPUArchState *env, int vector);
-int hvf_vcpu_run(struct hvf_vcpu_state *vcpu);
 #endif

 #endif
--
2.24.3 (Apple Git-128)
Until now, Hypervisor.framework has only been available on x86_64 systems.
With Apple Silicon shipping now, it extends its reach to aarch64. To
prepare for support for multiple architectures, let's move common code out
into its own accel directory.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com>

---

v3 -> v4:

- Use hv.h instead of Hypervisor.h for 10.15 compat
- Remove manual inclusion of Hypervisor.h in common .c files
---
 MAINTAINERS                 |   9 +-
 accel/hvf/hvf-all.c         |  54 +++++
 accel/hvf/hvf-cpus.c        | 462 ++++++++++++++++++++++++++++++
 accel/hvf/meson.build       |   7 +
 accel/meson.build           |   1 +
 include/sysemu/hvf_int.h    |  54 +++++
 target/i386/hvf/hvf-cpus.c  | 131 ----------
 target/i386/hvf/hvf-cpus.h  |  25 --
 target/i386/hvf/hvf-i386.h  |  33 +--
 target/i386/hvf/hvf.c       | 360 +---------------------------
 target/i386/hvf/meson.build |   1 -
 target/i386/hvf/x86hvf.c    |  11 +-
 target/i386/hvf/x86hvf.h    |   2 -
 13 files changed, 596 insertions(+), 554 deletions(-)
 create mode 100644 accel/hvf/hvf-all.c
 create mode 100644 accel/hvf/hvf-cpus.c
 create mode 100644 accel/hvf/meson.build
 create mode 100644 include/sysemu/hvf_int.h
 delete mode 100644 target/i386/hvf/hvf-cpus.c
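
As an aside for reviewers unfamiliar with the layout: the idea of the split
is that accel/hvf/ keeps only architecture-neutral logic and defers to
per-target hooks. The sketch below is purely illustrative; apart from
hvf_arch_init_vcpu()/hvf_arch_vcpu_destroy(), which this series does add,
the names and bodies are simplified stand-ins and not code from the patch:

    /* Illustrative sketch of the intended layering, not patch contents. */
    typedef struct CPUState CPUState;           /* assumed QEMU type */

    int hvf_arch_init_vcpu(CPUState *cpu);      /* implemented per target */
    void hvf_arch_vcpu_destroy(CPUState *cpu);  /* implemented per target */

    /* Common code (accel/hvf/hvf-cpus.c) does the target-independent
     * bring-up and then hands off to the per-target hook. */
    static int hvf_init_vcpu_sketch(CPUState *cpu)
    {
        /* ... create the Hypervisor.framework vCPU, set up signals ... */
        return hvf_arch_init_vcpu(cpu);
    }

target/i386/hvf/ and, later in this series, target/arm/hvf/ then only carry
what is genuinely architecture specific.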
...
...
71
+#include "sysemu/hvf_int.h"
80
+#include "sysemu/hvf_int.h"
72
+#include "sysemu/runstate.h"
81
+#include "sysemu/runstate.h"
73
+
82
+
74
+#include "qemu/main-loop.h"
83
+#include "qemu/main-loop.h"
75
+#include "sysemu/accel.h"
84
+#include "sysemu/accel.h"
76
+
77
+#include <Hypervisor/Hypervisor.h>
78
+
85
+
79
+bool hvf_allowed;
86
+bool hvf_allowed;
80
+HVFState *hvf_state;
87
+HVFState *hvf_state;
81
+
88
+
82
+void assert_hvf_ok(hv_return_t ret)
89
+void assert_hvf_ok(hv_return_t ret)
...
...
174
+#include "sysemu/hvf.h"
181
+#include "sysemu/hvf.h"
175
+#include "sysemu/hvf_int.h"
182
+#include "sysemu/hvf_int.h"
176
+#include "sysemu/runstate.h"
183
+#include "sysemu/runstate.h"
177
+#include "qemu/guest-random.h"
184
+#include "qemu/guest-random.h"
178
+
185
+
179
+#include <Hypervisor/Hypervisor.h>
180
+
181
+/* Memory slots */
186
+/* Memory slots */
182
+
187
+
183
+struct mac_slot {
188
+struct mac_slot {
184
+ int present;
189
+ int present;
185
+ uint64_t size;
190
+ uint64_t size;
...
...
455
+ sigaction(SIG_IPI, &sigact, NULL);
460
+ sigaction(SIG_IPI, &sigact, NULL);
456
+
461
+
457
+ pthread_sigmask(SIG_BLOCK, NULL, &set);
462
+ pthread_sigmask(SIG_BLOCK, NULL, &set);
458
+ sigdelset(&set, SIG_IPI);
463
+ sigdelset(&set, SIG_IPI);
459
+
464
+
460
+#ifdef __aarch64__
461
+ r = hv_vcpu_create(&cpu->hvf_fd, (hv_vcpu_exit_t **)&cpu->hvf_exit, NULL);
462
+#else
463
+ r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
465
+ r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
464
+#endif
465
+ cpu->vcpu_dirty = 1;
466
+ cpu->vcpu_dirty = 1;
466
+ assert_hvf_ok(r);
467
+ assert_hvf_ok(r);
467
+
468
+
468
+ return hvf_arch_init_vcpu(cpu);
469
+ return hvf_arch_init_vcpu(cpu);
469
+}
470
+}
...
...
625
+/* header to be included in HVF-specific code */
626
+/* header to be included in HVF-specific code */
626
+
627
+
627
+#ifndef HVF_INT_H
628
+#ifndef HVF_INT_H
628
+#define HVF_INT_H
629
+#define HVF_INT_H
629
+
630
+
630
+#include <Hypervisor/Hypervisor.h>
631
+#include <Hypervisor/hv.h>
631
+
632
+#define HVF_MAX_VCPU 0x10
633
+
634
+extern struct hvf_state hvf_global;
635
+
636
+struct hvf_vm {
637
+ int id;
638
+ struct hvf_vcpu_state *vcpus[HVF_MAX_VCPU];
639
+};
640
+
641
+struct hvf_state {
642
+ uint32_t version;
643
+ struct hvf_vm *vm;
644
+ uint64_t mem_quota;
645
+};
646
+
632
+
647
+/* hvf_slot flags */
633
+/* hvf_slot flags */
648
+#define HVF_SLOT_LOG (1 << 0)
634
+#define HVF_SLOT_LOG (1 << 0)
649
+
635
+
650
+typedef struct hvf_slot {
636
+typedef struct hvf_slot {
...
...
861
#include "sysemu/hvf.h"
847
#include "sysemu/hvf.h"
862
+#include "sysemu/hvf_int.h"
848
+#include "sysemu/hvf_int.h"
863
#include "cpu.h"
849
#include "cpu.h"
864
#include "x86.h"
850
#include "x86.h"
865
851
866
-#define HVF_MAX_VCPU 0x10
867
-
868
-extern struct hvf_state hvf_global;
869
-
870
-struct hvf_vm {
871
- int id;
872
- struct hvf_vcpu_state *vcpus[HVF_MAX_VCPU];
873
-};
874
-
875
-struct hvf_state {
876
- uint32_t version;
877
- struct hvf_vm *vm;
878
- uint64_t mem_quota;
879
-};
880
-
881
-/* hvf_slot flags */
852
-/* hvf_slot flags */
882
-#define HVF_SLOT_LOG (1 << 0)
853
-#define HVF_SLOT_LOG (1 << 0)
883
-
854
-
884
-typedef struct hvf_slot {
855
-typedef struct hvf_slot {
885
- uint64_t start;
856
- uint64_t start;
...
...
We will need more than a single field for hvf going forward. To keep
the global vcpu struct uncluttered, let's allocate a special hvf vcpu
struct, similar to how hax does it.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

---

v4 -> v5:

- Use g_free() on destroy
---
 accel/hvf/hvf-cpus.c        |   8 +-
 include/hw/core/cpu.h       |   3 +-
 include/sysemu/hvf_int.h    |   4 +
 target/i386/hvf/hvf.c       | 102 +++++++---------
 target/i386/hvf/vmx.h       |  24 +++--
 target/i386/hvf/x86.c       |  28 ++---
 target/i386/hvf/x86_descr.c |  26 ++---
 target/i386/hvf/x86_emu.c   |  62 +++++------
 target/i386/hvf/x86_mmu.c   |   4 +-
 target/i386/hvf/x86_task.c  |  12 +--
 target/i386/hvf/x86hvf.c    | 210 ++++++++++++++++++------------------
 11 files changed, 247 insertions(+), 236 deletions(-)
29
diff --git a/accel/hvf/hvf-cpus.c b/accel/hvf/hvf-cpus.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/accel/hvf/hvf-cpus.c
32
+++ b/accel/hvf/hvf-cpus.c
33
@@ -XXX,XX +XXX,XX @@ static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
34
35
static void hvf_vcpu_destroy(CPUState *cpu)
36
{
37
- hv_return_t ret = hv_vcpu_destroy(cpu->hvf_fd);
38
+ hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
39
assert_hvf_ok(ret);
40
41
hvf_arch_vcpu_destroy(cpu);
42
+ g_free(cpu->hvf);
43
+ cpu->hvf = NULL;
44
}
45
46
static void dummy_signal(int sig)
47
@@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu)
48
{
49
int r;
50
51
+ cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
52
+
53
/* init cpu signals */
54
sigset_t set;
55
struct sigaction sigact;
56
@@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu)
57
pthread_sigmask(SIG_BLOCK, NULL, &set);
58
sigdelset(&set, SIG_IPI);
59
60
- r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
61
+ r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
62
cpu->vcpu_dirty = 1;
63
assert_hvf_ok(r);
64
65
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
66
index XXXXXXX..XXXXXXX 100644
67
--- a/include/hw/core/cpu.h
68
+++ b/include/hw/core/cpu.h
69
@@ -XXX,XX +XXX,XX @@ struct KVMState;
70
struct kvm_run;
71
72
struct hax_vcpu_state;
73
+struct hvf_vcpu_state;
74
75
#define TB_JMP_CACHE_BITS 12
76
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
77
@@ -XXX,XX +XXX,XX @@ struct CPUState {
78
79
struct hax_vcpu_state *hax_vcpu;
80
81
- int hvf_fd;
82
+ struct hvf_vcpu_state *hvf;
83
84
/* track IOMMUs whose translations we've cached in the TCG TLB */
85
GArray *iommu_notifiers;
86
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
87
index XXXXXXX..XXXXXXX 100644
88
--- a/include/sysemu/hvf_int.h
89
+++ b/include/sysemu/hvf_int.h
90
@@ -XXX,XX +XXX,XX @@ struct HVFState {
91
};
92
extern HVFState *hvf_state;
93
94
+struct hvf_vcpu_state {
95
+ int fd;
96
+};
97
+
98
void assert_hvf_ok(hv_return_t ret);
99
int hvf_get_registers(CPUState *cpu);
100
int hvf_put_registers(CPUState *cpu);
101
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
102
index XXXXXXX..XXXXXXX 100644
103
--- a/target/i386/hvf/hvf.c
104
+++ b/target/i386/hvf/hvf.c
105
@@ -XXX,XX +XXX,XX @@ void vmx_update_tpr(CPUState *cpu)
106
int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
107
int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);
108
109
- wreg(cpu->hvf_fd, HV_X86_TPR, tpr);
110
+ wreg(cpu->hvf->fd, HV_X86_TPR, tpr);
111
if (irr == -1) {
112
- wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
113
+ wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
114
} else {
115
- wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
116
+ wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
117
irr >> 4);
118
}
119
}
120
@@ -XXX,XX +XXX,XX @@ void vmx_update_tpr(CPUState *cpu)
121
static void update_apic_tpr(CPUState *cpu)
122
{
123
X86CPU *x86_cpu = X86_CPU(cpu);
124
- int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;
125
+ int tpr = rreg(cpu->hvf->fd, HV_X86_TPR) >> 4;
126
cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
127
}
128
129
@@ -XXX,XX +XXX,XX @@ int hvf_arch_init_vcpu(CPUState *cpu)
130
}
131
132
/* set VMCS control fields */
133
- wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,
134
+ wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS,
135
cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
136
VMCS_PIN_BASED_CTLS_EXTINT |
137
VMCS_PIN_BASED_CTLS_NMI |
138
VMCS_PIN_BASED_CTLS_VNMI));
139
- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
140
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS,
141
cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
142
VMCS_PRI_PROC_BASED_CTLS_HLT |
143
VMCS_PRI_PROC_BASED_CTLS_MWAIT |
144
VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
145
VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
146
VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
147
- wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,
148
+ wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS,
149
cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
150
VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));
151
152
- wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,
153
+ wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,
154
0));
155
- wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */
156
+ wvmcs(cpu->hvf->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */
157
158
- wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
159
+ wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
160
161
x86cpu = X86_CPU(cpu);
162
x86cpu->env.xsave_buf = qemu_memalign(4096, 4096);
163
164
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
165
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
166
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
167
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
168
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
169
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
170
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
171
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
172
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);
173
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
174
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
175
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);
176
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1);
177
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1);
178
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1);
179
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FMASK, 1);
180
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FSBASE, 1);
181
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_GSBASE, 1);
182
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_KERNELGSBASE, 1);
183
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_TSC_AUX, 1);
184
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_TSC, 1);
185
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_CS, 1);
186
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_EIP, 1);
187
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_ESP, 1);
188
189
return 0;
190
}
191
@@ -XXX,XX +XXX,XX @@ static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_in
192
}
193
if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
194
env->has_error_code = true;
195
- env->error_code = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERROR);
196
+ env->error_code = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_ERROR);
197
}
198
}
199
- if ((rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
200
+ if ((rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
201
VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
202
env->hflags2 |= HF2_NMI_MASK;
203
} else {
204
env->hflags2 &= ~HF2_NMI_MASK;
205
}
206
- if (rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
207
+ if (rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
208
(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
209
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
210
env->hflags |= HF_INHIBIT_IRQ_MASK;
211
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
212
return EXCP_HLT;
213
}
214
215
- hv_return_t r = hv_vcpu_run(cpu->hvf_fd);
216
+ hv_return_t r = hv_vcpu_run(cpu->hvf->fd);
217
assert_hvf_ok(r);
218
219
/* handle VMEXIT */
220
- uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);
221
- uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);
222
- uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,
223
+ uint64_t exit_reason = rvmcs(cpu->hvf->fd, VMCS_EXIT_REASON);
224
+ uint64_t exit_qual = rvmcs(cpu->hvf->fd, VMCS_EXIT_QUALIFICATION);
225
+ uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf->fd,
226
VMCS_EXIT_INSTRUCTION_LENGTH);
227
228
- uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
229
+ uint64_t idtvec_info = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);
230
231
hvf_store_events(cpu, ins_len, idtvec_info);
232
- rip = rreg(cpu->hvf_fd, HV_X86_RIP);
233
- env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
234
+ rip = rreg(cpu->hvf->fd, HV_X86_RIP);
235
+ env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);
236
237
qemu_mutex_lock_iothread();
238
239
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
240
case EXIT_REASON_EPT_FAULT:
241
{
242
hvf_slot *slot;
243
- uint64_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);
244
+ uint64_t gpa = rvmcs(cpu->hvf->fd, VMCS_GUEST_PHYSICAL_ADDRESS);
245
246
if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
247
((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
248
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
249
store_regs(cpu);
250
break;
251
} else if (!string && !in) {
252
- RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);
253
+ RAX(env) = rreg(cpu->hvf->fd, HV_X86_RAX);
254
hvf_handle_io(env, port, &RAX(env), 1, size, 1);
255
macvm_set_rip(cpu, rip + ins_len);
256
break;
257
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
258
break;
259
}
260
case EXIT_REASON_CPUID: {
261
- uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
262
- uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);
263
- uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
264
- uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);
265
+ uint32_t rax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
266
+ uint32_t rbx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RBX);
267
+ uint32_t rcx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
268
+ uint32_t rdx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);
269
270
cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);
271
272
- wreg(cpu->hvf_fd, HV_X86_RAX, rax);
273
- wreg(cpu->hvf_fd, HV_X86_RBX, rbx);
274
- wreg(cpu->hvf_fd, HV_X86_RCX, rcx);
275
- wreg(cpu->hvf_fd, HV_X86_RDX, rdx);
276
+ wreg(cpu->hvf->fd, HV_X86_RAX, rax);
277
+ wreg(cpu->hvf->fd, HV_X86_RBX, rbx);
278
+ wreg(cpu->hvf->fd, HV_X86_RCX, rcx);
279
+ wreg(cpu->hvf->fd, HV_X86_RDX, rdx);
280
281
macvm_set_rip(cpu, rip + ins_len);
282
break;
283
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
284
case EXIT_REASON_XSETBV: {
285
X86CPU *x86_cpu = X86_CPU(cpu);
286
CPUX86State *env = &x86_cpu->env;
287
- uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
288
- uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
289
- uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);
290
+ uint32_t eax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
291
+ uint32_t ecx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
292
+ uint32_t edx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);
293
294
if (ecx) {
295
macvm_set_rip(cpu, rip + ins_len);
296
break;
297
}
298
env->xcr0 = ((uint64_t)edx << 32) | eax;
299
- wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);
300
+ wreg(cpu->hvf->fd, HV_X86_XCR0, env->xcr0 | 1);
301
macvm_set_rip(cpu, rip + ins_len);
302
break;
303
}
304
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
305
306
switch (cr) {
307
case 0x0: {
308
- macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));
309
+ macvm_set_cr0(cpu->hvf->fd, RRX(env, reg));
310
break;
311
}
312
case 4: {
313
- macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));
314
+ macvm_set_cr4(cpu->hvf->fd, RRX(env, reg));
315
break;
316
}
317
case 8: {
318
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
319
break;
320
}
321
case EXIT_REASON_TASK_SWITCH: {
322
- uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
323
+ uint64_t vinfo = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);
324
x68_segment_selector sel = {.sel = exit_qual & 0xffff};
325
vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
326
vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
327
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
328
break;
329
}
330
case EXIT_REASON_RDPMC:
331
- wreg(cpu->hvf_fd, HV_X86_RAX, 0);
332
- wreg(cpu->hvf_fd, HV_X86_RDX, 0);
333
+ wreg(cpu->hvf->fd, HV_X86_RAX, 0);
334
+ wreg(cpu->hvf->fd, HV_X86_RDX, 0);
335
macvm_set_rip(cpu, rip + ins_len);
336
break;
337
case VMX_REASON_VMCALL:
338
diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h
339
index XXXXXXX..XXXXXXX 100644
340
--- a/target/i386/hvf/vmx.h
341
+++ b/target/i386/hvf/vmx.h
342
@@ -XXX,XX +XXX,XX @@
343
#include "vmcs.h"
344
#include "cpu.h"
345
#include "x86.h"
346
+#include "sysemu/hvf.h"
347
+#include "sysemu/hvf_int.h"
348
349
#include "exec/address-spaces.h"
350
351
@@ -XXX,XX +XXX,XX @@ static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
352
uint64_t val;
353
354
/* BUG, should take considering overlap.. */
355
- wreg(cpu->hvf_fd, HV_X86_RIP, rip);
356
+ wreg(cpu->hvf->fd, HV_X86_RIP, rip);
357
env->eip = rip;
358
359
/* after moving forward in rip, we need to clean INTERRUPTABILITY */
360
- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
361
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
362
if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
363
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
364
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
365
- wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
366
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY,
367
val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
368
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
369
}
370
@@ -XXX,XX +XXX,XX @@ static inline void vmx_clear_nmi_blocking(CPUState *cpu)
371
CPUX86State *env = &x86_cpu->env;
372
373
env->hflags2 &= ~HF2_NMI_MASK;
374
- uint32_t gi = (uint32_t) rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
375
+ uint32_t gi = (uint32_t) rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
376
gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
377
- wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
378
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
379
}
380
381
static inline void vmx_set_nmi_blocking(CPUState *cpu)
382
@@ -XXX,XX +XXX,XX @@ static inline void vmx_set_nmi_blocking(CPUState *cpu)
383
CPUX86State *env = &x86_cpu->env;
384
385
env->hflags2 |= HF2_NMI_MASK;
386
- uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
387
+ uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
388
gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
389
- wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
390
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
391
}
392
393
static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
394
{
395
uint64_t val;
396
- val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
397
- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
398
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
399
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
400
VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
401
402
}
403
@@ -XXX,XX +XXX,XX @@ static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
404
{
405
406
uint64_t val;
407
- val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
408
- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
409
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
410
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
411
~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
412
}
413
414
diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c
415
index XXXXXXX..XXXXXXX 100644
416
--- a/target/i386/hvf/x86.c
417
+++ b/target/i386/hvf/x86.c
418
@@ -XXX,XX +XXX,XX @@ bool x86_read_segment_descriptor(struct CPUState *cpu,
419
}
420
421
if (GDT_SEL == sel.ti) {
422
- base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
423
- limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
424
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
425
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
426
} else {
427
- base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
428
- limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
429
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
430
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
431
}
432
433
if (sel.index * 8 >= limit) {
434
@@ -XXX,XX +XXX,XX @@ bool x86_write_segment_descriptor(struct CPUState *cpu,
435
uint32_t limit;
436
437
if (GDT_SEL == sel.ti) {
438
- base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
439
- limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
440
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
441
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
442
} else {
443
- base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
444
- limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
445
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
446
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
447
}
448
449
if (sel.index * 8 >= limit) {
450
@@ -XXX,XX +XXX,XX @@ bool x86_write_segment_descriptor(struct CPUState *cpu,
451
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
452
int gate)
453
{
454
- target_ulong base = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);
455
- uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
456
+ target_ulong base = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE);
457
+ uint32_t limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
458
459
memset(idt_desc, 0, sizeof(*idt_desc));
460
if (gate * 8 >= limit) {
461
@@ -XXX,XX +XXX,XX @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
462
463
bool x86_is_protected(struct CPUState *cpu)
464
{
465
- uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
466
+ uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
467
return cr0 & CR0_PE;
468
}
469
470
@@ -XXX,XX +XXX,XX @@ bool x86_is_v8086(struct CPUState *cpu)
471
472
bool x86_is_long_mode(struct CPUState *cpu)
473
{
474
- return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
475
+ return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
476
}
477
478
bool x86_is_long64_mode(struct CPUState *cpu)
479
@@ -XXX,XX +XXX,XX @@ bool x86_is_long64_mode(struct CPUState *cpu)
480
481
bool x86_is_paging_mode(struct CPUState *cpu)
482
{
483
- uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
484
+ uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
485
return cr0 & CR0_PG;
486
}
487
488
bool x86_is_pae_enabled(struct CPUState *cpu)
489
{
490
- uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
491
+ uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
492
return cr4 & CR4_PAE;
493
}
494
495
diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c
496
index XXXXXXX..XXXXXXX 100644
497
--- a/target/i386/hvf/x86_descr.c
498
+++ b/target/i386/hvf/x86_descr.c
499
@@ -XXX,XX +XXX,XX @@ static const struct vmx_segment_field {
500
501
uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg)
502
{
503
- return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
504
+ return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit);
505
}
506
507
uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg)
508
{
509
- return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
510
+ return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes);
511
}
512
513
uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg)
514
{
515
- return rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
516
+ return rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base);
517
}
518
519
x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
520
{
521
x68_segment_selector sel;
522
- sel.sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
523
+ sel.sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector);
524
return sel;
525
}
526
527
void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, X86Seg seg)
528
{
529
- wvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector, selector.sel);
530
+ wvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector, selector.sel);
531
}
532
533
void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
534
{
535
- desc->sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
536
- desc->base = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
537
- desc->limit = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
538
- desc->ar = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
539
+ desc->sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector);
540
+ desc->base = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base);
541
+ desc->limit = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit);
542
+ desc->ar = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes);
543
}
544
545
void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
546
{
547
const struct vmx_segment_field *sf = &vmx_segment_fields[seg];
548
549
- wvmcs(cpu->hvf_fd, sf->base, desc->base);
550
- wvmcs(cpu->hvf_fd, sf->limit, desc->limit);
551
- wvmcs(cpu->hvf_fd, sf->selector, desc->sel);
552
- wvmcs(cpu->hvf_fd, sf->ar_bytes, desc->ar);
553
+ wvmcs(cpu->hvf->fd, sf->base, desc->base);
554
+ wvmcs(cpu->hvf->fd, sf->limit, desc->limit);
555
+ wvmcs(cpu->hvf->fd, sf->selector, desc->sel);
556
+ wvmcs(cpu->hvf->fd, sf->ar_bytes, desc->ar);
557
}
558
559
void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc)
560
diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c
561
index XXXXXXX..XXXXXXX 100644
562
--- a/target/i386/hvf/x86_emu.c
563
+++ b/target/i386/hvf/x86_emu.c
564
@@ -XXX,XX +XXX,XX @@ void simulate_rdmsr(struct CPUState *cpu)
565
566
switch (msr) {
567
case MSR_IA32_TSC:
568
- val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);
569
+ val = rdtscp() + rvmcs(cpu->hvf->fd, VMCS_TSC_OFFSET);
570
break;
571
case MSR_IA32_APICBASE:
572
val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
573
@@ -XXX,XX +XXX,XX @@ void simulate_rdmsr(struct CPUState *cpu)
574
val = x86_cpu->ucode_rev;
575
break;
576
case MSR_EFER:
577
- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
578
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER);
579
break;
580
case MSR_FSBASE:
581
- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);
582
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE);
583
break;
584
case MSR_GSBASE:
585
- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);
586
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE);
587
break;
588
case MSR_KERNELGSBASE:
589
- val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);
590
+ val = rvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE);
591
break;
592
case MSR_STAR:
593
abort();
594
@@ -XXX,XX +XXX,XX @@ void simulate_wrmsr(struct CPUState *cpu)
595
cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
596
break;
597
case MSR_FSBASE:
598
- wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);
599
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE, data);
600
break;
601
case MSR_GSBASE:
602
- wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);
603
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE, data);
604
break;
605
case MSR_KERNELGSBASE:
606
- wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);
607
+ wvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE, data);
608
break;
609
case MSR_STAR:
610
abort();
611
@@ -XXX,XX +XXX,XX @@ void simulate_wrmsr(struct CPUState *cpu)
612
break;
613
case MSR_EFER:
614
/*printf("new efer %llx\n", EFER(cpu));*/
615
- wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);
616
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER, data);
617
if (data & MSR_EFER_NXE) {
618
- hv_vcpu_invalidate_tlb(cpu->hvf_fd);
619
+ hv_vcpu_invalidate_tlb(cpu->hvf->fd);
620
}
621
break;
622
case MSR_MTRRphysBase(0):
623
@@ -XXX,XX +XXX,XX @@ void load_regs(struct CPUState *cpu)
624
CPUX86State *env = &x86_cpu->env;
625
626
int i = 0;
627
- RRX(env, R_EAX) = rreg(cpu->hvf_fd, HV_X86_RAX);
628
- RRX(env, R_EBX) = rreg(cpu->hvf_fd, HV_X86_RBX);
629
- RRX(env, R_ECX) = rreg(cpu->hvf_fd, HV_X86_RCX);
630
- RRX(env, R_EDX) = rreg(cpu->hvf_fd, HV_X86_RDX);
631
- RRX(env, R_ESI) = rreg(cpu->hvf_fd, HV_X86_RSI);
632
- RRX(env, R_EDI) = rreg(cpu->hvf_fd, HV_X86_RDI);
633
- RRX(env, R_ESP) = rreg(cpu->hvf_fd, HV_X86_RSP);
634
- RRX(env, R_EBP) = rreg(cpu->hvf_fd, HV_X86_RBP);
635
+ RRX(env, R_EAX) = rreg(cpu->hvf->fd, HV_X86_RAX);
636
+ RRX(env, R_EBX) = rreg(cpu->hvf->fd, HV_X86_RBX);
637
+ RRX(env, R_ECX) = rreg(cpu->hvf->fd, HV_X86_RCX);
638
+ RRX(env, R_EDX) = rreg(cpu->hvf->fd, HV_X86_RDX);
639
+ RRX(env, R_ESI) = rreg(cpu->hvf->fd, HV_X86_RSI);
640
+ RRX(env, R_EDI) = rreg(cpu->hvf->fd, HV_X86_RDI);
641
+ RRX(env, R_ESP) = rreg(cpu->hvf->fd, HV_X86_RSP);
642
+ RRX(env, R_EBP) = rreg(cpu->hvf->fd, HV_X86_RBP);
643
for (i = 8; i < 16; i++) {
644
- RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);
645
+ RRX(env, i) = rreg(cpu->hvf->fd, HV_X86_RAX + i);
646
}
647
648
- env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
649
+ env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);
650
rflags_to_lflags(env);
651
- env->eip = rreg(cpu->hvf_fd, HV_X86_RIP);
652
+ env->eip = rreg(cpu->hvf->fd, HV_X86_RIP);
653
}
654
655
void store_regs(struct CPUState *cpu)
656
@@ -XXX,XX +XXX,XX @@ void store_regs(struct CPUState *cpu)
657
CPUX86State *env = &x86_cpu->env;
658
659
int i = 0;
660
- wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));
661
- wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));
662
- wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));
663
- wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));
664
- wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));
665
- wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));
666
- wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));
667
- wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));
668
+ wreg(cpu->hvf->fd, HV_X86_RAX, RAX(env));
669
+ wreg(cpu->hvf->fd, HV_X86_RBX, RBX(env));
670
+ wreg(cpu->hvf->fd, HV_X86_RCX, RCX(env));
671
+ wreg(cpu->hvf->fd, HV_X86_RDX, RDX(env));
672
+ wreg(cpu->hvf->fd, HV_X86_RSI, RSI(env));
673
+ wreg(cpu->hvf->fd, HV_X86_RDI, RDI(env));
674
+ wreg(cpu->hvf->fd, HV_X86_RBP, RBP(env));
675
+ wreg(cpu->hvf->fd, HV_X86_RSP, RSP(env));
676
for (i = 8; i < 16; i++) {
677
- wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));
678
+ wreg(cpu->hvf->fd, HV_X86_RAX + i, RRX(env, i));
679
}
680
681
lflags_to_rflags(env);
682
- wreg(cpu->hvf_fd, HV_X86_RFLAGS, env->eflags);
683
+ wreg(cpu->hvf->fd, HV_X86_RFLAGS, env->eflags);
684
macvm_set_rip(cpu, env->eip);
685
}
686
687
diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c
688
index XXXXXXX..XXXXXXX 100644
689
--- a/target/i386/hvf/x86_mmu.c
690
+++ b/target/i386/hvf/x86_mmu.c
691
@@ -XXX,XX +XXX,XX @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
692
pt->err_code |= MMU_PAGE_PT;
693
}
694
695
- uint32_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
696
+ uint32_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
697
/* check protection */
698
if (cr0 & CR0_WP) {
699
if (pt->write_access && !pte_write_access(pte)) {
700
@@ -XXX,XX +XXX,XX @@ static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
701
{
702
int top_level, level;
703
bool is_large = false;
704
- target_ulong cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);
705
+ target_ulong cr3 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR3);
706
uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
707
708
memset(pt, 0, sizeof(*pt));
709
diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c
710
index XXXXXXX..XXXXXXX 100644
711
--- a/target/i386/hvf/x86_task.c
712
+++ b/target/i386/hvf/x86_task.c
713
@@ -XXX,XX +XXX,XX @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
714
X86CPU *x86_cpu = X86_CPU(cpu);
715
CPUX86State *env = &x86_cpu->env;
716
717
- wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);
718
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_CR3, tss->cr3);
719
720
env->eip = tss->eip;
721
env->eflags = tss->eflags | 2;
722
@@ -XXX,XX +XXX,XX @@ static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segme
723
724
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
725
{
726
- uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
727
+ uint64_t rip = rreg(cpu->hvf->fd, HV_X86_RIP);
728
if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
729
gate_type != VMCS_INTR_T_HWINTR &&
730
gate_type != VMCS_INTR_T_NMI)) {
731
- int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
732
+ int ins_len = rvmcs(cpu->hvf->fd, VMCS_EXIT_INSTRUCTION_LENGTH);
733
macvm_set_rip(cpu, rip + ins_len);
734
return;
735
}
736
@@ -XXX,XX +XXX,XX @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
737
//ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
738
VM_PANIC("task_switch_16");
739
740
- macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);
741
+ macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS);
742
x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
743
vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);
744
745
store_regs(cpu);
746
747
- hv_vcpu_invalidate_tlb(cpu->hvf_fd);
748
- hv_vcpu_flush(cpu->hvf_fd);
749
+ hv_vcpu_invalidate_tlb(cpu->hvf->fd);
750
+ hv_vcpu_flush(cpu->hvf->fd);
751
}
752
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
753
index XXXXXXX..XXXXXXX 100644
754
--- a/target/i386/hvf/x86hvf.c
755
+++ b/target/i386/hvf/x86hvf.c
756
@@ -XXX,XX +XXX,XX @@ void hvf_put_xsave(CPUState *cpu_state)
757
758
x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);
759
760
- if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
761
+ if (hv_vcpu_write_fpstate(cpu_state->hvf->fd, (void*)xsave, 4096)) {
762
abort();
763
}
764
}
765
@@ -XXX,XX +XXX,XX @@ void hvf_put_segments(CPUState *cpu_state)
766
CPUX86State *env = &X86_CPU(cpu_state)->env;
767
struct vmx_segment seg;
768
769
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
770
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
771
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
772
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
773
774
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
775
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
776
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
777
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
778
779
- /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
780
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
781
+ /* wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */
782
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3, env->cr[3]);
783
vmx_update_tpr(cpu_state);
784
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);
785
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer);
786
787
- macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
788
- macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);
789
+ macvm_set_cr4(cpu_state->hvf->fd, env->cr[4]);
790
+ macvm_set_cr0(cpu_state->hvf->fd, env->cr[0]);
791
792
hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
793
vmx_write_segment_descriptor(cpu_state, &seg, R_CS);
794
@@ -XXX,XX +XXX,XX @@ void hvf_put_segments(CPUState *cpu_state)
795
hvf_set_segment(cpu_state, &seg, &env->ldt, false);
796
vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);
797
798
- hv_vcpu_flush(cpu_state->hvf_fd);
799
+ hv_vcpu_flush(cpu_state->hvf->fd);
800
}
801
802
void hvf_put_msrs(CPUState *cpu_state)
803
{
804
CPUX86State *env = &X86_CPU(cpu_state)->env;
805
806
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
807
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS,
808
env->sysenter_cs);
809
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
810
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP,
811
env->sysenter_esp);
812
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
813
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP,
814
env->sysenter_eip);
815
816
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);
817
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_STAR, env->star);
818
819
#ifdef TARGET_X86_64
820
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
821
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
822
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
823
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
824
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_CSTAR, env->cstar);
825
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase);
826
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FMASK, env->fmask);
827
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_LSTAR, env->lstar);
828
#endif
829
830
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
831
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);
832
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_GSBASE, env->segs[R_GS].base);
833
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FSBASE, env->segs[R_FS].base);
834
}
835
836
837
@@ -XXX,XX +XXX,XX @@ void hvf_get_xsave(CPUState *cpu_state)
838
839
xsave = X86_CPU(cpu_state)->env.xsave_buf;
840
841
- if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
842
+ if (hv_vcpu_read_fpstate(cpu_state->hvf->fd, (void*)xsave, 4096)) {
843
abort();
844
}
845
846
@@ -XXX,XX +XXX,XX @@ void hvf_get_segments(CPUState *cpu_state)
847
vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
848
hvf_get_segment(&env->ldt, &seg);
849
850
- env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
851
- env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
852
- env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
853
- env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);
854
+ env->idt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
855
+ env->idt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE);
856
+ env->gdt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
857
+ env->gdt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE);
858
859
- env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
860
+ env->cr[0] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR0);
861
env->cr[2] = 0;
862
- env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
863
- env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);
864
+ env->cr[3] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3);
865
+ env->cr[4] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR4);
866
867
- env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
868
+ env->efer = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER);
869
}
870
871
void hvf_get_msrs(CPUState *cpu_state)
872
@@ -XXX,XX +XXX,XX @@ void hvf_get_msrs(CPUState *cpu_state)
873
CPUX86State *env = &X86_CPU(cpu_state)->env;
874
uint64_t tmp;
875
876
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
877
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp);
878
env->sysenter_cs = tmp;
879
880
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
881
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp);
882
env->sysenter_esp = tmp;
883
884
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
885
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp);
886
env->sysenter_eip = tmp;
887
888
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);
889
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_STAR, &env->star);
890
891
#ifdef TARGET_X86_64
892
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
893
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
894
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
895
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
896
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_CSTAR, &env->cstar);
897
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase);
898
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_FMASK, &env->fmask);
899
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_LSTAR, &env->lstar);
900
#endif
901
902
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);
903
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_APICBASE, &tmp);
904
905
- env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
906
+ env->tsc = rdtscp() + rvmcs(cpu_state->hvf->fd, VMCS_TSC_OFFSET);
907
}
908
909
int hvf_put_registers(CPUState *cpu_state)
910
@@ -XXX,XX +XXX,XX @@ int hvf_put_registers(CPUState *cpu_state)
911
X86CPU *x86cpu = X86_CPU(cpu_state);
912
CPUX86State *env = &x86cpu->env;
913
914
- wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
915
- wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
916
- wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
917
- wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
918
- wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
919
- wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
920
- wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
921
- wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
922
- wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
923
- wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
924
- wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
925
- wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
926
- wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
927
- wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
928
- wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
929
- wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
930
- wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
931
- wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);
932
+ wreg(cpu_state->hvf->fd, HV_X86_RAX, env->regs[R_EAX]);
933
+ wreg(cpu_state->hvf->fd, HV_X86_RBX, env->regs[R_EBX]);
934
+ wreg(cpu_state->hvf->fd, HV_X86_RCX, env->regs[R_ECX]);
935
+ wreg(cpu_state->hvf->fd, HV_X86_RDX, env->regs[R_EDX]);
936
+ wreg(cpu_state->hvf->fd, HV_X86_RBP, env->regs[R_EBP]);
937
+ wreg(cpu_state->hvf->fd, HV_X86_RSP, env->regs[R_ESP]);
938
+ wreg(cpu_state->hvf->fd, HV_X86_RSI, env->regs[R_ESI]);
939
+ wreg(cpu_state->hvf->fd, HV_X86_RDI, env->regs[R_EDI]);
940
+ wreg(cpu_state->hvf->fd, HV_X86_R8, env->regs[8]);
941
+ wreg(cpu_state->hvf->fd, HV_X86_R9, env->regs[9]);
942
+ wreg(cpu_state->hvf->fd, HV_X86_R10, env->regs[10]);
943
+ wreg(cpu_state->hvf->fd, HV_X86_R11, env->regs[11]);
944
+ wreg(cpu_state->hvf->fd, HV_X86_R12, env->regs[12]);
945
+ wreg(cpu_state->hvf->fd, HV_X86_R13, env->regs[13]);
946
+ wreg(cpu_state->hvf->fd, HV_X86_R14, env->regs[14]);
947
+ wreg(cpu_state->hvf->fd, HV_X86_R15, env->regs[15]);
948
+ wreg(cpu_state->hvf->fd, HV_X86_RFLAGS, env->eflags);
949
+ wreg(cpu_state->hvf->fd, HV_X86_RIP, env->eip);
950
951
- wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);
952
+ wreg(cpu_state->hvf->fd, HV_X86_XCR0, env->xcr0);
953
954
hvf_put_xsave(cpu_state);
955
956
@@ -XXX,XX +XXX,XX @@ int hvf_put_registers(CPUState *cpu_state)
957
958
hvf_put_msrs(cpu_state);
959
960
- wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
961
- wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
962
- wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
963
- wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
964
- wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
965
- wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
966
- wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
967
- wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);
968
+ wreg(cpu_state->hvf->fd, HV_X86_DR0, env->dr[0]);
969
+ wreg(cpu_state->hvf->fd, HV_X86_DR1, env->dr[1]);
970
+ wreg(cpu_state->hvf->fd, HV_X86_DR2, env->dr[2]);
971
+ wreg(cpu_state->hvf->fd, HV_X86_DR3, env->dr[3]);
972
+ wreg(cpu_state->hvf->fd, HV_X86_DR4, env->dr[4]);
973
+ wreg(cpu_state->hvf->fd, HV_X86_DR5, env->dr[5]);
974
+ wreg(cpu_state->hvf->fd, HV_X86_DR6, env->dr[6]);
975
+ wreg(cpu_state->hvf->fd, HV_X86_DR7, env->dr[7]);
976
977
return 0;
978
}
979
@@ -XXX,XX +XXX,XX @@ int hvf_get_registers(CPUState *cpu_state)
980
X86CPU *x86cpu = X86_CPU(cpu_state);
981
CPUX86State *env = &x86cpu->env;
982
983
- env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
984
- env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
985
- env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
986
- env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
987
- env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
988
- env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
989
- env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
990
- env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
991
- env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
992
- env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
993
- env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
994
- env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
995
- env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
996
- env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
997
- env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
998
- env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);
999
+ env->regs[R_EAX] = rreg(cpu_state->hvf->fd, HV_X86_RAX);
1000
+ env->regs[R_EBX] = rreg(cpu_state->hvf->fd, HV_X86_RBX);
1001
+ env->regs[R_ECX] = rreg(cpu_state->hvf->fd, HV_X86_RCX);
1002
+ env->regs[R_EDX] = rreg(cpu_state->hvf->fd, HV_X86_RDX);
1003
+ env->regs[R_EBP] = rreg(cpu_state->hvf->fd, HV_X86_RBP);
1004
+ env->regs[R_ESP] = rreg(cpu_state->hvf->fd, HV_X86_RSP);
1005
+ env->regs[R_ESI] = rreg(cpu_state->hvf->fd, HV_X86_RSI);
1006
+ env->regs[R_EDI] = rreg(cpu_state->hvf->fd, HV_X86_RDI);
1007
+ env->regs[8] = rreg(cpu_state->hvf->fd, HV_X86_R8);
1008
+ env->regs[9] = rreg(cpu_state->hvf->fd, HV_X86_R9);
1009
+ env->regs[10] = rreg(cpu_state->hvf->fd, HV_X86_R10);
1010
+ env->regs[11] = rreg(cpu_state->hvf->fd, HV_X86_R11);
1011
+ env->regs[12] = rreg(cpu_state->hvf->fd, HV_X86_R12);
1012
+ env->regs[13] = rreg(cpu_state->hvf->fd, HV_X86_R13);
1013
+ env->regs[14] = rreg(cpu_state->hvf->fd, HV_X86_R14);
1014
+ env->regs[15] = rreg(cpu_state->hvf->fd, HV_X86_R15);
1015
1016
- env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
1017
- env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);
1018
+ env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
1019
+ env->eip = rreg(cpu_state->hvf->fd, HV_X86_RIP);
1020
1021
hvf_get_xsave(cpu_state);
1022
- env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);
1023
+ env->xcr0 = rreg(cpu_state->hvf->fd, HV_X86_XCR0);
1024
1025
hvf_get_segments(cpu_state);
1026
hvf_get_msrs(cpu_state);
1027
1028
- env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
1029
- env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
1030
- env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
1031
- env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
1032
- env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
1033
- env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
1034
- env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
1035
- env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);
1036
+ env->dr[0] = rreg(cpu_state->hvf->fd, HV_X86_DR0);
1037
+ env->dr[1] = rreg(cpu_state->hvf->fd, HV_X86_DR1);
1038
+ env->dr[2] = rreg(cpu_state->hvf->fd, HV_X86_DR2);
1039
+ env->dr[3] = rreg(cpu_state->hvf->fd, HV_X86_DR3);
1040
+ env->dr[4] = rreg(cpu_state->hvf->fd, HV_X86_DR4);
1041
+ env->dr[5] = rreg(cpu_state->hvf->fd, HV_X86_DR5);
1042
+ env->dr[6] = rreg(cpu_state->hvf->fd, HV_X86_DR6);
1043
+ env->dr[7] = rreg(cpu_state->hvf->fd, HV_X86_DR7);
1044
1045
x86_update_hflags(env);
1046
return 0;
1047
@@ -XXX,XX +XXX,XX @@ int hvf_get_registers(CPUState *cpu_state)
1048
static void vmx_set_int_window_exiting(CPUState *cpu)
1049
{
1050
uint64_t val;
1051
- val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
1052
- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
1053
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
1054
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
1055
VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
1056
}
1057
1058
void vmx_clear_int_window_exiting(CPUState *cpu)
1059
{
1060
uint64_t val;
1061
- val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
1062
- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
1063
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
1064
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
1065
~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
1066
}
1067
1068
@@ -XXX,XX +XXX,XX @@ bool hvf_inject_interrupts(CPUState *cpu_state)
1069
uint64_t info = 0;
1070
if (have_event) {
1071
info = vector | intr_type | VMCS_INTR_VALID;
1072
- uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);
1073
+ uint64_t reason = rvmcs(cpu_state->hvf->fd, VMCS_EXIT_REASON);
1074
if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
1075
vmx_clear_nmi_blocking(cpu_state);
1076
}
1077
@@ -XXX,XX +XXX,XX @@ bool hvf_inject_interrupts(CPUState *cpu_state)
1078
info &= ~(1 << 12); /* clear undefined bit */
1079
if (intr_type == VMCS_INTR_T_SWINTR ||
1080
intr_type == VMCS_INTR_T_SWEXCEPTION) {
1081
- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
1082
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
1083
}
1084
1085
if (env->has_error_code) {
1086
- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,
1087
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR,
1088
env->error_code);
1089
/* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */
1090
info |= VMCS_INTR_DEL_ERRCODE;
1091
}
1092
/*printf("reinject %lx err %d\n", info, err);*/
1093
- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
1094
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);
1095
};
1096
}
1097
1098
@@ -XXX,XX +XXX,XX @@ bool hvf_inject_interrupts(CPUState *cpu_state)
1099
if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
1100
cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
1101
info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
1102
- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
1103
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);
1104
} else {
1105
vmx_set_nmi_window_exiting(cpu_state);
1106
}
1107
@@ -XXX,XX +XXX,XX @@ bool hvf_inject_interrupts(CPUState *cpu_state)
1108
int line = cpu_get_pic_interrupt(&x86cpu->env);
1109
cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
1110
if (line >= 0) {
1111
- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
1112
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, line |
1113
VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
1114
}
1115
}
1116
@@ -XXX,XX +XXX,XX @@ int hvf_process_events(CPUState *cpu_state)
1117
X86CPU *cpu = X86_CPU(cpu_state);
1118
CPUX86State *env = &cpu->env;
1119
1120
- env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
1121
+ env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
1122
1123
if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
1124
cpu_synchronize_state(cpu_state);
1125
--
1126
2.24.3 (Apple Git-128)
1127
1128
1
In Hypervisor.framework, we just pass PSCI calls straight on to the QEMU emulation
1
In Hypervisor.framework, we just pass PSCI calls straight on to the QEMU emulation
2
of it. That means, if TCG is compatible with PSCI 0.2, so are we. Let's reflect
2
of it. That means, if TCG is compatible with PSCI 0.2, so are we. Let's reflect
3
that fact in code too.
3
that fact in code too.
4
4
5
Signed-off-by: Alexander Graf <agraf@csgraf.de>
5
Signed-off-by: Alexander Graf <agraf@csgraf.de>
6
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
7
6
---
8
---
7
target/arm/cpu.c | 4 ++++
9
8
1 file changed, 4 insertions(+)
10
v3 -> v4:
11
12
- Combine both if statements
13
---
14
target/arm/cpu.c | 4 ++--
15
1 file changed, 2 insertions(+), 2 deletions(-)
9
16
10
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
17
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
11
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
12
--- a/target/arm/cpu.c
19
--- a/target/arm/cpu.c
13
+++ b/target/arm/cpu.c
20
+++ b/target/arm/cpu.c
14
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_initfn(Object *obj)
21
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_initfn(Object *obj)
15
if (tcg_enabled()) {
22
cpu->psci_version = 1; /* By default assume PSCI v0.1 */
16
cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
23
cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
24
25
- if (tcg_enabled()) {
26
- cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
27
+ if (tcg_enabled() || hvf_enabled()) {
28
+ cpu->psci_version = 2; /* TCG and HVF implement PSCI 0.2 */
17
}
29
}
18
+
19
+ if (hvf_enabled()) {
20
+ cpu->psci_version = 2; /* HVF uses TCG's PSCI */
21
+ }
22
}
30
}
23
31
24
static Property arm_cpu_gt_cntfrq_property =
25
--
32
--
26
2.24.3 (Apple Git-128)
33
2.24.3 (Apple Git-128)
27
34
28
35
1
When kicking another vCPU, we get an OS function that explicitly does that for us
1
The hooks we have that call us after reset, init and loadvm really all
2
on Apple Silicon. That works better than the current signaling logic, so let's make
2
just want to say "The reference of all register state is in the QEMU
3
use of it there.
3
vcpu struct, please push it".
4
5
We already have a working pushing mechanism, though, called cpu->vcpu_dirty,
6
so we can just reuse that for all of the above, syncing state properly the
7
next time we actually execute a vCPU.
8
9
This fixes PSCI resets on ARM, as they modify CPU state even after the
10
post init call has completed, but before we execute the vCPU again.
11
12
To also make the scheme work for x86, we have to make sure we don't
13
move stale eflags into our env when the vcpu state is dirty.
4
14
5
Signed-off-by: Alexander Graf <agraf@csgraf.de>
15
Signed-off-by: Alexander Graf <agraf@csgraf.de>
16
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
17
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com>
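
The description above boils down to a single "mark dirty now, push state
right before the next vCPU entry" flow. A minimal sketch of that flow
(illustrative names only, not the real QEMU symbols; the real code calls
hvf_put_registers() where the comment says so):

#include <stdbool.h>

typedef struct DemoVCpu {
    bool vcpu_dirty;                   /* QEMU-side state is the reference */
} DemoVCpu;

/* post-reset, post-init and pre-loadvm all collapse into this one hook */
static void demo_synchronize_set_dirty(DemoVCpu *cpu)
{
    cpu->vcpu_dirty = true;
}

/* runs on the vCPU thread right before entering the hypervisor */
static void demo_flush_state(DemoVCpu *cpu)
{
    if (cpu->vcpu_dirty) {
        /* the real code calls hvf_put_registers(cpu) here */
        cpu->vcpu_dirty = false;
    }
}

int main(void)
{
    DemoVCpu cpu = { .vcpu_dirty = false };

    demo_synchronize_set_dirty(&cpu);  /* e.g. after a PSCI-triggered reset */
    demo_flush_state(&cpu);            /* state is pushed exactly once */
    return cpu.vcpu_dirty;             /* 0: nothing left to sync */
}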
6
---
18
---
7
accel/hvf/hvf-cpus.c | 12 ++++++++++++
19
accel/hvf/hvf-cpus.c | 27 +++++++--------------------
8
1 file changed, 12 insertions(+)
20
target/i386/hvf/x86hvf.c | 5 ++++-
21
2 files changed, 11 insertions(+), 21 deletions(-)
9
22
10
diff --git a/accel/hvf/hvf-cpus.c b/accel/hvf/hvf-cpus.c
23
diff --git a/accel/hvf/hvf-cpus.c b/accel/hvf/hvf-cpus.c
11
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/hvf/hvf-cpus.c
25
--- a/accel/hvf/hvf-cpus.c
13
+++ b/accel/hvf/hvf-cpus.c
26
+++ b/accel/hvf/hvf-cpus.c
14
@@ -XXX,XX +XXX,XX @@ static void hvf_start_vcpu_thread(CPUState *cpu)
27
@@ -XXX,XX +XXX,XX @@ static void hvf_cpu_synchronize_state(CPUState *cpu)
15
cpu, QEMU_THREAD_JOINABLE);
28
}
16
}
29
}
17
30
18
+#ifdef __aarch64__
31
-static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
19
+static void hvf_kick_vcpu_thread(CPUState *cpu)
32
- run_on_cpu_data arg)
20
+{
33
+static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
21
+ if (!qemu_cpu_is_self(cpu)) {
34
+ run_on_cpu_data arg)
22
+ hv_vcpus_exit(&cpu->hvf_fd, 1);
35
{
36
- hvf_put_registers(cpu);
37
- cpu->vcpu_dirty = false;
38
+ /* QEMU state is the reference, push it to HVF now and on next entry */
39
+ cpu->vcpu_dirty = true;
40
}
41
42
static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
43
{
44
- run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
45
-}
46
-
47
-static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
48
- run_on_cpu_data arg)
49
-{
50
- hvf_put_registers(cpu);
51
- cpu->vcpu_dirty = false;
52
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
53
}
54
55
static void hvf_cpu_synchronize_post_init(CPUState *cpu)
56
{
57
- run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
58
-}
59
-
60
-static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
61
- run_on_cpu_data arg)
62
-{
63
- cpu->vcpu_dirty = true;
64
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
65
}
66
67
static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
68
{
69
- run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
70
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
71
}
72
73
static void hvf_vcpu_destroy(CPUState *cpu)
74
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/i386/hvf/x86hvf.c
77
+++ b/target/i386/hvf/x86hvf.c
78
@@ -XXX,XX +XXX,XX @@ int hvf_process_events(CPUState *cpu_state)
79
X86CPU *cpu = X86_CPU(cpu_state);
80
CPUX86State *env = &cpu->env;
81
82
- env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
83
+ if (!cpu_state->vcpu_dirty) {
84
+ /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */
85
+ env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
23
+ }
86
+ }
24
+}
87
25
+#endif
88
if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
26
+
89
cpu_synchronize_state(cpu_state);
27
static const CpusAccel hvf_cpus = {
28
.create_vcpu_thread = hvf_start_vcpu_thread,
29
+#ifdef __aarch64__
30
+ .kick_vcpu_thread = hvf_kick_vcpu_thread,
31
+#endif
32
33
.synchronize_post_reset = hvf_cpu_synchronize_post_reset,
34
.synchronize_post_init = hvf_cpu_synchronize_post_init,
35
--
90
--
36
2.24.3 (Apple Git-128)
91
2.24.3 (Apple Git-128)
37
92
38
93
...
...
9
- Vtimer acknowledgement is hacky
9
- Vtimer acknowledgement is hacky
10
- Should implement more sysregs and fault on invalid ones then
10
- Should implement more sysregs and fault on invalid ones then
11
- WFI handling is missing, need to marry it with vtimer
11
- WFI handling is missing, need to marry it with vtimer
12
12
13
Signed-off-by: Alexander Graf <agraf@csgraf.de>
13
Signed-off-by: Alexander Graf <agraf@csgraf.de>
14
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
15
14
---
16
---
15
MAINTAINERS | 5 +
17
16
accel/hvf/hvf-cpus.c | 4 +
18
v1 -> v2:
17
include/hw/core/cpu.h | 3 +-
19
18
target/arm/hvf/hvf.c | 345 ++++++++++++++++++++++++++++++++++++++++++
20
- Merge vcpu kick function patch
19
4 files changed, 356 insertions(+), 1 deletion(-)
21
- Implement WFI handling (allows vCPUs to sleep)
22
- Synchronize system registers (fixes OVMF crashes and reboot)
23
- Don't always call cpu_synchronize_state()
24
- Use more fine grained iothread locking
25
- Populate aa64mmfr0 from hardware
26
27
v2 -> v3:
28
29
- Advance PC on SMC
30
- Use cp list interface for sysreg syncs
31
- Do not set current_cpu
32
- Fix sysreg isread mask
33
- Move sysreg handling to functions
34
- Remove WFI logic again
35
- Revert to global iothread locking
36
- Use Hypervisor.h on arm, hv.h does not contain aarch64 definitions
37
38
v3 -> v4:
39
40
- No longer include Hypervisor.h
41
---
42
MAINTAINERS | 5 +
43
accel/hvf/hvf-cpus.c | 14 +
44
include/sysemu/hvf_int.h | 9 +-
45
target/arm/hvf/hvf.c | 618 +++++++++++++++++++++++++++++++++++++++
46
4 files changed, 645 insertions(+), 1 deletion(-)
20
create mode 100644 target/arm/hvf/hvf.c
47
create mode 100644 target/arm/hvf/hvf.c
21
48
22
diff --git a/MAINTAINERS b/MAINTAINERS
49
diff --git a/MAINTAINERS b/MAINTAINERS
23
index XXXXXXX..XXXXXXX 100644
50
index XXXXXXX..XXXXXXX 100644
24
--- a/MAINTAINERS
51
--- a/MAINTAINERS
...
...
38
diff --git a/accel/hvf/hvf-cpus.c b/accel/hvf/hvf-cpus.c
65
diff --git a/accel/hvf/hvf-cpus.c b/accel/hvf/hvf-cpus.c
39
index XXXXXXX..XXXXXXX 100644
66
index XXXXXXX..XXXXXXX 100644
40
--- a/accel/hvf/hvf-cpus.c
67
--- a/accel/hvf/hvf-cpus.c
41
+++ b/accel/hvf/hvf-cpus.c
68
+++ b/accel/hvf/hvf-cpus.c
42
@@ -XXX,XX +XXX,XX @@
69
@@ -XXX,XX +XXX,XX @@
43
70
#include "sysemu/runstate.h"
44
#include <Hypervisor/Hypervisor.h>
71
#include "qemu/guest-random.h"
45
72
46
+#ifdef __aarch64__
73
+#ifdef __aarch64__
47
+#define HV_VM_DEFAULT NULL
74
+#define HV_VM_DEFAULT NULL
48
+#endif
75
+#endif
49
+
76
+
50
/* Memory slots */
77
/* Memory slots */
51
78
52
struct mac_slot {
79
struct mac_slot {
53
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
80
@@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu)
81
pthread_sigmask(SIG_BLOCK, NULL, &set);
82
sigdelset(&set, SIG_IPI);
83
84
+#ifdef __aarch64__
85
+ r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
86
+#else
87
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
88
+#endif
89
cpu->vcpu_dirty = 1;
90
assert_hvf_ok(r);
91
92
@@ -XXX,XX +XXX,XX @@ static void hvf_start_vcpu_thread(CPUState *cpu)
93
cpu, QEMU_THREAD_JOINABLE);
94
}
95
96
+__attribute__((weak)) void hvf_kick_vcpu_thread(CPUState *cpu)
97
+{
98
+ cpus_kick_thread(cpu);
99
+}
100
+
101
static const CpusAccel hvf_cpus = {
102
.create_vcpu_thread = hvf_start_vcpu_thread,
103
+ .kick_vcpu_thread = hvf_kick_vcpu_thread,
104
105
.synchronize_post_reset = hvf_cpu_synchronize_post_reset,
106
.synchronize_post_init = hvf_cpu_synchronize_post_init,
107
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
54
index XXXXXXX..XXXXXXX 100644
108
index XXXXXXX..XXXXXXX 100644
55
--- a/include/hw/core/cpu.h
109
--- a/include/sysemu/hvf_int.h
56
+++ b/include/hw/core/cpu.h
110
+++ b/include/sysemu/hvf_int.h
57
@@ -XXX,XX +XXX,XX @@ struct CPUState {
111
@@ -XXX,XX +XXX,XX @@
58
112
#ifndef HVF_INT_H
59
struct hax_vcpu_state *hax_vcpu;
113
#define HVF_INT_H
60
114
61
- int hvf_fd;
115
+#include "qemu/osdep.h"
62
+ uint64_t hvf_fd;
116
+#ifdef __aarch64__
63
+ void *hvf_exit;
117
+#include <Hypervisor/Hypervisor.h>
64
118
+#else
65
/* track IOMMUs whose translations we've cached in the TCG TLB */
119
#include <Hypervisor/hv.h>
66
GArray *iommu_notifiers;
120
+#endif
121
122
/* hvf_slot flags */
123
#define HVF_SLOT_LOG (1 << 0)
124
@@ -XXX,XX +XXX,XX @@ struct HVFState {
125
extern HVFState *hvf_state;
126
127
struct hvf_vcpu_state {
128
- int fd;
129
+ uint64_t fd;
130
+ void *exit;
131
};
132
133
void assert_hvf_ok(hv_return_t ret);
134
@@ -XXX,XX +XXX,XX @@ int hvf_arch_init_vcpu(CPUState *cpu);
135
void hvf_arch_vcpu_destroy(CPUState *cpu);
136
int hvf_vcpu_exec(CPUState *cpu);
137
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
138
+void hvf_kick_vcpu_thread(CPUState *cpu);
139
140
#endif
67
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
141
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
68
new file mode 100644
142
new file mode 100644
69
index XXXXXXX..XXXXXXX
143
index XXXXXXX..XXXXXXX
70
--- /dev/null
144
--- /dev/null
71
+++ b/target/arm/hvf/hvf.c
145
+++ b/target/arm/hvf/hvf.c
...
...
87
+#include "sysemu/runstate.h"
161
+#include "sysemu/runstate.h"
88
+#include "sysemu/hvf.h"
162
+#include "sysemu/hvf.h"
89
+#include "sysemu/hvf_int.h"
163
+#include "sysemu/hvf_int.h"
90
+#include "sysemu/hw_accel.h"
164
+#include "sysemu/hw_accel.h"
91
+
165
+
92
+#include <Hypervisor/Hypervisor.h>
93
+
94
+#include "exec/address-spaces.h"
166
+#include "exec/address-spaces.h"
95
+#include "hw/irq.h"
167
+#include "hw/irq.h"
96
+#include "qemu/main-loop.h"
168
+#include "qemu/main-loop.h"
97
+#include "sysemu/accel.h"
169
+#include "sysemu/accel.h"
170
+#include "sysemu/cpus.h"
98
+#include "target/arm/cpu.h"
171
+#include "target/arm/cpu.h"
99
+#include "target/arm/internals.h"
172
+#include "target/arm/internals.h"
100
+
173
+
101
+#define HVF_DEBUG 0
174
+#define HVF_DEBUG 0
102
+#define DPRINTF(...) \
175
+#define DPRINTF(...) \
103
+ if (HVF_DEBUG) { \
176
+ if (HVF_DEBUG) { \
104
+ fprintf(stderr, "HVF %s:%d ", __func__, __LINE__); \
177
+ fprintf(stderr, "HVF %s:%d ", __func__, __LINE__); \
105
+ fprintf(stderr, __VA_ARGS__); \
178
+ fprintf(stderr, __VA_ARGS__); \
106
+ fprintf(stderr, "\n"); \
179
+ fprintf(stderr, "\n"); \
107
+ }
180
+ }
108
+
181
+
182
+#define HVF_SYSREG(crn, crm, op0, op1, op2) \
183
+ ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
184
+#define PL1_WRITE_MASK 0x4
185
+
109
+#define SYSREG(op0, op1, op2, crn, crm) \
186
+#define SYSREG(op0, op1, op2, crn, crm) \
110
+ ((op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (crm << 1))
187
+ ((op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (crm << 1))
111
+#define SYSREG_MASK SYSREG(0x3, 0x7, 0x7, 0xf, 0xf)
188
+#define SYSREG_MASK SYSREG(0x3, 0x7, 0x7, 0xf, 0xf)
112
+#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 1, 14, 0)
189
+#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 1, 14, 0)
113
+#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 0, 9, 13)
190
+#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 0, 9, 13)
191
+
192
+#define WFX_IS_WFE (1 << 0)
114
+
193
+
115
+struct hvf_reg_match {
194
+struct hvf_reg_match {
116
+ int reg;
195
+ int reg;
117
+ uint64_t offset;
196
+ uint64_t offset;
118
+};
197
+};
...
...
150
+ { HV_REG_X29, offsetof(CPUARMState, xregs[29]) },
229
+ { HV_REG_X29, offsetof(CPUARMState, xregs[29]) },
151
+ { HV_REG_X30, offsetof(CPUARMState, xregs[30]) },
230
+ { HV_REG_X30, offsetof(CPUARMState, xregs[30]) },
152
+ { HV_REG_PC, offsetof(CPUARMState, pc) },
231
+ { HV_REG_PC, offsetof(CPUARMState, pc) },
153
+};
232
+};
154
+
233
+
234
+struct hvf_sreg_match {
235
+ int reg;
236
+ uint32_t key;
237
+};
238
+
239
+static const struct hvf_sreg_match hvf_sreg_match[] = {
240
+ { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) },
241
+ { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) },
242
+ { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) },
243
+ { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) },
244
+
245
+ { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) },
246
+ { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) },
247
+ { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) },
248
+ { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) },
249
+
250
+ { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) },
251
+ { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) },
252
+ { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) },
253
+ { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) },
254
+
255
+ { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) },
256
+ { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) },
257
+ { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) },
258
+ { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) },
259
+
260
+ { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) },
261
+ { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) },
262
+ { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) },
263
+ { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) },
264
+
265
+ { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) },
266
+ { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) },
267
+ { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) },
268
+ { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) },
269
+
270
+ { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) },
271
+ { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) },
272
+ { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) },
273
+ { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) },
274
+
275
+ { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 4) },
276
+ { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) },
277
+ { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) },
278
+ { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) },
279
+
280
+ { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) },
281
+ { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) },
282
+ { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) },
283
+ { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) },
284
+
285
+ { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) },
286
+ { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) },
287
+ { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) },
288
+ { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) },
289
+
290
+ { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) },
291
+ { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) },
292
+ { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) },
293
+ { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) },
294
+
295
+ { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) },
296
+ { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) },
297
+ { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) },
298
+ { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) },
299
+
300
+ { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) },
301
+ { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) },
302
+ { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) },
303
+ { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) },
304
+
305
+ { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) },
306
+ { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) },
307
+ { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) },
308
+ { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) },
309
+
310
+ { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) },
311
+ { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) },
312
+ { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) },
313
+ { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) },
314
+
315
+ { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) },
316
+ { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) },
317
+ { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) },
318
+ { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) },
319
+
320
+#ifdef SYNC_NO_RAW_REGS
321
+ /*
322
+ * The registers below are manually synced on init because they are
323
+ * marked as NO_RAW. We still list them to make number space sync easier.
324
+ */
325
+ { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
326
+ { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
327
+ { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
328
+ { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
329
+#endif
330
+ { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) },
331
+ { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
332
+ { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
333
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
334
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
335
+#ifdef SYNC_NO_MMFR0
336
+ /* We keep the hardware MMFR0 around. HW limits are there anyway */
337
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
338
+#endif
339
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
340
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
341
+
342
+ { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
343
+ { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
344
+ { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
345
+ { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
346
+ { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
347
+ { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },
348
+
349
+ { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
350
+ { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
351
+ { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
352
+ { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
353
+ { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
354
+ { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
355
+ { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
356
+ { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
357
+ { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
358
+ { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },
359
+
360
+ { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 1, 0) },
361
+ { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
362
+ { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
363
+ { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
364
+ { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
365
+ { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
366
+ { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
367
+ { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
368
+ { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
369
+ { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
370
+ { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
371
+ { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
372
+ { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
373
+ { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
374
+ { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
375
+ { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
376
+ { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
377
+ { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
378
+ { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
379
+ { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
380
+};
381
+
155
+int hvf_get_registers(CPUState *cpu)
382
+int hvf_get_registers(CPUState *cpu)
156
+{
383
+{
157
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
384
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
158
+ CPUARMState *env = &arm_cpu->env;
385
+ CPUARMState *env = &arm_cpu->env;
159
+ hv_return_t ret;
386
+ hv_return_t ret;
160
+ uint64_t val;
387
+ uint64_t val;
161
+ int i;
388
+ int i;
162
+
389
+
163
+ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
390
+ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
164
+ ret = hv_vcpu_get_reg(cpu->hvf_fd, hvf_reg_match[i].reg, &val);
391
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
165
+ *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
392
+ *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
166
+ assert_hvf_ok(ret);
393
+ assert_hvf_ok(ret);
167
+ }
394
+ }
168
+
395
+
169
+ val = 0;
396
+ val = 0;
170
+ ret = hv_vcpu_get_reg(cpu->hvf_fd, HV_REG_FPCR, &val);
397
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
171
+ assert_hvf_ok(ret);
398
+ assert_hvf_ok(ret);
172
+ vfp_set_fpcr(env, val);
399
+ vfp_set_fpcr(env, val);
173
+
400
+
174
+ val = 0;
401
+ val = 0;
175
+ ret = hv_vcpu_get_reg(cpu->hvf_fd, HV_REG_FPSR, &val);
402
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
176
+ assert_hvf_ok(ret);
403
+ assert_hvf_ok(ret);
177
+ vfp_set_fpsr(env, val);
404
+ vfp_set_fpsr(env, val);
178
+
405
+
179
+ ret = hv_vcpu_get_reg(cpu->hvf_fd, HV_REG_CPSR, &val);
406
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
180
+ assert_hvf_ok(ret);
407
+ assert_hvf_ok(ret);
181
+ pstate_write(env, val);
408
+ pstate_write(env, val);
409
+
410
+ for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
411
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
412
+ assert_hvf_ok(ret);
413
+
414
+ arm_cpu->cpreg_values[i] = val;
415
+ }
416
+ write_list_to_cpustate(arm_cpu);
182
+
417
+
183
+ return 0;
418
+ return 0;
184
+}
419
+}
185
+
420
+
186
+int hvf_put_registers(CPUState *cpu)
421
+int hvf_put_registers(CPUState *cpu)
...
...
191
+ uint64_t val;
426
+ uint64_t val;
192
+ int i;
427
+ int i;
193
+
428
+
194
+ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
429
+ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
195
+ val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
430
+ val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
196
+ ret = hv_vcpu_set_reg(cpu->hvf_fd, hvf_reg_match[i].reg, val);
431
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
197
+
432
+
198
+ assert_hvf_ok(ret);
433
+ assert_hvf_ok(ret);
199
+ }
434
+ }
200
+
435
+
201
+ ret = hv_vcpu_set_reg(cpu->hvf_fd, HV_REG_FPCR, vfp_get_fpcr(env));
436
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
202
+ assert_hvf_ok(ret);
437
+ assert_hvf_ok(ret);
203
+
438
+
204
+ ret = hv_vcpu_set_reg(cpu->hvf_fd, HV_REG_FPSR, vfp_get_fpsr(env));
439
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
205
+ assert_hvf_ok(ret);
440
+ assert_hvf_ok(ret);
206
+
441
+
207
+ ret = hv_vcpu_set_reg(cpu->hvf_fd, HV_REG_CPSR, pstate_read(env));
442
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
208
+ assert_hvf_ok(ret);
443
+ assert_hvf_ok(ret);
209
+
444
+
210
+ ret = hv_vcpu_set_sys_reg(cpu->hvf_fd, HV_SYS_REG_MPIDR_EL1,
445
+ write_cpustate_to_list(arm_cpu, false);
211
+ arm_cpu->mp_affinity);
446
+ for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
212
+ assert_hvf_ok(ret);
447
+ val = arm_cpu->cpreg_values[i];
448
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
449
+ assert_hvf_ok(ret);
450
+ }
213
+
451
+
214
+ return 0;
452
+ return 0;
453
+}
454
+
455
+static void flush_cpu_state(CPUState *cpu)
456
+{
457
+ if (cpu->vcpu_dirty) {
458
+ hvf_put_registers(cpu);
459
+ cpu->vcpu_dirty = false;
460
+ }
461
+}
462
+
463
+static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
464
+{
465
+ hv_return_t r;
466
+
467
+ flush_cpu_state(cpu);
468
+
469
+ if (rt < 31) {
470
+ r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
471
+ assert_hvf_ok(r);
472
+ }
473
+}
474
+
475
+static uint64_t hvf_get_reg(CPUState *cpu, int rt)
476
+{
477
+ uint64_t val = 0;
478
+ hv_return_t r;
479
+
480
+ flush_cpu_state(cpu);
481
+
482
+ if (rt < 31) {
483
+ r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
484
+ assert_hvf_ok(r);
485
+ }
486
+
487
+ return val;
215
+}
488
+}
216
+
489
+
217
+void hvf_arch_vcpu_destroy(CPUState *cpu)
490
+void hvf_arch_vcpu_destroy(CPUState *cpu)
218
+{
491
+{
219
+}
492
+}
220
+
493
+
221
+int hvf_arch_init_vcpu(CPUState *cpu)
494
+int hvf_arch_init_vcpu(CPUState *cpu)
222
+{
495
+{
223
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
496
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
224
+ CPUARMState *env = &arm_cpu->env;
497
+ CPUARMState *env = &arm_cpu->env;
498
+ uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
499
+ uint64_t pfr;
500
+ hv_return_t ret;
501
+ int i;
225
+
502
+
226
+ env->aarch64 = 1;
503
+ env->aarch64 = 1;
504
+ asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));
505
+
506
+ /* Allocate enough space for our sysreg sync */
507
+ arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
508
+ sregs_match_len);
509
+ arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
510
+ sregs_match_len);
511
+ arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
512
+ arm_cpu->cpreg_vmstate_indexes,
513
+ sregs_match_len);
514
+ arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
515
+ arm_cpu->cpreg_vmstate_values,
516
+ sregs_match_len);
517
+
518
+ memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));
519
+ arm_cpu->cpreg_array_len = sregs_match_len;
520
+ arm_cpu->cpreg_vmstate_array_len = sregs_match_len;
521
+
522
+ /* Populate cp list for all known sysregs */
523
+ for (i = 0; i < sregs_match_len; i++) {
524
+ const ARMCPRegInfo *ri;
525
+
526
+ arm_cpu->cpreg_indexes[i] = cpreg_to_kvm_id(hvf_sreg_match[i].key);
527
+
528
+ ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key);
529
+ if (ri) {
530
+ assert(!(ri->type & ARM_CP_NO_RAW));
531
+ }
532
+ }
533
+ write_cpustate_to_list(arm_cpu, false);
534
+
535
+ /* Set CP_NO_RAW system registers on init */
536
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
537
+ arm_cpu->midr);
538
+ assert_hvf_ok(ret);
539
+
540
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
541
+ arm_cpu->mp_affinity);
542
+ assert_hvf_ok(ret);
543
+
544
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
545
+ assert_hvf_ok(ret);
546
+ pfr |= env->gicv3state ? (1 << 24) : 0;
547
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
548
+ assert_hvf_ok(ret);
549
+
550
+ /* We're limited to underlying hardware caps, override internal versions */
551
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
552
+ &arm_cpu->isar.id_aa64mmfr0);
553
+ assert_hvf_ok(ret);
227
+
554
+
228
+ return 0;
555
+ return 0;
229
+}
556
+}
230
+
557
+
231
+static int hvf_process_events(CPUState *cpu)
558
+void hvf_kick_vcpu_thread(CPUState *cpu)
232
+{
559
+{
233
+ DPRINTF("");
560
+ hv_vcpus_exit(&cpu->hvf->fd, 1);
234
+ return 0;
561
+}
562
+
563
+static uint64_t hvf_sysreg_read(CPUState *cpu, uint32_t reg)
564
+{
565
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
566
+ uint64_t val = 0;
567
+
568
+ switch (reg) {
569
+ case SYSREG_CNTPCT_EL0:
570
+ val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
571
+ gt_cntfrq_period_ns(arm_cpu);
572
+ break;
573
+ case SYSREG_PMCCNTR_EL0:
574
+ val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
575
+ break;
576
+ default:
577
+ DPRINTF("unhandled sysreg read %08x (op0=%d op1=%d op2=%d "
578
+ "crn=%d crm=%d)", reg, (reg >> 20) & 0x3,
579
+ (reg >> 14) & 0x7, (reg >> 17) & 0x7,
580
+ (reg >> 10) & 0xf, (reg >> 1) & 0xf);
581
+ break;
582
+ }
583
+
584
+ return val;
585
+}
586
+
587
+static void hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
588
+{
589
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
590
+
591
+ switch (reg) {
592
+ case SYSREG_CNTPCT_EL0:
593
+ break;
594
+ default:
595
+ DPRINTF("unhandled sysreg write %08x", reg);
596
+ break;
597
+ }
235
+}
598
+}
236
+
599
+
237
+static int hvf_inject_interrupts(CPUState *cpu)
600
+static int hvf_inject_interrupts(CPUState *cpu)
238
+{
601
+{
239
+ if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
602
+ if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
240
+ DPRINTF("injecting FIQ");
603
+ DPRINTF("injecting FIQ");
241
+ hv_vcpu_set_pending_interrupt(cpu->hvf_fd, HV_INTERRUPT_TYPE_FIQ, true);
604
+ hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ, true);
242
+ }
605
+ }
243
+
606
+
244
+ if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
607
+ if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
245
+ DPRINTF("injecting IRQ");
608
+ DPRINTF("injecting IRQ");
246
+ hv_vcpu_set_pending_interrupt(cpu->hvf_fd, HV_INTERRUPT_TYPE_IRQ, true);
609
+ hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ, true);
247
+ }
610
+ }
248
+
611
+
249
+ return 0;
612
+ return 0;
250
+}
613
+}
251
+
614
+
252
+int hvf_vcpu_exec(CPUState *cpu)
615
+int hvf_vcpu_exec(CPUState *cpu)
253
+{
616
+{
254
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
617
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
255
+ CPUARMState *env = &arm_cpu->env;
618
+ CPUARMState *env = &arm_cpu->env;
256
+ hv_vcpu_exit_t *hvf_exit = cpu->hvf_exit;
619
+ hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
257
+ int ret = 0;
620
+ hv_return_t r;
258
+
621
+
259
+ if (hvf_process_events(cpu)) {
622
+ while (1) {
260
+ return EXCP_HLT;
623
+ bool advance_pc = false;
261
+ }
624
+
262
+
625
+ qemu_wait_io_event_common(cpu);
263
+ do {
626
+ flush_cpu_state(cpu);
264
+ process_queued_cpu_work(cpu);
265
+
266
+ if (cpu->vcpu_dirty) {
267
+ hvf_put_registers(cpu);
268
+ cpu->vcpu_dirty = false;
269
+ }
270
+
627
+
271
+ if (hvf_inject_interrupts(cpu)) {
628
+ if (hvf_inject_interrupts(cpu)) {
272
+ return EXCP_INTERRUPT;
629
+ return EXCP_INTERRUPT;
273
+ }
630
+ }
274
+
631
+
275
+ qemu_mutex_unlock_iothread();
632
+ if (cpu->halted) {
276
+ if (cpu->cpu_index && cpu->halted) {
277
+ qemu_mutex_lock_iothread();
278
+ return EXCP_HLT;
633
+ return EXCP_HLT;
279
+ }
634
+ }
280
+
635
+
281
+ assert_hvf_ok(hv_vcpu_run(cpu->hvf_fd));
636
+ qemu_mutex_unlock_iothread();
637
+ assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));
282
+
638
+
283
+ /* handle VMEXIT */
639
+ /* handle VMEXIT */
284
+ uint64_t exit_reason = hvf_exit->reason;
640
+ uint64_t exit_reason = hvf_exit->reason;
285
+ uint64_t syndrome = hvf_exit->exception.syndrome;
641
+ uint64_t syndrome = hvf_exit->exception.syndrome;
286
+ uint32_t ec = syn_get_ec(syndrome);
642
+ uint32_t ec = syn_get_ec(syndrome);
287
+
643
+
288
+ cpu_synchronize_state(cpu);
289
+
290
+ qemu_mutex_lock_iothread();
644
+ qemu_mutex_lock_iothread();
291
+
292
+ current_cpu = cpu;
293
+
294
+ switch (exit_reason) {
645
+ switch (exit_reason) {
295
+ case HV_EXIT_REASON_EXCEPTION:
646
+ case HV_EXIT_REASON_EXCEPTION:
296
+ /* This is the main one, handle below. */
647
+ /* This is the main one, handle below. */
297
+ break;
648
+ break;
298
+ case HV_EXIT_REASON_VTIMER_ACTIVATED:
649
+ case HV_EXIT_REASON_VTIMER_ACTIVATED:
...
...
303
+ continue;
654
+ continue;
304
+ default:
655
+ default:
305
+ assert(0);
656
+ assert(0);
306
+ }
657
+ }
307
+
658
+
308
+ ret = 0;
309
+ switch (ec) {
659
+ switch (ec) {
310
+ case EC_DATAABORT: {
660
+ case EC_DATAABORT: {
311
+ bool isv = syndrome & ARM_EL_ISV;
661
+ bool isv = syndrome & ARM_EL_ISV;
312
+ bool iswrite = (syndrome >> 6) & 1;
662
+ bool iswrite = (syndrome >> 6) & 1;
313
+ bool s1ptw = (syndrome >> 7) & 1;
663
+ bool s1ptw = (syndrome >> 7) & 1;
...
...
323
+ s1ptw, len, srt);
673
+ s1ptw, len, srt);
324
+
674
+
325
+ assert(isv);
675
+ assert(isv);
326
+
676
+
327
+ if (iswrite) {
677
+ if (iswrite) {
328
+ val = env->xregs[srt];
678
+ val = hvf_get_reg(cpu, srt);
329
+ address_space_write(&address_space_memory,
679
+ address_space_write(&address_space_memory,
330
+ hvf_exit->exception.physical_address,
680
+ hvf_exit->exception.physical_address,
331
+ MEMTXATTRS_UNSPECIFIED, &val, len);
681
+ MEMTXATTRS_UNSPECIFIED, &val, len);
332
+
682
+
333
+ /*
683
+ /*
334
+ * We do not have a callback to see if the timer is out of
684
+ * We do not have a callback to see if the timer is out of
335
+ * state. That means every MMIO write could potentially be
685
+ * pending state. That means every MMIO write could
336
+ * an EOI ends the vtimer. Until we get an actual callback,
686
+ * potentially be an EOI ends the vtimer. Until we get an
337
+ * let's just see if the timer is still pending on every
687
+ * actual callback, let's just see if the timer is still
338
+ * possible toggle point.
688
+ * pending on every possible toggle point.
339
+ */
689
+ */
340
+ qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 0);
690
+ qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 0);
341
+ hv_vcpu_set_vtimer_mask(cpu->hvf_fd, false);
691
+ hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
342
+ } else {
692
+ } else {
343
+ address_space_read(&address_space_memory,
693
+ address_space_read(&address_space_memory,
344
+ hvf_exit->exception.physical_address,
694
+ hvf_exit->exception.physical_address,
345
+ MEMTXATTRS_UNSPECIFIED, &val, len);
695
+ MEMTXATTRS_UNSPECIFIED, &val, len);
346
+ env->xregs[srt] = val;
696
+ hvf_set_reg(cpu, srt, val);
347
+ }
697
+ }
348
+
698
+
349
+ env->pc += 4;
699
+ advance_pc = true;
350
+ break;
700
+ break;
351
+ }
701
+ }
352
+ case EC_SYSTEMREGISTERTRAP: {
702
+ case EC_SYSTEMREGISTERTRAP: {
353
+ bool isread = (syndrome >> 21) & 1;
703
+ bool isread = (syndrome >> 0) & 1;
354
+ uint32_t rt = (syndrome >> 5) & 0x1f;
704
+ uint32_t rt = (syndrome >> 5) & 0x1f;
355
+ uint32_t reg = syndrome & SYSREG_MASK;
705
+ uint32_t reg = syndrome & SYSREG_MASK;
356
+ uint64_t val = 0;
706
+ uint64_t val = 0;
357
+
707
+
708
+ DPRINTF("sysreg %s operation reg=%08x (op0=%d op1=%d op2=%d "
709
+ "crn=%d crm=%d)", (isread) ? "read" : "write",
710
+ reg, (reg >> 20) & 0x3,
711
+ (reg >> 14) & 0x7, (reg >> 17) & 0x7,
712
+ (reg >> 10) & 0xf, (reg >> 1) & 0xf);
713
+
358
+ if (isread) {
714
+ if (isread) {
359
+ switch (reg) {
715
+ hvf_set_reg(cpu, rt, hvf_sysreg_read(cpu, reg));
360
+ case SYSREG_CNTPCT_EL0:
361
+ val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
362
+ gt_cntfrq_period_ns(arm_cpu);
363
+ break;
364
+ case SYSREG_PMCCNTR_EL0:
365
+ val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
366
+ break;
367
+ default:
368
+ DPRINTF("unhandled sysreg read %08x (op0=%d op1=%d op2=%d "
369
+ "crn=%d crm=%d)", reg, (reg >> 20) & 0x3,
370
+ (reg >> 14) & 0x7, (reg >> 17) & 0x7,
371
+ (reg >> 10) & 0xf, (reg >> 1) & 0xf);
372
+ break;
373
+ }
374
+
375
+ env->xregs[rt] = val;
376
+ } else {
716
+ } else {
377
+ val = env->xregs[rt];
717
+ val = hvf_get_reg(cpu, rt);
378
+ switch (reg) {
718
+ hvf_sysreg_write(cpu, reg, val);
379
+ case SYSREG_CNTPCT_EL0:
380
+ break;
381
+ default:
382
+ DPRINTF("unhandled sysreg write %08x", reg);
383
+ break;
384
+ }
385
+ }
719
+ }
386
+
720
+
387
+ env->pc += 4;
721
+ advance_pc = true;
388
+ break;
722
+ break;
389
+ }
723
+ }
390
+ case EC_WFX_TRAP:
724
+ case EC_WFX_TRAP:
391
+ /* No halting yet */
725
+ advance_pc = true;
392
+ break;
726
+ break;
393
+ case EC_AA64_HVC:
727
+ case EC_AA64_HVC:
728
+ cpu_synchronize_state(cpu);
394
+ if (arm_is_psci_call(arm_cpu, EXCP_HVC)) {
729
+ if (arm_is_psci_call(arm_cpu, EXCP_HVC)) {
395
+ arm_handle_psci_call(arm_cpu);
730
+ arm_handle_psci_call(arm_cpu);
396
+ } else {
731
+ } else {
397
+ DPRINTF("unknown HVC! %016llx", env->xregs[0]);
732
+ DPRINTF("unknown HVC! %016llx", env->xregs[0]);
398
+ env->xregs[0] = -1;
733
+ env->xregs[0] = -1;
399
+ }
734
+ }
400
+ break;
735
+ break;
401
+ case EC_AA64_SMC:
736
+ case EC_AA64_SMC:
737
+ cpu_synchronize_state(cpu);
402
+ if (arm_is_psci_call(arm_cpu, EXCP_SMC)) {
738
+ if (arm_is_psci_call(arm_cpu, EXCP_SMC)) {
403
+ arm_handle_psci_call(arm_cpu);
739
+ arm_handle_psci_call(arm_cpu);
404
+ } else {
740
+ } else {
405
+ DPRINTF("unknown SMC! %016llx", env->xregs[0]);
741
+ DPRINTF("unknown SMC! %016llx", env->xregs[0]);
406
+ env->xregs[0] = -1;
742
+ env->xregs[0] = -1;
407
+ env->pc += 4;
408
+ }
743
+ }
744
+ env->pc += 4;
409
+ break;
745
+ break;
410
+ default:
746
+ default:
747
+ cpu_synchronize_state(cpu);
411
+ DPRINTF("exit: %llx [ec=0x%x pc=0x%llx]", syndrome, ec, env->pc);
748
+ DPRINTF("exit: %llx [ec=0x%x pc=0x%llx]", syndrome, ec, env->pc);
412
+ error_report("%llx: unhandled exit %llx", env->pc, exit_reason);
749
+ error_report("%llx: unhandled exit %llx", env->pc, exit_reason);
413
+ }
750
+ }
414
+ } while (ret == 0);
751
+
415
+
752
+ if (advance_pc) {
416
+ return ret;
753
+ uint64_t pc;
754
+
755
+ flush_cpu_state(cpu);
756
+
757
+ r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
758
+ assert_hvf_ok(r);
759
+ pc += 4;
760
+ r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
761
+ assert_hvf_ok(r);
762
+ }
763
+ }
417
+}
764
+}
418
--
765
--
419
2.24.3 (Apple Git-128)
766
2.24.3 (Apple Git-128)
420
767
421
768
1
Now that we have all logic in place that we need to handle Hypervisor.framework
1
Now that we have all logic in place that we need to handle Hypervisor.framework
2
on Apple Silicon systems, let's add CONFIG_HVF for aarch64 as well so that we
2
on Apple Silicon systems, let's add CONFIG_HVF for aarch64 as well so that we
3
can build it.
3
can build it.
4
4
5
Signed-off-by: Alexander Graf <agraf@csgraf.de>
5
Signed-off-by: Alexander Graf <agraf@csgraf.de>
6
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
7
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com> (x86 only)
8
6
---
9
---
7
meson.build | 9 ++++++++-
10
8
target/arm/hvf/meson.build | 3 +++
11
v1 -> v2:
9
target/arm/meson.build | 2 ++
12
10
3 files changed, 13 insertions(+), 1 deletion(-)
13
- Fix build on 32bit arm
14
15
v3 -> v4:
16
17
- Remove i386-softmmu target
18
---
19
meson.build | 11 ++++++++++-
20
target/arm/hvf/meson.build | 3 +++
21
target/arm/meson.build | 2 ++
22
3 files changed, 15 insertions(+), 1 deletion(-)
11
create mode 100644 target/arm/hvf/meson.build
23
create mode 100644 target/arm/hvf/meson.build
12
24
13
diff --git a/meson.build b/meson.build
25
diff --git a/meson.build b/meson.build
14
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
15
--- a/meson.build
27
--- a/meson.build
...
...
18
endif
30
endif
19
31
20
accelerator_targets = { 'CONFIG_KVM': kvm_targets }
32
accelerator_targets = { 'CONFIG_KVM': kvm_targets }
21
+
33
+
22
+if cpu in ['x86', 'x86_64']
34
+if cpu in ['x86', 'x86_64']
23
+ hvf_targets = ['i386-softmmu', 'x86_64-softmmu']
35
+ hvf_targets = ['x86_64-softmmu']
24
+elif cpu in ['aarch64']
36
+elif cpu in ['aarch64']
25
+ hvf_targets = ['aarch64-softmmu']
37
+ hvf_targets = ['aarch64-softmmu']
38
+else
39
+ hvf_targets = []
26
+endif
40
+endif
27
+
41
+
28
if cpu in ['x86', 'x86_64', 'arm', 'aarch64']
42
if cpu in ['x86', 'x86_64', 'arm', 'aarch64']
29
# i368 emulator provides xenpv machine type for multiple architectures
43
# i368 emulator provides xenpv machine type for multiple architectures
30
accelerator_targets += {
44
accelerator_targets += {
...
...
New patch
1
From: Peter Collingbourne <pcc@google.com>
1
2
3
Sleep on WFI until the VTIMER is due but allow ourselves to be woken
4
up on IPI.
5
6
In this implementation IPI is blocked on the CPU thread at startup and
7
pselect() is used to atomically unblock the signal and begin sleeping.
8
The signal is sent unconditionally so there's no need to worry about
9
races between actually sleeping and the "we think we're sleeping"
10
state. It may lead to an extra wakeup but that's better than missing
11
it entirely.
12
13
Signed-off-by: Peter Collingbourne <pcc@google.com>
14
[agraf: Remove unused 'set' variable, always advance PC on WFX trap]
15
Signed-off-by: Alexander Graf <agraf@csgraf.de>
16
Acked-by: Roman Bolshakov <r.bolshakov@yadro.com>
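
The pselect() trick described above is the whole point of keeping SIG_IPI
blocked on the vCPU thread. A self-contained sketch of the idiom (SIGUSR1
and the demo_* names are stand-ins, not QEMU's actual definitions):

#include <pthread.h>
#include <signal.h>
#include <stddef.h>
#include <string.h>
#include <sys/select.h>
#include <time.h>

#define DEMO_SIG_IPI SIGUSR1           /* stand-in for QEMU's SIG_IPI */

static sigset_t unblock_ipi_mask;

static void demo_ipi_handler(int sig)
{
    (void)sig;                         /* only exists to interrupt pselect() */
}

static void demo_vcpu_thread_setup(void)
{
    struct sigaction sigact;
    sigset_t block;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = demo_ipi_handler;
    sigaction(DEMO_SIG_IPI, &sigact, NULL);

    /* keep the IPI signal blocked while the vCPU is running ... */
    sigemptyset(&block);
    sigaddset(&block, DEMO_SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &block, NULL);

    /* ... and precompute the mask with it unblocked, used while sleeping */
    pthread_sigmask(SIG_BLOCK, NULL, &unblock_ipi_mask);
    sigdelset(&unblock_ipi_mask, DEMO_SIG_IPI);
}

static void demo_wait_for_ipi(const struct timespec *ts)
{
    /*
     * pselect() swaps in unblock_ipi_mask and sleeps in one atomic step,
     * so a kick sent just before this call still wakes us immediately.
     * ts == NULL sleeps until a signal arrives.
     */
    pselect(0, NULL, NULL, NULL, ts, &unblock_ipi_mask);
}

int main(void)
{
    struct timespec ts = { .tv_sec = 0, .tv_nsec = 2000000 };  /* 2ms floor */

    demo_vcpu_thread_setup();
    demo_wait_for_ipi(&ts);            /* returns on timeout or on an IPI */
    return 0;
}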
17
---
18
accel/hvf/hvf-cpus.c | 5 ++--
19
include/sysemu/hvf_int.h | 1 +
20
target/arm/hvf/hvf.c | 56 ++++++++++++++++++++++++++++++++++++++++
21
3 files changed, 59 insertions(+), 3 deletions(-)
22
23
diff --git a/accel/hvf/hvf-cpus.c b/accel/hvf/hvf-cpus.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/accel/hvf/hvf-cpus.c
26
+++ b/accel/hvf/hvf-cpus.c
27
@@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu)
28
cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
29
30
/* init cpu signals */
31
- sigset_t set;
32
struct sigaction sigact;
33
34
memset(&sigact, 0, sizeof(sigact));
35
sigact.sa_handler = dummy_signal;
36
sigaction(SIG_IPI, &sigact, NULL);
37
38
- pthread_sigmask(SIG_BLOCK, NULL, &set);
39
- sigdelset(&set, SIG_IPI);
40
+ pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
41
+ sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
42
43
#ifdef __aarch64__
44
r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
45
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
46
index XXXXXXX..XXXXXXX 100644
47
--- a/include/sysemu/hvf_int.h
48
+++ b/include/sysemu/hvf_int.h
49
@@ -XXX,XX +XXX,XX @@ extern HVFState *hvf_state;
50
struct hvf_vcpu_state {
51
uint64_t fd;
52
void *exit;
53
+ sigset_t unblock_ipi_mask;
54
};
55
56
void assert_hvf_ok(hv_return_t ret);
57
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/hvf/hvf.c
60
+++ b/target/arm/hvf/hvf.c
61
@@ -XXX,XX +XXX,XX @@
62
* QEMU Hypervisor.framework support for Apple Silicon
63
64
* Copyright 2020 Alexander Graf <agraf@csgraf.de>
65
+ * Copyright 2020 Google LLC
66
*
67
* This work is licensed under the terms of the GNU GPL, version 2 or later.
68
* See the COPYING file in the top-level directory.
69
@@ -XXX,XX +XXX,XX @@
70
#include "sysemu/hvf_int.h"
71
#include "sysemu/hw_accel.h"
72
73
+#include <mach/mach_time.h>
74
+
75
#include "exec/address-spaces.h"
76
#include "hw/irq.h"
77
#include "qemu/main-loop.h"
78
@@ -XXX,XX +XXX,XX @@ int hvf_arch_init_vcpu(CPUState *cpu)
79
80
void hvf_kick_vcpu_thread(CPUState *cpu)
81
{
82
+ cpus_kick_thread(cpu);
83
hv_vcpus_exit(&cpu->hvf->fd, 1);
84
}
85
86
@@ -XXX,XX +XXX,XX @@ static int hvf_inject_interrupts(CPUState *cpu)
87
return 0;
88
}
89
90
+static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
91
+{
92
+ /*
93
+ * Use pselect to sleep so that other threads can IPI us while we're
94
+ * sleeping.
95
+ */
96
+ qatomic_mb_set(&cpu->thread_kicked, false);
97
+ qemu_mutex_unlock_iothread();
98
+ pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
99
+ qemu_mutex_lock_iothread();
100
+}
101
+
102
int hvf_vcpu_exec(CPUState *cpu)
103
{
104
ARMCPU *arm_cpu = ARM_CPU(cpu);
105
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
106
}
107
case EC_WFX_TRAP:
108
advance_pc = true;
109
+ if (!(syndrome & WFX_IS_WFE) && !(cpu->interrupt_request &
110
+ (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ))) {
111
+
112
+ uint64_t ctl;
113
+ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0,
114
+ &ctl);
115
+ assert_hvf_ok(r);
116
+
117
+ if (!(ctl & 1) || (ctl & 2)) {
118
+ /* Timer disabled or masked, just wait for an IPI. */
119
+ hvf_wait_for_ipi(cpu, NULL);
120
+ break;
121
+ }
122
+
123
+ uint64_t cval;
124
+ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0,
125
+ &cval);
126
+ assert_hvf_ok(r);
127
+
128
+ int64_t ticks_to_sleep = cval - mach_absolute_time();
129
+ if (ticks_to_sleep < 0) {
130
+ break;
131
+ }
132
+
133
+ uint64_t seconds = ticks_to_sleep / arm_cpu->gt_cntfrq_hz;
134
+ uint64_t nanos =
135
+ (ticks_to_sleep - arm_cpu->gt_cntfrq_hz * seconds) *
136
+ 1000000000 / arm_cpu->gt_cntfrq_hz;
137
+
138
+ /*
139
+ * Don't sleep for less than 2ms. This is believed to improve
140
+ * latency of message passing workloads.
141
+ */
142
+ if (!seconds && nanos < 2000000) {
143
+ break;
144
+ }
145
+
146
+ struct timespec ts = { seconds, nanos };
147
+ hvf_wait_for_ipi(cpu, &ts);
148
+ }
149
break;
150
case EC_AA64_HVC:
151
cpu_synchronize_state(cpu);
152
--
153
2.24.3 (Apple Git-128)
154
155
1
The Apple M1 only supports up to 36 bits of physical address space. That
1
We currently only support GICv2 emulation. To also support GICv3, we will
2
means we can not fit the 64bit MMIO BAR region into our address space.
2
need to pass a few system registers into their respective handler functions.
3
3
4
To fix this, let's not expose a 64bit MMIO BAR region when running on
4
This patch adds handling for all of the required system registers, so that
5
Apple Silicon.
5
we can run with more than 8 vCPUs.
6
7
I have not been able to find a way to enumerate that easily, so let's
8
just assume we always have that little PA space on hypervisor.framework
9
systems.
10
6
11
Signed-off-by: Alexander Graf <agraf@csgraf.de>
7
Signed-off-by: Alexander Graf <agraf@csgraf.de>
8
Acked-by: Roman Bolshakov <r.bolshakov@yadro.com>
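
All of these ICC_* traps arrive with the op0/op1/op2/CRn/CRm tuple packed
into the value built by the SYSREG() macro, and hvf_reg2cp_reg() below
unpacks that tuple again to form QEMU's cp-reg key, so the access lands on
whatever readfn/writefn the GICv3 emulation registered. A self-contained
worked example of that packing and unpacking (same shifts as in the patch;
the ICC_PMR_EL1 encoding is taken from the defines below):

#include <stdint.h>
#include <stdio.h>

/* Same field packing as the SYSREG() macro used by the hvf code. */
#define DEMO_SYSREG(op0, op1, op2, crn, crm) \
    (((op0) << 20) | ((op2) << 17) | ((op1) << 14) | ((crn) << 10) | ((crm) << 1))

int main(void)
{
    /* ICC_PMR_EL1 is op0=3, op1=0, op2=0, CRn=4, CRm=6 in this encoding */
    uint32_t reg = DEMO_SYSREG(3, 0, 0, 4, 6);

    /* the same shifts hvf_reg2cp_reg() uses to recover the tuple */
    printf("op0=%u op1=%u op2=%u crn=%u crm=%u\n",
           (unsigned)((reg >> 20) & 0x3),
           (unsigned)((reg >> 14) & 0x7),
           (unsigned)((reg >> 17) & 0x7),
           (unsigned)((reg >> 10) & 0xf),
           (unsigned)((reg >> 1) & 0xf));
    return 0;
}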
12
---
9
---
13
hw/arm/virt.c | 9 +++++++++
10
target/arm/hvf/hvf.c | 141 +++++++++++++++++++++++++++++++++++++++++++
14
1 file changed, 9 insertions(+)
11
1 file changed, 141 insertions(+)
15
12
16
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
13
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@

#include "exec/address-spaces.h"
#include "hw/irq.h"
+#include "hw/intc/gicv3_internal.h"
#include "qemu/main-loop.h"
#include "sysemu/accel.h"
#include "sysemu/cpus.h"
@@ -XXX,XX +XXX,XX @@
#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 1, 14, 0)
#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 0, 9, 13)

+#define SYSREG_ICC_AP0R0_EL1 SYSREG(3, 0, 4, 12, 8)
+#define SYSREG_ICC_AP0R1_EL1 SYSREG(3, 0, 5, 12, 8)
+#define SYSREG_ICC_AP0R2_EL1 SYSREG(3, 0, 6, 12, 8)
+#define SYSREG_ICC_AP0R3_EL1 SYSREG(3, 0, 7, 12, 8)
+#define SYSREG_ICC_AP1R0_EL1 SYSREG(3, 0, 0, 12, 9)
+#define SYSREG_ICC_AP1R1_EL1 SYSREG(3, 0, 1, 12, 9)
+#define SYSREG_ICC_AP1R2_EL1 SYSREG(3, 0, 2, 12, 9)
+#define SYSREG_ICC_AP1R3_EL1 SYSREG(3, 0, 3, 12, 9)
+#define SYSREG_ICC_ASGI1R_EL1 SYSREG(3, 0, 6, 12, 11)
+#define SYSREG_ICC_BPR0_EL1 SYSREG(3, 0, 3, 12, 8)
+#define SYSREG_ICC_BPR1_EL1 SYSREG(3, 0, 3, 12, 12)
+#define SYSREG_ICC_CTLR_EL1 SYSREG(3, 0, 4, 12, 12)
+#define SYSREG_ICC_DIR_EL1 SYSREG(3, 0, 1, 12, 11)
+#define SYSREG_ICC_EOIR0_EL1 SYSREG(3, 0, 1, 12, 8)
+#define SYSREG_ICC_EOIR1_EL1 SYSREG(3, 0, 1, 12, 12)
+#define SYSREG_ICC_HPPIR0_EL1 SYSREG(3, 0, 2, 12, 8)
+#define SYSREG_ICC_HPPIR1_EL1 SYSREG(3, 0, 2, 12, 12)
+#define SYSREG_ICC_IAR0_EL1 SYSREG(3, 0, 0, 12, 8)
+#define SYSREG_ICC_IAR1_EL1 SYSREG(3, 0, 0, 12, 12)
+#define SYSREG_ICC_IGRPEN0_EL1 SYSREG(3, 0, 6, 12, 12)
+#define SYSREG_ICC_IGRPEN1_EL1 SYSREG(3, 0, 7, 12, 12)
+#define SYSREG_ICC_PMR_EL1 SYSREG(3, 0, 0, 4, 6)
+#define SYSREG_ICC_RPR_EL1 SYSREG(3, 0, 3, 12, 11)
+#define SYSREG_ICC_SGI0R_EL1 SYSREG(3, 0, 7, 12, 11)
+#define SYSREG_ICC_SGI1R_EL1 SYSREG(3, 0, 5, 12, 11)
+#define SYSREG_ICC_SRE_EL1 SYSREG(3, 0, 5, 12, 12)
+
#define WFX_IS_WFE (1 << 0)

struct hvf_reg_match {
@@ -XXX,XX +XXX,XX @@ void hvf_kick_vcpu_thread(CPUState *cpu)
    hv_vcpus_exit(&cpu->hvf->fd, 1);
}

+static uint32_t hvf_reg2cp_reg(uint32_t reg)
+{
+    return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
+                              (reg >> 10) & 0xf,
+                              (reg >> 1) & 0xf,
+                              (reg >> 20) & 0x3,
+                              (reg >> 14) & 0x7,
+                              (reg >> 17) & 0x7);
+}
+
+static uint64_t hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg)
+{
+    ARMCPU *arm_cpu = ARM_CPU(cpu);
+    CPUARMState *env = &arm_cpu->env;
+    const ARMCPRegInfo *ri;
+    uint64_t val = 0;
+
+    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
+    if (ri) {
+        if (ri->type & ARM_CP_CONST) {
+            val = ri->resetvalue;
+        } else if (ri->readfn) {
+            val = ri->readfn(env, ri);
+        } else {
+            val = CPREG_FIELD64(env, ri);
+        }
+        DPRINTF("vgic read from %s [val=%016llx]", ri->name, val);
+    }
+
+    return val;
+}
+
static uint64_t hvf_sysreg_read(CPUState *cpu, uint32_t reg)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
@@ -XXX,XX +XXX,XX @@ static uint64_t hvf_sysreg_read(CPUState *cpu, uint32_t reg)
    case SYSREG_PMCCNTR_EL0:
        val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        break;
+    case SYSREG_ICC_AP0R0_EL1:
+    case SYSREG_ICC_AP0R1_EL1:
+    case SYSREG_ICC_AP0R2_EL1:
+    case SYSREG_ICC_AP0R3_EL1:
+    case SYSREG_ICC_AP1R0_EL1:
+    case SYSREG_ICC_AP1R1_EL1:
+    case SYSREG_ICC_AP1R2_EL1:
+    case SYSREG_ICC_AP1R3_EL1:
+    case SYSREG_ICC_ASGI1R_EL1:
+    case SYSREG_ICC_BPR0_EL1:
+    case SYSREG_ICC_BPR1_EL1:
+    case SYSREG_ICC_DIR_EL1:
+    case SYSREG_ICC_EOIR0_EL1:
+    case SYSREG_ICC_EOIR1_EL1:
+    case SYSREG_ICC_HPPIR0_EL1:
+    case SYSREG_ICC_HPPIR1_EL1:
+    case SYSREG_ICC_IAR0_EL1:
+    case SYSREG_ICC_IAR1_EL1:
+    case SYSREG_ICC_IGRPEN0_EL1:
+    case SYSREG_ICC_IGRPEN1_EL1:
+    case SYSREG_ICC_PMR_EL1:
+    case SYSREG_ICC_SGI0R_EL1:
+    case SYSREG_ICC_SGI1R_EL1:
+    case SYSREG_ICC_SRE_EL1:
+        val = hvf_sysreg_read_cp(cpu, reg);
+        break;
+    case SYSREG_ICC_CTLR_EL1:
+        val = hvf_sysreg_read_cp(cpu, reg);
+
+        /* AP0R registers above 0 don't trap, expose less PRIs to fit */
+        val &= ~ICC_CTLR_EL1_PRIBITS_MASK;
+        val |= 4 << ICC_CTLR_EL1_PRIBITS_SHIFT;
+        break;
    default:
        DPRINTF("unhandled sysreg read %08x (op0=%d op1=%d op2=%d "
                "crn=%d crm=%d)", reg, (reg >> 20) & 0x3,
@@ -XXX,XX +XXX,XX @@ static uint64_t hvf_sysreg_read(CPUState *cpu, uint32_t reg)
    return val;
}

+static void hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
+{
+    ARMCPU *arm_cpu = ARM_CPU(cpu);
+    CPUARMState *env = &arm_cpu->env;
+    const ARMCPRegInfo *ri;
+
+    ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
+
+    if (ri) {
+        if (ri->writefn) {
+            ri->writefn(env, ri, val);
+        } else {
+            CPREG_FIELD64(env, ri) = val;
+        }
+        DPRINTF("vgic write to %s [val=%016llx]", ri->name, val);
+    }
+}
+
static void hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
@@ -XXX,XX +XXX,XX @@ static void hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
    switch (reg) {
    case SYSREG_CNTPCT_EL0:
        break;
+    case SYSREG_ICC_AP0R0_EL1:
+    case SYSREG_ICC_AP0R1_EL1:
+    case SYSREG_ICC_AP0R2_EL1:
+    case SYSREG_ICC_AP0R3_EL1:
+    case SYSREG_ICC_AP1R0_EL1:
+    case SYSREG_ICC_AP1R1_EL1:
+    case SYSREG_ICC_AP1R2_EL1:
+    case SYSREG_ICC_AP1R3_EL1:
+    case SYSREG_ICC_ASGI1R_EL1:
+    case SYSREG_ICC_BPR0_EL1:
+    case SYSREG_ICC_BPR1_EL1:
+    case SYSREG_ICC_CTLR_EL1:
+    case SYSREG_ICC_DIR_EL1:
+    case SYSREG_ICC_HPPIR0_EL1:
+    case SYSREG_ICC_HPPIR1_EL1:
+    case SYSREG_ICC_IAR0_EL1:
+    case SYSREG_ICC_IAR1_EL1:
+    case SYSREG_ICC_IGRPEN0_EL1:
+    case SYSREG_ICC_IGRPEN1_EL1:
+    case SYSREG_ICC_PMR_EL1:
+    case SYSREG_ICC_SGI0R_EL1:
+    case SYSREG_ICC_SGI1R_EL1:
+    case SYSREG_ICC_SRE_EL1:
+        hvf_sysreg_write_cp(cpu, reg, val);
+        break;
+    case SYSREG_ICC_EOIR0_EL1:
+    case SYSREG_ICC_EOIR1_EL1:
+        hvf_sysreg_write_cp(cpu, reg, val);
+        qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 0);
+        hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
    default:
        DPRINTF("unhandled sysreg write %08x", reg);
        break;
--
2.24.3 (Apple Git-128)

Now that we have working system register sync, we push more target CPU
properties into the virtual machine. That might be useful in some
situations, but is not the typical case that users want.

So let's add a -cpu host option that allows them to explicitly pass all
CPU capabilities of their host CPU into the guest.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Acked-by: Roman Bolshakov <r.bolshakov@yadro.com>
---
 include/sysemu/hvf.h |  2 ++
 target/arm/cpu.c     |  9 ++++++---
 target/arm/cpu.h     |  2 ++
 target/arm/hvf/hvf.c | 41 +++++++++++++++++++++++++++++++++++++++++
 target/arm/kvm_arm.h |  2 --
 5 files changed, 51 insertions(+), 5 deletions(-)

diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/hvf.h
+++ b/include/sysemu/hvf.h
@@ -XXX,XX +XXX,XX @@
#ifdef CONFIG_HVF
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
                                 int reg);
+struct ARMCPU;
+void hvf_arm_set_cpu_features_from_host(struct ARMCPU *cpu);
extern bool hvf_allowed;
#define hvf_enabled() (hvf_allowed)
#else /* !CONFIG_HVF */
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
#endif
}

-#ifdef CONFIG_KVM
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
static void arm_host_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

+#ifdef CONFIG_KVM
    kvm_arm_set_cpu_features_from_host(cpu);
+#else
+    hvf_arm_set_cpu_features_from_host(cpu);
+#endif
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        aarch64_add_sve_properties(obj);
    }
@@ -XXX,XX +XXX,XX @@ static const TypeInfo host_arm_cpu_type_info = {
    .parent = TYPE_AARCH64_CPU,
    .instance_init = arm_host_initfn,
};
-
#endif

static void arm_cpu_instance_init(Object *obj)
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_register_types(void)

    type_register_static(&arm_cpu_type_info);

-#ifdef CONFIG_KVM
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_arm_cpu_type_info);
#endif

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_ARM_CPU

+#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
+
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list

diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
    return val;
}

+void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
+{
+    ARMISARegisters host_isar;
+    const struct isar_regs {
+        int reg;
+        uint64_t *val;
+    } regs[] = {
+        { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
+        { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
+        { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
+        { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
+        { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
+        { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
+        { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
+        { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
+        { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
+    };
+    hv_vcpu_t fd;
+    hv_vcpu_exit_t *exit;
+    int i;
+
+    cpu->dtb_compatible = "arm,arm-v8";
+    cpu->env.features = (1ULL << ARM_FEATURE_V8) |
+                        (1ULL << ARM_FEATURE_NEON) |
+                        (1ULL << ARM_FEATURE_AARCH64) |
+                        (1ULL << ARM_FEATURE_PMU) |
+                        (1ULL << ARM_FEATURE_GENERIC_TIMER);
+
+    /* We set up a small vcpu to extract host registers */
+
+    assert_hvf_ok(hv_vcpu_create(&fd, &exit, NULL));
+    for (i = 0; i < ARRAY_SIZE(regs); i++) {
+        assert_hvf_ok(hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val));
+    }
+    assert_hvf_ok(hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &cpu->midr));
+    assert_hvf_ok(hv_vcpu_destroy(fd));
+
+    cpu->isar = host_isar;
+    cpu->reset_sctlr = 0x00c50078;
+}
+
void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
 */
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray);

-#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
-
/**
 * ARMHostCPUFeatures: information about the host CPU (identified
 * by asking the host kernel)
--
2.24.3 (Apple Git-128)
