<meta>
This patch series has been through months of review and
refinement. It now has end-to-end Reviewed-by: tags, and
all code patches but one have Tested-by: tags. No significant
issues have been found via review for some weeks.

The patch set creates two new subsystems:
hw/display/apple-gfx
hw/vmapple
so it doesn't fall within the responsibility of existing
maintainers. How do we proceed to get this merged now that
10.0 development is open?
</meta>


This patch set introduces a new ARM and macOS HVF specific machine type
called "vmapple", as well as a family of display devices based on the
ParavirtualizedGraphics.framework in macOS. One of the display adapter
variants, apple-gfx-mmio, is required for the new machine type, while
apple-gfx-pci can be used to enable 3D graphics acceleration with x86-64
...
* 02/15 (apple-gfx): Fixed memory management regressions introduced in v10;
  improved error handling; various further cosmetic code adjustments.
* 09/15 (GPEX): Fixed uses of the deleted GPEX_NUM_IRQS constant that have
  been added to QEMU since this patch was originally written.

v12 -> v13:

* 15/15 (vmapple machine type): Bumped the machine type version from 9.2
  to 10.0.
* All patches in the series have now been positively reviewed and received
  corresponding Reviewed-by tags.

v13 -> v14:

* 06/15 (hw/vmapple directory): Changed myself from reviewer
  to maintainer, as that seemed appropriate at this point.
* 15/15 (vmapple machine type): Gate creation of XHCI and
  USB HID devices behind if (defaults_enabled()); a generic
  sketch of that pattern follows below.
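
For illustration, gating optional onboard devices on defaults_enabled()
typically looks like the following generic sketch (not the actual vmapple
machine code; the helper name is made up):

#include "qemu/osdep.h"
#include "sysemu/sysemu.h"   /* defaults_enabled() */

static void example_create_default_usb_devices(void)
{
    if (!defaults_enabled()) {
        /* -nodefaults was given; the user wires up USB devices explicitly. */
        return;
    }

    /* ... create the XHCI controller and USB HID devices here ... */
}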

Alexander Graf (9):
  hw: Add vmapple subdir
  hw/misc/pvpanic: Add MMIO interface
  hvf: arm: Ignore writes to CNTP_CTL_EL0
...
macOS's Cocoa event handling must be done on the initial (main) thread
of the process. Furthermore, if library or application code uses
libdispatch, the main dispatch queue must be handling events on the main
thread as well.

So far, this has affected QEMU in both the Cocoa and SDL UIs, although
in different ways: the Cocoa UI replaces the default qemu_main function
with one that spins QEMU's internal main event loop off onto a
background thread. SDL (which uses Cocoa internally), on the other hand,
uses a polling approach within QEMU's main event loop. Events are
polled during the SDL UI's dpy_refresh callback, which happens to run
on the main thread by default.

As UIs are mutually exclusive, this works OK as long as nothing else
needs platform-native event handling. In the next patch, a new device is
introduced based on the ParavirtualizedGraphics.framework in macOS.
This uses libdispatch internally, and only works when events are being
handled on the main runloop. With the current system, it works when
using either the Cocoa or the SDL UI. However, it does not when running
headless. Moreover, any attempt to install a scheme similar to the
Cocoa UI's main thread replacement fails when combined with the SDL
UI.

This change tidies up main thread management to be more flexible:

* The qemu_main global function pointer is a custom function for the
  main thread, and it may now be NULL. When it is, the main thread
  runs the main QEMU loop. This represents the traditional setup.
* When non-NULL, spawning the main QEMU event loop on a separate
  thread is now done centrally rather than inside the Cocoa UI code.
* For most platforms, qemu_main is indeed NULL by default, but on
  Darwin, it defaults to a function that runs the CFRunLoop.
* The Cocoa UI sets qemu_main to a function which runs the
  NSApplication event handling runloop, as is usual for a Cocoa app.
* The SDL UI overrides the qemu_main function to NULL, thus
  specifying that QEMU's main loop must run on the main thread.
* The GTK UI also overrides the qemu_main function to NULL.
* For other UIs, or in the absence of UIs, the platform's default
  behaviour is followed.

This means that on macOS, the platform's runloop events are always
handled, regardless of the chosen UI. The new PV graphics device will
thus work in all configurations. There is no functional change on other
operating systems.
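
In outline, the resulting dispatch in main() looks like this (a condensed
restatement of the system/main.c hunk in the diff below, with headers and
the surrounding definitions omitted):

int main(int argc, char **argv)
{
    qemu_init(argc, argv);
    bql_unlock();
    if (qemu_main) {
        /*
         * A platform- or UI-specific main-thread function is installed
         * (e.g. the Darwin CFRunLoop default or the Cocoa UI's
         * [NSApp run] wrapper): run QEMU's main loop on a separate,
         * detached thread and hand the initial thread to that function.
         */
        QemuThread main_loop_thread;
        qemu_thread_create(&main_loop_thread, "qemu_main",
                           qemu_default_main, NULL, QEMU_THREAD_DETACHED);
        return qemu_main();
    } else {
        /*
         * Traditional setup: run QEMU's main loop on the initial thread;
         * qemu_default_main() exits the process when the loop finishes.
         */
        qemu_default_main(NULL);
    }
}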

Implementing this via a global function pointer variable is a bit ugly,
but before attempting a larger overhaul it's probably worth investigating
the existing UI thread rule violations in the SDL (e.g. #2537) and GTK+
back-ends. Fixing those issues might precipitate requirements similar but
not identical to those of the Cocoa UI; hopefully we'll see some kind of
pattern emerge, which can then be used as a basis for an overhaul. (In
fact, it may turn out to be simplest to split the UI/native platform
event thread from the QEMU main event loop on all platforms, with any UI
or even none at all.)

Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com>
---

v5:

* Simplified the way of setting/clearing the main loop by going back
  to setting qemu_main directly, but narrowing the scope of what it
  needs to do; it can now be NULL.

v6:

* Folded function qemu_run_default_main_on_new_thread's code into
  main().
* Removed whitespace changes left over on lines near code removed
  between v4 and v5.

v9:

* Set qemu_main to NULL for the GTK UI as well.

v10:

* Added comments clarifying the functionality and purpose of qemu_main.

v11:

* Removed the qemu_main_fn typedef again.
* Consolidated main, qemu_default_main, and call_qemu_default_main so
  that the last of these has been eliminated altogether.
* Reinstated the #include <SDL.h> directive, added a comment saying
  why it's needed.
* Improved the comment on the qemu_main global variable.
* Expanded the commit message.

v12:

* More precise wording of code comments.

 include/qemu-main.h | 14 +++++++++++-
 system/main.c       | 37 +++++++++++++++++++++++++++----
 ui/cocoa.m          | 54 +++++++++++----------------------------------
 ui/gtk.c            |  4 ++++
 ui/sdl2.c           |  4 ++++
 5 files changed, 67 insertions(+), 46 deletions(-)

diff --git a/include/qemu-main.h b/include/qemu-main.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu-main.h
+++ b/include/qemu-main.h
@@ -XXX,XX +XXX,XX @@
 #ifndef QEMU_MAIN_H
 #define QEMU_MAIN_H
 
-int qemu_default_main(void);
+/*
+ * The function to run on the main (initial) thread of the process.
+ * NULL means QEMU's main event loop.
+ * When non-NULL, QEMU's main event loop will run on a purposely created
+ * thread, after which the provided function pointer will be invoked on
+ * the initial thread.
+ * This is useful on platforms which treat the main thread as special
+ * (macOS/Darwin) and/or require all UI API calls to occur from the main
+ * thread. Those platforms can initialise it to a specific function,
+ * while UI implementations may reset it to NULL during their init if they
+ * will handle system and UI events on the main thread via QEMU's own main
+ * event loop.
+ */
 extern int (*qemu_main)(void);
 
 #endif /* QEMU_MAIN_H */
diff --git a/system/main.c b/system/main.c
index XXXXXXX..XXXXXXX 100644
--- a/system/main.c
+++ b/system/main.c
@@ -XXX,XX +XXX,XX @@
 
 #include "qemu/osdep.h"
 #include "qemu-main.h"
+#include "qemu/main-loop.h"
 #include "sysemu/sysemu.h"
 
 #ifdef CONFIG_SDL
+/*
+ * SDL insists on wrapping the main() function with its own implementation on
+ * some platforms; it does so via a macro that renames our main function, so
+ * <SDL.h> must be #included here even with no SDL code called from this file.
+ */
 #include <SDL.h>
 #endif
 
-int qemu_default_main(void)
+#ifdef CONFIG_DARWIN
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+static void *qemu_default_main(void *opaque)
 {
     int status;
 
+    bql_lock();
     status = qemu_main_loop();
     qemu_cleanup(status);
+    bql_unlock();
 
-    return status;
+    exit(status);
 }
 
-int (*qemu_main)(void) = qemu_default_main;
+int (*qemu_main)(void);
+
+#ifdef CONFIG_DARWIN
+static int os_darwin_cfrunloop_main(void)
+{
+    CFRunLoopRun();
+    g_assert_not_reached();
+}
+int (*qemu_main)(void) = os_darwin_cfrunloop_main;
+#endif
 
 int main(int argc, char **argv)
 {
     qemu_init(argc, argv);
-    return qemu_main();
+    bql_unlock();
+    if (qemu_main) {
+        QemuThread main_loop_thread;
+        qemu_thread_create(&main_loop_thread, "qemu_main",
+                           qemu_default_main, NULL, QEMU_THREAD_DETACHED);
+        return qemu_main();
+    } else {
+        qemu_default_main(NULL);
+    }
 }
diff --git a/ui/cocoa.m b/ui/cocoa.m
index XXXXXXX..XXXXXXX 100644
--- a/ui/cocoa.m
+++ b/ui/cocoa.m
@@ -XXX,XX +XXX,XX @@
     int height;
 } QEMUScreen;
 
+@class QemuCocoaPasteboardTypeOwner;
+
 static void cocoa_update(DisplayChangeListener *dcl,
                          int x, int y, int w, int h);
 
@@ -XXX,XX +XXX,XX @@ static void cocoa_switch(DisplayChangeListener *dcl,
 static NSInteger cbchangecount = -1;
 static QemuClipboardInfo *cbinfo;
 static QemuEvent cbevent;
+static QemuCocoaPasteboardTypeOwner *cbowner;
 
 // Utility functions to run specified code block with the BQL held
 typedef void (^CodeBlock)(void);
@@ -XXX,XX +XXX,XX @@ - (void) dealloc
 {
     COCOA_DEBUG("QemuCocoaAppController: dealloc\n");
 
-    if (cocoaView)
-        [cocoaView release];
+    [cocoaView release];
+    [cbowner release];
+    cbowner = nil;
+
     [super dealloc];
 }
 
@@ -XXX,XX +XXX,XX @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t
 
 @end
 
-static QemuCocoaPasteboardTypeOwner *cbowner;
-
 static void cocoa_clipboard_notify(Notifier *notifier, void *data);
 static void cocoa_clipboard_request(QemuClipboardInfo *info,
                                     QemuClipboardType type);
@@ -XXX,XX +XXX,XX @@ static void cocoa_clipboard_request(QemuClipboardInfo *info,
     }
 }
 
-/*
- * The startup process for the OSX/Cocoa UI is complicated, because
- * OSX insists that the UI runs on the initial main thread, and so we
- * need to start a second thread which runs the qemu_default_main():
- * in main():
- *  in cocoa_display_init():
- *   assign cocoa_main to qemu_main
- *   create application, menus, etc
- *  in cocoa_main():
- *   create qemu-main thread
- *   enter OSX run loop
- */
-
-static void *call_qemu_main(void *opaque)
-{
-    int status;
-
-    COCOA_DEBUG("Second thread: calling qemu_default_main()\n");
-    bql_lock();
-    status = qemu_default_main();
-    bql_unlock();
-    COCOA_DEBUG("Second thread: qemu_default_main() returned, exiting\n");
-    [cbowner release];
-    exit(status);
-}
-
 static int cocoa_main(void)
 {
-    QemuThread thread;
-
-    COCOA_DEBUG("Entered %s()\n", __func__);
-
-    bql_unlock();
-    qemu_thread_create(&thread, "qemu_main", call_qemu_main,
-                       NULL, QEMU_THREAD_DETACHED);
-
-    // Start the main event loop
     COCOA_DEBUG("Main thread: entering OSX run loop\n");
     [NSApp run];
     COCOA_DEBUG("Main thread: left OSX run loop, which should never happen\n");
@@ -XXX,XX +XXX,XX @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts)
 
     COCOA_DEBUG("qemu_cocoa: cocoa_display_init\n");
 
-    qemu_main = cocoa_main;
-
     // Pull this console process up to being a fully-fledged graphical
     // app with a menubar and Dock icon
     ProcessSerialNumber psn = { 0, kCurrentProcess };
@@ -XXX,XX +XXX,XX @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts)
     qemu_clipboard_peer_register(&cbpeer);
 
     [pool release];
+
+    /*
+     * The Cocoa UI will run the NSApplication runloop on the main thread
+     * rather than the default Core Foundation one.
+     */
+    qemu_main = cocoa_main;
 }
 
 static QemuDisplay qemu_display_cocoa = {
diff --git a/ui/gtk.c b/ui/gtk.c
index XXXXXXX..XXXXXXX 100644
--- a/ui/gtk.c
+++ b/ui/gtk.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/cutils.h"
 #include "qemu/error-report.h"
 #include "qemu/main-loop.h"
+#include "qemu-main.h"
 
 #include "ui/console.h"
 #include "ui/gtk.h"
@@ -XXX,XX +XXX,XX @@ static void gtk_display_init(DisplayState *ds, DisplayOptions *opts)
 #ifdef CONFIG_GTK_CLIPBOARD
     gd_clipboard_init(s);
 #endif /* CONFIG_GTK_CLIPBOARD */
+
+    /* GTK's event polling must happen on the main thread. */
+    qemu_main = NULL;
 }
 
 static void early_gtk_display_init(DisplayOptions *opts)
diff --git a/ui/sdl2.c b/ui/sdl2.c
index XXXXXXX..XXXXXXX 100644
--- a/ui/sdl2.c
+++ b/ui/sdl2.c
@@ -XXX,XX +XXX,XX @@
 #include "sysemu/sysemu.h"
 #include "ui/win32-kbd-hook.h"
 #include "qemu/log.h"
+#include "qemu-main.h"
 
 static int sdl2_num_outputs;
 static struct sdl2_console *sdl2_console;
@@ -XXX,XX +XXX,XX @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o)
     }
 
     atexit(sdl_cleanup);
+
+    /* SDL's event polling (in dpy_refresh) must happen on the main thread. */
+    qemu_main = NULL;
 }
 
 static QemuDisplay qemu_display_sdl2 = {
--
2.39.5 (Apple Git-154)
macOS provides a framework (library), ParavirtualizedGraphics.Framework
(PVG), that allows any VMM to implement paravirtualized 3D graphics
passthrough to the host Metal stack. The library abstracts away almost
every aspect of the paravirtualized device model, and only provides and
receives callbacks for MMIO access and for sharing memory address space
between the VM and PVG.

This patch implements a QEMU device that drives the VMApple variant of
PVG.

Signed-off-by: Alexander Graf <graf@amazon.com>
Co-authored-by: Alexander Graf <graf@amazon.com>

Subsequent changes:

* Cherry-pick/rebase conflict fixes, API use updates.
* Moved from hw/vmapple/ (useful outside that machine type)
* Overhaul of threading model, many thread safety improvements.
* Asynchronous rendering.
* Memory and object lifetime fixes.
* Refactoring to split generic and (vmapple) MMIO variant specific
  code.

Implementation-wise, most of the complexity lies in the differing threading
models of ParavirtualizedGraphics.framework, which uses libdispatch and
internal locks, versus QEMU, which heavily uses the BQL, especially during
memory-mapped device I/O. Great care has therefore been taken to prevent
deadlocks by never calling into PVG methods while holding the BQL, and
similarly never acquiring the BQL in a callback from PVG. Different strategies
have been used (libdispatch, blocking and non-blocking BHs, RCU, etc.)
depending on the specific requirements at each framework entry and exit point.
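
As a hypothetical sketch of one such strategy (not code from this patch;
names are illustrative): a callback arriving on a PVG/libdispatch thread
that needs work done under the BQL can schedule a bottom half on the main
AIO context and block on an ephemeral QemuSemaphore until that work has
completed, so the callback thread itself never takes the BQL. (See also
the v5/v6 notes below.)

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/thread.h"
#include "block/aio.h"

typedef struct ExampleJob {
    QemuSemaphore sem;
    /* ... parameters and results of the BQL-side work ... */
} ExampleJob;

static void example_job_bh(void *opaque)
{
    ExampleJob *job = opaque;

    /*
     * Runs on the main loop thread with the BQL held; safe to touch
     * device and display state here.
     */

    qemu_sem_post(&job->sem);
}

/* Called from a PVG/libdispatch thread; must not acquire the BQL here. */
static void example_pvg_callback(void)
{
    ExampleJob job;

    qemu_sem_init(&job.sem, 0);
    aio_bh_schedule_oneshot(qemu_get_aio_context(), example_job_bh, &job);
    qemu_sem_wait(&job.sem);    /* wait for the BQL-side work to finish */
    qemu_sem_destroy(&job.sem);
}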

Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com>
---

v2:

* Cherry-pick/rebase conflict fixes
* BQL function renaming
* Moved from hw/vmapple/ (useful outside that machine type)
* Code review comments: Switched to DEFINE_TYPES macro & little-endian
  MMIO.
* Removed some dead/superfluous code
* Made set_mode thread & memory safe
* Added migration blocker due to lack of (de-)serialisation.
* Fixes to ObjC refcounting and autorelease pool usage.
* Fixed ObjC new/init misuse
* Switched to ObjC category extension for private property.
* Simplified task memory mapping and made it thread safe.
* Refactoring to split generic and vmapple MMIO variant specific
  code.
* Switched to asynchronous MMIO writes on x86-64
* Rendering and graphics update are now done asynchronously
* Fixed cursor handling
* Coding convention fixes
* Removed software cursor compositing

v3:

* Rebased on latest upstream, fixed breakages including switching to
  Resettable methods.
* Squashed patches dealing with dGPUs, MMIO area size, and GPU picking.
* Allow re-entrant MMIO; this simplifies the code and solves the divergence
  between x86-64 and arm64 variants.

v4:

* Renamed '-vmapple' device variant to '-mmio'
* MMIO device type now requires aarch64 host and guest
* Complete overhaul of the glue code for making QEMU's and
  ParavirtualizedGraphics.framework's threading and synchronisation models
  work together. Calls into PVG are from dispatch queues while the
  BQL-holding initiating thread processes AIO context events; callbacks from
  PVG are scheduled as BHs on the BQL/main AIO context, awaiting completion
  where necessary.
* Guest frame rendering state is covered by the BQL, with only the PVG calls
  outside the lock, and serialised on the named render_queue.
* Simplified logic for dropping frames in-flight during mode changes, fixed
  a bug in the pending frames logic.
* Addressed smaller code review notes such as: function naming, object type
  declarations, type names/declarations/casts, code formatting, #include
  order, over-cautious ObjC retain/release, what goes in init vs realize,
  etc.

v5:

* Smaller non-functional fixes in response to review comments, such as using
  NULL for the AIO_WAIT_WHILE context argument, type name formatting,
  deleting leftover debug code, logging improvements, state struct field
  order and documentation improvements, etc.
* Instead of a single condition variable for all synchronous BH job types,
  there is now one for each callback block. This reduces the number
  of threads being awoken unnecessarily to near zero.
* MMIO device variant: Unified the BH job for raising interrupts.
* Use DMA APIs for the PVG framework's guest memory read requests.
* Thread safety improvements: ensure mutable AppleGFXState fields are not
  accessed outside the appropriate lock. Added a dedicated mutex for the
  task list.
* Retain references to MemoryRegions for which there exist mappings in each
  PGTask, and for IOSurface mappings.

v6:

* Switched PGTask_s's mapped_regions from GPtrArray to GArray
* Allow DisplaySurface to manage its own vram now that the texture -> vram
  copy occurs under the BQL.
* Memory mapping operations now use RCU_READ_LOCK_GUARD() where possible
  instead of a heavy-weight BH job to acquire the BQL; see the sketch after
  this list.
* Changed PVG cursor and mode setting callbacks to kick off BHs instead of
  libdispatch tasks which then locked the BQL explicitly.
* The single remaining callback which must wait for a BH to complete now
  creates an ephemeral QemuSemaphore to await completion.
* Re-removed tracking of mapped surface manager memory regions. Just look up
  and ref/unref the memory regions in the map/unmap callbacks.
* Re-ordered functions in apple-gfx.m to group them by area of functionality.
* Improved comments and tweaked some names.
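
As a rough illustration of that RCU-based approach (a hypothetical sketch
using the generic memory API, not the actual apple-gfx code): a guest
physical address can be resolved from a non-BQL thread inside an RCU read
section instead of bouncing through a BQL-acquiring BH. The real code
additionally retains the MemoryRegion so the mapping stays valid after the
RCU section ends.

#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "exec/memory.h"

static void *example_hva_for_gpa(AddressSpace *as, hwaddr gpa, hwaddr *len)
{
    MemoryRegion *mr;
    hwaddr xlat;

    RCU_READ_LOCK_GUARD();
    mr = address_space_translate(as, gpa, &xlat, len, false,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        return NULL;
    }
    /* Caller must keep the region alive, e.g. via memory_region_ref(). */
    return (char *)memory_region_get_ram_ptr(mr) + xlat;
}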

v7:

* Use g_ptr_array_find() helper function
* Error handling coding style tweak

v8:

* Renamed apple_gfx_host_address_for_gpa_range() to
  apple_gfx_host_ptr_for_gpa_range(), and made it return a void* instead of
  uintptr_t. Fixed up callers and related code.
* Some adjustments to types used.
* Variable naming tweaks for better clarity.
* Fixed leak in unlikely realize error case.
* Fixed typo in unmap call.
* Don't bother with dummy argument for g_ptr_array_find(), NULL works too.

v9:

* Pass device pointer to graphic_console_init().
* Slightly re-ordered initialisation code.
* Simplified error handling during realize().
* Simplified code without functional changes, adjusted code & comment
  formatting.

v10:

* Reworked the way frame rendering code is threaded to use BHs for sections
  requiring BQL.
* Fix for ./configure error on non-macOS platforms.
* Code formatting tweaks.

v11:

* Generate unique display serial number for each apple-gfx device instance.
* Dropped redundant local variable initialisation.

v12:

* Removed 2 redundant variable initialisations.
* Removed dedicated rendering dispatch_queue, use global queue instead.
* Fixed an object leak regression introduced in v10. Solved by placing
  @autoreleasepool blocks around the relevant Objective-C code in the BH
  functions replacing the dispatch_async tasks. (dispatch_async implicitly
  cleaned up autoreleased objects.)
* Fixed missing retain/release of command buffers when handing off to a
  non-BH thread. (Problem masked at runtime by above leak.)
* Better handling of render command encoding errors.
* Re-arranged positions of static variables in the file.

 hw/display/Kconfig          |   9 +
 hw/display/apple-gfx-mmio.m | 281 +++++++++++++
170 | hw/display/apple-gfx.h | 66 +++ | 170 | hw/display/apple-gfx.h | 66 +++ |
171 | hw/display/apple-gfx.m | 783 ++++++++++++++++++++++++++++++++++++ | 171 | hw/display/apple-gfx.m | 783 ++++++++++++++++++++++++++++++++++++ |
172 | hw/display/meson.build | 6 + | 172 | hw/display/meson.build | 6 + |
173 | hw/display/trace-events | 28 ++ | 173 | hw/display/trace-events | 28 ++ |
174 | meson.build | 4 + | 174 | meson.build | 4 + |
175 | 7 files changed, 1177 insertions(+) | 175 | 7 files changed, 1177 insertions(+) |
176 | create mode 100644 hw/display/apple-gfx-mmio.m | 176 | create mode 100644 hw/display/apple-gfx-mmio.m |
177 | create mode 100644 hw/display/apple-gfx.h | 177 | create mode 100644 hw/display/apple-gfx.h |
178 | create mode 100644 hw/display/apple-gfx.m | 178 | create mode 100644 hw/display/apple-gfx.m |
179 | 179 | ||
180 | diff --git a/hw/display/Kconfig b/hw/display/Kconfig | 180 | diff --git a/hw/display/Kconfig b/hw/display/Kconfig |
181 | index XXXXXXX..XXXXXXX 100644 | 181 | index XXXXXXX..XXXXXXX 100644 |
182 | --- a/hw/display/Kconfig | 182 | --- a/hw/display/Kconfig |
183 | +++ b/hw/display/Kconfig | 183 | +++ b/hw/display/Kconfig |
184 | @@ -XXX,XX +XXX,XX @@ config XLNX_DISPLAYPORT | 184 | @@ -XXX,XX +XXX,XX @@ config XLNX_DISPLAYPORT |
185 | 185 | ||
186 | config DM163 | 186 | config DM163 |
187 | bool | 187 | bool |
188 | + | 188 | + |
189 | +config MAC_PVG | 189 | +config MAC_PVG |
190 | + bool | 190 | + bool |
191 | + default y | 191 | + default y |
192 | + | 192 | + |
193 | +config MAC_PVG_MMIO | 193 | +config MAC_PVG_MMIO |
194 | + bool | 194 | + bool |
195 | + depends on MAC_PVG && AARCH64 | 195 | + depends on MAC_PVG && AARCH64 |
196 | + | 196 | + |
197 | diff --git a/hw/display/apple-gfx-mmio.m b/hw/display/apple-gfx-mmio.m | 197 | diff --git a/hw/display/apple-gfx-mmio.m b/hw/display/apple-gfx-mmio.m |
198 | new file mode 100644 | 198 | new file mode 100644 |
199 | index XXXXXXX..XXXXXXX | 199 | index XXXXXXX..XXXXXXX |
200 | --- /dev/null | 200 | --- /dev/null |
201 | +++ b/hw/display/apple-gfx-mmio.m | 201 | +++ b/hw/display/apple-gfx-mmio.m |
202 | @@ -XXX,XX +XXX,XX @@ | 202 | @@ -XXX,XX +XXX,XX @@ |
203 | +/* | 203 | +/* |
204 | + * QEMU Apple ParavirtualizedGraphics.framework device, MMIO (arm64) variant | 204 | + * QEMU Apple ParavirtualizedGraphics.framework device, MMIO (arm64) variant |
205 | + * | 205 | + * |
206 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | 206 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
207 | + * | 207 | + * |
208 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | 208 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. |
209 | + * See the COPYING file in the top-level directory. | 209 | + * See the COPYING file in the top-level directory. |
210 | + * | 210 | + * |
211 | + * SPDX-License-Identifier: GPL-2.0-or-later | 211 | + * SPDX-License-Identifier: GPL-2.0-or-later |
212 | + * | 212 | + * |
213 | + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides | 213 | + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides |
214 | + * which implements 3d graphics passthrough to the host as well as a | 214 | + * which implements 3d graphics passthrough to the host as well as a |
215 | + * proprietary guest communication channel to drive it. This device model | 215 | + * proprietary guest communication channel to drive it. This device model |
216 | + * implements support to drive that library from within QEMU as an MMIO-based | 216 | + * implements support to drive that library from within QEMU as an MMIO-based |
217 | + * system device for macOS on arm64 VMs. | 217 | + * system device for macOS on arm64 VMs. |
218 | + */ | 218 | + */ |
219 | + | 219 | + |
220 | +#include "qemu/osdep.h" | 220 | +#include "qemu/osdep.h" |
221 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> | 221 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> |
222 | +#include "apple-gfx.h" | 222 | +#include "apple-gfx.h" |
223 | +#include "monitor/monitor.h" | 223 | +#include "monitor/monitor.h" |
224 | +#include "hw/sysbus.h" | 224 | +#include "hw/sysbus.h" |
225 | +#include "hw/irq.h" | 225 | +#include "hw/irq.h" |
226 | +#include "trace.h" | 226 | +#include "trace.h" |
227 | +#include "qemu/log.h" | 227 | +#include "qemu/log.h" |
228 | + | 228 | + |
229 | +OBJECT_DECLARE_SIMPLE_TYPE(AppleGFXMMIOState, APPLE_GFX_MMIO) | 229 | +OBJECT_DECLARE_SIMPLE_TYPE(AppleGFXMMIOState, APPLE_GFX_MMIO) |
230 | + | 230 | + |
231 | +/* | 231 | +/* |
232 | + * ParavirtualizedGraphics.Framework only ships header files for the PCI | 232 | + * ParavirtualizedGraphics.Framework only ships header files for the PCI |
233 | + * variant which does not include IOSFC descriptors and host devices. We add | 233 | + * variant which does not include IOSFC descriptors and host devices. We add |
234 | + * their definitions here so that we can also work with the ARM version. | 234 | + * their definitions here so that we can also work with the ARM version. |
235 | + */ | 235 | + */ |
236 | +typedef bool(^IOSFCRaiseInterrupt)(uint32_t vector); | 236 | +typedef bool(^IOSFCRaiseInterrupt)(uint32_t vector); |
237 | +typedef bool(^IOSFCUnmapMemory)( | 237 | +typedef bool(^IOSFCUnmapMemory)( |
238 | + void *, void *, void *, void *, void *, void *); | 238 | + void *, void *, void *, void *, void *, void *); |
239 | +typedef bool(^IOSFCMapMemory)( | 239 | +typedef bool(^IOSFCMapMemory)( |
240 | + uint64_t phys, uint64_t len, bool ro, void **va, void *, void *); | 240 | + uint64_t phys, uint64_t len, bool ro, void **va, void *, void *); |
241 | + | 241 | + |
242 | +@interface PGDeviceDescriptor (IOSurfaceMapper) | 242 | +@interface PGDeviceDescriptor (IOSurfaceMapper) |
243 | +@property (readwrite, nonatomic) bool usingIOSurfaceMapper; | 243 | +@property (readwrite, nonatomic) bool usingIOSurfaceMapper; |
244 | +@end | 244 | +@end |
245 | + | 245 | + |
246 | +@interface PGIOSurfaceHostDeviceDescriptor : NSObject | 246 | +@interface PGIOSurfaceHostDeviceDescriptor : NSObject |
247 | +-(PGIOSurfaceHostDeviceDescriptor *)init; | 247 | +-(PGIOSurfaceHostDeviceDescriptor *)init; |
248 | +@property (readwrite, nonatomic, copy, nullable) IOSFCMapMemory mapMemory; | 248 | +@property (readwrite, nonatomic, copy, nullable) IOSFCMapMemory mapMemory; |
249 | +@property (readwrite, nonatomic, copy, nullable) IOSFCUnmapMemory unmapMemory; | 249 | +@property (readwrite, nonatomic, copy, nullable) IOSFCUnmapMemory unmapMemory; |
250 | +@property (readwrite, nonatomic, copy, nullable) IOSFCRaiseInterrupt raiseInterrupt; | 250 | +@property (readwrite, nonatomic, copy, nullable) IOSFCRaiseInterrupt raiseInterrupt; |
251 | +@end | 251 | +@end |
252 | + | 252 | + |
253 | +@interface PGIOSurfaceHostDevice : NSObject | 253 | +@interface PGIOSurfaceHostDevice : NSObject |
254 | +-(instancetype)initWithDescriptor:(PGIOSurfaceHostDeviceDescriptor *)desc; | 254 | +-(instancetype)initWithDescriptor:(PGIOSurfaceHostDeviceDescriptor *)desc; |
255 | +-(uint32_t)mmioReadAtOffset:(size_t)offset; | 255 | +-(uint32_t)mmioReadAtOffset:(size_t)offset; |
256 | +-(void)mmioWriteAtOffset:(size_t)offset value:(uint32_t)value; | 256 | +-(void)mmioWriteAtOffset:(size_t)offset value:(uint32_t)value; |
257 | +@end | 257 | +@end |
258 | + | 258 | + |
259 | +struct AppleGFXMapSurfaceMemoryJob; | 259 | +struct AppleGFXMapSurfaceMemoryJob; |
260 | +struct AppleGFXMMIOState { | 260 | +struct AppleGFXMMIOState { |
261 | + SysBusDevice parent_obj; | 261 | + SysBusDevice parent_obj; |
262 | + | 262 | + |
263 | + AppleGFXState common; | 263 | + AppleGFXState common; |
264 | + | 264 | + |
265 | + qemu_irq irq_gfx; | 265 | + qemu_irq irq_gfx; |
266 | + qemu_irq irq_iosfc; | 266 | + qemu_irq irq_iosfc; |
267 | + MemoryRegion iomem_iosfc; | 267 | + MemoryRegion iomem_iosfc; |
268 | + PGIOSurfaceHostDevice *pgiosfc; | 268 | + PGIOSurfaceHostDevice *pgiosfc; |
269 | +}; | 269 | +}; |
270 | + | 270 | + |
271 | +typedef struct AppleGFXMMIOJob { | 271 | +typedef struct AppleGFXMMIOJob { |
272 | + AppleGFXMMIOState *state; | 272 | + AppleGFXMMIOState *state; |
273 | + uint64_t offset; | 273 | + uint64_t offset; |
274 | + uint64_t value; | 274 | + uint64_t value; |
275 | + bool completed; | 275 | + bool completed; |
276 | +} AppleGFXMMIOJob; | 276 | +} AppleGFXMMIOJob; |
277 | + | 277 | + |
278 | +static void iosfc_do_read(void *opaque) | 278 | +static void iosfc_do_read(void *opaque) |
279 | +{ | 279 | +{ |
280 | + AppleGFXMMIOJob *job = opaque; | 280 | + AppleGFXMMIOJob *job = opaque; |
281 | + job->value = [job->state->pgiosfc mmioReadAtOffset:job->offset]; | 281 | + job->value = [job->state->pgiosfc mmioReadAtOffset:job->offset]; |
282 | + qatomic_set(&job->completed, true); | 282 | + qatomic_set(&job->completed, true); |
283 | + aio_wait_kick(); | 283 | + aio_wait_kick(); |
284 | +} | 284 | +} |
285 | + | 285 | + |
286 | +static uint64_t iosfc_read(void *opaque, hwaddr offset, unsigned size) | 286 | +static uint64_t iosfc_read(void *opaque, hwaddr offset, unsigned size) |
287 | +{ | 287 | +{ |
288 | + AppleGFXMMIOJob job = { | 288 | + AppleGFXMMIOJob job = { |
289 | + .state = opaque, | 289 | + .state = opaque, |
290 | + .offset = offset, | 290 | + .offset = offset, |
291 | + .completed = false, | 291 | + .completed = false, |
292 | + }; | 292 | + }; |
293 | + dispatch_queue_t queue = | 293 | + dispatch_queue_t queue = |
294 | + dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); | 294 | + dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); |
295 | + | 295 | + |
296 | + dispatch_async_f(queue, &job, iosfc_do_read); | 296 | + dispatch_async_f(queue, &job, iosfc_do_read); |
297 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); | 297 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); |
298 | + | 298 | + |
299 | + trace_apple_gfx_mmio_iosfc_read(offset, job.value); | 299 | + trace_apple_gfx_mmio_iosfc_read(offset, job.value); |
300 | + return job.value; | 300 | + return job.value; |
301 | +} | 301 | +} |
302 | + | 302 | + |
303 | +static void iosfc_do_write(void *opaque) | 303 | +static void iosfc_do_write(void *opaque) |
304 | +{ | 304 | +{ |
305 | + AppleGFXMMIOJob *job = opaque; | 305 | + AppleGFXMMIOJob *job = opaque; |
306 | + [job->state->pgiosfc mmioWriteAtOffset:job->offset value:job->value]; | 306 | + [job->state->pgiosfc mmioWriteAtOffset:job->offset value:job->value]; |
307 | + qatomic_set(&job->completed, true); | 307 | + qatomic_set(&job->completed, true); |
308 | + aio_wait_kick(); | 308 | + aio_wait_kick(); |
309 | +} | 309 | +} |
310 | + | 310 | + |
311 | +static void iosfc_write(void *opaque, hwaddr offset, uint64_t val, | 311 | +static void iosfc_write(void *opaque, hwaddr offset, uint64_t val, |
312 | + unsigned size) | 312 | + unsigned size) |
313 | +{ | 313 | +{ |
314 | + AppleGFXMMIOJob job = { | 314 | + AppleGFXMMIOJob job = { |
315 | + .state = opaque, | 315 | + .state = opaque, |
316 | + .offset = offset, | 316 | + .offset = offset, |
317 | + .value = val, | 317 | + .value = val, |
318 | + .completed = false, | 318 | + .completed = false, |
319 | + }; | 319 | + }; |
320 | + dispatch_queue_t queue = | 320 | + dispatch_queue_t queue = |
321 | + dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); | 321 | + dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); |
322 | + | 322 | + |
323 | + dispatch_async_f(queue, &job, iosfc_do_write); | 323 | + dispatch_async_f(queue, &job, iosfc_do_write); |
324 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); | 324 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); |
325 | + | 325 | + |
326 | + trace_apple_gfx_mmio_iosfc_write(offset, val); | 326 | + trace_apple_gfx_mmio_iosfc_write(offset, val); |
327 | +} | 327 | +} |
328 | + | 328 | + |
329 | +static const MemoryRegionOps apple_iosfc_ops = { | 329 | +static const MemoryRegionOps apple_iosfc_ops = { |
330 | + .read = iosfc_read, | 330 | + .read = iosfc_read, |
331 | + .write = iosfc_write, | 331 | + .write = iosfc_write, |
332 | + .endianness = DEVICE_LITTLE_ENDIAN, | 332 | + .endianness = DEVICE_LITTLE_ENDIAN, |
333 | + .valid = { | 333 | + .valid = { |
334 | + .min_access_size = 4, | 334 | + .min_access_size = 4, |
335 | + .max_access_size = 8, | 335 | + .max_access_size = 8, |
336 | + }, | 336 | + }, |
337 | + .impl = { | 337 | + .impl = { |
338 | + .min_access_size = 4, | 338 | + .min_access_size = 4, |
339 | + .max_access_size = 8, | 339 | + .max_access_size = 8, |
340 | + }, | 340 | + }, |
341 | +}; | 341 | +}; |
342 | + | 342 | + |
343 | +static void raise_irq_bh(void *opaque) | 343 | +static void raise_irq_bh(void *opaque) |
344 | +{ | 344 | +{ |
345 | + qemu_irq *irq = opaque; | 345 | + qemu_irq *irq = opaque; |
346 | + | 346 | + |
347 | + qemu_irq_pulse(*irq); | 347 | + qemu_irq_pulse(*irq); |
348 | +} | 348 | +} |
349 | + | 349 | + |
350 | +static void *apple_gfx_mmio_map_surface_memory(uint64_t guest_physical_address, | 350 | +static void *apple_gfx_mmio_map_surface_memory(uint64_t guest_physical_address, |
351 | + uint64_t length, bool read_only) | 351 | + uint64_t length, bool read_only) |
352 | +{ | 352 | +{ |
353 | + void *mem; | 353 | + void *mem; |
354 | + MemoryRegion *region = NULL; | 354 | + MemoryRegion *region = NULL; |
355 | + | 355 | + |
356 | + RCU_READ_LOCK_GUARD(); | 356 | + RCU_READ_LOCK_GUARD(); |
357 | + mem = apple_gfx_host_ptr_for_gpa_range(guest_physical_address, | 357 | + mem = apple_gfx_host_ptr_for_gpa_range(guest_physical_address, |
358 | + length, read_only, ®ion); | 358 | + length, read_only, ®ion); |
359 | + if (mem) { | 359 | + if (mem) { |
360 | + memory_region_ref(region); | 360 | + memory_region_ref(region); |
361 | + } | 361 | + } |
362 | + return mem; | 362 | + return mem; |
363 | +} | 363 | +} |
364 | + | 364 | + |
365 | +static bool apple_gfx_mmio_unmap_surface_memory(void *ptr) | 365 | +static bool apple_gfx_mmio_unmap_surface_memory(void *ptr) |
366 | +{ | 366 | +{ |
367 | + MemoryRegion *region; | 367 | + MemoryRegion *region; |
368 | + ram_addr_t offset = 0; | 368 | + ram_addr_t offset = 0; |
369 | + | 369 | + |
370 | + RCU_READ_LOCK_GUARD(); | 370 | + RCU_READ_LOCK_GUARD(); |
371 | + region = memory_region_from_host(ptr, &offset); | 371 | + region = memory_region_from_host(ptr, &offset); |
372 | + if (!region) { | 372 | + if (!region) { |
373 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: memory at %p to be unmapped not " | 373 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: memory at %p to be unmapped not " |
374 | + "found.\n", | 374 | + "found.\n", |
375 | + __func__, ptr); | 375 | + __func__, ptr); |
376 | + return false; | 376 | + return false; |
377 | + } | 377 | + } |
378 | + | 378 | + |
379 | + trace_apple_gfx_iosfc_unmap_memory_region(ptr, region); | 379 | + trace_apple_gfx_iosfc_unmap_memory_region(ptr, region); |
380 | + memory_region_unref(region); | 380 | + memory_region_unref(region); |
381 | + return true; | 381 | + return true; |
382 | +} | 382 | +} |
383 | + | 383 | + |
384 | +static PGIOSurfaceHostDevice *apple_gfx_prepare_iosurface_host_device( | 384 | +static PGIOSurfaceHostDevice *apple_gfx_prepare_iosurface_host_device( |
385 | + AppleGFXMMIOState *s) | 385 | + AppleGFXMMIOState *s) |
386 | +{ | 386 | +{ |
387 | + PGIOSurfaceHostDeviceDescriptor *iosfc_desc = | 387 | + PGIOSurfaceHostDeviceDescriptor *iosfc_desc = |
388 | + [PGIOSurfaceHostDeviceDescriptor new]; | 388 | + [PGIOSurfaceHostDeviceDescriptor new]; |
389 | + PGIOSurfaceHostDevice *iosfc_host_dev; | 389 | + PGIOSurfaceHostDevice *iosfc_host_dev; |
390 | + | 390 | + |
391 | + iosfc_desc.mapMemory = | 391 | + iosfc_desc.mapMemory = |
392 | + ^bool(uint64_t phys, uint64_t len, bool ro, void **va, void *e, void *f) { | 392 | + ^bool(uint64_t phys, uint64_t len, bool ro, void **va, void *e, void *f) { |
393 | + *va = apple_gfx_mmio_map_surface_memory(phys, len, ro); | 393 | + *va = apple_gfx_mmio_map_surface_memory(phys, len, ro); |
394 | + | 394 | + |
395 | + trace_apple_gfx_iosfc_map_memory(phys, len, ro, va, e, f, *va); | 395 | + trace_apple_gfx_iosfc_map_memory(phys, len, ro, va, e, f, *va); |
396 | + | 396 | + |
397 | + return *va != NULL; | 397 | + return *va != NULL; |
398 | + }; | 398 | + }; |
399 | + | 399 | + |
400 | + iosfc_desc.unmapMemory = | 400 | + iosfc_desc.unmapMemory = |
401 | + ^bool(void *va, void *b, void *c, void *d, void *e, void *f) { | 401 | + ^bool(void *va, void *b, void *c, void *d, void *e, void *f) { |
402 | + return apple_gfx_mmio_unmap_surface_memory(va); | 402 | + return apple_gfx_mmio_unmap_surface_memory(va); |
403 | + }; | 403 | + }; |
404 | + | 404 | + |
405 | + iosfc_desc.raiseInterrupt = ^bool(uint32_t vector) { | 405 | + iosfc_desc.raiseInterrupt = ^bool(uint32_t vector) { |
406 | + trace_apple_gfx_iosfc_raise_irq(vector); | 406 | + trace_apple_gfx_iosfc_raise_irq(vector); |
407 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | 407 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), |
408 | + raise_irq_bh, &s->irq_iosfc); | 408 | + raise_irq_bh, &s->irq_iosfc); |
409 | + return true; | 409 | + return true; |
410 | + }; | 410 | + }; |
411 | + | 411 | + |
412 | + iosfc_host_dev = | 412 | + iosfc_host_dev = |
413 | + [[PGIOSurfaceHostDevice alloc] initWithDescriptor:iosfc_desc]; | 413 | + [[PGIOSurfaceHostDevice alloc] initWithDescriptor:iosfc_desc]; |
414 | + [iosfc_desc release]; | 414 | + [iosfc_desc release]; |
415 | + return iosfc_host_dev; | 415 | + return iosfc_host_dev; |
416 | +} | 416 | +} |
417 | + | 417 | + |
418 | +static void apple_gfx_mmio_realize(DeviceState *dev, Error **errp) | 418 | +static void apple_gfx_mmio_realize(DeviceState *dev, Error **errp) |
419 | +{ | 419 | +{ |
420 | + @autoreleasepool { | 420 | + @autoreleasepool { |
421 | + AppleGFXMMIOState *s = APPLE_GFX_MMIO(dev); | 421 | + AppleGFXMMIOState *s = APPLE_GFX_MMIO(dev); |
422 | + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; | 422 | + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; |
423 | + | 423 | + |
424 | + desc.raiseInterrupt = ^(uint32_t vector) { | 424 | + desc.raiseInterrupt = ^(uint32_t vector) { |
425 | + trace_apple_gfx_raise_irq(vector); | 425 | + trace_apple_gfx_raise_irq(vector); |
426 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | 426 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), |
427 | + raise_irq_bh, &s->irq_gfx); | 427 | + raise_irq_bh, &s->irq_gfx); |
428 | + }; | 428 | + }; |
429 | + | 429 | + |
430 | + desc.usingIOSurfaceMapper = true; | 430 | + desc.usingIOSurfaceMapper = true; |
431 | + s->pgiosfc = apple_gfx_prepare_iosurface_host_device(s); | 431 | + s->pgiosfc = apple_gfx_prepare_iosurface_host_device(s); |
432 | + | 432 | + |
433 | + if (!apple_gfx_common_realize(&s->common, dev, desc, errp)) { | 433 | + if (!apple_gfx_common_realize(&s->common, dev, desc, errp)) { |
434 | + [s->pgiosfc release]; | 434 | + [s->pgiosfc release]; |
435 | + s->pgiosfc = nil; | 435 | + s->pgiosfc = nil; |
436 | + } | 436 | + } |
437 | + | 437 | + |
438 | + [desc release]; | 438 | + [desc release]; |
439 | + desc = nil; | 439 | + desc = nil; |
440 | + } | 440 | + } |
441 | +} | 441 | +} |
442 | + | 442 | + |
443 | +static void apple_gfx_mmio_init(Object *obj) | 443 | +static void apple_gfx_mmio_init(Object *obj) |
444 | +{ | 444 | +{ |
445 | + AppleGFXMMIOState *s = APPLE_GFX_MMIO(obj); | 445 | + AppleGFXMMIOState *s = APPLE_GFX_MMIO(obj); |
446 | + | 446 | + |
447 | + apple_gfx_common_init(obj, &s->common, TYPE_APPLE_GFX_MMIO); | 447 | + apple_gfx_common_init(obj, &s->common, TYPE_APPLE_GFX_MMIO); |
448 | + | 448 | + |
449 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->common.iomem_gfx); | 449 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->common.iomem_gfx); |
450 | + memory_region_init_io(&s->iomem_iosfc, obj, &apple_iosfc_ops, s, | 450 | + memory_region_init_io(&s->iomem_iosfc, obj, &apple_iosfc_ops, s, |
451 | + TYPE_APPLE_GFX_MMIO, 0x10000); | 451 | + TYPE_APPLE_GFX_MMIO, 0x10000); |
452 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem_iosfc); | 452 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem_iosfc); |
453 | + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq_gfx); | 453 | + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq_gfx); |
454 | + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq_iosfc); | 454 | + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq_iosfc); |
455 | +} | 455 | +} |
456 | + | 456 | + |
457 | +static void apple_gfx_mmio_reset(Object *obj, ResetType type) | 457 | +static void apple_gfx_mmio_reset(Object *obj, ResetType type) |
458 | +{ | 458 | +{ |
459 | + AppleGFXMMIOState *s = APPLE_GFX_MMIO(obj); | 459 | + AppleGFXMMIOState *s = APPLE_GFX_MMIO(obj); |
460 | + [s->common.pgdev reset]; | 460 | + [s->common.pgdev reset]; |
461 | +} | 461 | +} |
462 | + | 462 | + |
463 | + | 463 | + |
464 | +static void apple_gfx_mmio_class_init(ObjectClass *klass, void *data) | 464 | +static void apple_gfx_mmio_class_init(ObjectClass *klass, void *data) |
465 | +{ | 465 | +{ |
466 | + DeviceClass *dc = DEVICE_CLASS(klass); | 466 | + DeviceClass *dc = DEVICE_CLASS(klass); |
467 | + ResettableClass *rc = RESETTABLE_CLASS(klass); | 467 | + ResettableClass *rc = RESETTABLE_CLASS(klass); |
468 | + | 468 | + |
469 | + rc->phases.hold = apple_gfx_mmio_reset; | 469 | + rc->phases.hold = apple_gfx_mmio_reset; |
470 | + dc->hotpluggable = false; | 470 | + dc->hotpluggable = false; |
471 | + dc->realize = apple_gfx_mmio_realize; | 471 | + dc->realize = apple_gfx_mmio_realize; |
472 | +} | 472 | +} |
473 | + | 473 | + |
474 | +static TypeInfo apple_gfx_mmio_types[] = { | 474 | +static TypeInfo apple_gfx_mmio_types[] = { |
475 | + { | 475 | + { |
476 | + .name = TYPE_APPLE_GFX_MMIO, | 476 | + .name = TYPE_APPLE_GFX_MMIO, |
477 | + .parent = TYPE_SYS_BUS_DEVICE, | 477 | + .parent = TYPE_SYS_BUS_DEVICE, |
478 | + .instance_size = sizeof(AppleGFXMMIOState), | 478 | + .instance_size = sizeof(AppleGFXMMIOState), |
479 | + .class_init = apple_gfx_mmio_class_init, | 479 | + .class_init = apple_gfx_mmio_class_init, |
480 | + .instance_init = apple_gfx_mmio_init, | 480 | + .instance_init = apple_gfx_mmio_init, |
481 | + } | 481 | + } |
482 | +}; | 482 | +}; |
483 | +DEFINE_TYPES(apple_gfx_mmio_types) | 483 | +DEFINE_TYPES(apple_gfx_mmio_types) |
484 | diff --git a/hw/display/apple-gfx.h b/hw/display/apple-gfx.h | 484 | diff --git a/hw/display/apple-gfx.h b/hw/display/apple-gfx.h |
485 | new file mode 100644 | 485 | new file mode 100644 |
486 | index XXXXXXX..XXXXXXX | 486 | index XXXXXXX..XXXXXXX |
487 | --- /dev/null | 487 | --- /dev/null |
488 | +++ b/hw/display/apple-gfx.h | 488 | +++ b/hw/display/apple-gfx.h |
489 | @@ -XXX,XX +XXX,XX @@ | 489 | @@ -XXX,XX +XXX,XX @@ |
490 | +/* | 490 | +/* |
491 | + * Data structures and functions shared between variants of the macOS | 491 | + * Data structures and functions shared between variants of the macOS |
492 | + * ParavirtualizedGraphics.framework based apple-gfx display adapter. | 492 | + * ParavirtualizedGraphics.framework based apple-gfx display adapter. |
493 | + * | 493 | + * |
494 | + * SPDX-License-Identifier: GPL-2.0-or-later | 494 | + * SPDX-License-Identifier: GPL-2.0-or-later |
495 | + */ | 495 | + */ |
496 | + | 496 | + |
497 | +#ifndef QEMU_APPLE_GFX_H | 497 | +#ifndef QEMU_APPLE_GFX_H |
498 | +#define QEMU_APPLE_GFX_H | 498 | +#define QEMU_APPLE_GFX_H |
499 | + | 499 | + |
500 | +#define TYPE_APPLE_GFX_MMIO "apple-gfx-mmio" | 500 | +#define TYPE_APPLE_GFX_MMIO "apple-gfx-mmio" |
501 | +#define TYPE_APPLE_GFX_PCI "apple-gfx-pci" | 501 | +#define TYPE_APPLE_GFX_PCI "apple-gfx-pci" |
502 | + | 502 | + |
503 | +#include "qemu/osdep.h" | 503 | +#include "qemu/osdep.h" |
504 | +#include <dispatch/dispatch.h> | 504 | +#include <dispatch/dispatch.h> |
505 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> | 505 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> |
506 | +#include "qemu/typedefs.h" | 506 | +#include "qemu/typedefs.h" |
507 | +#include "exec/memory.h" | 507 | +#include "exec/memory.h" |
508 | +#include "ui/surface.h" | 508 | +#include "ui/surface.h" |
509 | + | 509 | + |
510 | +@class PGDeviceDescriptor; | 510 | +@class PGDeviceDescriptor; |
511 | +@protocol PGDevice; | 511 | +@protocol PGDevice; |
512 | +@protocol PGDisplay; | 512 | +@protocol PGDisplay; |
513 | +@protocol MTLDevice; | 513 | +@protocol MTLDevice; |
514 | +@protocol MTLTexture; | 514 | +@protocol MTLTexture; |
515 | +@protocol MTLCommandQueue; | 515 | +@protocol MTLCommandQueue; |
516 | + | 516 | + |
517 | +typedef QTAILQ_HEAD(, PGTask_s) PGTaskList; | 517 | +typedef QTAILQ_HEAD(, PGTask_s) PGTaskList; |
518 | + | 518 | + |
519 | +typedef struct AppleGFXState { | 519 | +typedef struct AppleGFXState { |
520 | + /* Initialised on init/realize() */ | 520 | + /* Initialised on init/realize() */ |
521 | + MemoryRegion iomem_gfx; | 521 | + MemoryRegion iomem_gfx; |
522 | + id<PGDevice> pgdev; | 522 | + id<PGDevice> pgdev; |
523 | + id<PGDisplay> pgdisp; | 523 | + id<PGDisplay> pgdisp; |
524 | + QemuConsole *con; | 524 | + QemuConsole *con; |
525 | + id<MTLDevice> mtl; | 525 | + id<MTLDevice> mtl; |
526 | + id<MTLCommandQueue> mtl_queue; | 526 | + id<MTLCommandQueue> mtl_queue; |
527 | + | 527 | + |
528 | + /* List `tasks` is protected by task_mutex */ | 528 | + /* List `tasks` is protected by task_mutex */ |
529 | + QemuMutex task_mutex; | 529 | + QemuMutex task_mutex; |
530 | + PGTaskList tasks; | 530 | + PGTaskList tasks; |
531 | + | 531 | + |
532 | + /* Mutable state (BQL protected) */ | 532 | + /* Mutable state (BQL protected) */ |
533 | + QEMUCursor *cursor; | 533 | + QEMUCursor *cursor; |
534 | + DisplaySurface *surface; | 534 | + DisplaySurface *surface; |
535 | + id<MTLTexture> texture; | 535 | + id<MTLTexture> texture; |
536 | + int8_t pending_frames; /* # guest frames in the rendering pipeline */ | 536 | + int8_t pending_frames; /* # guest frames in the rendering pipeline */ |
537 | + bool gfx_update_requested; /* QEMU display system wants a new frame */ | 537 | + bool gfx_update_requested; /* QEMU display system wants a new frame */ |
538 | + bool new_frame_ready; /* Guest has rendered a frame, ready to be used */ | 538 | + bool new_frame_ready; /* Guest has rendered a frame, ready to be used */ |
539 | + bool using_managed_texture_storage; | 539 | + bool using_managed_texture_storage; |
540 | + uint32_t rendering_frame_width; | 540 | + uint32_t rendering_frame_width; |
541 | + uint32_t rendering_frame_height; | 541 | + uint32_t rendering_frame_height; |
542 | + | 542 | + |
543 | + /* Mutable state (atomic) */ | 543 | + /* Mutable state (atomic) */ |
544 | + bool cursor_show; | 544 | + bool cursor_show; |
545 | +} AppleGFXState; | 545 | +} AppleGFXState; |
546 | + | 546 | + |
547 | +void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name); | 547 | +void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name); |
548 | +bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, | 548 | +bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, |
549 | + PGDeviceDescriptor *desc, Error **errp); | 549 | + PGDeviceDescriptor *desc, Error **errp); |
550 | +void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, | 550 | +void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, |
551 | + uint64_t length, bool read_only, | 551 | + uint64_t length, bool read_only, |
552 | + MemoryRegion **mapping_in_region); | 552 | + MemoryRegion **mapping_in_region); |
553 | + | 553 | + |
554 | +#endif | 554 | +#endif |
555 | + | 555 | + |
556 | diff --git a/hw/display/apple-gfx.m b/hw/display/apple-gfx.m | 556 | diff --git a/hw/display/apple-gfx.m b/hw/display/apple-gfx.m |
557 | new file mode 100644 | 557 | new file mode 100644 |
558 | index XXXXXXX..XXXXXXX | 558 | index XXXXXXX..XXXXXXX |
559 | --- /dev/null | 559 | --- /dev/null |
560 | +++ b/hw/display/apple-gfx.m | 560 | +++ b/hw/display/apple-gfx.m |
561 | @@ -XXX,XX +XXX,XX @@ | 561 | @@ -XXX,XX +XXX,XX @@ |
562 | +/* | 562 | +/* |
563 | + * QEMU Apple ParavirtualizedGraphics.framework device | 563 | + * QEMU Apple ParavirtualizedGraphics.framework device |
564 | + * | 564 | + * |
565 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | 565 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
566 | + * | 566 | + * |
567 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | 567 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. |
568 | + * See the COPYING file in the top-level directory. | 568 | + * See the COPYING file in the top-level directory. |
569 | + * | 569 | + * |
570 | + * SPDX-License-Identifier: GPL-2.0-or-later | 570 | + * SPDX-License-Identifier: GPL-2.0-or-later |
571 | + * | 571 | + * |
572 | + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides | 572 | + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides |
573 | + * which implements 3d graphics passthrough to the host as well as a | 573 | + * which implements 3d graphics passthrough to the host as well as a |
574 | + * proprietary guest communication channel to drive it. This device model | 574 | + * proprietary guest communication channel to drive it. This device model |
575 | + * implements support to drive that library from within QEMU. | 575 | + * implements support to drive that library from within QEMU. |
576 | + */ | 576 | + */ |
577 | + | 577 | + |
578 | +#include "qemu/osdep.h" | 578 | +#include "qemu/osdep.h" |
579 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> | 579 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> |
580 | +#include <mach/mach_vm.h> | 580 | +#include <mach/mach_vm.h> |
581 | +#include "apple-gfx.h" | 581 | +#include "apple-gfx.h" |
582 | +#include "trace.h" | 582 | +#include "trace.h" |
583 | +#include "qemu-main.h" | 583 | +#include "qemu-main.h" |
584 | +#include "exec/address-spaces.h" | 584 | +#include "exec/address-spaces.h" |
585 | +#include "migration/blocker.h" | 585 | +#include "migration/blocker.h" |
586 | +#include "monitor/monitor.h" | 586 | +#include "monitor/monitor.h" |
587 | +#include "qemu/main-loop.h" | 587 | +#include "qemu/main-loop.h" |
588 | +#include "qemu/cutils.h" | 588 | +#include "qemu/cutils.h" |
589 | +#include "qemu/log.h" | 589 | +#include "qemu/log.h" |
590 | +#include "qapi/visitor.h" | 590 | +#include "qapi/visitor.h" |
591 | +#include "qapi/error.h" | 591 | +#include "qapi/error.h" |
592 | +#include "sysemu/dma.h" | 592 | +#include "sysemu/dma.h" |
593 | +#include "ui/console.h" | 593 | +#include "ui/console.h" |
594 | + | 594 | + |
595 | +static const PGDisplayCoord_t apple_gfx_modes[] = { | 595 | +static const PGDisplayCoord_t apple_gfx_modes[] = { |
596 | + { .x = 1440, .y = 1080 }, | 596 | + { .x = 1440, .y = 1080 }, |
597 | + { .x = 1280, .y = 1024 }, | 597 | + { .x = 1280, .y = 1024 }, |
598 | +}; | 598 | +}; |
599 | + | 599 | + |
600 | +static Error *apple_gfx_mig_blocker; | 600 | +static Error *apple_gfx_mig_blocker; |
601 | +static uint32_t next_pgdisplay_serial_num = 1; | 601 | +static uint32_t next_pgdisplay_serial_num = 1; |
602 | + | 602 | + |
603 | +static dispatch_queue_t get_background_queue(void) | 603 | +static dispatch_queue_t get_background_queue(void) |
604 | +{ | 604 | +{ |
605 | + return dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); | 605 | + return dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); |
606 | +} | 606 | +} |
607 | + | 607 | + |
608 | +/* ------ PGTask and task operations: new/destroy/map/unmap ------ */ | 608 | +/* ------ PGTask and task operations: new/destroy/map/unmap ------ */ |
609 | + | 609 | + |
610 | +/* | 610 | +/* |
611 | + * This implements the type declared in <ParavirtualizedGraphics/PGDevice.h> | 611 | + * This implements the type declared in <ParavirtualizedGraphics/PGDevice.h> |
612 | + * which is opaque from the framework's point of view. It is used in callbacks | 612 | + * which is opaque from the framework's point of view. It is used in callbacks |
613 | + * in the form of its typedef PGTask_t, which also already exists in the | 613 | + * in the form of its typedef PGTask_t, which also already exists in the |
614 | + * framework headers. | 614 | + * framework headers. |
615 | + * | 615 | + * |
616 | + * A "task" in PVG terminology represents a host-virtual contiguous address | 616 | + * A "task" in PVG terminology represents a host-virtual contiguous address |
617 | + * range which is reserved in a large chunk on task creation. The mapMemory | 617 | + * range which is reserved in a large chunk on task creation. The mapMemory |
618 | + * callback then requests ranges of guest system memory (identified by their | 618 | + * callback then requests ranges of guest system memory (identified by their |
619 | + * GPA) to be mapped into subranges of this reserved address space. | 619 | + * GPA) to be mapped into subranges of this reserved address space. |
620 | + * This type of operation isn't well-supported by QEMU's memory subsystem, | 620 | + * This type of operation isn't well-supported by QEMU's memory subsystem, |
621 | + * but it is fortunately trivial to achieve with Darwin's mach_vm_remap() call, | 621 | + * but it is fortunately trivial to achieve with Darwin's mach_vm_remap() call, |
622 | + * which allows us to refer to the same backing memory via multiple virtual | 622 | + * which allows us to refer to the same backing memory via multiple virtual |
623 | + * address ranges. The Mach VM APIs are therefore used throughout for managing | 623 | + * address ranges. The Mach VM APIs are therefore used throughout for managing |
624 | + * task memory. | 624 | + * task memory. |
625 | + */ | 625 | + */ |
626 | +struct PGTask_s { | 626 | +struct PGTask_s { |
627 | + QTAILQ_ENTRY(PGTask_s) node; | 627 | + QTAILQ_ENTRY(PGTask_s) node; |
628 | + AppleGFXState *s; | 628 | + AppleGFXState *s; |
629 | + mach_vm_address_t address; | 629 | + mach_vm_address_t address; |
630 | + uint64_t len; | 630 | + uint64_t len; |
631 | + /* | 631 | + /* |
632 | + * All unique MemoryRegions for which a mapping has been created in this | 632 | + * All unique MemoryRegions for which a mapping has been created in this |
633 | + * task, and on which we have thus called memory_region_ref(). There are | 633 | + * task, and on which we have thus called memory_region_ref(). There are |
634 | + * usually very few regions of system RAM in total, so we expect this array | 634 | + * usually very few regions of system RAM in total, so we expect this array |
635 | + * to be very short. Therefore, no need for sorting or fancy search | 635 | + * to be very short. Therefore, no need for sorting or fancy search |
636 | + * algorithms, linear search will do. | 636 | + * algorithms, linear search will do. |
637 | + * Protected by AppleGFXState's task_mutex. | 637 | + * Protected by AppleGFXState's task_mutex. |
638 | + */ | 638 | + */ |
639 | + GPtrArray *mapped_regions; | 639 | + GPtrArray *mapped_regions; |
640 | +}; | 640 | +}; |
641 | + | 641 | + |
642 | +static PGTask_t *apple_gfx_new_task(AppleGFXState *s, uint64_t len) | 642 | +static PGTask_t *apple_gfx_new_task(AppleGFXState *s, uint64_t len) |
643 | +{ | 643 | +{ |
644 | + mach_vm_address_t task_mem; | 644 | + mach_vm_address_t task_mem; |
645 | + PGTask_t *task; | 645 | + PGTask_t *task; |
646 | + kern_return_t r; | 646 | + kern_return_t r; |
647 | + | 647 | + |
648 | + r = mach_vm_allocate(mach_task_self(), &task_mem, len, VM_FLAGS_ANYWHERE); | 648 | + r = mach_vm_allocate(mach_task_self(), &task_mem, len, VM_FLAGS_ANYWHERE); |
649 | + if (r != KERN_SUCCESS) { | 649 | + if (r != KERN_SUCCESS) { |
650 | + return NULL; | 650 | + return NULL; |
651 | + } | 651 | + } |
652 | + | 652 | + |
653 | + task = g_new0(PGTask_t, 1); | 653 | + task = g_new0(PGTask_t, 1); |
654 | + task->s = s; | 654 | + task->s = s; |
655 | + task->address = task_mem; | 655 | + task->address = task_mem; |
656 | + task->len = len; | 656 | + task->len = len; |
657 | + task->mapped_regions = g_ptr_array_sized_new(2 /* Usually enough */); | 657 | + task->mapped_regions = g_ptr_array_sized_new(2 /* Usually enough */); |
658 | + | 658 | + |
659 | + QEMU_LOCK_GUARD(&s->task_mutex); | 659 | + QEMU_LOCK_GUARD(&s->task_mutex); |
660 | + QTAILQ_INSERT_TAIL(&s->tasks, task, node); | 660 | + QTAILQ_INSERT_TAIL(&s->tasks, task, node); |
661 | + | 661 | + |
662 | + return task; | 662 | + return task; |
663 | +} | 663 | +} |
664 | + | 664 | + |
665 | +static void apple_gfx_destroy_task(AppleGFXState *s, PGTask_t *task) | 665 | +static void apple_gfx_destroy_task(AppleGFXState *s, PGTask_t *task) |
666 | +{ | 666 | +{ |
667 | + GPtrArray *regions = task->mapped_regions; | 667 | + GPtrArray *regions = task->mapped_regions; |
668 | + MemoryRegion *region; | 668 | + MemoryRegion *region; |
669 | + size_t i; | 669 | + size_t i; |
670 | + | 670 | + |
671 | + for (i = 0; i < regions->len; ++i) { | 671 | + for (i = 0; i < regions->len; ++i) { |
672 | + region = g_ptr_array_index(regions, i); | 672 | + region = g_ptr_array_index(regions, i); |
673 | + memory_region_unref(region); | 673 | + memory_region_unref(region); |
674 | + } | 674 | + } |
675 | + g_ptr_array_unref(regions); | 675 | + g_ptr_array_unref(regions); |
676 | + | 676 | + |
677 | + mach_vm_deallocate(mach_task_self(), task->address, task->len); | 677 | + mach_vm_deallocate(mach_task_self(), task->address, task->len); |
678 | + | 678 | + |
679 | + QEMU_LOCK_GUARD(&s->task_mutex); | 679 | + QEMU_LOCK_GUARD(&s->task_mutex); |
680 | + QTAILQ_REMOVE(&s->tasks, task, node); | 680 | + QTAILQ_REMOVE(&s->tasks, task, node); |
681 | + g_free(task); | 681 | + g_free(task); |
682 | +} | 682 | +} |
683 | + | 683 | + |
684 | +void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, | 684 | +void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, |
685 | + uint64_t length, bool read_only, | 685 | + uint64_t length, bool read_only, |
686 | + MemoryRegion **mapping_in_region) | 686 | + MemoryRegion **mapping_in_region) |
687 | +{ | 687 | +{ |
688 | + MemoryRegion *ram_region; | 688 | + MemoryRegion *ram_region; |
689 | + char *host_ptr; | 689 | + char *host_ptr; |
690 | + hwaddr ram_region_offset = 0; | 690 | + hwaddr ram_region_offset = 0; |
691 | + hwaddr ram_region_length = length; | 691 | + hwaddr ram_region_length = length; |
692 | + | 692 | + |
693 | + ram_region = address_space_translate(&address_space_memory, | 693 | + ram_region = address_space_translate(&address_space_memory, |
694 | + guest_physical, | 694 | + guest_physical, |
695 | + &ram_region_offset, | 695 | + &ram_region_offset, |
696 | + &ram_region_length, !read_only, | 696 | + &ram_region_length, !read_only, |
697 | + MEMTXATTRS_UNSPECIFIED); | 697 | + MEMTXATTRS_UNSPECIFIED); |
698 | + | 698 | + |
699 | + if (!ram_region || ram_region_length < length || | 699 | + if (!ram_region || ram_region_length < length || |
700 | + !memory_access_is_direct(ram_region, !read_only)) { | 700 | + !memory_access_is_direct(ram_region, !read_only)) { |
701 | + return NULL; | 701 | + return NULL; |
702 | + } | 702 | + } |
703 | + | 703 | + |
704 | + host_ptr = memory_region_get_ram_ptr(ram_region); | 704 | + host_ptr = memory_region_get_ram_ptr(ram_region); |
705 | + if (!host_ptr) { | 705 | + if (!host_ptr) { |
706 | + return NULL; | 706 | + return NULL; |
707 | + } | 707 | + } |
708 | + host_ptr += ram_region_offset; | 708 | + host_ptr += ram_region_offset; |
709 | + *mapping_in_region = ram_region; | 709 | + *mapping_in_region = ram_region; |
710 | + return host_ptr; | 710 | + return host_ptr; |
711 | +} | 711 | +} |
712 | + | 712 | + |
713 | +static bool apple_gfx_task_map_memory(AppleGFXState *s, PGTask_t *task, | 713 | +static bool apple_gfx_task_map_memory(AppleGFXState *s, PGTask_t *task, |
714 | + uint64_t virtual_offset, | 714 | + uint64_t virtual_offset, |
715 | + PGPhysicalMemoryRange_t *ranges, | 715 | + PGPhysicalMemoryRange_t *ranges, |
716 | + uint32_t range_count, bool read_only) | 716 | + uint32_t range_count, bool read_only) |
717 | +{ | 717 | +{ |
718 | + kern_return_t r; | 718 | + kern_return_t r; |
719 | + void *source_ptr; | 719 | + void *source_ptr; |
720 | + mach_vm_address_t target; | 720 | + mach_vm_address_t target; |
721 | + vm_prot_t cur_protection, max_protection; | 721 | + vm_prot_t cur_protection, max_protection; |
722 | + bool success = true; | 722 | + bool success = true; |
723 | + MemoryRegion *region; | 723 | + MemoryRegion *region; |
724 | + | 724 | + |
725 | + RCU_READ_LOCK_GUARD(); | 725 | + RCU_READ_LOCK_GUARD(); |
726 | + QEMU_LOCK_GUARD(&s->task_mutex); | 726 | + QEMU_LOCK_GUARD(&s->task_mutex); |
727 | + | 727 | + |
728 | + trace_apple_gfx_map_memory(task, range_count, virtual_offset, read_only); | 728 | + trace_apple_gfx_map_memory(task, range_count, virtual_offset, read_only); |
729 | + for (int i = 0; i < range_count; i++) { | 729 | + for (int i = 0; i < range_count; i++) { |
730 | + PGPhysicalMemoryRange_t *range = &ranges[i]; | 730 | + PGPhysicalMemoryRange_t *range = &ranges[i]; |
731 | + | 731 | + |
732 | + target = task->address + virtual_offset; | 732 | + target = task->address + virtual_offset; |
733 | + virtual_offset += range->physicalLength; | 733 | + virtual_offset += range->physicalLength; |
734 | + | 734 | + |
735 | + trace_apple_gfx_map_memory_range(i, range->physicalAddress, | 735 | + trace_apple_gfx_map_memory_range(i, range->physicalAddress, |
736 | + range->physicalLength); | 736 | + range->physicalLength); |
737 | + | 737 | + |
738 | + region = NULL; | 738 | + region = NULL; |
739 | + source_ptr = apple_gfx_host_ptr_for_gpa_range(range->physicalAddress, | 739 | + source_ptr = apple_gfx_host_ptr_for_gpa_range(range->physicalAddress, |
740 | + range->physicalLength, | 740 | + range->physicalLength, |
741 | + read_only, ®ion); | 741 | + read_only, ®ion); |
742 | + if (!source_ptr) { | 742 | + if (!source_ptr) { |
743 | + success = false; | 743 | + success = false; |
744 | + continue; | 744 | + continue; |
745 | + } | 745 | + } |
746 | + | 746 | + |
747 | + if (!g_ptr_array_find(task->mapped_regions, region, NULL)) { | 747 | + if (!g_ptr_array_find(task->mapped_regions, region, NULL)) { |
748 | + g_ptr_array_add(task->mapped_regions, region); | 748 | + g_ptr_array_add(task->mapped_regions, region); |
749 | + memory_region_ref(region); | 749 | + memory_region_ref(region); |
750 | + } | 750 | + } |
751 | + | 751 | + |
752 | + cur_protection = 0; | 752 | + cur_protection = 0; |
753 | + max_protection = 0; | 753 | + max_protection = 0; |
754 | + /* Map guest RAM at range->physicalAddress into PG task memory range */ | 754 | + /* Map guest RAM at range->physicalAddress into PG task memory range */ |
755 | + r = mach_vm_remap(mach_task_self(), | 755 | + r = mach_vm_remap(mach_task_self(), |
756 | + &target, range->physicalLength, vm_page_size - 1, | 756 | + &target, range->physicalLength, vm_page_size - 1, |
757 | + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, | 757 | + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, |
758 | + mach_task_self(), (mach_vm_address_t)source_ptr, | 758 | + mach_task_self(), (mach_vm_address_t)source_ptr, |
759 | + false /* shared mapping, no copy */, | 759 | + false /* shared mapping, no copy */, |
760 | + &cur_protection, &max_protection, | 760 | + &cur_protection, &max_protection, |
761 | + VM_INHERIT_COPY); | 761 | + VM_INHERIT_COPY); |
762 | + trace_apple_gfx_remap(r, source_ptr, target); | 762 | + trace_apple_gfx_remap(r, source_ptr, target); |
763 | + g_assert(r == KERN_SUCCESS); | 763 | + g_assert(r == KERN_SUCCESS); |
764 | + } | 764 | + } |
765 | + | 765 | + |
766 | + return success; | 766 | + return success; |
767 | +} | 767 | +} |
768 | + | 768 | + |
769 | +static void apple_gfx_task_unmap_memory(AppleGFXState *s, PGTask_t *task, | 769 | +static void apple_gfx_task_unmap_memory(AppleGFXState *s, PGTask_t *task, |
770 | + uint64_t virtual_offset, uint64_t length) | 770 | + uint64_t virtual_offset, uint64_t length) |
771 | +{ | 771 | +{ |
772 | + kern_return_t r; | 772 | + kern_return_t r; |
773 | + mach_vm_address_t range_address; | 773 | + mach_vm_address_t range_address; |
774 | + | 774 | + |
775 | + trace_apple_gfx_unmap_memory(task, virtual_offset, length); | 775 | + trace_apple_gfx_unmap_memory(task, virtual_offset, length); |
776 | + | 776 | + |
777 | + /* | 777 | + /* |
778 | + * Replace task memory range with fresh 0 pages, undoing the mapping | 778 | + * Replace task memory range with fresh 0 pages, undoing the mapping |
779 | + * from guest RAM. | 779 | + * from guest RAM. |
780 | + */ | 780 | + */ |
781 | + range_address = task->address + virtual_offset; | 781 | + range_address = task->address + virtual_offset; |
782 | + r = mach_vm_allocate(mach_task_self(), &range_address, length, | 782 | + r = mach_vm_allocate(mach_task_self(), &range_address, length, |
783 | + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE); | 783 | + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE); |
784 | + g_assert(r == KERN_SUCCESS); | 784 | + g_assert(r == KERN_SUCCESS); |
785 | +} | 785 | +} |
786 | + | 786 | + |
787 | +/* ------ Rendering and frame management ------ */ | 787 | +/* ------ Rendering and frame management ------ */ |
788 | + | 788 | + |
789 | +static void apple_gfx_render_frame_completed_bh(void *opaque); | 789 | +static void apple_gfx_render_frame_completed_bh(void *opaque); |
790 | + | 790 | + |
791 | +static void apple_gfx_render_new_frame(AppleGFXState *s) | 791 | +static void apple_gfx_render_new_frame(AppleGFXState *s) |
792 | +{ | 792 | +{ |
793 | + bool managed_texture = s->using_managed_texture_storage; | 793 | + bool managed_texture = s->using_managed_texture_storage; |
794 | + uint32_t width = surface_width(s->surface); | 794 | + uint32_t width = surface_width(s->surface); |
795 | + uint32_t height = surface_height(s->surface); | 795 | + uint32_t height = surface_height(s->surface); |
796 | + MTLRegion region = MTLRegionMake2D(0, 0, width, height); | 796 | + MTLRegion region = MTLRegionMake2D(0, 0, width, height); |
797 | + id<MTLCommandBuffer> command_buffer = [s->mtl_queue commandBuffer]; | 797 | + id<MTLCommandBuffer> command_buffer = [s->mtl_queue commandBuffer]; |
798 | + id<MTLTexture> texture = s->texture; | 798 | + id<MTLTexture> texture = s->texture; |
799 | + | 799 | + |
800 | + assert(bql_locked()); | 800 | + assert(bql_locked()); |
801 | + [texture retain]; | 801 | + [texture retain]; |
802 | + [command_buffer retain]; | 802 | + [command_buffer retain]; |
803 | + | 803 | + |
804 | + s->rendering_frame_width = width; | 804 | + s->rendering_frame_width = width; |
805 | + s->rendering_frame_height = height; | 805 | + s->rendering_frame_height = height; |
806 | + | 806 | + |
807 | + dispatch_async(get_background_queue(), ^{ | 807 | + dispatch_async(get_background_queue(), ^{ |
808 | + /* | 808 | + /* |
809 | + * This is not safe to call from the BQL/BH due to PVG-internal locks | 809 | + * This is not safe to call from the BQL/BH due to PVG-internal locks |
810 | + * causing deadlocks. | 810 | + * causing deadlocks. |
811 | + */ | 811 | + */ |
812 | + bool r = [s->pgdisp encodeCurrentFrameToCommandBuffer:command_buffer | 812 | + bool r = [s->pgdisp encodeCurrentFrameToCommandBuffer:command_buffer |
813 | + texture:texture | 813 | + texture:texture |
814 | + region:region]; | 814 | + region:region]; |
815 | + if (!r) { | 815 | + if (!r) { |
816 | + [texture release]; | 816 | + [texture release]; |
817 | + [command_buffer release]; | 817 | + [command_buffer release]; |
818 | + qemu_log_mask(LOG_GUEST_ERROR, | 818 | + qemu_log_mask(LOG_GUEST_ERROR, |
819 | + "%s: encodeCurrentFrameToCommandBuffer:texture:region: " | 819 | + "%s: encodeCurrentFrameToCommandBuffer:texture:region: " |
820 | + "failed\n", __func__); | 820 | + "failed\n", __func__); |
821 | + bql_lock(); | 821 | + bql_lock(); |
822 | + --s->pending_frames; | 822 | + --s->pending_frames; |
823 | + if (s->pending_frames > 0) { | 823 | + if (s->pending_frames > 0) { |
824 | + apple_gfx_render_new_frame(s); | 824 | + apple_gfx_render_new_frame(s); |
825 | + } | 825 | + } |
826 | + bql_unlock(); | 826 | + bql_unlock(); |
827 | + return; | 827 | + return; |
828 | + } | 828 | + } |
829 | + | 829 | + |
830 | + if (managed_texture) { | 830 | + if (managed_texture) { |
831 | + /* "Managed" textures exist in both VRAM and RAM and must be synced. */ | 831 | + /* "Managed" textures exist in both VRAM and RAM and must be synced. */ |
832 | + id<MTLBlitCommandEncoder> blit = [command_buffer blitCommandEncoder]; | 832 | + id<MTLBlitCommandEncoder> blit = [command_buffer blitCommandEncoder]; |
833 | + [blit synchronizeResource:texture]; | 833 | + [blit synchronizeResource:texture]; |
834 | + [blit endEncoding]; | 834 | + [blit endEncoding]; |
835 | + } | 835 | + } |
836 | + [texture release]; | 836 | + [texture release]; |
837 | + [command_buffer addCompletedHandler: | 837 | + [command_buffer addCompletedHandler: |
838 | + ^(id<MTLCommandBuffer> cb) | 838 | + ^(id<MTLCommandBuffer> cb) |
839 | + { | 839 | + { |
840 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | 840 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), |
841 | + apple_gfx_render_frame_completed_bh, s); | 841 | + apple_gfx_render_frame_completed_bh, s); |
842 | + }]; | 842 | + }]; |
843 | + [command_buffer commit]; | 843 | + [command_buffer commit]; |
844 | + [command_buffer release]; | 844 | + [command_buffer release]; |
845 | + }); | 845 | + }); |
846 | +} | 846 | +} |
847 | + | 847 | + |
848 | +static void copy_mtl_texture_to_surface_mem(id<MTLTexture> texture, void *vram) | 848 | +static void copy_mtl_texture_to_surface_mem(id<MTLTexture> texture, void *vram) |
849 | +{ | 849 | +{ |
850 | + /* | 850 | + /* |
851 | + * TODO: Skip this entirely on a pure Metal or headless/guest-only | 851 | + * TODO: Skip this entirely on a pure Metal or headless/guest-only |
852 | + * rendering path, else use a blit command encoder? Needs careful | 852 | + * rendering path, else use a blit command encoder? Needs careful |
853 | + * (double?) buffering design. | 853 | + * (double?) buffering design. |
854 | + */ | 854 | + */ |
855 | + size_t width = texture.width, height = texture.height; | 855 | + size_t width = texture.width, height = texture.height; |
856 | + MTLRegion region = MTLRegionMake2D(0, 0, width, height); | 856 | + MTLRegion region = MTLRegionMake2D(0, 0, width, height); |
857 | + [texture getBytes:vram | 857 | + [texture getBytes:vram |
858 | + bytesPerRow:(width * 4) | 858 | + bytesPerRow:(width * 4) |
859 | + bytesPerImage:(width * height * 4) | 859 | + bytesPerImage:(width * height * 4) |
860 | + fromRegion:region | 860 | + fromRegion:region |
861 | + mipmapLevel:0 | 861 | + mipmapLevel:0 |
862 | + slice:0]; | 862 | + slice:0]; |
863 | +} | 863 | +} |
864 | + | 864 | + |
865 | +static void apple_gfx_render_frame_completed_bh(void *opaque) | 865 | +static void apple_gfx_render_frame_completed_bh(void *opaque) |
866 | +{ | 866 | +{ |
867 | + AppleGFXState *s = opaque; | 867 | + AppleGFXState *s = opaque; |
868 | + | 868 | + |
869 | + @autoreleasepool { | 869 | + @autoreleasepool { |
870 | + --s->pending_frames; | 870 | + --s->pending_frames; |
871 | + assert(s->pending_frames >= 0); | 871 | + assert(s->pending_frames >= 0); |
872 | + | 872 | + |
873 | + /* Only update display if mode hasn't changed since we started rendering. */ | 873 | + /* Only update display if mode hasn't changed since we started rendering. */ |
874 | + if (s->rendering_frame_width == surface_width(s->surface) && | 874 | + if (s->rendering_frame_width == surface_width(s->surface) && |
875 | + s->rendering_frame_height == surface_height(s->surface)) { | 875 | + s->rendering_frame_height == surface_height(s->surface)) { |
876 | + copy_mtl_texture_to_surface_mem(s->texture, surface_data(s->surface)); | 876 | + copy_mtl_texture_to_surface_mem(s->texture, surface_data(s->surface)); |
877 | + if (s->gfx_update_requested) { | 877 | + if (s->gfx_update_requested) { |
878 | + s->gfx_update_requested = false; | 878 | + s->gfx_update_requested = false; |
879 | + dpy_gfx_update_full(s->con); | 879 | + dpy_gfx_update_full(s->con); |
880 | + graphic_hw_update_done(s->con); | 880 | + graphic_hw_update_done(s->con); |
881 | + s->new_frame_ready = false; | 881 | + s->new_frame_ready = false; |
882 | + } else { | 882 | + } else { |
883 | + s->new_frame_ready = true; | 883 | + s->new_frame_ready = true; |
884 | + } | 884 | + } |
885 | + } | 885 | + } |
886 | + if (s->pending_frames > 0) { | 886 | + if (s->pending_frames > 0) { |
887 | + apple_gfx_render_new_frame(s); | 887 | + apple_gfx_render_new_frame(s); |
888 | + } | 888 | + } |
889 | + } | 889 | + } |
890 | +} | 890 | +} |
891 | + | 891 | + |
892 | +static void apple_gfx_fb_update_display(void *opaque) | 892 | +static void apple_gfx_fb_update_display(void *opaque) |
893 | +{ | 893 | +{ |
894 | + AppleGFXState *s = opaque; | 894 | + AppleGFXState *s = opaque; |
895 | + | 895 | + |
896 | + assert(bql_locked()); | 896 | + assert(bql_locked()); |
897 | + if (s->new_frame_ready) { | 897 | + if (s->new_frame_ready) { |
898 | + dpy_gfx_update_full(s->con); | 898 | + dpy_gfx_update_full(s->con); |
899 | + s->new_frame_ready = false; | 899 | + s->new_frame_ready = false; |
900 | + graphic_hw_update_done(s->con); | 900 | + graphic_hw_update_done(s->con); |
901 | + } else if (s->pending_frames > 0) { | 901 | + } else if (s->pending_frames > 0) { |
902 | + s->gfx_update_requested = true; | 902 | + s->gfx_update_requested = true; |
903 | + } else { | 903 | + } else { |
904 | + graphic_hw_update_done(s->con); | 904 | + graphic_hw_update_done(s->con); |
905 | + } | 905 | + } |
906 | +} | 906 | +} |
907 | + | 907 | + |
908 | +static const GraphicHwOps apple_gfx_fb_ops = { | 908 | +static const GraphicHwOps apple_gfx_fb_ops = { |
909 | + .gfx_update = apple_gfx_fb_update_display, | 909 | + .gfx_update = apple_gfx_fb_update_display, |
910 | + .gfx_update_async = true, | 910 | + .gfx_update_async = true, |
911 | +}; | 911 | +}; |
912 | + | 912 | + |
913 | +/* ------ Mouse cursor and display mode setting ------ */ | 913 | +/* ------ Mouse cursor and display mode setting ------ */ |
914 | + | 914 | + |
915 | +static void set_mode(AppleGFXState *s, uint32_t width, uint32_t height) | 915 | +static void set_mode(AppleGFXState *s, uint32_t width, uint32_t height) |
916 | +{ | 916 | +{ |
917 | + MTLTextureDescriptor *textureDescriptor; | 917 | + MTLTextureDescriptor *textureDescriptor; |
918 | + | 918 | + |
919 | + if (s->surface && | 919 | + if (s->surface && |
920 | + width == surface_width(s->surface) && | 920 | + width == surface_width(s->surface) && |
921 | + height == surface_height(s->surface)) { | 921 | + height == surface_height(s->surface)) { |
922 | + return; | 922 | + return; |
923 | + } | 923 | + } |
924 | + | 924 | + |
925 | + [s->texture release]; | 925 | + [s->texture release]; |
926 | + | 926 | + |
927 | + s->surface = qemu_create_displaysurface(width, height); | 927 | + s->surface = qemu_create_displaysurface(width, height); |
928 | + | 928 | + |
929 | + @autoreleasepool { | 929 | + @autoreleasepool { |
930 | + textureDescriptor = | 930 | + textureDescriptor = |
931 | + [MTLTextureDescriptor | 931 | + [MTLTextureDescriptor |
932 | + texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm | 932 | + texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm |
933 | + width:width | 933 | + width:width |
934 | + height:height | 934 | + height:height |
935 | + mipmapped:NO]; | 935 | + mipmapped:NO]; |
936 | + textureDescriptor.usage = s->pgdisp.minimumTextureUsage; | 936 | + textureDescriptor.usage = s->pgdisp.minimumTextureUsage; |
937 | + s->texture = [s->mtl newTextureWithDescriptor:textureDescriptor]; | 937 | + s->texture = [s->mtl newTextureWithDescriptor:textureDescriptor]; |
938 | + s->using_managed_texture_storage = | 938 | + s->using_managed_texture_storage = |
939 | + (s->texture.storageMode == MTLStorageModeManaged); | 939 | + (s->texture.storageMode == MTLStorageModeManaged); |
940 | + } | 940 | + } |
941 | + | 941 | + |
942 | + dpy_gfx_replace_surface(s->con, s->surface); | 942 | + dpy_gfx_replace_surface(s->con, s->surface); |
943 | +} | 943 | +} |
944 | + | 944 | + |
945 | +static void update_cursor(AppleGFXState *s) | 945 | +static void update_cursor(AppleGFXState *s) |
946 | +{ | 946 | +{ |
947 | + assert(bql_locked()); | 947 | + assert(bql_locked()); |
948 | + dpy_mouse_set(s->con, s->pgdisp.cursorPosition.x, | 948 | + dpy_mouse_set(s->con, s->pgdisp.cursorPosition.x, |
949 | + s->pgdisp.cursorPosition.y, qatomic_read(&s->cursor_show)); | 949 | + s->pgdisp.cursorPosition.y, qatomic_read(&s->cursor_show)); |
950 | +} | 950 | +} |
951 | + | 951 | + |
952 | +static void update_cursor_bh(void *opaque) | 952 | +static void update_cursor_bh(void *opaque) |
953 | +{ | 953 | +{ |
954 | + AppleGFXState *s = opaque; | 954 | + AppleGFXState *s = opaque; |
955 | + update_cursor(s); | 955 | + update_cursor(s); |
956 | +} | 956 | +} |
957 | + | 957 | + |
958 | +typedef struct AppleGFXSetCursorGlyphJob { | 958 | +typedef struct AppleGFXSetCursorGlyphJob { |
959 | + AppleGFXState *s; | 959 | + AppleGFXState *s; |
960 | + NSBitmapImageRep *glyph; | 960 | + NSBitmapImageRep *glyph; |
961 | + PGDisplayCoord_t hotspot; | 961 | + PGDisplayCoord_t hotspot; |
962 | +} AppleGFXSetCursorGlyphJob; | 962 | +} AppleGFXSetCursorGlyphJob; |
963 | + | 963 | + |
964 | +static void set_cursor_glyph(void *opaque) | 964 | +static void set_cursor_glyph(void *opaque) |
965 | +{ | 965 | +{ |
966 | + AppleGFXSetCursorGlyphJob *job = opaque; | 966 | + AppleGFXSetCursorGlyphJob *job = opaque; |
967 | + AppleGFXState *s = job->s; | 967 | + AppleGFXState *s = job->s; |
968 | + NSBitmapImageRep *glyph = job->glyph; | 968 | + NSBitmapImageRep *glyph = job->glyph; |
969 | + uint32_t bpp = glyph.bitsPerPixel; | 969 | + uint32_t bpp = glyph.bitsPerPixel; |
970 | + size_t width = glyph.pixelsWide; | 970 | + size_t width = glyph.pixelsWide; |
971 | + size_t height = glyph.pixelsHigh; | 971 | + size_t height = glyph.pixelsHigh; |
972 | + size_t padding_bytes_per_row = glyph.bytesPerRow - width * 4; | 972 | + size_t padding_bytes_per_row = glyph.bytesPerRow - width * 4; |
973 | + const uint8_t* px_data = glyph.bitmapData; | 973 | + const uint8_t* px_data = glyph.bitmapData; |
974 | + | 974 | + |
975 | + trace_apple_gfx_cursor_set(bpp, width, height); | 975 | + trace_apple_gfx_cursor_set(bpp, width, height); |
976 | + | 976 | + |
977 | + if (s->cursor) { | 977 | + if (s->cursor) { |
978 | + cursor_unref(s->cursor); | 978 | + cursor_unref(s->cursor); |
979 | + s->cursor = NULL; | 979 | + s->cursor = NULL; |
980 | + } | 980 | + } |
981 | + | 981 | + |
982 | + if (bpp == 32) { /* Shouldn't be anything else, but just to be safe...*/ | 982 | + if (bpp == 32) { /* Shouldn't be anything else, but just to be safe...*/ |
983 | + s->cursor = cursor_alloc(width, height); | 983 | + s->cursor = cursor_alloc(width, height); |
984 | + s->cursor->hot_x = job->hotspot.x; | 984 | + s->cursor->hot_x = job->hotspot.x; |
985 | + s->cursor->hot_y = job->hotspot.y; | 985 | + s->cursor->hot_y = job->hotspot.y; |
986 | + | 986 | + |
987 | + uint32_t *dest_px = s->cursor->data; | 987 | + uint32_t *dest_px = s->cursor->data; |
988 | + | 988 | + |
989 | + for (size_t y = 0; y < height; ++y) { | 989 | + for (size_t y = 0; y < height; ++y) { |
990 | + for (size_t x = 0; x < width; ++x) { | 990 | + for (size_t x = 0; x < width; ++x) { |
991 | + /* | 991 | + /* |
992 | + * NSBitmapImageRep's red & blue channels are swapped | 992 | + * NSBitmapImageRep's red & blue channels are swapped |
993 | + * compared to QEMUCursor's. | 993 | + * compared to QEMUCursor's. |
994 | + */ | 994 | + */ |
995 | + *dest_px = | 995 | + *dest_px = |
996 | + (px_data[0] << 16u) | | 996 | + (px_data[0] << 16u) | |
997 | + (px_data[1] << 8u) | | 997 | + (px_data[1] << 8u) | |
998 | + (px_data[2] << 0u) | | 998 | + (px_data[2] << 0u) | |
999 | + (px_data[3] << 24u); | 999 | + (px_data[3] << 24u); |
1000 | + ++dest_px; | 1000 | + ++dest_px; |
1001 | + px_data += 4; | 1001 | + px_data += 4; |
1002 | + } | 1002 | + } |
1003 | + px_data += padding_bytes_per_row; | 1003 | + px_data += padding_bytes_per_row; |
1004 | + } | 1004 | + } |
1005 | + dpy_cursor_define(s->con, s->cursor); | 1005 | + dpy_cursor_define(s->con, s->cursor); |
1006 | + update_cursor(s); | 1006 | + update_cursor(s); |
1007 | + } | 1007 | + } |
1008 | + [glyph release]; | 1008 | + [glyph release]; |
1009 | + | 1009 | + |
1010 | + g_free(job); | 1010 | + g_free(job); |
1011 | +} | 1011 | +} |
1012 | + | 1012 | + |
1013 | +/* ------ DMA (device reading system memory) ------ */ | 1013 | +/* ------ DMA (device reading system memory) ------ */ |
1014 | + | 1014 | + |
1015 | +typedef struct AppleGFXReadMemoryJob { | 1015 | +typedef struct AppleGFXReadMemoryJob { |
1016 | + QemuSemaphore sem; | 1016 | + QemuSemaphore sem; |
1017 | + hwaddr physical_address; | 1017 | + hwaddr physical_address; |
1018 | + uint64_t length; | 1018 | + uint64_t length; |
1019 | + void *dst; | 1019 | + void *dst; |
1020 | + bool success; | 1020 | + bool success; |
1021 | +} AppleGFXReadMemoryJob; | 1021 | +} AppleGFXReadMemoryJob; |
1022 | + | 1022 | + |
1023 | +static void apple_gfx_do_read_memory(void *opaque) | 1023 | +static void apple_gfx_do_read_memory(void *opaque) |
1024 | +{ | 1024 | +{ |
1025 | + AppleGFXReadMemoryJob *job = opaque; | 1025 | + AppleGFXReadMemoryJob *job = opaque; |
1026 | + MemTxResult r; | 1026 | + MemTxResult r; |
1027 | + | 1027 | + |
1028 | + r = dma_memory_read(&address_space_memory, job->physical_address, | 1028 | + r = dma_memory_read(&address_space_memory, job->physical_address, |
1029 | + job->dst, job->length, MEMTXATTRS_UNSPECIFIED); | 1029 | + job->dst, job->length, MEMTXATTRS_UNSPECIFIED); |
1030 | + job->success = r == MEMTX_OK; | 1030 | + job->success = r == MEMTX_OK; |
1031 | + | 1031 | + |
1032 | + qemu_sem_post(&job->sem); | 1032 | + qemu_sem_post(&job->sem); |
1033 | +} | 1033 | +} |
1034 | + | 1034 | + |
1035 | +static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address, | 1035 | +static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address, |
1036 | + uint64_t length, void *dst) | 1036 | + uint64_t length, void *dst) |
1037 | +{ | 1037 | +{ |
1038 | + AppleGFXReadMemoryJob job = { | 1038 | + AppleGFXReadMemoryJob job = { |
1039 | + .physical_address = physical_address, .length = length, .dst = dst | 1039 | + .physical_address = physical_address, .length = length, .dst = dst |
1040 | + }; | 1040 | + }; |
1041 | + | 1041 | + |
1042 | + trace_apple_gfx_read_memory(physical_address, length, dst); | 1042 | + trace_apple_gfx_read_memory(physical_address, length, dst); |
1043 | + | 1043 | + |
1044 | + /* Performing DMA requires BQL, so do it in a BH. */ | 1044 | + /* Performing DMA requires BQL, so do it in a BH. */ |
1045 | + qemu_sem_init(&job.sem, 0); | 1045 | + qemu_sem_init(&job.sem, 0); |
1046 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | 1046 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), |
1047 | + apple_gfx_do_read_memory, &job); | 1047 | + apple_gfx_do_read_memory, &job); |
1048 | + qemu_sem_wait(&job.sem); | 1048 | + qemu_sem_wait(&job.sem); |
1049 | + qemu_sem_destroy(&job.sem); | 1049 | + qemu_sem_destroy(&job.sem); |
1050 | + return job.success; | 1050 | + return job.success; |
1051 | +} | 1051 | +} |
1052 | + | 1052 | + |
1053 | +/* ------ Memory-mapped device I/O operations ------ */ | 1053 | +/* ------ Memory-mapped device I/O operations ------ */ |
1054 | + | 1054 | + |
1055 | +typedef struct AppleGFXIOJob { | 1055 | +typedef struct AppleGFXIOJob { |
1056 | + AppleGFXState *state; | 1056 | + AppleGFXState *state; |
1057 | + uint64_t offset; | 1057 | + uint64_t offset; |
1058 | + uint64_t value; | 1058 | + uint64_t value; |
1059 | + bool completed; | 1059 | + bool completed; |
1060 | +} AppleGFXIOJob; | 1060 | +} AppleGFXIOJob; |
1061 | + | 1061 | + |
1062 | +static void apple_gfx_do_read(void *opaque) | 1062 | +static void apple_gfx_do_read(void *opaque) |
1063 | +{ | 1063 | +{ |
1064 | + AppleGFXIOJob *job = opaque; | 1064 | + AppleGFXIOJob *job = opaque; |
1065 | + job->value = [job->state->pgdev mmioReadAtOffset:job->offset]; | 1065 | + job->value = [job->state->pgdev mmioReadAtOffset:job->offset]; |
1066 | + qatomic_set(&job->completed, true); | 1066 | + qatomic_set(&job->completed, true); |
1067 | + aio_wait_kick(); | 1067 | + aio_wait_kick(); |
1068 | +} | 1068 | +} |
1069 | + | 1069 | + |
1070 | +static uint64_t apple_gfx_read(void *opaque, hwaddr offset, unsigned size) | 1070 | +static uint64_t apple_gfx_read(void *opaque, hwaddr offset, unsigned size) |
1071 | +{ | 1071 | +{ |
1072 | + AppleGFXIOJob job = { | 1072 | + AppleGFXIOJob job = { |
1073 | + .state = opaque, | 1073 | + .state = opaque, |
1074 | + .offset = offset, | 1074 | + .offset = offset, |
1075 | + .completed = false, | 1075 | + .completed = false, |
1076 | + }; | 1076 | + }; |
1077 | + dispatch_queue_t queue = get_background_queue(); | 1077 | + dispatch_queue_t queue = get_background_queue(); |
1078 | + | 1078 | + |
1079 | + dispatch_async_f(queue, &job, apple_gfx_do_read); | 1079 | + dispatch_async_f(queue, &job, apple_gfx_do_read); |
1080 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); | 1080 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); |
1081 | + | 1081 | + |
1082 | + trace_apple_gfx_read(offset, job.value); | 1082 | + trace_apple_gfx_read(offset, job.value); |
1083 | + return job.value; | 1083 | + return job.value; |
1084 | +} | 1084 | +} |
1085 | + | 1085 | + |
1086 | +static void apple_gfx_do_write(void *opaque) | 1086 | +static void apple_gfx_do_write(void *opaque) |
1087 | +{ | 1087 | +{ |
1088 | + AppleGFXIOJob *job = opaque; | 1088 | + AppleGFXIOJob *job = opaque; |
1089 | + [job->state->pgdev mmioWriteAtOffset:job->offset value:job->value]; | 1089 | + [job->state->pgdev mmioWriteAtOffset:job->offset value:job->value]; |
1090 | + qatomic_set(&job->completed, true); | 1090 | + qatomic_set(&job->completed, true); |
1091 | + aio_wait_kick(); | 1091 | + aio_wait_kick(); |
1092 | +} | 1092 | +} |
1093 | + | 1093 | + |
1094 | +static void apple_gfx_write(void *opaque, hwaddr offset, uint64_t val, | 1094 | +static void apple_gfx_write(void *opaque, hwaddr offset, uint64_t val, |
1095 | + unsigned size) | 1095 | + unsigned size) |
1096 | +{ | 1096 | +{ |
1097 | + /* | 1097 | + /* |
1098 | + * The methods mmioReadAtOffset: and especially mmioWriteAtOffset: can | 1098 | + * The methods mmioReadAtOffset: and especially mmioWriteAtOffset: can |
1099 | + * trigger synchronous operations on other dispatch queues, which in turn | 1099 | + * trigger synchronous operations on other dispatch queues, which in turn |
1100 | + * may call back out on one or more of the callback blocks. For this reason, | 1100 | + * may call back out on one or more of the callback blocks. For this reason, |
1101 | + * and as we are holding the BQL, we invoke the I/O methods on a pool | 1101 | + * and as we are holding the BQL, we invoke the I/O methods on a pool |
1102 | + * thread and handle AIO tasks while we wait. Any work in the callbacks | 1102 | + * thread and handle AIO tasks while we wait. Any work in the callbacks |
1103 | + * requiring the BQL will in turn schedule BHs which this thread will | 1103 | + * requiring the BQL will in turn schedule BHs which this thread will |
1104 | + * process while waiting. | 1104 | + * process while waiting. |
1105 | + */ | 1105 | + */ |
1106 | + AppleGFXIOJob job = { | 1106 | + AppleGFXIOJob job = { |
1107 | + .state = opaque, | 1107 | + .state = opaque, |
1108 | + .offset = offset, | 1108 | + .offset = offset, |
1109 | + .value = val, | 1109 | + .value = val, |
1110 | + .completed = false, | 1110 | + .completed = false, |
1111 | + }; | 1111 | + }; |
1112 | + dispatch_queue_t queue = get_background_queue(); | 1112 | + dispatch_queue_t queue = get_background_queue(); |
1113 | + | 1113 | + |
1114 | + dispatch_async_f(queue, &job, apple_gfx_do_write); | 1114 | + dispatch_async_f(queue, &job, apple_gfx_do_write); |
1115 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); | 1115 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); |
1116 | + | 1116 | + |
1117 | + trace_apple_gfx_write(offset, val); | 1117 | + trace_apple_gfx_write(offset, val); |
1118 | +} | 1118 | +} |
1119 | + | 1119 | + |
1120 | +static const MemoryRegionOps apple_gfx_ops = { | 1120 | +static const MemoryRegionOps apple_gfx_ops = { |
1121 | + .read = apple_gfx_read, | 1121 | + .read = apple_gfx_read, |
1122 | + .write = apple_gfx_write, | 1122 | + .write = apple_gfx_write, |
1123 | + .endianness = DEVICE_LITTLE_ENDIAN, | 1123 | + .endianness = DEVICE_LITTLE_ENDIAN, |
1124 | + .valid = { | 1124 | + .valid = { |
1125 | + .min_access_size = 4, | 1125 | + .min_access_size = 4, |
1126 | + .max_access_size = 8, | 1126 | + .max_access_size = 8, |
1127 | + }, | 1127 | + }, |
1128 | + .impl = { | 1128 | + .impl = { |
1129 | + .min_access_size = 4, | 1129 | + .min_access_size = 4, |
1130 | + .max_access_size = 4, | 1130 | + .max_access_size = 4, |
1131 | + }, | 1131 | + }, |
1132 | +}; | 1132 | +}; |
1133 | + | 1133 | + |
1134 | +static size_t apple_gfx_get_default_mmio_range_size(void) | 1134 | +static size_t apple_gfx_get_default_mmio_range_size(void) |
1135 | +{ | 1135 | +{ |
1136 | + size_t mmio_range_size; | 1136 | + size_t mmio_range_size; |
1137 | + @autoreleasepool { | 1137 | + @autoreleasepool { |
1138 | + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; | 1138 | + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; |
1139 | + mmio_range_size = desc.mmioLength; | 1139 | + mmio_range_size = desc.mmioLength; |
1140 | + [desc release]; | 1140 | + [desc release]; |
1141 | + } | 1141 | + } |
1142 | + return mmio_range_size; | 1142 | + return mmio_range_size; |
1143 | +} | 1143 | +} |
1144 | + | 1144 | + |
1145 | +/* ------ Initialisation and startup ------ */ | 1145 | +/* ------ Initialisation and startup ------ */ |
1146 | + | 1146 | + |
1147 | +void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name) | 1147 | +void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name) |
1148 | +{ | 1148 | +{ |
1149 | + size_t mmio_range_size = apple_gfx_get_default_mmio_range_size(); | 1149 | + size_t mmio_range_size = apple_gfx_get_default_mmio_range_size(); |
1150 | + | 1150 | + |
1151 | + trace_apple_gfx_common_init(obj_name, mmio_range_size); | 1151 | + trace_apple_gfx_common_init(obj_name, mmio_range_size); |
1152 | + memory_region_init_io(&s->iomem_gfx, obj, &apple_gfx_ops, s, obj_name, | 1152 | + memory_region_init_io(&s->iomem_gfx, obj, &apple_gfx_ops, s, obj_name, |
1153 | + mmio_range_size); | 1153 | + mmio_range_size); |
1154 | + | 1154 | + |
1155 | + /* TODO: PVG framework supports serialising device state: integrate it! */ | 1155 | + /* TODO: PVG framework supports serialising device state: integrate it! */ |
1156 | +} | 1156 | +} |
1157 | + | 1157 | + |
1158 | +static void apple_gfx_register_task_mapping_handlers(AppleGFXState *s, | 1158 | +static void apple_gfx_register_task_mapping_handlers(AppleGFXState *s, |
1159 | + PGDeviceDescriptor *desc) | 1159 | + PGDeviceDescriptor *desc) |
1160 | +{ | 1160 | +{ |
1161 | + desc.createTask = ^(uint64_t vmSize, void * _Nullable * _Nonnull baseAddress) { | 1161 | + desc.createTask = ^(uint64_t vmSize, void * _Nullable * _Nonnull baseAddress) { |
1162 | + PGTask_t *task = apple_gfx_new_task(s, vmSize); | 1162 | + PGTask_t *task = apple_gfx_new_task(s, vmSize); |
1163 | + *baseAddress = (void *)task->address; | 1163 | + *baseAddress = (void *)task->address; |
1164 | + trace_apple_gfx_create_task(vmSize, *baseAddress); | 1164 | + trace_apple_gfx_create_task(vmSize, *baseAddress); |
1165 | + return task; | 1165 | + return task; |
1166 | + }; | 1166 | + }; |
1167 | + | 1167 | + |
1168 | + desc.destroyTask = ^(PGTask_t * _Nonnull task) { | 1168 | + desc.destroyTask = ^(PGTask_t * _Nonnull task) { |
1169 | + trace_apple_gfx_destroy_task(task, task->mapped_regions->len); | 1169 | + trace_apple_gfx_destroy_task(task, task->mapped_regions->len); |
1170 | + | 1170 | + |
1171 | + apple_gfx_destroy_task(s, task); | 1171 | + apple_gfx_destroy_task(s, task); |
1172 | + }; | 1172 | + }; |
1173 | + | 1173 | + |
1174 | + desc.mapMemory = ^bool(PGTask_t * _Nonnull task, uint32_t range_count, | 1174 | + desc.mapMemory = ^bool(PGTask_t * _Nonnull task, uint32_t range_count, |
1175 | + uint64_t virtual_offset, bool read_only, | 1175 | + uint64_t virtual_offset, bool read_only, |
1176 | + PGPhysicalMemoryRange_t * _Nonnull ranges) { | 1176 | + PGPhysicalMemoryRange_t * _Nonnull ranges) { |
1177 | + return apple_gfx_task_map_memory(s, task, virtual_offset, | 1177 | + return apple_gfx_task_map_memory(s, task, virtual_offset, |
1178 | + ranges, range_count, read_only); | 1178 | + ranges, range_count, read_only); |
1179 | + }; | 1179 | + }; |
1180 | + | 1180 | + |
1181 | + desc.unmapMemory = ^bool(PGTask_t * _Nonnull task, uint64_t virtual_offset, | 1181 | + desc.unmapMemory = ^bool(PGTask_t * _Nonnull task, uint64_t virtual_offset, |
1182 | + uint64_t length) { | 1182 | + uint64_t length) { |
1183 | + apple_gfx_task_unmap_memory(s, task, virtual_offset, length); | 1183 | + apple_gfx_task_unmap_memory(s, task, virtual_offset, length); |
1184 | + return true; | 1184 | + return true; |
1185 | + }; | 1185 | + }; |
1186 | + | 1186 | + |
1187 | + desc.readMemory = ^bool(uint64_t physical_address, uint64_t length, | 1187 | + desc.readMemory = ^bool(uint64_t physical_address, uint64_t length, |
1188 | + void * _Nonnull dst) { | 1188 | + void * _Nonnull dst) { |
1189 | + return apple_gfx_read_memory(s, physical_address, length, dst); | 1189 | + return apple_gfx_read_memory(s, physical_address, length, dst); |
1190 | + }; | 1190 | + }; |
1191 | +} | 1191 | +} |
1192 | + | 1192 | + |
1193 | +static void new_frame_handler_bh(void *opaque) | 1193 | +static void new_frame_handler_bh(void *opaque) |
1194 | +{ | 1194 | +{ |
1195 | + AppleGFXState *s = opaque; | 1195 | + AppleGFXState *s = opaque; |
1196 | + | 1196 | + |
1197 | + /* Drop frames if guest gets too far ahead. */ | 1197 | + /* Drop frames if guest gets too far ahead. */ |
1198 | + if (s->pending_frames >= 2) { | 1198 | + if (s->pending_frames >= 2) { |
1199 | + return; | 1199 | + return; |
1200 | + } | 1200 | + } |
1201 | + ++s->pending_frames; | 1201 | + ++s->pending_frames; |
1202 | + if (s->pending_frames > 1) { | 1202 | + if (s->pending_frames > 1) { |
1203 | + return; | 1203 | + return; |
1204 | + } | 1204 | + } |
1205 | + | 1205 | + |
1206 | + @autoreleasepool { | 1206 | + @autoreleasepool { |
1207 | + apple_gfx_render_new_frame(s); | 1207 | + apple_gfx_render_new_frame(s); |
1208 | + } | 1208 | + } |
1209 | +} | 1209 | +} |
1210 | + | 1210 | + |
1211 | +static PGDisplayDescriptor *apple_gfx_prepare_display_descriptor(AppleGFXState *s) | 1211 | +static PGDisplayDescriptor *apple_gfx_prepare_display_descriptor(AppleGFXState *s) |
1212 | +{ | 1212 | +{ |
1213 | + PGDisplayDescriptor *disp_desc = [PGDisplayDescriptor new]; | 1213 | + PGDisplayDescriptor *disp_desc = [PGDisplayDescriptor new]; |
1214 | + | 1214 | + |
1215 | + disp_desc.name = @"QEMU display"; | 1215 | + disp_desc.name = @"QEMU display"; |
1216 | + disp_desc.sizeInMillimeters = NSMakeSize(400., 300.); /* A 20" display */ | 1216 | + disp_desc.sizeInMillimeters = NSMakeSize(400., 300.); /* A 20" display */ |
1217 | + disp_desc.queue = dispatch_get_main_queue(); | 1217 | + disp_desc.queue = dispatch_get_main_queue(); |
1218 | + disp_desc.newFrameEventHandler = ^(void) { | 1218 | + disp_desc.newFrameEventHandler = ^(void) { |
1219 | + trace_apple_gfx_new_frame(); | 1219 | + trace_apple_gfx_new_frame(); |
1220 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), new_frame_handler_bh, s); | 1220 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), new_frame_handler_bh, s); |
1221 | + }; | 1221 | + }; |
1222 | + disp_desc.modeChangeHandler = ^(PGDisplayCoord_t sizeInPixels, | 1222 | + disp_desc.modeChangeHandler = ^(PGDisplayCoord_t sizeInPixels, |
1223 | + OSType pixelFormat) { | 1223 | + OSType pixelFormat) { |
1224 | + trace_apple_gfx_mode_change(sizeInPixels.x, sizeInPixels.y); | 1224 | + trace_apple_gfx_mode_change(sizeInPixels.x, sizeInPixels.y); |
1225 | + | 1225 | + |
1226 | + BQL_LOCK_GUARD(); | 1226 | + BQL_LOCK_GUARD(); |
1227 | + set_mode(s, sizeInPixels.x, sizeInPixels.y); | 1227 | + set_mode(s, sizeInPixels.x, sizeInPixels.y); |
1228 | + }; | 1228 | + }; |
1229 | + disp_desc.cursorGlyphHandler = ^(NSBitmapImageRep *glyph, | 1229 | + disp_desc.cursorGlyphHandler = ^(NSBitmapImageRep *glyph, |
1230 | + PGDisplayCoord_t hotspot) { | 1230 | + PGDisplayCoord_t hotspot) { |
1231 | + AppleGFXSetCursorGlyphJob *job = g_malloc0(sizeof(*job)); | 1231 | + AppleGFXSetCursorGlyphJob *job = g_malloc0(sizeof(*job)); |
1232 | + job->s = s; | 1232 | + job->s = s; |
1233 | + job->glyph = glyph; | 1233 | + job->glyph = glyph; |
1234 | + job->hotspot = hotspot; | 1234 | + job->hotspot = hotspot; |
1235 | + [glyph retain]; | 1235 | + [glyph retain]; |
1236 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | 1236 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), |
1237 | + set_cursor_glyph, job); | 1237 | + set_cursor_glyph, job); |
1238 | + }; | 1238 | + }; |
1239 | + disp_desc.cursorShowHandler = ^(BOOL show) { | 1239 | + disp_desc.cursorShowHandler = ^(BOOL show) { |
1240 | + trace_apple_gfx_cursor_show(show); | 1240 | + trace_apple_gfx_cursor_show(show); |
1241 | + qatomic_set(&s->cursor_show, show); | 1241 | + qatomic_set(&s->cursor_show, show); |
1242 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | 1242 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), |
1243 | + update_cursor_bh, s); | 1243 | + update_cursor_bh, s); |
1244 | + }; | 1244 | + }; |
1245 | + disp_desc.cursorMoveHandler = ^(void) { | 1245 | + disp_desc.cursorMoveHandler = ^(void) { |
1246 | + trace_apple_gfx_cursor_move(); | 1246 | + trace_apple_gfx_cursor_move(); |
1247 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | 1247 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), |
1248 | + update_cursor_bh, s); | 1248 | + update_cursor_bh, s); |
1249 | + }; | 1249 | + }; |
1250 | + | 1250 | + |
1251 | + return disp_desc; | 1251 | + return disp_desc; |
1252 | +} | 1252 | +} |
1253 | + | 1253 | + |
1254 | +static NSArray<PGDisplayMode*>* apple_gfx_prepare_display_mode_array(void) | 1254 | +static NSArray<PGDisplayMode*>* apple_gfx_prepare_display_mode_array(void) |
1255 | +{ | 1255 | +{ |
1256 | + PGDisplayMode *modes[ARRAY_SIZE(apple_gfx_modes)]; | 1256 | + PGDisplayMode *modes[ARRAY_SIZE(apple_gfx_modes)]; |
1257 | + NSArray<PGDisplayMode*>* mode_array; | 1257 | + NSArray<PGDisplayMode*>* mode_array; |
1258 | + int i; | 1258 | + int i; |
1259 | + | 1259 | + |
1260 | + for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { | 1260 | + for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { |
1261 | + modes[i] = | 1261 | + modes[i] = |
1262 | + [[PGDisplayMode alloc] initWithSizeInPixels:apple_gfx_modes[i] refreshRateInHz:60.]; | 1262 | + [[PGDisplayMode alloc] initWithSizeInPixels:apple_gfx_modes[i] refreshRateInHz:60.]; |
1263 | + } | 1263 | + } |
1264 | + | 1264 | + |
1265 | + mode_array = [NSArray arrayWithObjects:modes count:ARRAY_SIZE(apple_gfx_modes)]; | 1265 | + mode_array = [NSArray arrayWithObjects:modes count:ARRAY_SIZE(apple_gfx_modes)]; |
1266 | + | 1266 | + |
1267 | + for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { | 1267 | + for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { |
1268 | + [modes[i] release]; | 1268 | + [modes[i] release]; |
1269 | + modes[i] = nil; | 1269 | + modes[i] = nil; |
1270 | + } | 1270 | + } |
1271 | + | 1271 | + |
1272 | + return mode_array; | 1272 | + return mode_array; |
1273 | +} | 1273 | +} |
1274 | + | 1274 | + |
1275 | +static id<MTLDevice> copy_suitable_metal_device(void) | 1275 | +static id<MTLDevice> copy_suitable_metal_device(void) |
1276 | +{ | 1276 | +{ |
1277 | + id<MTLDevice> dev = nil; | 1277 | + id<MTLDevice> dev = nil; |
1278 | + NSArray<id<MTLDevice>> *devs = MTLCopyAllDevices(); | 1278 | + NSArray<id<MTLDevice>> *devs = MTLCopyAllDevices(); |
1279 | + | 1279 | + |
1280 | + /* Prefer a unified memory GPU. Failing that, pick a non-removable GPU. */ | 1280 | + /* Prefer a unified memory GPU. Failing that, pick a non-removable GPU. */ |
1281 | + for (size_t i = 0; i < devs.count; ++i) { | 1281 | + for (size_t i = 0; i < devs.count; ++i) { |
1282 | + if (devs[i].hasUnifiedMemory) { | 1282 | + if (devs[i].hasUnifiedMemory) { |
1283 | + dev = devs[i]; | 1283 | + dev = devs[i]; |
1284 | + break; | 1284 | + break; |
1285 | + } | 1285 | + } |
1286 | + if (!devs[i].removable) { | 1286 | + if (!devs[i].removable) { |
1287 | + dev = devs[i]; | 1287 | + dev = devs[i]; |
1288 | + } | 1288 | + } |
1289 | + } | 1289 | + } |
1290 | + | 1290 | + |
1291 | + if (dev != nil) { | 1291 | + if (dev != nil) { |
1292 | + [dev retain]; | 1292 | + [dev retain]; |
1293 | + } else { | 1293 | + } else { |
1294 | + dev = MTLCreateSystemDefaultDevice(); | 1294 | + dev = MTLCreateSystemDefaultDevice(); |
1295 | + } | 1295 | + } |
1296 | + [devs release]; | 1296 | + [devs release]; |
1297 | + | 1297 | + |
1298 | + return dev; | 1298 | + return dev; |
1299 | +} | 1299 | +} |
1300 | + | 1300 | + |
1301 | +bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, | 1301 | +bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, |
1302 | + PGDeviceDescriptor *desc, Error **errp) | 1302 | + PGDeviceDescriptor *desc, Error **errp) |
1303 | +{ | 1303 | +{ |
1304 | + PGDisplayDescriptor *disp_desc; | 1304 | + PGDisplayDescriptor *disp_desc; |
1305 | + | 1305 | + |
1306 | + if (apple_gfx_mig_blocker == NULL) { | 1306 | + if (apple_gfx_mig_blocker == NULL) { |
1307 | + error_setg(&apple_gfx_mig_blocker, | 1307 | + error_setg(&apple_gfx_mig_blocker, |
1308 | + "Migration state blocked by apple-gfx display device"); | 1308 | + "Migration state blocked by apple-gfx display device"); |
1309 | + if (migrate_add_blocker(&apple_gfx_mig_blocker, errp) < 0) { | 1309 | + if (migrate_add_blocker(&apple_gfx_mig_blocker, errp) < 0) { |
1310 | + return false; | 1310 | + return false; |
1311 | + } | 1311 | + } |
1312 | + } | 1312 | + } |
1313 | + | 1313 | + |
1314 | + qemu_mutex_init(&s->task_mutex); | 1314 | + qemu_mutex_init(&s->task_mutex); |
1315 | + QTAILQ_INIT(&s->tasks); | 1315 | + QTAILQ_INIT(&s->tasks); |
1316 | + s->mtl = copy_suitable_metal_device(); | 1316 | + s->mtl = copy_suitable_metal_device(); |
1317 | + s->mtl_queue = [s->mtl newCommandQueue]; | 1317 | + s->mtl_queue = [s->mtl newCommandQueue]; |
1318 | + | 1318 | + |
1319 | + desc.device = s->mtl; | 1319 | + desc.device = s->mtl; |
1320 | + | 1320 | + |
1321 | + apple_gfx_register_task_mapping_handlers(s, desc); | 1321 | + apple_gfx_register_task_mapping_handlers(s, desc); |
1322 | + | 1322 | + |
1323 | + s->cursor_show = true; | 1323 | + s->cursor_show = true; |
1324 | + | 1324 | + |
1325 | + s->pgdev = PGNewDeviceWithDescriptor(desc); | 1325 | + s->pgdev = PGNewDeviceWithDescriptor(desc); |
1326 | + | 1326 | + |
1327 | + disp_desc = apple_gfx_prepare_display_descriptor(s); | 1327 | + disp_desc = apple_gfx_prepare_display_descriptor(s); |
1328 | + /* | 1328 | + /* |
1329 | + * Although the framework does, this integration currently does not support | 1329 | + * Although the framework does, this integration currently does not support |
1330 | + * multiple virtual displays connected to a single PV graphics device. | 1330 | + * multiple virtual displays connected to a single PV graphics device. |
1331 | + * It is however possible to create | 1331 | + * It is however possible to create |
1332 | + * more than one instance of the device, each with one display. The macOS | 1332 | + * more than one instance of the device, each with one display. The macOS |
1333 | + * guest will ignore these displays if they share the same serial number, | 1333 | + * guest will ignore these displays if they share the same serial number, |
1334 | + * so ensure each instance gets a unique one. | 1334 | + * so ensure each instance gets a unique one. |
1335 | + */ | 1335 | + */ |
1336 | + s->pgdisp = [s->pgdev newDisplayWithDescriptor:disp_desc | 1336 | + s->pgdisp = [s->pgdev newDisplayWithDescriptor:disp_desc |
1337 | + port:0 | 1337 | + port:0 |
1338 | + serialNum:next_pgdisplay_serial_num++]; | 1338 | + serialNum:next_pgdisplay_serial_num++]; |
1339 | + [disp_desc release]; | 1339 | + [disp_desc release]; |
1340 | + s->pgdisp.modeList = apple_gfx_prepare_display_mode_array(); | 1340 | + s->pgdisp.modeList = apple_gfx_prepare_display_mode_array(); |
1341 | + | 1341 | + |
1342 | + s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s); | 1342 | + s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s); |
1343 | + return true; | 1343 | + return true; |
1344 | +} | 1344 | +} |
1345 | diff --git a/hw/display/meson.build b/hw/display/meson.build | 1345 | diff --git a/hw/display/meson.build b/hw/display/meson.build |
1346 | index XXXXXXX..XXXXXXX 100644 | 1346 | index XXXXXXX..XXXXXXX 100644 |
1347 | --- a/hw/display/meson.build | 1347 | --- a/hw/display/meson.build |
1348 | +++ b/hw/display/meson.build | 1348 | +++ b/hw/display/meson.build |
1349 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c')) | 1349 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c')) |
1350 | 1350 | ||
1351 | system_ss.add(when: 'CONFIG_ATI_VGA', if_true: [files('ati.c', 'ati_2d.c', 'ati_dbg.c'), pixman]) | 1351 | system_ss.add(when: 'CONFIG_ATI_VGA', if_true: [files('ati.c', 'ati_2d.c', 'ati_dbg.c'), pixman]) |
1352 | 1352 | ||
1353 | +if host_os == 'darwin' | 1353 | +if host_os == 'darwin' |
1354 | + system_ss.add(when: 'CONFIG_MAC_PVG', if_true: [files('apple-gfx.m'), pvg, metal]) | 1354 | + system_ss.add(when: 'CONFIG_MAC_PVG', if_true: [files('apple-gfx.m'), pvg, metal]) |
1355 | + if cpu == 'aarch64' | 1355 | + if cpu == 'aarch64' |
1356 | + system_ss.add(when: 'CONFIG_MAC_PVG_MMIO', if_true: [files('apple-gfx-mmio.m'), pvg, metal]) | 1356 | + system_ss.add(when: 'CONFIG_MAC_PVG_MMIO', if_true: [files('apple-gfx-mmio.m'), pvg, metal]) |
1357 | + endif | 1357 | + endif |
1358 | +endif | 1358 | +endif |
1359 | 1359 | ||
1360 | if config_all_devices.has_key('CONFIG_VIRTIO_GPU') | 1360 | if config_all_devices.has_key('CONFIG_VIRTIO_GPU') |
1361 | virtio_gpu_ss = ss.source_set() | 1361 | virtio_gpu_ss = ss.source_set() |
1362 | diff --git a/hw/display/trace-events b/hw/display/trace-events | 1362 | diff --git a/hw/display/trace-events b/hw/display/trace-events |
1363 | index XXXXXXX..XXXXXXX 100644 | 1363 | index XXXXXXX..XXXXXXX 100644 |
1364 | --- a/hw/display/trace-events | 1364 | --- a/hw/display/trace-events |
1365 | +++ b/hw/display/trace-events | 1365 | +++ b/hw/display/trace-events |
1366 | @@ -XXX,XX +XXX,XX @@ dm163_bits_ppi(unsigned dest_width) "dest_width : %u" | 1366 | @@ -XXX,XX +XXX,XX @@ dm163_bits_ppi(unsigned dest_width) "dest_width : %u" |
1367 | dm163_leds(int led, uint32_t value) "led %d: 0x%x" | 1367 | dm163_leds(int led, uint32_t value) "led %d: 0x%x" |
1368 | dm163_channels(int channel, uint8_t value) "channel %d: 0x%x" | 1368 | dm163_channels(int channel, uint8_t value) "channel %d: 0x%x" |
1369 | dm163_refresh_rate(uint32_t rr) "refresh rate %d" | 1369 | dm163_refresh_rate(uint32_t rr) "refresh rate %d" |
1370 | + | 1370 | + |
1371 | +# apple-gfx.m | 1371 | +# apple-gfx.m |
1372 | +apple_gfx_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | 1372 | +apple_gfx_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 |
1373 | +apple_gfx_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 | 1373 | +apple_gfx_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 |
1374 | +apple_gfx_create_task(uint32_t vm_size, void *va) "vm_size=0x%x base_addr=%p" | 1374 | +apple_gfx_create_task(uint32_t vm_size, void *va) "vm_size=0x%x base_addr=%p" |
1375 | +apple_gfx_destroy_task(void *task, unsigned int num_mapped_regions) "task=%p, task->mapped_regions->len=%u" | 1375 | +apple_gfx_destroy_task(void *task, unsigned int num_mapped_regions) "task=%p, task->mapped_regions->len=%u" |
1376 | +apple_gfx_map_memory(void *task, uint32_t range_count, uint64_t virtual_offset, uint32_t read_only) "task=%p range_count=0x%x virtual_offset=0x%"PRIx64" read_only=%d" | 1376 | +apple_gfx_map_memory(void *task, uint32_t range_count, uint64_t virtual_offset, uint32_t read_only) "task=%p range_count=0x%x virtual_offset=0x%"PRIx64" read_only=%d" |
1377 | +apple_gfx_map_memory_range(uint32_t i, uint64_t phys_addr, uint64_t phys_len) "[%d] phys_addr=0x%"PRIx64" phys_len=0x%"PRIx64 | 1377 | +apple_gfx_map_memory_range(uint32_t i, uint64_t phys_addr, uint64_t phys_len) "[%d] phys_addr=0x%"PRIx64" phys_len=0x%"PRIx64 |
1378 | +apple_gfx_remap(uint64_t retval, void *source_ptr, uint64_t target) "retval=%"PRId64" source=%p target=0x%"PRIx64 | 1378 | +apple_gfx_remap(uint64_t retval, void *source_ptr, uint64_t target) "retval=%"PRId64" source=%p target=0x%"PRIx64 |
1379 | +apple_gfx_unmap_memory(void *task, uint64_t virtual_offset, uint64_t length) "task=%p virtual_offset=0x%"PRIx64" length=0x%"PRIx64 | 1379 | +apple_gfx_unmap_memory(void *task, uint64_t virtual_offset, uint64_t length) "task=%p virtual_offset=0x%"PRIx64" length=0x%"PRIx64 |
1380 | +apple_gfx_read_memory(uint64_t phys_address, uint64_t length, void *dst) "phys_addr=0x%"PRIx64" length=0x%"PRIx64" dest=%p" | 1380 | +apple_gfx_read_memory(uint64_t phys_address, uint64_t length, void *dst) "phys_addr=0x%"PRIx64" length=0x%"PRIx64" dest=%p" |
1381 | +apple_gfx_raise_irq(uint32_t vector) "vector=0x%x" | 1381 | +apple_gfx_raise_irq(uint32_t vector) "vector=0x%x" |
1382 | +apple_gfx_new_frame(void) "" | 1382 | +apple_gfx_new_frame(void) "" |
1383 | +apple_gfx_mode_change(uint64_t x, uint64_t y) "x=%"PRId64" y=%"PRId64 | 1383 | +apple_gfx_mode_change(uint64_t x, uint64_t y) "x=%"PRId64" y=%"PRId64 |
1384 | +apple_gfx_cursor_set(uint32_t bpp, uint64_t width, uint64_t height) "bpp=%d width=%"PRId64" height=%"PRId64 | 1384 | +apple_gfx_cursor_set(uint32_t bpp, uint64_t width, uint64_t height) "bpp=%d width=%"PRId64" height=%"PRId64 |
1385 | +apple_gfx_cursor_show(uint32_t show) "show=%d" | 1385 | +apple_gfx_cursor_show(uint32_t show) "show=%d" |
1386 | +apple_gfx_cursor_move(void) "" | 1386 | +apple_gfx_cursor_move(void) "" |
1387 | +apple_gfx_common_init(const char *device_name, size_t mmio_size) "device: %s; MMIO size: %zu bytes" | 1387 | +apple_gfx_common_init(const char *device_name, size_t mmio_size) "device: %s; MMIO size: %zu bytes" |
1388 | + | 1388 | + |
1389 | +# apple-gfx-mmio.m | 1389 | +# apple-gfx-mmio.m |
1390 | +apple_gfx_mmio_iosfc_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | 1390 | +apple_gfx_mmio_iosfc_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 |
1391 | +apple_gfx_mmio_iosfc_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 | 1391 | +apple_gfx_mmio_iosfc_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 |
1392 | +apple_gfx_iosfc_map_memory(uint64_t phys, uint64_t len, uint32_t ro, void *va, void *e, void *f, void* va_result) "phys=0x%"PRIx64" len=0x%"PRIx64" ro=%d va=%p e=%p f=%p -> *va=%p" | 1392 | +apple_gfx_iosfc_map_memory(uint64_t phys, uint64_t len, uint32_t ro, void *va, void *e, void *f, void* va_result) "phys=0x%"PRIx64" len=0x%"PRIx64" ro=%d va=%p e=%p f=%p -> *va=%p" |
1393 | +apple_gfx_iosfc_map_memory_new_region(size_t i, void *region, uint64_t start, uint64_t end) "index=%zu, region=%p, 0x%"PRIx64"-0x%"PRIx64 | 1393 | +apple_gfx_iosfc_map_memory_new_region(size_t i, void *region, uint64_t start, uint64_t end) "index=%zu, region=%p, 0x%"PRIx64"-0x%"PRIx64 |
1394 | +apple_gfx_iosfc_unmap_memory(void *a, void *b, void *c, void *d, void *e, void *f) "a=%p b=%p c=%p d=%p e=%p f=%p" | 1394 | +apple_gfx_iosfc_unmap_memory(void *a, void *b, void *c, void *d, void *e, void *f) "a=%p b=%p c=%p d=%p e=%p f=%p" |
1395 | +apple_gfx_iosfc_unmap_memory_region(void* mem, void *region) "unmapping @ %p from memory region %p" | 1395 | +apple_gfx_iosfc_unmap_memory_region(void* mem, void *region) "unmapping @ %p from memory region %p" |
1396 | +apple_gfx_iosfc_raise_irq(uint32_t vector) "vector=0x%x" | 1396 | +apple_gfx_iosfc_raise_irq(uint32_t vector) "vector=0x%x" |
1397 | + | 1397 | + |
1398 | diff --git a/meson.build b/meson.build | 1398 | diff --git a/meson.build b/meson.build |
1399 | index XXXXXXX..XXXXXXX 100644 | 1399 | index XXXXXXX..XXXXXXX 100644 |
1400 | --- a/meson.build | 1400 | --- a/meson.build |
1401 | +++ b/meson.build | 1401 | +++ b/meson.build |
1402 | @@ -XXX,XX +XXX,XX @@ socket = [] | 1402 | @@ -XXX,XX +XXX,XX @@ socket = [] |
1403 | version_res = [] | 1403 | version_res = [] |
1404 | coref = [] | 1404 | coref = [] |
1405 | iokit = [] | 1405 | iokit = [] |
1406 | +pvg = not_found | 1406 | +pvg = not_found |
1407 | +metal = [] | 1407 | +metal = [] |
1408 | emulator_link_args = [] | 1408 | emulator_link_args = [] |
1409 | midl = not_found | 1409 | midl = not_found |
1410 | widl = not_found | 1410 | widl = not_found |
1411 | @@ -XXX,XX +XXX,XX @@ elif host_os == 'darwin' | 1411 | @@ -XXX,XX +XXX,XX @@ elif host_os == 'darwin' |
1412 | coref = dependency('appleframeworks', modules: 'CoreFoundation') | 1412 | coref = dependency('appleframeworks', modules: 'CoreFoundation') |
1413 | iokit = dependency('appleframeworks', modules: 'IOKit', required: false) | 1413 | iokit = dependency('appleframeworks', modules: 'IOKit', required: false) |
1414 | host_dsosuf = '.dylib' | 1414 | host_dsosuf = '.dylib' |
1415 | + pvg = dependency('appleframeworks', modules: 'ParavirtualizedGraphics') | 1415 | + pvg = dependency('appleframeworks', modules: 'ParavirtualizedGraphics') |
1416 | + metal = dependency('appleframeworks', modules: 'Metal') | 1416 | + metal = dependency('appleframeworks', modules: 'Metal') |
1417 | elif host_os == 'sunos' | 1417 | elif host_os == 'sunos' |
1418 | socket = [cc.find_library('socket'), | 1418 | socket = [cc.find_library('socket'), |
1419 | cc.find_library('nsl'), | 1419 | cc.find_library('nsl'), |
1420 | -- | 1420 | -- |
1421 | 2.39.5 (Apple Git-154) | 1421 | 2.39.5 (Apple Git-154) |
1422 | 1422 | ||
New patch | |||
---|---|---|---|
1 | 1 | macOS provides a framework (library), ParavirtualizedGraphics.framework |
2 | (PVG), which allows any VMM to implement paravirtualized 3D graphics |
3 | passthrough to the host Metal stack. The framework abstracts away almost |
4 | every aspect of the paravirtualized device model; the VMM only forwards |
5 | guest MMIO accesses to it and supplies callbacks for sharing memory |
6 | address space between the VM and PVG. |
7 | |||
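[Editor's note: for orientation, the interface boils down to a descriptor object
whose block properties the VMM fills in before instantiating the device. The
following is an illustrative sketch only, not working code: the type, property
and function names match those used later in this patch (PGDeviceDescriptor,
PGNewDeviceWithDescriptor, mmioReadAtOffset:/mmioWriteAtOffset:value:), while
the surrounding variables (mtl_device, pgdev, offset, value) and the callback
bodies are placeholders.]

    PGDeviceDescriptor *desc = [PGDeviceDescriptor new];

    desc.device = mtl_device;     /* an id<MTLDevice> selected by the VMM */
    desc.raiseInterrupt = ^(uint32_t vector) {
        /* inject an interrupt into the guest */
    };
    desc.readMemory = ^bool(uint64_t physical_address, uint64_t length,
                            void * _Nonnull dst) {
        /* DMA-read guest memory at physical_address into dst */
        return true;
    };
    /* createTask/destroyTask/mapMemory/unmapMemory are assigned similarly. */

    pgdev = PGNewDeviceWithDescriptor(desc);

    /* Guest MMIO accesses are then forwarded by the VMM into the library: */
    value = [pgdev mmioReadAtOffset:offset];
    [pgdev mmioWriteAtOffset:offset value:value];
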
8 | This patch implements a QEMU device that drives PVG for the VMApple | ||
9 | variant of it. | ||
10 | |||
11 | Signed-off-by: Alexander Graf <graf@amazon.com> | ||
12 | Co-authored-by: Alexander Graf <graf@amazon.com> | ||
13 | |||
14 | Subsequent changes: | ||
15 | |||
16 | * Cherry-pick/rebase conflict fixes, API use updates. | ||
17 | * Moved from hw/vmapple/ (useful outside that machine type) | ||
18 | * Overhaul of threading model, many thread safety improvements. | ||
19 | * Asynchronous rendering. | ||
20 | * Memory and object lifetime fixes. | ||
21 | * Refactoring to split generic and (vmapple) MMIO variant specific | ||
22 | code. | ||
23 | |||
24 | Implementation-wise, most of the complexity lies in the differing threading |
25 | models of ParavirtualizedGraphics.framework, which uses libdispatch and | ||
26 | internal locks, versus QEMU, which heavily uses the BQL, especially during | ||
27 | memory-mapped device I/O. Great care has therefore been taken to prevent | ||
28 | deadlocks by never calling into PVG methods while holding the BQL, and | ||
29 | similarly never acquiring the BQL in a callback from PVG. Different strategies | ||
30 | have been used (libdispatch, blocking and non-blocking BHs, RCU, etc.) | ||
31 | depending on the specific requirements at each framework entry and exit point. | ||
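
[Editor's note: to make that split concrete, here is the pattern used at the
MMIO entry points, condensed from apple_gfx_read()/apple_gfx_write() in this
patch. It is a sketch only: the named types and functions appear in the patch,
the locals (s, offset, val) are placeholders, and the two halves run on
different threads as noted in the comments.]

    /* Condensed from apple_gfx_write(); a sketch, not standalone code. */
    AppleGFXIOJob job = { .state = s, .offset = offset, .value = val };

    /* BQL-holding thread: hand the call to a dispatch queue, then service
     * AIO events (including BHs scheduled by PVG callbacks) while waiting. */
    dispatch_async_f(get_background_queue(), &job, apple_gfx_do_write);
    AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));

    /* Dispatch queue thread (no BQL held): the actual framework call. */
    [job->state->pgdev mmioWriteAtOffset:job->offset value:job->value];
    qatomic_set(&job->completed, true);
    aio_wait_kick();

    /* PVG callbacks that need the BQL never take it directly; they schedule
     * bottom halves on the main AIO context instead, e.g.: */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), new_frame_handler_bh, s);
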
32 | |||
33 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
34 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
35 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
36 | --- | ||
37 | |||
38 | v2: | ||
39 | |||
40 | * Cherry-pick/rebase conflict fixes | ||
41 | * BQL function renaming | ||
42 | * Moved from hw/vmapple/ (useful outside that machine type) | ||
43 | * Code review comments: Switched to DEFINE_TYPES macro & little endian | ||
44 | MMIO. | ||
45 | * Removed some dead/superfluous code | ||
46 | * Made set_mode thread & memory safe |
47 | * Added migration blocker due to lack of (de-)serialisation. | ||
48 | * Fixes to ObjC refcounting and autorelease pool usage. | ||
49 | * Fixed ObjC new/init misuse | ||
50 | * Switched to ObjC category extension for private property. | ||
51 | * Simplified task memory mapping and made it thread safe. | ||
52 | * Refactoring to split generic and vmapple MMIO variant specific | ||
53 | code. | ||
54 | * Switched to asynchronous MMIO writes on x86-64 | ||
55 | * Rendering and graphics update are now done asynchronously | ||
56 | * Fixed cursor handling | ||
57 | * Coding convention fixes | ||
58 | * Removed software cursor compositing | ||
59 | |||
60 | v3: | ||
61 | |||
62 | * Rebased on latest upstream, fixed breakages including switching to Resettable methods. | ||
63 | * Squashed patches dealing with dGPUs, MMIO area size, and GPU picking. | ||
64 | * Allow re-entrant MMIO; this simplifies the code and solves the divergence | ||
65 | between x86-64 and arm64 variants. | ||
66 | |||
67 | v4: | ||
68 | |||
69 | * Renamed '-vmapple' device variant to '-mmio' | ||
70 | * MMIO device type now requires aarch64 host and guest | ||
71 | * Complete overhaul of the glue code for making QEMU's and |
72 | ParavirtualizedGraphics.framework's threading and synchronisation models | ||
73 | work together. Calls into PVG are from dispatch queues while the | ||
74 | BQL-holding initiating thread processes AIO context events; callbacks from | ||
75 | PVG are scheduled as BHs on the BQL/main AIO context, awaiting completion | ||
76 | where necessary. | ||
77 | * Guest frame rendering state is covered by the BQL, with only the PVG calls | ||
78 | outside the lock, and serialised on the named render_queue. | ||
79 | * Simplified logic for dropping frames in-flight during mode changes, fixed | ||
80 | bug in pending frames logic. | ||
81 | * Addressed smaller code review notes such as: function naming, object type | ||
82 | declarations, type names/declarations/casts, code formatting, #include | ||
83 | order, over-cautious ObjC retain/release, what goes in init vs realize, | ||
84 | etc. | ||
85 | |||
86 | v5: | ||
87 | |||
88 | * Smaller non-functional fixes in response to review comments, such as using | ||
89 | NULL for the AIO_WAIT_WHILE context argument, type name formatting, | ||
90 | deleting leftover debug code, logging improvements, state struct field | ||
91 | order and documentation improvements, etc. | ||
92 | * Instead of a single condition variable for all synchronous BH job types, | ||
93 | there is now one for each callback block. This reduces the number | ||
94 | of threads being awoken unnecessarily to near zero. | ||
95 | * MMIO device variant: Unified the BH job for raising interrupts. | ||
96 | * Use DMA APIs for PVG framework's guest memory read requests. | ||
97 | * Thread safety improvements: ensure mutable AppleGFXState fields are not | ||
98 | accessed outside the appropriate lock. Added dedicated mutex for the task | ||
99 | list. | ||
100 | * Retain references to MemoryRegions for which there exist mappings in each | ||
101 | PGTask, and for IOSurface mappings. | ||
102 | |||
103 | v6: | ||
104 | |||
105 | * Switched PGTask_s's mapped_regions from GPtrArray to GArray |
106 | * Allow DisplaySurface to manage its own vram now that texture -> vram copy | ||
107 | occurs under BQL. | ||
108 | * Memory mapping operations now use RCU_READ_LOCK_GUARD() where possible | ||
109 | instead of a heavy-weight BH job to acquire the BQL. | ||
110 | * Changed PVG cursor and mode setting callbacks to kick off BHs instead of | ||
111 | libdispatch tasks which then locked the BQL explicitly. | ||
112 | * The single remaining callback which must wait for a BH to complete now | ||
113 | creates an ephemeral QemuSemaphore to await completion. | ||
114 | * Re-removed tracking of mapped surface manager memory regions. Just look up | ||
115 | and ref/unref the memory regions in the map/unmap callbacks. | ||
116 | * Re-ordered functions in apple-gfx.m to group them by area of functionality. | ||
117 | * Improved comments and tweaked some names. | ||
118 | |||
119 | v7: | ||
120 | |||
121 | * Use g_ptr_array_find() helper function | ||
122 | * Error handling coding style tweak | ||
123 | |||
124 | v8: | ||
125 | |||
126 | * Renamed apple_gfx_host_address_for_gpa_range() to | ||
127 | apple_gfx_host_ptr_for_gpa_range(), and made it return a void* instead of | ||
128 | uintptr_t. Fixed up callers and related code. | ||
129 | * Some adjustments to types used. | ||
130 | * Variable naming tweaks for better clarity. | ||
131 | * Fixed leak in unlikely realize error case. | ||
132 | * Fixed typo in unmap call. | ||
133 | * Don't bother with dummy argument for g_ptr_array_find(), NULL works too. | ||
134 | |||
135 | v9: | ||
136 | |||
137 | * Pass device pointer to graphic_console_init(). | ||
138 | * Slightly re-ordered initialisation code. | ||
139 | * Simplified error handling during realize(). | ||
140 | * Simplified code without functional changes, adjusted code & comment | ||
141 | formatting. | ||
142 | |||
143 | v10: | ||
144 | |||
145 | * Reworked the way frame rendering code is threaded to use BHs for sections | ||
146 | requiring BQL. | ||
147 | * Fix for ./configure error on non-macOS platforms. | ||
148 | * Code formatting tweaks. | ||
149 | |||
150 | v11: | ||
151 | |||
152 | * Generate unique display serial number for each apple-gfx device instance. | ||
153 | * Dropped redundant local variable initialisation. | ||
154 | |||
155 | v12: | ||
156 | |||
157 | * Removed 2 redundant variable initialisations. | ||
158 | * Removed dedicated rendering dispatch_queue, use global queue instead. | ||
159 | * Fixed an object leak regression introduced in v10. Solved by placing | ||
160 | @autoreleasepool blocks around the relevant Objective-C code in the BH | ||
161 | functions replacing the dispatch_async tasks. (dispatch_async implicitly | ||
162 | cleaned up autoreleased objects.) | ||
163 | * Fixed missing retain/release of command buffers when handing off to a | ||
164 | non-BH thread. (Problem masked at runtime by above leak.) | ||
165 | * Better handling of render command encoding errors. | ||
166 | * Re-arranged positions of static variables in the file. | ||
167 | |||
168 | hw/display/Kconfig | 9 + | ||
169 | hw/display/apple-gfx-mmio.m | 281 +++++++++++++ | ||
170 | hw/display/apple-gfx.h | 66 +++ | ||
171 | hw/display/apple-gfx.m | 783 ++++++++++++++++++++++++++++++++++++ | ||
172 | hw/display/meson.build | 6 + | ||
173 | hw/display/trace-events | 28 ++ | ||
174 | meson.build | 4 + | ||
175 | 7 files changed, 1177 insertions(+) | ||
176 | create mode 100644 hw/display/apple-gfx-mmio.m | ||
177 | create mode 100644 hw/display/apple-gfx.h | ||
178 | create mode 100644 hw/display/apple-gfx.m | ||
179 | |||
180 | diff --git a/hw/display/Kconfig b/hw/display/Kconfig | ||
181 | index XXXXXXX..XXXXXXX 100644 | ||
182 | --- a/hw/display/Kconfig | ||
183 | +++ b/hw/display/Kconfig | ||
184 | @@ -XXX,XX +XXX,XX @@ config XLNX_DISPLAYPORT | ||
185 | |||
186 | config DM163 | ||
187 | bool | ||
188 | + | ||
189 | +config MAC_PVG | ||
190 | + bool | ||
191 | + default y | ||
192 | + | ||
193 | +config MAC_PVG_MMIO | ||
194 | + bool | ||
195 | + depends on MAC_PVG && AARCH64 | ||
196 | + | ||
197 | diff --git a/hw/display/apple-gfx-mmio.m b/hw/display/apple-gfx-mmio.m | ||
198 | new file mode 100644 | ||
199 | index XXXXXXX..XXXXXXX | ||
200 | --- /dev/null | ||
201 | +++ b/hw/display/apple-gfx-mmio.m | ||
202 | @@ -XXX,XX +XXX,XX @@ | ||
203 | +/* | ||
204 | + * QEMU Apple ParavirtualizedGraphics.framework device, MMIO (arm64) variant | ||
205 | + * | ||
206 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | ||
207 | + * | ||
208 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
209 | + * See the COPYING file in the top-level directory. | ||
210 | + * | ||
211 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
212 | + * | ||
213 | + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides | ||
214 | + * which implements 3d graphics passthrough to the host as well as a | ||
215 | + * proprietary guest communication channel to drive it. This device model | ||
216 | + * implements support to drive that library from within QEMU as an MMIO-based | ||
217 | + * system device for macOS on arm64 VMs. | ||
218 | + */ | ||
219 | + | ||
220 | +#include "qemu/osdep.h" | ||
221 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> | ||
222 | +#include "apple-gfx.h" | ||
223 | +#include "monitor/monitor.h" | ||
224 | +#include "hw/sysbus.h" | ||
225 | +#include "hw/irq.h" | ||
226 | +#include "trace.h" | ||
227 | +#include "qemu/log.h" | ||
228 | + | ||
229 | +OBJECT_DECLARE_SIMPLE_TYPE(AppleGFXMMIOState, APPLE_GFX_MMIO) | ||
230 | + | ||
231 | +/* | ||
232 | + * ParavirtualizedGraphics.Framework only ships header files for the PCI | ||
233 | + * variant which does not include IOSFC descriptors and host devices. We add | ||
234 | + * their definitions here so that we can also work with the ARM version. | ||
235 | + */ | ||
236 | +typedef bool(^IOSFCRaiseInterrupt)(uint32_t vector); | ||
237 | +typedef bool(^IOSFCUnmapMemory)( | ||
238 | + void *, void *, void *, void *, void *, void *); | ||
239 | +typedef bool(^IOSFCMapMemory)( | ||
240 | + uint64_t phys, uint64_t len, bool ro, void **va, void *, void *); | ||
241 | + | ||
242 | +@interface PGDeviceDescriptor (IOSurfaceMapper) | ||
243 | +@property (readwrite, nonatomic) bool usingIOSurfaceMapper; | ||
244 | +@end | ||
245 | + | ||
246 | +@interface PGIOSurfaceHostDeviceDescriptor : NSObject | ||
247 | +-(PGIOSurfaceHostDeviceDescriptor *)init; | ||
248 | +@property (readwrite, nonatomic, copy, nullable) IOSFCMapMemory mapMemory; | ||
249 | +@property (readwrite, nonatomic, copy, nullable) IOSFCUnmapMemory unmapMemory; | ||
250 | +@property (readwrite, nonatomic, copy, nullable) IOSFCRaiseInterrupt raiseInterrupt; | ||
251 | +@end | ||
252 | + | ||
253 | +@interface PGIOSurfaceHostDevice : NSObject | ||
254 | +-(instancetype)initWithDescriptor:(PGIOSurfaceHostDeviceDescriptor *)desc; | ||
255 | +-(uint32_t)mmioReadAtOffset:(size_t)offset; | ||
256 | +-(void)mmioWriteAtOffset:(size_t)offset value:(uint32_t)value; | ||
257 | +@end | ||
258 | + | ||
259 | +struct AppleGFXMapSurfaceMemoryJob; | ||
260 | +struct AppleGFXMMIOState { | ||
261 | + SysBusDevice parent_obj; | ||
262 | + | ||
263 | + AppleGFXState common; | ||
264 | + | ||
265 | + qemu_irq irq_gfx; | ||
266 | + qemu_irq irq_iosfc; | ||
267 | + MemoryRegion iomem_iosfc; | ||
268 | + PGIOSurfaceHostDevice *pgiosfc; | ||
269 | +}; | ||
270 | + | ||
271 | +typedef struct AppleGFXMMIOJob { | ||
272 | + AppleGFXMMIOState *state; | ||
273 | + uint64_t offset; | ||
274 | + uint64_t value; | ||
275 | + bool completed; | ||
276 | +} AppleGFXMMIOJob; | ||
277 | + | ||
278 | +static void iosfc_do_read(void *opaque) | ||
279 | +{ | ||
280 | + AppleGFXMMIOJob *job = opaque; | ||
281 | + job->value = [job->state->pgiosfc mmioReadAtOffset:job->offset]; | ||
282 | + qatomic_set(&job->completed, true); | ||
283 | + aio_wait_kick(); | ||
284 | +} | ||
285 | + | ||
286 | +static uint64_t iosfc_read(void *opaque, hwaddr offset, unsigned size) | ||
287 | +{ | ||
288 | + AppleGFXMMIOJob job = { | ||
289 | + .state = opaque, | ||
290 | + .offset = offset, | ||
291 | + .completed = false, | ||
292 | + }; | ||
293 | + dispatch_queue_t queue = | ||
294 | + dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); | ||
295 | + | ||
296 | + dispatch_async_f(queue, &job, iosfc_do_read); | ||
297 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); | ||
298 | + | ||
299 | + trace_apple_gfx_mmio_iosfc_read(offset, job.value); | ||
300 | + return job.value; | ||
301 | +} | ||
302 | + | ||
303 | +static void iosfc_do_write(void *opaque) | ||
304 | +{ | ||
305 | + AppleGFXMMIOJob *job = opaque; | ||
306 | + [job->state->pgiosfc mmioWriteAtOffset:job->offset value:job->value]; | ||
307 | + qatomic_set(&job->completed, true); | ||
308 | + aio_wait_kick(); | ||
309 | +} | ||
310 | + | ||
311 | +static void iosfc_write(void *opaque, hwaddr offset, uint64_t val, | ||
312 | + unsigned size) | ||
313 | +{ | ||
314 | + AppleGFXMMIOJob job = { | ||
315 | + .state = opaque, | ||
316 | + .offset = offset, | ||
317 | + .value = val, | ||
318 | + .completed = false, | ||
319 | + }; | ||
320 | + dispatch_queue_t queue = | ||
321 | + dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); | ||
322 | + | ||
323 | + dispatch_async_f(queue, &job, iosfc_do_write); | ||
324 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); | ||
325 | + | ||
326 | + trace_apple_gfx_mmio_iosfc_write(offset, val); | ||
327 | +} | ||
328 | + | ||
329 | +static const MemoryRegionOps apple_iosfc_ops = { | ||
330 | + .read = iosfc_read, | ||
331 | + .write = iosfc_write, | ||
332 | + .endianness = DEVICE_LITTLE_ENDIAN, | ||
333 | + .valid = { | ||
334 | + .min_access_size = 4, | ||
335 | + .max_access_size = 8, | ||
336 | + }, | ||
337 | + .impl = { | ||
338 | + .min_access_size = 4, | ||
339 | + .max_access_size = 8, | ||
340 | + }, | ||
341 | +}; | ||
342 | + | ||
343 | +static void raise_irq_bh(void *opaque) | ||
344 | +{ | ||
345 | + qemu_irq *irq = opaque; | ||
346 | + | ||
347 | + qemu_irq_pulse(*irq); | ||
348 | +} | ||
349 | + | ||
350 | +static void *apple_gfx_mmio_map_surface_memory(uint64_t guest_physical_address, | ||
351 | + uint64_t length, bool read_only) | ||
352 | +{ | ||
353 | + void *mem; | ||
354 | + MemoryRegion *region = NULL; | ||
355 | + | ||
356 | + RCU_READ_LOCK_GUARD(); | ||
357 | + mem = apple_gfx_host_ptr_for_gpa_range(guest_physical_address, | ||
358 | + length, read_only, ®ion); | ||
359 | + if (mem) { | ||
360 | + memory_region_ref(region); | ||
361 | + } | ||
362 | + return mem; | ||
363 | +} | ||
364 | + | ||
365 | +static bool apple_gfx_mmio_unmap_surface_memory(void *ptr) | ||
366 | +{ | ||
367 | + MemoryRegion *region; | ||
368 | + ram_addr_t offset = 0; | ||
369 | + | ||
370 | + RCU_READ_LOCK_GUARD(); | ||
371 | + region = memory_region_from_host(ptr, &offset); | ||
372 | + if (!region) { | ||
373 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: memory at %p to be unmapped not " | ||
374 | + "found.\n", | ||
375 | + __func__, ptr); | ||
376 | + return false; | ||
377 | + } | ||
378 | + | ||
379 | + trace_apple_gfx_iosfc_unmap_memory_region(ptr, region); | ||
380 | + memory_region_unref(region); | ||
381 | + return true; | ||
382 | +} | ||
383 | + | ||
384 | +static PGIOSurfaceHostDevice *apple_gfx_prepare_iosurface_host_device( | ||
385 | + AppleGFXMMIOState *s) | ||
386 | +{ | ||
387 | + PGIOSurfaceHostDeviceDescriptor *iosfc_desc = | ||
388 | + [PGIOSurfaceHostDeviceDescriptor new]; | ||
389 | + PGIOSurfaceHostDevice *iosfc_host_dev; | ||
390 | + | ||
391 | + iosfc_desc.mapMemory = | ||
392 | + ^bool(uint64_t phys, uint64_t len, bool ro, void **va, void *e, void *f) { | ||
393 | + *va = apple_gfx_mmio_map_surface_memory(phys, len, ro); | ||
394 | + | ||
395 | + trace_apple_gfx_iosfc_map_memory(phys, len, ro, va, e, f, *va); | ||
396 | + | ||
397 | + return *va != NULL; | ||
398 | + }; | ||
399 | + | ||
400 | + iosfc_desc.unmapMemory = | ||
401 | + ^bool(void *va, void *b, void *c, void *d, void *e, void *f) { | ||
402 | + return apple_gfx_mmio_unmap_surface_memory(va); | ||
403 | + }; | ||
404 | + | ||
405 | + iosfc_desc.raiseInterrupt = ^bool(uint32_t vector) { | ||
406 | + trace_apple_gfx_iosfc_raise_irq(vector); | ||
407 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | ||
408 | + raise_irq_bh, &s->irq_iosfc); | ||
409 | + return true; | ||
410 | + }; | ||
411 | + | ||
412 | + iosfc_host_dev = | ||
413 | + [[PGIOSurfaceHostDevice alloc] initWithDescriptor:iosfc_desc]; | ||
414 | + [iosfc_desc release]; | ||
415 | + return iosfc_host_dev; | ||
416 | +} | ||
417 | + | ||
418 | +static void apple_gfx_mmio_realize(DeviceState *dev, Error **errp) | ||
419 | +{ | ||
420 | + @autoreleasepool { | ||
421 | + AppleGFXMMIOState *s = APPLE_GFX_MMIO(dev); | ||
422 | + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; | ||
423 | + | ||
424 | + desc.raiseInterrupt = ^(uint32_t vector) { | ||
425 | + trace_apple_gfx_raise_irq(vector); | ||
426 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | ||
427 | + raise_irq_bh, &s->irq_gfx); | ||
428 | + }; | ||
429 | + | ||
430 | + desc.usingIOSurfaceMapper = true; | ||
431 | + s->pgiosfc = apple_gfx_prepare_iosurface_host_device(s); | ||
432 | + | ||
433 | + if (!apple_gfx_common_realize(&s->common, dev, desc, errp)) { | ||
434 | + [s->pgiosfc release]; | ||
435 | + s->pgiosfc = nil; | ||
436 | + } | ||
437 | + | ||
438 | + [desc release]; | ||
439 | + desc = nil; | ||
440 | + } | ||
441 | +} | ||
442 | + | ||
443 | +static void apple_gfx_mmio_init(Object *obj) | ||
444 | +{ | ||
445 | + AppleGFXMMIOState *s = APPLE_GFX_MMIO(obj); | ||
446 | + | ||
447 | + apple_gfx_common_init(obj, &s->common, TYPE_APPLE_GFX_MMIO); | ||
448 | + | ||
449 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->common.iomem_gfx); | ||
450 | + memory_region_init_io(&s->iomem_iosfc, obj, &apple_iosfc_ops, s, | ||
451 | + TYPE_APPLE_GFX_MMIO, 0x10000); | ||
452 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem_iosfc); | ||
453 | + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq_gfx); | ||
454 | + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq_iosfc); | ||
455 | +} | ||
456 | + | ||
457 | +static void apple_gfx_mmio_reset(Object *obj, ResetType type) | ||
458 | +{ | ||
459 | + AppleGFXMMIOState *s = APPLE_GFX_MMIO(obj); | ||
460 | + [s->common.pgdev reset]; | ||
461 | +} | ||
462 | + | ||
463 | + | ||
464 | +static void apple_gfx_mmio_class_init(ObjectClass *klass, void *data) | ||
465 | +{ | ||
466 | + DeviceClass *dc = DEVICE_CLASS(klass); | ||
467 | + ResettableClass *rc = RESETTABLE_CLASS(klass); | ||
468 | + | ||
469 | + rc->phases.hold = apple_gfx_mmio_reset; | ||
470 | + dc->hotpluggable = false; | ||
471 | + dc->realize = apple_gfx_mmio_realize; | ||
472 | +} | ||
473 | + | ||
474 | +static TypeInfo apple_gfx_mmio_types[] = { | ||
475 | + { | ||
476 | + .name = TYPE_APPLE_GFX_MMIO, | ||
477 | + .parent = TYPE_SYS_BUS_DEVICE, | ||
478 | + .instance_size = sizeof(AppleGFXMMIOState), | ||
479 | + .class_init = apple_gfx_mmio_class_init, | ||
480 | + .instance_init = apple_gfx_mmio_init, | ||
481 | + } | ||
482 | +}; | ||
483 | +DEFINE_TYPES(apple_gfx_mmio_types) | ||
484 | diff --git a/hw/display/apple-gfx.h b/hw/display/apple-gfx.h | ||
485 | new file mode 100644 | ||
486 | index XXXXXXX..XXXXXXX | ||
487 | --- /dev/null | ||
488 | +++ b/hw/display/apple-gfx.h | ||
489 | @@ -XXX,XX +XXX,XX @@ | ||
490 | +/* | ||
491 | + * Data structures and functions shared between variants of the macOS | ||
492 | + * ParavirtualizedGraphics.framework based apple-gfx display adapter. | ||
493 | + * | ||
494 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
495 | + */ | ||
496 | + | ||
497 | +#ifndef QEMU_APPLE_GFX_H | ||
498 | +#define QEMU_APPLE_GFX_H | ||
499 | + | ||
500 | +#define TYPE_APPLE_GFX_MMIO "apple-gfx-mmio" | ||
501 | +#define TYPE_APPLE_GFX_PCI "apple-gfx-pci" | ||
502 | + | ||
503 | +#include "qemu/osdep.h" | ||
504 | +#include <dispatch/dispatch.h> | ||
505 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> | ||
506 | +#include "qemu/typedefs.h" | ||
507 | +#include "exec/memory.h" | ||
508 | +#include "ui/surface.h" | ||
509 | + | ||
510 | +@class PGDeviceDescriptor; | ||
511 | +@protocol PGDevice; | ||
512 | +@protocol PGDisplay; | ||
513 | +@protocol MTLDevice; | ||
514 | +@protocol MTLTexture; | ||
515 | +@protocol MTLCommandQueue; | ||
516 | + | ||
517 | +typedef QTAILQ_HEAD(, PGTask_s) PGTaskList; | ||
518 | + | ||
519 | +typedef struct AppleGFXState { | ||
520 | + /* Initialised on init/realize() */ | ||
521 | + MemoryRegion iomem_gfx; | ||
522 | + id<PGDevice> pgdev; | ||
523 | + id<PGDisplay> pgdisp; | ||
524 | + QemuConsole *con; | ||
525 | + id<MTLDevice> mtl; | ||
526 | + id<MTLCommandQueue> mtl_queue; | ||
527 | + | ||
528 | + /* List `tasks` is protected by task_mutex */ | ||
529 | + QemuMutex task_mutex; | ||
530 | + PGTaskList tasks; | ||
531 | + | ||
532 | + /* Mutable state (BQL protected) */ | ||
533 | + QEMUCursor *cursor; | ||
534 | + DisplaySurface *surface; | ||
535 | + id<MTLTexture> texture; | ||
536 | + int8_t pending_frames; /* # guest frames in the rendering pipeline */ | ||
537 | + bool gfx_update_requested; /* QEMU display system wants a new frame */ | ||
538 | + bool new_frame_ready; /* Guest has rendered a frame, ready to be used */ | ||
539 | + bool using_managed_texture_storage; | ||
540 | + uint32_t rendering_frame_width; | ||
541 | + uint32_t rendering_frame_height; | ||
542 | + | ||
543 | + /* Mutable state (atomic) */ | ||
544 | + bool cursor_show; | ||
545 | +} AppleGFXState; | ||
546 | + | ||
547 | +void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name); | ||
548 | +bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, | ||
549 | + PGDeviceDescriptor *desc, Error **errp); | ||
550 | +void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, | ||
551 | + uint64_t length, bool read_only, | ||
552 | + MemoryRegion **mapping_in_region); | ||
553 | + | ||
554 | +#endif | ||
555 | + | ||
556 | diff --git a/hw/display/apple-gfx.m b/hw/display/apple-gfx.m | ||
557 | new file mode 100644 | ||
558 | index XXXXXXX..XXXXXXX | ||
559 | --- /dev/null | ||
560 | +++ b/hw/display/apple-gfx.m | ||
561 | @@ -XXX,XX +XXX,XX @@ | ||
562 | +/* | ||
563 | + * QEMU Apple ParavirtualizedGraphics.framework device | ||
564 | + * | ||
565 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | ||
566 | + * | ||
567 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
568 | + * See the COPYING file in the top-level directory. | ||
569 | + * | ||
570 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
571 | + * | ||
572 | + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides | ||
573 | + * which implements 3d graphics passthrough to the host as well as a | ||
574 | + * proprietary guest communication channel to drive it. This device model | ||
575 | + * implements support to drive that library from within QEMU. | ||
576 | + */ | ||
577 | + | ||
578 | +#include "qemu/osdep.h" | ||
579 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> | ||
580 | +#include <mach/mach_vm.h> | ||
581 | +#include "apple-gfx.h" | ||
582 | +#include "trace.h" | ||
583 | +#include "qemu-main.h" | ||
584 | +#include "exec/address-spaces.h" | ||
585 | +#include "migration/blocker.h" | ||
586 | +#include "monitor/monitor.h" | ||
587 | +#include "qemu/main-loop.h" | ||
588 | +#include "qemu/cutils.h" | ||
589 | +#include "qemu/log.h" | ||
590 | +#include "qapi/visitor.h" | ||
591 | +#include "qapi/error.h" | ||
592 | +#include "sysemu/dma.h" | ||
593 | +#include "ui/console.h" | ||
594 | + | ||
595 | +static const PGDisplayCoord_t apple_gfx_modes[] = { | ||
596 | + { .x = 1440, .y = 1080 }, | ||
597 | + { .x = 1280, .y = 1024 }, | ||
598 | +}; | ||
599 | + | ||
600 | +static Error *apple_gfx_mig_blocker; | ||
601 | +static uint32_t next_pgdisplay_serial_num = 1; | ||
602 | + | ||
603 | +static dispatch_queue_t get_background_queue(void) | ||
604 | +{ | ||
605 | + return dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); | ||
606 | +} | ||
607 | + | ||
608 | +/* ------ PGTask and task operations: new/destroy/map/unmap ------ */ | ||
609 | + | ||
610 | +/* | ||
611 | + * This implements the type declared in <ParavirtualizedGraphics/PGDevice.h> | ||
612 | + * which is opaque from the framework's point of view. It is used in callbacks | ||
613 | + * in the form of its typedef PGTask_t, which also already exists in the | ||
614 | + * framework headers. | ||
615 | + * | ||
616 | + * A "task" in PVG terminology represents a host-virtual contiguous address | ||
617 | + * range which is reserved in a large chunk on task creation. The mapMemory | ||
618 | + * callback then requests ranges of guest system memory (identified by their | ||
619 | + * GPA) to be mapped into subranges of this reserved address space. | ||
620 | + * This type of operation isn't well-supported by QEMU's memory subsystem, | ||
621 | + * but it is fortunately trivial to achieve with Darwin's mach_vm_remap() call, | ||
622 | + * which allows us to refer to the same backing memory via multiple virtual | ||
623 | + * address ranges. The Mach VM APIs are therefore used throughout for managing | ||
624 | + * task memory. | ||
625 | + */ | ||
626 | +struct PGTask_s { | ||
627 | + QTAILQ_ENTRY(PGTask_s) node; | ||
628 | + AppleGFXState *s; | ||
629 | + mach_vm_address_t address; | ||
630 | + uint64_t len; | ||
631 | + /* | ||
632 | + * All unique MemoryRegions for which a mapping has been created in this | ||
633 | + * task, and on which we have thus called memory_region_ref(). There are | ||
634 | + * usually very few regions of system RAM in total, so we expect this array | ||
635 | + * to be very short. Therefore, no need for sorting or fancy search | ||
636 | + * algorithms, linear search will do. | ||
637 | + * Protected by AppleGFXState's task_mutex. | ||
638 | + */ | ||
639 | + GPtrArray *mapped_regions; | ||
640 | +}; | ||
641 | + | ||
642 | +static PGTask_t *apple_gfx_new_task(AppleGFXState *s, uint64_t len) | ||
643 | +{ | ||
644 | + mach_vm_address_t task_mem; | ||
645 | + PGTask_t *task; | ||
646 | + kern_return_t r; | ||
647 | + | ||
648 | + r = mach_vm_allocate(mach_task_self(), &task_mem, len, VM_FLAGS_ANYWHERE); | ||
649 | + if (r != KERN_SUCCESS) { | ||
650 | + return NULL; | ||
651 | + } | ||
652 | + | ||
653 | + task = g_new0(PGTask_t, 1); | ||
654 | + task->s = s; | ||
655 | + task->address = task_mem; | ||
656 | + task->len = len; | ||
657 | + task->mapped_regions = g_ptr_array_sized_new(2 /* Usually enough */); | ||
658 | + | ||
659 | + QEMU_LOCK_GUARD(&s->task_mutex); | ||
660 | + QTAILQ_INSERT_TAIL(&s->tasks, task, node); | ||
661 | + | ||
662 | + return task; | ||
663 | +} | ||
664 | + | ||
665 | +static void apple_gfx_destroy_task(AppleGFXState *s, PGTask_t *task) | ||
666 | +{ | ||
667 | + GPtrArray *regions = task->mapped_regions; | ||
668 | + MemoryRegion *region; | ||
669 | + size_t i; | ||
670 | + | ||
671 | + for (i = 0; i < regions->len; ++i) { | ||
672 | + region = g_ptr_array_index(regions, i); | ||
673 | + memory_region_unref(region); | ||
674 | + } | ||
675 | + g_ptr_array_unref(regions); | ||
676 | + | ||
677 | + mach_vm_deallocate(mach_task_self(), task->address, task->len); | ||
678 | + | ||
679 | + QEMU_LOCK_GUARD(&s->task_mutex); | ||
680 | + QTAILQ_REMOVE(&s->tasks, task, node); | ||
681 | + g_free(task); | ||
682 | +} | ||
683 | + | ||
684 | +void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, | ||
685 | + uint64_t length, bool read_only, | ||
686 | + MemoryRegion **mapping_in_region) | ||
687 | +{ | ||
688 | + MemoryRegion *ram_region; | ||
689 | + char *host_ptr; | ||
690 | + hwaddr ram_region_offset = 0; | ||
691 | + hwaddr ram_region_length = length; | ||
692 | + | ||
693 | + ram_region = address_space_translate(&address_space_memory, | ||
694 | + guest_physical, | ||
695 | + &ram_region_offset, | ||
696 | + &ram_region_length, !read_only, | ||
697 | + MEMTXATTRS_UNSPECIFIED); | ||
698 | + | ||
699 | + if (!ram_region || ram_region_length < length || | ||
700 | + !memory_access_is_direct(ram_region, !read_only)) { | ||
701 | + return NULL; | ||
702 | + } | ||
703 | + | ||
704 | + host_ptr = memory_region_get_ram_ptr(ram_region); | ||
705 | + if (!host_ptr) { | ||
706 | + return NULL; | ||
707 | + } | ||
708 | + host_ptr += ram_region_offset; | ||
709 | + *mapping_in_region = ram_region; | ||
710 | + return host_ptr; | ||
711 | +} | ||
712 | + | ||
713 | +static bool apple_gfx_task_map_memory(AppleGFXState *s, PGTask_t *task, | ||
714 | + uint64_t virtual_offset, | ||
715 | + PGPhysicalMemoryRange_t *ranges, | ||
716 | + uint32_t range_count, bool read_only) | ||
717 | +{ | ||
718 | + kern_return_t r; | ||
719 | + void *source_ptr; | ||
720 | + mach_vm_address_t target; | ||
721 | + vm_prot_t cur_protection, max_protection; | ||
722 | + bool success = true; | ||
723 | + MemoryRegion *region; | ||
724 | + | ||
725 | + RCU_READ_LOCK_GUARD(); | ||
726 | + QEMU_LOCK_GUARD(&s->task_mutex); | ||
727 | + | ||
728 | + trace_apple_gfx_map_memory(task, range_count, virtual_offset, read_only); | ||
729 | + for (int i = 0; i < range_count; i++) { | ||
730 | + PGPhysicalMemoryRange_t *range = &ranges[i]; | ||
731 | + | ||
732 | + target = task->address + virtual_offset; | ||
733 | + virtual_offset += range->physicalLength; | ||
734 | + | ||
735 | + trace_apple_gfx_map_memory_range(i, range->physicalAddress, | ||
736 | + range->physicalLength); | ||
737 | + | ||
738 | + region = NULL; | ||
739 | + source_ptr = apple_gfx_host_ptr_for_gpa_range(range->physicalAddress, | ||
740 | + range->physicalLength, | ||
741 | + read_only, ®ion); | ||
742 | + if (!source_ptr) { | ||
743 | + success = false; | ||
744 | + continue; | ||
745 | + } | ||
746 | + | ||
747 | + if (!g_ptr_array_find(task->mapped_regions, region, NULL)) { | ||
748 | + g_ptr_array_add(task->mapped_regions, region); | ||
749 | + memory_region_ref(region); | ||
750 | + } | ||
751 | + | ||
752 | + cur_protection = 0; | ||
753 | + max_protection = 0; | ||
754 | + /* Map guest RAM at range->physicalAddress into PG task memory range */ | ||
755 | + r = mach_vm_remap(mach_task_self(), | ||
756 | + &target, range->physicalLength, vm_page_size - 1, | ||
757 | + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, | ||
758 | + mach_task_self(), (mach_vm_address_t)source_ptr, | ||
759 | + false /* shared mapping, no copy */, | ||
760 | + &cur_protection, &max_protection, | ||
761 | + VM_INHERIT_COPY); | ||
762 | + trace_apple_gfx_remap(r, source_ptr, target); | ||
763 | + g_assert(r == KERN_SUCCESS); | ||
764 | + } | ||
765 | + | ||
766 | + return success; | ||
767 | +} | ||
768 | + | ||
769 | +static void apple_gfx_task_unmap_memory(AppleGFXState *s, PGTask_t *task, | ||
770 | + uint64_t virtual_offset, uint64_t length) | ||
771 | +{ | ||
772 | + kern_return_t r; | ||
773 | + mach_vm_address_t range_address; | ||
774 | + | ||
775 | + trace_apple_gfx_unmap_memory(task, virtual_offset, length); | ||
776 | + | ||
777 | + /* | ||
778 | + * Replace task memory range with fresh 0 pages, undoing the mapping | ||
779 | + * from guest RAM. | ||
780 | + */ | ||
781 | + range_address = task->address + virtual_offset; | ||
782 | + r = mach_vm_allocate(mach_task_self(), &range_address, length, | ||
783 | + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE); | ||
784 | + g_assert(r == KERN_SUCCESS); | ||
785 | +} | ||
786 | + | ||
787 | +/* ------ Rendering and frame management ------ */ | ||
788 | + | ||
789 | +static void apple_gfx_render_frame_completed_bh(void *opaque); | ||
790 | + | ||
791 | +static void apple_gfx_render_new_frame(AppleGFXState *s) | ||
792 | +{ | ||
793 | + bool managed_texture = s->using_managed_texture_storage; | ||
794 | + uint32_t width = surface_width(s->surface); | ||
795 | + uint32_t height = surface_height(s->surface); | ||
796 | + MTLRegion region = MTLRegionMake2D(0, 0, width, height); | ||
797 | + id<MTLCommandBuffer> command_buffer = [s->mtl_queue commandBuffer]; | ||
798 | + id<MTLTexture> texture = s->texture; | ||
799 | + | ||
800 | + assert(bql_locked()); | ||
801 | + [texture retain]; | ||
802 | + [command_buffer retain]; | ||
803 | + | ||
804 | + s->rendering_frame_width = width; | ||
805 | + s->rendering_frame_height = height; | ||
806 | + | ||
807 | + dispatch_async(get_background_queue(), ^{ | ||
808 | + /* | ||
809 | + * This is not safe to call from the BQL/BH due to PVG-internal locks | ||
810 | + * causing deadlocks. | ||
811 | + */ | ||
812 | + bool r = [s->pgdisp encodeCurrentFrameToCommandBuffer:command_buffer | ||
813 | + texture:texture | ||
814 | + region:region]; | ||
815 | + if (!r) { | ||
816 | + [texture release]; | ||
817 | + [command_buffer release]; | ||
818 | + qemu_log_mask(LOG_GUEST_ERROR, | ||
819 | + "%s: encodeCurrentFrameToCommandBuffer:texture:region: " | ||
820 | + "failed\n", __func__); | ||
821 | + bql_lock(); | ||
822 | + --s->pending_frames; | ||
823 | + if (s->pending_frames > 0) { | ||
824 | + apple_gfx_render_new_frame(s); | ||
825 | + } | ||
826 | + bql_unlock(); | ||
827 | + return; | ||
828 | + } | ||
829 | + | ||
830 | + if (managed_texture) { | ||
831 | + /* "Managed" textures exist in both VRAM and RAM and must be synced. */ | ||
832 | + id<MTLBlitCommandEncoder> blit = [command_buffer blitCommandEncoder]; | ||
833 | + [blit synchronizeResource:texture]; | ||
834 | + [blit endEncoding]; | ||
835 | + } | ||
836 | + [texture release]; | ||
837 | + [command_buffer addCompletedHandler: | ||
838 | + ^(id<MTLCommandBuffer> cb) | ||
839 | + { | ||
840 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | ||
841 | + apple_gfx_render_frame_completed_bh, s); | ||
842 | + }]; | ||
843 | + [command_buffer commit]; | ||
844 | + [command_buffer release]; | ||
845 | + }); | ||
846 | +} | ||
847 | + | ||
848 | +static void copy_mtl_texture_to_surface_mem(id<MTLTexture> texture, void *vram) | ||
849 | +{ | ||
850 | + /* | ||
851 | + * TODO: Skip this entirely on a pure Metal or headless/guest-only | ||
852 | + * rendering path, else use a blit command encoder? Needs careful | ||
853 | + * (double?) buffering design. | ||
854 | + */ | ||
855 | + size_t width = texture.width, height = texture.height; | ||
856 | + MTLRegion region = MTLRegionMake2D(0, 0, width, height); | ||
857 | + [texture getBytes:vram | ||
858 | + bytesPerRow:(width * 4) | ||
859 | + bytesPerImage:(width * height * 4) | ||
860 | + fromRegion:region | ||
861 | + mipmapLevel:0 | ||
862 | + slice:0]; | ||
863 | +} | ||
864 | + | ||
865 | +static void apple_gfx_render_frame_completed_bh(void *opaque) | ||
866 | +{ | ||
867 | + AppleGFXState *s = opaque; | ||
868 | + | ||
869 | + @autoreleasepool { | ||
870 | + --s->pending_frames; | ||
871 | + assert(s->pending_frames >= 0); | ||
872 | + | ||
873 | + /* Only update display if mode hasn't changed since we started rendering. */ | ||
874 | + if (s->rendering_frame_width == surface_width(s->surface) && | ||
875 | + s->rendering_frame_height == surface_height(s->surface)) { | ||
876 | + copy_mtl_texture_to_surface_mem(s->texture, surface_data(s->surface)); | ||
877 | + if (s->gfx_update_requested) { | ||
878 | + s->gfx_update_requested = false; | ||
879 | + dpy_gfx_update_full(s->con); | ||
880 | + graphic_hw_update_done(s->con); | ||
881 | + s->new_frame_ready = false; | ||
882 | + } else { | ||
883 | + s->new_frame_ready = true; | ||
884 | + } | ||
885 | + } | ||
886 | + if (s->pending_frames > 0) { | ||
887 | + apple_gfx_render_new_frame(s); | ||
888 | + } | ||
889 | + } | ||
890 | +} | ||
891 | + | ||
892 | +static void apple_gfx_fb_update_display(void *opaque) | ||
893 | +{ | ||
894 | + AppleGFXState *s = opaque; | ||
895 | + | ||
896 | + assert(bql_locked()); | ||
897 | + if (s->new_frame_ready) { | ||
898 | + dpy_gfx_update_full(s->con); | ||
899 | + s->new_frame_ready = false; | ||
900 | + graphic_hw_update_done(s->con); | ||
901 | + } else if (s->pending_frames > 0) { | ||
902 | + s->gfx_update_requested = true; | ||
903 | + } else { | ||
904 | + graphic_hw_update_done(s->con); | ||
905 | + } | ||
906 | +} | ||
907 | + | ||
908 | +static const GraphicHwOps apple_gfx_fb_ops = { | ||
909 | + .gfx_update = apple_gfx_fb_update_display, | ||
910 | + .gfx_update_async = true, | ||
911 | +}; | ||
912 | + | ||
913 | +/* ------ Mouse cursor and display mode setting ------ */ | ||
914 | + | ||
915 | +static void set_mode(AppleGFXState *s, uint32_t width, uint32_t height) | ||
916 | +{ | ||
917 | + MTLTextureDescriptor *textureDescriptor; | ||
918 | + | ||
919 | + if (s->surface && | ||
920 | + width == surface_width(s->surface) && | ||
921 | + height == surface_height(s->surface)) { | ||
922 | + return; | ||
923 | + } | ||
924 | + | ||
925 | + [s->texture release]; | ||
926 | + | ||
927 | + s->surface = qemu_create_displaysurface(width, height); | ||
928 | + | ||
929 | + @autoreleasepool { | ||
930 | + textureDescriptor = | ||
931 | + [MTLTextureDescriptor | ||
932 | + texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm | ||
933 | + width:width | ||
934 | + height:height | ||
935 | + mipmapped:NO]; | ||
936 | + textureDescriptor.usage = s->pgdisp.minimumTextureUsage; | ||
937 | + s->texture = [s->mtl newTextureWithDescriptor:textureDescriptor]; | ||
938 | + s->using_managed_texture_storage = | ||
939 | + (s->texture.storageMode == MTLStorageModeManaged); | ||
940 | + } | ||
941 | + | ||
942 | + dpy_gfx_replace_surface(s->con, s->surface); | ||
943 | +} | ||
944 | + | ||
945 | +static void update_cursor(AppleGFXState *s) | ||
946 | +{ | ||
947 | + assert(bql_locked()); | ||
948 | + dpy_mouse_set(s->con, s->pgdisp.cursorPosition.x, | ||
949 | + s->pgdisp.cursorPosition.y, qatomic_read(&s->cursor_show)); | ||
950 | +} | ||
951 | + | ||
952 | +static void update_cursor_bh(void *opaque) | ||
953 | +{ | ||
954 | + AppleGFXState *s = opaque; | ||
955 | + update_cursor(s); | ||
956 | +} | ||
957 | + | ||
958 | +typedef struct AppleGFXSetCursorGlyphJob { | ||
959 | + AppleGFXState *s; | ||
960 | + NSBitmapImageRep *glyph; | ||
961 | + PGDisplayCoord_t hotspot; | ||
962 | +} AppleGFXSetCursorGlyphJob; | ||
963 | + | ||
964 | +static void set_cursor_glyph(void *opaque) | ||
965 | +{ | ||
966 | + AppleGFXSetCursorGlyphJob *job = opaque; | ||
967 | + AppleGFXState *s = job->s; | ||
968 | + NSBitmapImageRep *glyph = job->glyph; | ||
969 | + uint32_t bpp = glyph.bitsPerPixel; | ||
970 | + size_t width = glyph.pixelsWide; | ||
971 | + size_t height = glyph.pixelsHigh; | ||
972 | + size_t padding_bytes_per_row = glyph.bytesPerRow - width * 4; | ||
973 | + const uint8_t* px_data = glyph.bitmapData; | ||
974 | + | ||
975 | + trace_apple_gfx_cursor_set(bpp, width, height); | ||
976 | + | ||
977 | + if (s->cursor) { | ||
978 | + cursor_unref(s->cursor); | ||
979 | + s->cursor = NULL; | ||
980 | + } | ||
981 | + | ||
982 | + if (bpp == 32) { /* Shouldn't be anything else, but just to be safe... */ | ||
983 | + s->cursor = cursor_alloc(width, height); | ||
984 | + s->cursor->hot_x = job->hotspot.x; | ||
985 | + s->cursor->hot_y = job->hotspot.y; | ||
986 | + | ||
987 | + uint32_t *dest_px = s->cursor->data; | ||
988 | + | ||
989 | + for (size_t y = 0; y < height; ++y) { | ||
990 | + for (size_t x = 0; x < width; ++x) { | ||
991 | + /* | ||
992 | + * NSBitmapImageRep's red & blue channels are swapped | ||
993 | + * compared to QEMUCursor's. | ||
994 | + */ | ||
995 | + *dest_px = | ||
996 | + (px_data[0] << 16u) | | ||
997 | + (px_data[1] << 8u) | | ||
998 | + (px_data[2] << 0u) | | ||
999 | + (px_data[3] << 24u); | ||
1000 | + ++dest_px; | ||
1001 | + px_data += 4; | ||
1002 | + } | ||
1003 | + px_data += padding_bytes_per_row; | ||
1004 | + } | ||
1005 | + dpy_cursor_define(s->con, s->cursor); | ||
1006 | + update_cursor(s); | ||
1007 | + } | ||
1008 | + [glyph release]; | ||
1009 | + | ||
1010 | + g_free(job); | ||
1011 | +} | ||
1012 | + | ||
1013 | +/* ------ DMA (device reading system memory) ------ */ | ||
1014 | + | ||
1015 | +typedef struct AppleGFXReadMemoryJob { | ||
1016 | + QemuSemaphore sem; | ||
1017 | + hwaddr physical_address; | ||
1018 | + uint64_t length; | ||
1019 | + void *dst; | ||
1020 | + bool success; | ||
1021 | +} AppleGFXReadMemoryJob; | ||
1022 | + | ||
1023 | +static void apple_gfx_do_read_memory(void *opaque) | ||
1024 | +{ | ||
1025 | + AppleGFXReadMemoryJob *job = opaque; | ||
1026 | + MemTxResult r; | ||
1027 | + | ||
1028 | + r = dma_memory_read(&address_space_memory, job->physical_address, | ||
1029 | + job->dst, job->length, MEMTXATTRS_UNSPECIFIED); | ||
1030 | + job->success = r == MEMTX_OK; | ||
1031 | + | ||
1032 | + qemu_sem_post(&job->sem); | ||
1033 | +} | ||
1034 | + | ||
1035 | +static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address, | ||
1036 | + uint64_t length, void *dst) | ||
1037 | +{ | ||
1038 | + AppleGFXReadMemoryJob job = { | ||
1039 | + .physical_address = physical_address, .length = length, .dst = dst | ||
1040 | + }; | ||
1041 | + | ||
1042 | + trace_apple_gfx_read_memory(physical_address, length, dst); | ||
1043 | + | ||
1044 | + /* Performing DMA requires BQL, so do it in a BH. */ | ||
1045 | + qemu_sem_init(&job.sem, 0); | ||
1046 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | ||
1047 | + apple_gfx_do_read_memory, &job); | ||
1048 | + qemu_sem_wait(&job.sem); | ||
1049 | + qemu_sem_destroy(&job.sem); | ||
1050 | + return job.success; | ||
1051 | +} | ||
1052 | + | ||
1053 | +/* ------ Memory-mapped device I/O operations ------ */ | ||
1054 | + | ||
1055 | +typedef struct AppleGFXIOJob { | ||
1056 | + AppleGFXState *state; | ||
1057 | + uint64_t offset; | ||
1058 | + uint64_t value; | ||
1059 | + bool completed; | ||
1060 | +} AppleGFXIOJob; | ||
1061 | + | ||
1062 | +static void apple_gfx_do_read(void *opaque) | ||
1063 | +{ | ||
1064 | + AppleGFXIOJob *job = opaque; | ||
1065 | + job->value = [job->state->pgdev mmioReadAtOffset:job->offset]; | ||
1066 | + qatomic_set(&job->completed, true); | ||
1067 | + aio_wait_kick(); | ||
1068 | +} | ||
1069 | + | ||
1070 | +static uint64_t apple_gfx_read(void *opaque, hwaddr offset, unsigned size) | ||
1071 | +{ | ||
1072 | + AppleGFXIOJob job = { | ||
1073 | + .state = opaque, | ||
1074 | + .offset = offset, | ||
1075 | + .completed = false, | ||
1076 | + }; | ||
1077 | + dispatch_queue_t queue = get_background_queue(); | ||
1078 | + | ||
1079 | + dispatch_async_f(queue, &job, apple_gfx_do_read); | ||
1080 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); | ||
1081 | + | ||
1082 | + trace_apple_gfx_read(offset, job.value); | ||
1083 | + return job.value; | ||
1084 | +} | ||
1085 | + | ||
1086 | +static void apple_gfx_do_write(void *opaque) | ||
1087 | +{ | ||
1088 | + AppleGFXIOJob *job = opaque; | ||
1089 | + [job->state->pgdev mmioWriteAtOffset:job->offset value:job->value]; | ||
1090 | + qatomic_set(&job->completed, true); | ||
1091 | + aio_wait_kick(); | ||
1092 | +} | ||
1093 | + | ||
1094 | +static void apple_gfx_write(void *opaque, hwaddr offset, uint64_t val, | ||
1095 | + unsigned size) | ||
1096 | +{ | ||
1097 | + /* | ||
1098 | + * The methods mmioReadAtOffset: and especially mmioWriteAtOffset: can | ||
1099 | + * trigger synchronous operations on other dispatch queues, which in turn | ||
1100 | + * may call back out on one or more of the callback blocks. For this reason, | ||
1101 | + * and as we are holding the BQL, we invoke the I/O methods on a pool | ||
1102 | + * thread and handle AIO tasks while we wait. Any work in the callbacks | ||
1103 | + * requiring the BQL will in turn schedule BHs which this thread will | ||
1104 | + * process while waiting. | ||
1105 | + */ | ||
1106 | + AppleGFXIOJob job = { | ||
1107 | + .state = opaque, | ||
1108 | + .offset = offset, | ||
1109 | + .value = val, | ||
1110 | + .completed = false, | ||
1111 | + }; | ||
1112 | + dispatch_queue_t queue = get_background_queue(); | ||
1113 | + | ||
1114 | + dispatch_async_f(queue, &job, apple_gfx_do_write); | ||
1115 | + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); | ||
1116 | + | ||
1117 | + trace_apple_gfx_write(offset, val); | ||
1118 | +} | ||
1119 | + | ||
1120 | +static const MemoryRegionOps apple_gfx_ops = { | ||
1121 | + .read = apple_gfx_read, | ||
1122 | + .write = apple_gfx_write, | ||
1123 | + .endianness = DEVICE_LITTLE_ENDIAN, | ||
1124 | + .valid = { | ||
1125 | + .min_access_size = 4, | ||
1126 | + .max_access_size = 8, | ||
1127 | + }, | ||
1128 | + .impl = { | ||
1129 | + .min_access_size = 4, | ||
1130 | + .max_access_size = 4, | ||
1131 | + }, | ||
1132 | +}; | ||
1133 | + | ||
1134 | +static size_t apple_gfx_get_default_mmio_range_size(void) | ||
1135 | +{ | ||
1136 | + size_t mmio_range_size; | ||
1137 | + @autoreleasepool { | ||
1138 | + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; | ||
1139 | + mmio_range_size = desc.mmioLength; | ||
1140 | + [desc release]; | ||
1141 | + } | ||
1142 | + return mmio_range_size; | ||
1143 | +} | ||
1144 | + | ||
1145 | +/* ------ Initialisation and startup ------ */ | ||
1146 | + | ||
1147 | +void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name) | ||
1148 | +{ | ||
1149 | + size_t mmio_range_size = apple_gfx_get_default_mmio_range_size(); | ||
1150 | + | ||
1151 | + trace_apple_gfx_common_init(obj_name, mmio_range_size); | ||
1152 | + memory_region_init_io(&s->iomem_gfx, obj, &apple_gfx_ops, s, obj_name, | ||
1153 | + mmio_range_size); | ||
1154 | + | ||
1155 | + /* TODO: PVG framework supports serialising device state: integrate it! */ | ||
1156 | +} | ||
1157 | + | ||
1158 | +static void apple_gfx_register_task_mapping_handlers(AppleGFXState *s, | ||
1159 | + PGDeviceDescriptor *desc) | ||
1160 | +{ | ||
1161 | + desc.createTask = ^(uint64_t vmSize, void * _Nullable * _Nonnull baseAddress) { | ||
1162 | + PGTask_t *task = apple_gfx_new_task(s, vmSize); | ||
1163 | + *baseAddress = (void *)task->address; | ||
1164 | + trace_apple_gfx_create_task(vmSize, *baseAddress); | ||
1165 | + return task; | ||
1166 | + }; | ||
1167 | + | ||
1168 | + desc.destroyTask = ^(PGTask_t * _Nonnull task) { | ||
1169 | + trace_apple_gfx_destroy_task(task, task->mapped_regions->len); | ||
1170 | + | ||
1171 | + apple_gfx_destroy_task(s, task); | ||
1172 | + }; | ||
1173 | + | ||
1174 | + desc.mapMemory = ^bool(PGTask_t * _Nonnull task, uint32_t range_count, | ||
1175 | + uint64_t virtual_offset, bool read_only, | ||
1176 | + PGPhysicalMemoryRange_t * _Nonnull ranges) { | ||
1177 | + return apple_gfx_task_map_memory(s, task, virtual_offset, | ||
1178 | + ranges, range_count, read_only); | ||
1179 | + }; | ||
1180 | + | ||
1181 | + desc.unmapMemory = ^bool(PGTask_t * _Nonnull task, uint64_t virtual_offset, | ||
1182 | + uint64_t length) { | ||
1183 | + apple_gfx_task_unmap_memory(s, task, virtual_offset, length); | ||
1184 | + return true; | ||
1185 | + }; | ||
1186 | + | ||
1187 | + desc.readMemory = ^bool(uint64_t physical_address, uint64_t length, | ||
1188 | + void * _Nonnull dst) { | ||
1189 | + return apple_gfx_read_memory(s, physical_address, length, dst); | ||
1190 | + }; | ||
1191 | +} | ||
1192 | + | ||
1193 | +static void new_frame_handler_bh(void *opaque) | ||
1194 | +{ | ||
1195 | + AppleGFXState *s = opaque; | ||
1196 | + | ||
1197 | + /* Drop frames if guest gets too far ahead. */ | ||
1198 | + if (s->pending_frames >= 2) { | ||
1199 | + return; | ||
1200 | + } | ||
1201 | + ++s->pending_frames; | ||
1202 | + if (s->pending_frames > 1) { | ||
1203 | + return; | ||
1204 | + } | ||
1205 | + | ||
1206 | + @autoreleasepool { | ||
1207 | + apple_gfx_render_new_frame(s); | ||
1208 | + } | ||
1209 | +} | ||
1210 | + | ||
1211 | +static PGDisplayDescriptor *apple_gfx_prepare_display_descriptor(AppleGFXState *s) | ||
1212 | +{ | ||
1213 | + PGDisplayDescriptor *disp_desc = [PGDisplayDescriptor new]; | ||
1214 | + | ||
1215 | + disp_desc.name = @"QEMU display"; | ||
1216 | + disp_desc.sizeInMillimeters = NSMakeSize(400., 300.); /* A 20" display */ | ||
1217 | + disp_desc.queue = dispatch_get_main_queue(); | ||
1218 | + disp_desc.newFrameEventHandler = ^(void) { | ||
1219 | + trace_apple_gfx_new_frame(); | ||
1220 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), new_frame_handler_bh, s); | ||
1221 | + }; | ||
1222 | + disp_desc.modeChangeHandler = ^(PGDisplayCoord_t sizeInPixels, | ||
1223 | + OSType pixelFormat) { | ||
1224 | + trace_apple_gfx_mode_change(sizeInPixels.x, sizeInPixels.y); | ||
1225 | + | ||
1226 | + BQL_LOCK_GUARD(); | ||
1227 | + set_mode(s, sizeInPixels.x, sizeInPixels.y); | ||
1228 | + }; | ||
1229 | + disp_desc.cursorGlyphHandler = ^(NSBitmapImageRep *glyph, | ||
1230 | + PGDisplayCoord_t hotspot) { | ||
1231 | + AppleGFXSetCursorGlyphJob *job = g_malloc0(sizeof(*job)); | ||
1232 | + job->s = s; | ||
1233 | + job->glyph = glyph; | ||
1234 | + job->hotspot = hotspot; | ||
1235 | + [glyph retain]; | ||
1236 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | ||
1237 | + set_cursor_glyph, job); | ||
1238 | + }; | ||
1239 | + disp_desc.cursorShowHandler = ^(BOOL show) { | ||
1240 | + trace_apple_gfx_cursor_show(show); | ||
1241 | + qatomic_set(&s->cursor_show, show); | ||
1242 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | ||
1243 | + update_cursor_bh, s); | ||
1244 | + }; | ||
1245 | + disp_desc.cursorMoveHandler = ^(void) { | ||
1246 | + trace_apple_gfx_cursor_move(); | ||
1247 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | ||
1248 | + update_cursor_bh, s); | ||
1249 | + }; | ||
1250 | + | ||
1251 | + return disp_desc; | ||
1252 | +} | ||
1253 | + | ||
1254 | +static NSArray<PGDisplayMode*>* apple_gfx_prepare_display_mode_array(void) | ||
1255 | +{ | ||
1256 | + PGDisplayMode *modes[ARRAY_SIZE(apple_gfx_modes)]; | ||
1257 | + NSArray<PGDisplayMode*>* mode_array; | ||
1258 | + int i; | ||
1259 | + | ||
1260 | + for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { | ||
1261 | + modes[i] = | ||
1262 | + [[PGDisplayMode alloc] initWithSizeInPixels:apple_gfx_modes[i] refreshRateInHz:60.]; | ||
1263 | + } | ||
1264 | + | ||
1265 | + mode_array = [NSArray arrayWithObjects:modes count:ARRAY_SIZE(apple_gfx_modes)]; | ||
1266 | + | ||
1267 | + for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { | ||
1268 | + [modes[i] release]; | ||
1269 | + modes[i] = nil; | ||
1270 | + } | ||
1271 | + | ||
1272 | + return mode_array; | ||
1273 | +} | ||
1274 | + | ||
1275 | +static id<MTLDevice> copy_suitable_metal_device(void) | ||
1276 | +{ | ||
1277 | + id<MTLDevice> dev = nil; | ||
1278 | + NSArray<id<MTLDevice>> *devs = MTLCopyAllDevices(); | ||
1279 | + | ||
1280 | + /* Prefer a unified memory GPU. Failing that, pick a non-removable GPU. */ | ||
1281 | + for (size_t i = 0; i < devs.count; ++i) { | ||
1282 | + if (devs[i].hasUnifiedMemory) { | ||
1283 | + dev = devs[i]; | ||
1284 | + break; | ||
1285 | + } | ||
1286 | + if (!devs[i].removable) { | ||
1287 | + dev = devs[i]; | ||
1288 | + } | ||
1289 | + } | ||
1290 | + | ||
1291 | + if (dev != nil) { | ||
1292 | + [dev retain]; | ||
1293 | + } else { | ||
1294 | + dev = MTLCreateSystemDefaultDevice(); | ||
1295 | + } | ||
1296 | + [devs release]; | ||
1297 | + | ||
1298 | + return dev; | ||
1299 | +} | ||
1300 | + | ||
1301 | +bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, | ||
1302 | + PGDeviceDescriptor *desc, Error **errp) | ||
1303 | +{ | ||
1304 | + PGDisplayDescriptor *disp_desc; | ||
1305 | + | ||
1306 | + if (apple_gfx_mig_blocker == NULL) { | ||
1307 | + error_setg(&apple_gfx_mig_blocker, | ||
1308 | + "Migration state blocked by apple-gfx display device"); | ||
1309 | + if (migrate_add_blocker(&apple_gfx_mig_blocker, errp) < 0) { | ||
1310 | + return false; | ||
1311 | + } | ||
1312 | + } | ||
1313 | + | ||
1314 | + qemu_mutex_init(&s->task_mutex); | ||
1315 | + QTAILQ_INIT(&s->tasks); | ||
1316 | + s->mtl = copy_suitable_metal_device(); | ||
1317 | + s->mtl_queue = [s->mtl newCommandQueue]; | ||
1318 | + | ||
1319 | + desc.device = s->mtl; | ||
1320 | + | ||
1321 | + apple_gfx_register_task_mapping_handlers(s, desc); | ||
1322 | + | ||
1323 | + s->cursor_show = true; | ||
1324 | + | ||
1325 | + s->pgdev = PGNewDeviceWithDescriptor(desc); | ||
1326 | + | ||
1327 | + disp_desc = apple_gfx_prepare_display_descriptor(s); | ||
1328 | + /* | ||
1329 | + * Although the framework supports multiple virtual displays connected | ||
1330 | + * to a single PV graphics device, this integration currently does not. | ||
1331 | + * It is however possible to create more than one instance of the | ||
1332 | + * device, each with one display. The macOS guest will ignore these | ||
1333 | + * displays if they share the same serial number, so ensure each | ||
1334 | + * instance gets a unique one. | ||
1335 | + */ | ||
1336 | + s->pgdisp = [s->pgdev newDisplayWithDescriptor:disp_desc | ||
1337 | + port:0 | ||
1338 | + serialNum:next_pgdisplay_serial_num++]; | ||
1339 | + [disp_desc release]; | ||
1340 | + s->pgdisp.modeList = apple_gfx_prepare_display_mode_array(); | ||
1341 | + | ||
1342 | + s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s); | ||
1343 | + return true; | ||
1344 | +} | ||
1345 | diff --git a/hw/display/meson.build b/hw/display/meson.build | ||
1346 | index XXXXXXX..XXXXXXX 100644 | ||
1347 | --- a/hw/display/meson.build | ||
1348 | +++ b/hw/display/meson.build | ||
1349 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c')) | ||
1350 | |||
1351 | system_ss.add(when: 'CONFIG_ATI_VGA', if_true: [files('ati.c', 'ati_2d.c', 'ati_dbg.c'), pixman]) | ||
1352 | |||
1353 | +if host_os == 'darwin' | ||
1354 | + system_ss.add(when: 'CONFIG_MAC_PVG', if_true: [files('apple-gfx.m'), pvg, metal]) | ||
1355 | + if cpu == 'aarch64' | ||
1356 | + system_ss.add(when: 'CONFIG_MAC_PVG_MMIO', if_true: [files('apple-gfx-mmio.m'), pvg, metal]) | ||
1357 | + endif | ||
1358 | +endif | ||
1359 | |||
1360 | if config_all_devices.has_key('CONFIG_VIRTIO_GPU') | ||
1361 | virtio_gpu_ss = ss.source_set() | ||
1362 | diff --git a/hw/display/trace-events b/hw/display/trace-events | ||
1363 | index XXXXXXX..XXXXXXX 100644 | ||
1364 | --- a/hw/display/trace-events | ||
1365 | +++ b/hw/display/trace-events | ||
1366 | @@ -XXX,XX +XXX,XX @@ dm163_bits_ppi(unsigned dest_width) "dest_width : %u" | ||
1367 | dm163_leds(int led, uint32_t value) "led %d: 0x%x" | ||
1368 | dm163_channels(int channel, uint8_t value) "channel %d: 0x%x" | ||
1369 | dm163_refresh_rate(uint32_t rr) "refresh rate %d" | ||
1370 | + | ||
1371 | +# apple-gfx.m | ||
1372 | +apple_gfx_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | ||
1373 | +apple_gfx_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 | ||
1374 | +apple_gfx_create_task(uint32_t vm_size, void *va) "vm_size=0x%x base_addr=%p" | ||
1375 | +apple_gfx_destroy_task(void *task, unsigned int num_mapped_regions) "task=%p, task->mapped_regions->len=%u" | ||
1376 | +apple_gfx_map_memory(void *task, uint32_t range_count, uint64_t virtual_offset, uint32_t read_only) "task=%p range_count=0x%x virtual_offset=0x%"PRIx64" read_only=%d" | ||
1377 | +apple_gfx_map_memory_range(uint32_t i, uint64_t phys_addr, uint64_t phys_len) "[%d] phys_addr=0x%"PRIx64" phys_len=0x%"PRIx64 | ||
1378 | +apple_gfx_remap(uint64_t retval, void *source_ptr, uint64_t target) "retval=%"PRId64" source=%p target=0x%"PRIx64 | ||
1379 | +apple_gfx_unmap_memory(void *task, uint64_t virtual_offset, uint64_t length) "task=%p virtual_offset=0x%"PRIx64" length=0x%"PRIx64 | ||
1380 | +apple_gfx_read_memory(uint64_t phys_address, uint64_t length, void *dst) "phys_addr=0x%"PRIx64" length=0x%"PRIx64" dest=%p" | ||
1381 | +apple_gfx_raise_irq(uint32_t vector) "vector=0x%x" | ||
1382 | +apple_gfx_new_frame(void) "" | ||
1383 | +apple_gfx_mode_change(uint64_t x, uint64_t y) "x=%"PRId64" y=%"PRId64 | ||
1384 | +apple_gfx_cursor_set(uint32_t bpp, uint64_t width, uint64_t height) "bpp=%d width=%"PRId64" height=%"PRId64 | ||
1385 | +apple_gfx_cursor_show(uint32_t show) "show=%d" | ||
1386 | +apple_gfx_cursor_move(void) "" | ||
1387 | +apple_gfx_common_init(const char *device_name, size_t mmio_size) "device: %s; MMIO size: %zu bytes" | ||
1388 | + | ||
1389 | +# apple-gfx-mmio.m | ||
1390 | +apple_gfx_mmio_iosfc_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | ||
1391 | +apple_gfx_mmio_iosfc_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 | ||
1392 | +apple_gfx_iosfc_map_memory(uint64_t phys, uint64_t len, uint32_t ro, void *va, void *e, void *f, void* va_result) "phys=0x%"PRIx64" len=0x%"PRIx64" ro=%d va=%p e=%p f=%p -> *va=%p" | ||
1393 | +apple_gfx_iosfc_map_memory_new_region(size_t i, void *region, uint64_t start, uint64_t end) "index=%zu, region=%p, 0x%"PRIx64"-0x%"PRIx64 | ||
1394 | +apple_gfx_iosfc_unmap_memory(void *a, void *b, void *c, void *d, void *e, void *f) "a=%p b=%p c=%p d=%p e=%p f=%p" | ||
1395 | +apple_gfx_iosfc_unmap_memory_region(void* mem, void *region) "unmapping @ %p from memory region %p" | ||
1396 | +apple_gfx_iosfc_raise_irq(uint32_t vector) "vector=0x%x" | ||
1397 | + | ||
1398 | diff --git a/meson.build b/meson.build | ||
1399 | index XXXXXXX..XXXXXXX 100644 | ||
1400 | --- a/meson.build | ||
1401 | +++ b/meson.build | ||
1402 | @@ -XXX,XX +XXX,XX @@ socket = [] | ||
1403 | version_res = [] | ||
1404 | coref = [] | ||
1405 | iokit = [] | ||
1406 | +pvg = not_found | ||
1407 | +metal = [] | ||
1408 | emulator_link_args = [] | ||
1409 | midl = not_found | ||
1410 | widl = not_found | ||
1411 | @@ -XXX,XX +XXX,XX @@ elif host_os == 'darwin' | ||
1412 | coref = dependency('appleframeworks', modules: 'CoreFoundation') | ||
1413 | iokit = dependency('appleframeworks', modules: 'IOKit', required: false) | ||
1414 | host_dsosuf = '.dylib' | ||
1415 | + pvg = dependency('appleframeworks', modules: 'ParavirtualizedGraphics') | ||
1416 | + metal = dependency('appleframeworks', modules: 'Metal') | ||
1417 | elif host_os == 'sunos' | ||
1418 | socket = [cc.find_library('socket'), | ||
1419 | cc.find_library('nsl'), | ||
1420 | -- | ||
1421 | 2.39.5 (Apple Git-154) | ||
1422 | |||
1423 | diff view generated by jsdifflib |
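A note on the task-mapping approach used in apple-gfx.m above: the PGTask_s comment explains that guest RAM is aliased into each task's reserved range with mach_vm_remap(). The following stand-alone sketch is not part of the series and has no QEMU dependencies (all names in it are made up for illustration); it simply shows the aliasing behaviour the device model relies on: after the remap, both virtual ranges refer to the same backing pages.

/*
 * Stand-alone sketch, not part of the patch: demonstrates the
 * mach_vm_remap() aliasing that apple_gfx_task_map_memory() builds on.
 * Compile with plain cc on macOS.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    mach_vm_address_t source = 0, alias = 0;
    vm_prot_t cur_prot = 0, max_prot = 0;
    kern_return_t kr;

    /* Some backing memory, standing in for guest RAM. */
    kr = mach_vm_allocate(mach_task_self(), &source, vm_page_size,
                          VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return 1;
    }
    strcpy((char *)(uintptr_t)source, "hello");

    /* A reserved range, standing in for the PG task's address space. */
    kr = mach_vm_allocate(mach_task_self(), &alias, vm_page_size,
                          VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return 1;
    }

    /* Overwrite the reserved range with a shared mapping of the source. */
    kr = mach_vm_remap(mach_task_self(), &alias, vm_page_size, 0,
                       VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                       mach_task_self(), source,
                       false /* shared mapping, no copy */,
                       &cur_prot, &max_prot, VM_INHERIT_NONE);
    if (kr != KERN_SUCCESS) {
        return 1;
    }

    /* Writes through one range are visible through the other. */
    ((char *)(uintptr_t)alias)[0] = 'H';
    printf("%s\n", (char *)(uintptr_t)source); /* prints "Hello" */
    return 0;
}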
1 | This change wires up the PCI variant of the paravirtualised | 1 | This change wires up the PCI variant of the paravirtualised |
---|---|---|---|
2 | graphics device, mainly useful for x86-64 macOS guests, implemented | 2 | graphics device, mainly useful for x86-64 macOS guests, implemented |
3 | by macOS's ParavirtualizedGraphics.framework. It builds on code | 3 | by macOS's ParavirtualizedGraphics.framework. It builds on code |
4 | shared with the vmapple/mmio variant of the PVG device. | 4 | shared with the vmapple/mmio variant of the PVG device. |
5 | 5 | ||
6 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 6 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
7 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 7 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
8 | --- | 8 | --- |
9 | 9 | ||
10 | v4: | 10 | v4: |
11 | 11 | ||
12 | * Threading improvements analogous to those in common apple-gfx code | 12 | * Threading improvements analogous to those in common apple-gfx code |
13 | and mmio device variant. | 13 | and mmio device variant. |
14 | * Smaller code review issues addressed. | 14 | * Smaller code review issues addressed. |
15 | 15 | ||
16 | v5: | 16 | v5: |
17 | 17 | ||
18 | * Minor error handling improvement. | 18 | * Minor error handling improvement. |
19 | 19 | ||
20 | v6: | 20 | v6: |
21 | 21 | ||
22 | * Removed an unused function parameter. | 22 | * Removed an unused function parameter. |
23 | 23 | ||
24 | v9: | 24 | v9: |
25 | 25 | ||
26 | * Fixup of changed common call. | 26 | * Fixup of changed common call. |
27 | * Whitespace and comment formatting tweaks. | 27 | * Whitespace and comment formatting tweaks. |
28 | 28 | ||
29 | v11: | 29 | v11: |
30 | 30 | ||
31 | * Comment formatting fix. | 31 | * Comment formatting fix. |
32 | 32 | ||
33 | hw/display/Kconfig | 4 + | 33 | hw/display/Kconfig | 4 + |
34 | hw/display/apple-gfx-pci.m | 150 +++++++++++++++++++++++++++++++++++++ | 34 | hw/display/apple-gfx-pci.m | 150 +++++++++++++++++++++++++++++++++++++ |
35 | hw/display/meson.build | 1 + | 35 | hw/display/meson.build | 1 + |
36 | 3 files changed, 155 insertions(+) | 36 | 3 files changed, 155 insertions(+) |
37 | create mode 100644 hw/display/apple-gfx-pci.m | 37 | create mode 100644 hw/display/apple-gfx-pci.m |
38 | 38 | ||
39 | diff --git a/hw/display/Kconfig b/hw/display/Kconfig | 39 | diff --git a/hw/display/Kconfig b/hw/display/Kconfig |
40 | index XXXXXXX..XXXXXXX 100644 | 40 | index XXXXXXX..XXXXXXX 100644 |
41 | --- a/hw/display/Kconfig | 41 | --- a/hw/display/Kconfig |
42 | +++ b/hw/display/Kconfig | 42 | +++ b/hw/display/Kconfig |
43 | @@ -XXX,XX +XXX,XX @@ config MAC_PVG_MMIO | 43 | @@ -XXX,XX +XXX,XX @@ config MAC_PVG_MMIO |
44 | bool | 44 | bool |
45 | depends on MAC_PVG && AARCH64 | 45 | depends on MAC_PVG && AARCH64 |
46 | 46 | ||
47 | +config MAC_PVG_PCI | 47 | +config MAC_PVG_PCI |
48 | + bool | 48 | + bool |
49 | + depends on MAC_PVG && PCI | 49 | + depends on MAC_PVG && PCI |
50 | + default y if PCI_DEVICES | 50 | + default y if PCI_DEVICES |
51 | diff --git a/hw/display/apple-gfx-pci.m b/hw/display/apple-gfx-pci.m | 51 | diff --git a/hw/display/apple-gfx-pci.m b/hw/display/apple-gfx-pci.m |
52 | new file mode 100644 | 52 | new file mode 100644 |
53 | index XXXXXXX..XXXXXXX | 53 | index XXXXXXX..XXXXXXX |
54 | --- /dev/null | 54 | --- /dev/null |
55 | +++ b/hw/display/apple-gfx-pci.m | 55 | +++ b/hw/display/apple-gfx-pci.m |
56 | @@ -XXX,XX +XXX,XX @@ | 56 | @@ -XXX,XX +XXX,XX @@ |
57 | +/* | 57 | +/* |
58 | + * QEMU Apple ParavirtualizedGraphics.framework device, PCI variant | 58 | + * QEMU Apple ParavirtualizedGraphics.framework device, PCI variant |
59 | + * | 59 | + * |
60 | + * Copyright © 2023-2024 Phil Dennis-Jordan | 60 | + * Copyright © 2023-2024 Phil Dennis-Jordan |
61 | + * | 61 | + * |
62 | + * SPDX-License-Identifier: GPL-2.0-or-later | 62 | + * SPDX-License-Identifier: GPL-2.0-or-later |
63 | + * | 63 | + * |
64 | + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides | 64 | + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides |
65 | + * which implements 3d graphics passthrough to the host as well as a | 65 | + * which implements 3d graphics passthrough to the host as well as a |
66 | + * proprietary guest communication channel to drive it. This device model | 66 | + * proprietary guest communication channel to drive it. This device model |
67 | + * implements support to drive that library from within QEMU as a PCI device | 67 | + * implements support to drive that library from within QEMU as a PCI device |
68 | + * aimed primarily at x86-64 macOS VMs. | 68 | + * aimed primarily at x86-64 macOS VMs. |
69 | + */ | 69 | + */ |
70 | + | 70 | + |
71 | +#include "apple-gfx.h" | 71 | +#include "apple-gfx.h" |
72 | +#include "hw/pci/pci_device.h" | 72 | +#include "hw/pci/pci_device.h" |
73 | +#include "hw/pci/msi.h" | 73 | +#include "hw/pci/msi.h" |
74 | +#include "qapi/error.h" | 74 | +#include "qapi/error.h" |
75 | +#include "trace.h" | 75 | +#include "trace.h" |
76 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> | 76 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> |
77 | + | 77 | + |
78 | +OBJECT_DECLARE_SIMPLE_TYPE(AppleGFXPCIState, APPLE_GFX_PCI) | 78 | +OBJECT_DECLARE_SIMPLE_TYPE(AppleGFXPCIState, APPLE_GFX_PCI) |
79 | + | 79 | + |
80 | +struct AppleGFXPCIState { | 80 | +struct AppleGFXPCIState { |
81 | + PCIDevice parent_obj; | 81 | + PCIDevice parent_obj; |
82 | + | 82 | + |
83 | + AppleGFXState common; | 83 | + AppleGFXState common; |
84 | +}; | 84 | +}; |
85 | + | 85 | + |
86 | +static const char* apple_gfx_pci_option_rom_path = NULL; | 86 | +static const char* apple_gfx_pci_option_rom_path = NULL; |
87 | + | 87 | + |
88 | +static void apple_gfx_init_option_rom_path(void) | 88 | +static void apple_gfx_init_option_rom_path(void) |
89 | +{ | 89 | +{ |
90 | + NSURL *option_rom_url = PGCopyOptionROMURL(); | 90 | + NSURL *option_rom_url = PGCopyOptionROMURL(); |
91 | + const char *option_rom_path = option_rom_url.fileSystemRepresentation; | 91 | + const char *option_rom_path = option_rom_url.fileSystemRepresentation; |
92 | + apple_gfx_pci_option_rom_path = g_strdup(option_rom_path); | 92 | + apple_gfx_pci_option_rom_path = g_strdup(option_rom_path); |
93 | + [option_rom_url release]; | 93 | + [option_rom_url release]; |
94 | +} | 94 | +} |
95 | + | 95 | + |
96 | +static void apple_gfx_pci_init(Object *obj) | 96 | +static void apple_gfx_pci_init(Object *obj) |
97 | +{ | 97 | +{ |
98 | + AppleGFXPCIState *s = APPLE_GFX_PCI(obj); | 98 | + AppleGFXPCIState *s = APPLE_GFX_PCI(obj); |
99 | + | 99 | + |
100 | + if (!apple_gfx_pci_option_rom_path) { | 100 | + if (!apple_gfx_pci_option_rom_path) { |
101 | + /* | 101 | + /* |
102 | + * The following is done at device init rather than class init to avoid | ||
103 | + * running ObjC code before fork() in -daemonize mode. | ||
104 | + */ | 104 | + */ |
105 | + PCIDeviceClass *pci = PCI_DEVICE_CLASS(object_get_class(obj)); | 105 | + PCIDeviceClass *pci = PCI_DEVICE_CLASS(object_get_class(obj)); |
106 | + apple_gfx_init_option_rom_path(); | 106 | + apple_gfx_init_option_rom_path(); |
107 | + pci->romfile = apple_gfx_pci_option_rom_path; | 107 | + pci->romfile = apple_gfx_pci_option_rom_path; |
108 | + } | 108 | + } |
109 | + | 109 | + |
110 | + apple_gfx_common_init(obj, &s->common, TYPE_APPLE_GFX_PCI); | 110 | + apple_gfx_common_init(obj, &s->common, TYPE_APPLE_GFX_PCI); |
111 | +} | 111 | +} |
112 | + | 112 | + |
113 | +typedef struct AppleGFXPCIInterruptJob { | 113 | +typedef struct AppleGFXPCIInterruptJob { |
114 | + PCIDevice *device; | 114 | + PCIDevice *device; |
115 | + uint32_t vector; | 115 | + uint32_t vector; |
116 | +} AppleGFXPCIInterruptJob; | 116 | +} AppleGFXPCIInterruptJob; |
117 | + | 117 | + |
118 | +static void apple_gfx_pci_raise_interrupt(void *opaque) | 118 | +static void apple_gfx_pci_raise_interrupt(void *opaque) |
119 | +{ | 119 | +{ |
120 | + AppleGFXPCIInterruptJob *job = opaque; | 120 | + AppleGFXPCIInterruptJob *job = opaque; |
121 | + | 121 | + |
122 | + if (msi_enabled(job->device)) { | 122 | + if (msi_enabled(job->device)) { |
123 | + msi_notify(job->device, job->vector); | 123 | + msi_notify(job->device, job->vector); |
124 | + } | 124 | + } |
125 | + g_free(job); | 125 | + g_free(job); |
126 | +} | 126 | +} |
127 | + | 127 | + |
128 | +static void apple_gfx_pci_interrupt(PCIDevice *dev, uint32_t vector) | 128 | +static void apple_gfx_pci_interrupt(PCIDevice *dev, uint32_t vector) |
129 | +{ | 129 | +{ |
130 | + AppleGFXPCIInterruptJob *job; | 130 | + AppleGFXPCIInterruptJob *job; |
131 | + | 131 | + |
132 | + trace_apple_gfx_raise_irq(vector); | 132 | + trace_apple_gfx_raise_irq(vector); |
133 | + job = g_malloc0(sizeof(*job)); | 133 | + job = g_malloc0(sizeof(*job)); |
134 | + job->device = dev; | 134 | + job->device = dev; |
135 | + job->vector = vector; | 135 | + job->vector = vector; |
136 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | 136 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), |
137 | + apple_gfx_pci_raise_interrupt, job); | 137 | + apple_gfx_pci_raise_interrupt, job); |
138 | +} | 138 | +} |
139 | + | 139 | + |
140 | +static void apple_gfx_pci_realize(PCIDevice *dev, Error **errp) | 140 | +static void apple_gfx_pci_realize(PCIDevice *dev, Error **errp) |
141 | +{ | 141 | +{ |
142 | + AppleGFXPCIState *s = APPLE_GFX_PCI(dev); | 142 | + AppleGFXPCIState *s = APPLE_GFX_PCI(dev); |
143 | + int ret; | 143 | + int ret; |
144 | + | 144 | + |
145 | + pci_register_bar(dev, PG_PCI_BAR_MMIO, | 145 | + pci_register_bar(dev, PG_PCI_BAR_MMIO, |
146 | + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->common.iomem_gfx); | 146 | + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->common.iomem_gfx); |
147 | + | 147 | + |
148 | + ret = msi_init(dev, 0x0 /* config offset; 0 = find space */, | 148 | + ret = msi_init(dev, 0x0 /* config offset; 0 = find space */, |
149 | + PG_PCI_MAX_MSI_VECTORS, true /* msi64bit */, | 149 | + PG_PCI_MAX_MSI_VECTORS, true /* msi64bit */, |
150 | + false /* msi_per_vector_mask */, errp); | 150 | + false /* msi_per_vector_mask */, errp); |
151 | + if (ret != 0) { | 151 | + if (ret != 0) { |
152 | + return; | 152 | + return; |
153 | + } | 153 | + } |
154 | + | 154 | + |
155 | + @autoreleasepool { | 155 | + @autoreleasepool { |
156 | + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; | 156 | + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; |
157 | + desc.raiseInterrupt = ^(uint32_t vector) { | 157 | + desc.raiseInterrupt = ^(uint32_t vector) { |
158 | + apple_gfx_pci_interrupt(dev, vector); | 158 | + apple_gfx_pci_interrupt(dev, vector); |
159 | + }; | 159 | + }; |
160 | + | 160 | + |
161 | + apple_gfx_common_realize(&s->common, DEVICE(dev), desc, errp); | 161 | + apple_gfx_common_realize(&s->common, DEVICE(dev), desc, errp); |
162 | + [desc release]; | 162 | + [desc release]; |
163 | + desc = nil; | 163 | + desc = nil; |
164 | + } | 164 | + } |
165 | +} | 165 | +} |
166 | + | 166 | + |
167 | +static void apple_gfx_pci_reset(Object *obj, ResetType type) | 167 | +static void apple_gfx_pci_reset(Object *obj, ResetType type) |
168 | +{ | 168 | +{ |
169 | + AppleGFXPCIState *s = APPLE_GFX_PCI(obj); | 169 | + AppleGFXPCIState *s = APPLE_GFX_PCI(obj); |
170 | + [s->common.pgdev reset]; | 170 | + [s->common.pgdev reset]; |
171 | +} | 171 | +} |
172 | + | 172 | + |
173 | +static void apple_gfx_pci_class_init(ObjectClass *klass, void *data) | 173 | +static void apple_gfx_pci_class_init(ObjectClass *klass, void *data) |
174 | +{ | 174 | +{ |
175 | + DeviceClass *dc = DEVICE_CLASS(klass); | 175 | + DeviceClass *dc = DEVICE_CLASS(klass); |
176 | + PCIDeviceClass *pci = PCI_DEVICE_CLASS(klass); | 176 | + PCIDeviceClass *pci = PCI_DEVICE_CLASS(klass); |
177 | + ResettableClass *rc = RESETTABLE_CLASS(klass); | 177 | + ResettableClass *rc = RESETTABLE_CLASS(klass); |
178 | + | 178 | + |
179 | + rc->phases.hold = apple_gfx_pci_reset; | 179 | + rc->phases.hold = apple_gfx_pci_reset; |
180 | + dc->desc = "macOS Paravirtualized Graphics PCI Display Controller"; | 180 | + dc->desc = "macOS Paravirtualized Graphics PCI Display Controller"; |
181 | + dc->hotpluggable = false; | 181 | + dc->hotpluggable = false; |
182 | + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); | 182 | + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); |
183 | + | 183 | + |
184 | + pci->vendor_id = PG_PCI_VENDOR_ID; | 184 | + pci->vendor_id = PG_PCI_VENDOR_ID; |
185 | + pci->device_id = PG_PCI_DEVICE_ID; | 185 | + pci->device_id = PG_PCI_DEVICE_ID; |
186 | + pci->class_id = PCI_CLASS_DISPLAY_OTHER; | 186 | + pci->class_id = PCI_CLASS_DISPLAY_OTHER; |
187 | + pci->realize = apple_gfx_pci_realize; | 187 | + pci->realize = apple_gfx_pci_realize; |
188 | + | 188 | + |
189 | + /* TODO: Property for setting mode list */ | 189 | + /* TODO: Property for setting mode list */ |
190 | +} | 190 | +} |
191 | + | 191 | + |
192 | +static TypeInfo apple_gfx_pci_types[] = { | 192 | +static TypeInfo apple_gfx_pci_types[] = { |
193 | + { | 193 | + { |
194 | + .name = TYPE_APPLE_GFX_PCI, | 194 | + .name = TYPE_APPLE_GFX_PCI, |
195 | + .parent = TYPE_PCI_DEVICE, | 195 | + .parent = TYPE_PCI_DEVICE, |
196 | + .instance_size = sizeof(AppleGFXPCIState), | 196 | + .instance_size = sizeof(AppleGFXPCIState), |
197 | + .class_init = apple_gfx_pci_class_init, | 197 | + .class_init = apple_gfx_pci_class_init, |
198 | + .instance_init = apple_gfx_pci_init, | 198 | + .instance_init = apple_gfx_pci_init, |
199 | + .interfaces = (InterfaceInfo[]) { | 199 | + .interfaces = (InterfaceInfo[]) { |
200 | + { INTERFACE_PCIE_DEVICE }, | 200 | + { INTERFACE_PCIE_DEVICE }, |
201 | + { }, | 201 | + { }, |
202 | + }, | 202 | + }, |
203 | + } | 203 | + } |
204 | +}; | 204 | +}; |
205 | +DEFINE_TYPES(apple_gfx_pci_types) | 205 | +DEFINE_TYPES(apple_gfx_pci_types) |
206 | + | 206 | + |
207 | diff --git a/hw/display/meson.build b/hw/display/meson.build | 207 | diff --git a/hw/display/meson.build b/hw/display/meson.build |
208 | index XXXXXXX..XXXXXXX 100644 | 208 | index XXXXXXX..XXXXXXX 100644 |
209 | --- a/hw/display/meson.build | 209 | --- a/hw/display/meson.build |
210 | +++ b/hw/display/meson.build | 210 | +++ b/hw/display/meson.build |
211 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_ATI_VGA', if_true: [files('ati.c', 'ati_2d.c', 'ati_ | 211 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_ATI_VGA', if_true: [files('ati.c', 'ati_2d.c', 'ati_ |
212 | 212 | ||
213 | if host_os == 'darwin' | 213 | if host_os == 'darwin' |
214 | system_ss.add(when: 'CONFIG_MAC_PVG', if_true: [files('apple-gfx.m'), pvg, metal]) | 214 | system_ss.add(when: 'CONFIG_MAC_PVG', if_true: [files('apple-gfx.m'), pvg, metal]) |
215 | + system_ss.add(when: 'CONFIG_MAC_PVG_PCI', if_true: [files('apple-gfx-pci.m'), pvg, metal]) | 215 | + system_ss.add(when: 'CONFIG_MAC_PVG_PCI', if_true: [files('apple-gfx-pci.m'), pvg, metal]) |
216 | if cpu == 'aarch64' | 216 | if cpu == 'aarch64' |
217 | system_ss.add(when: 'CONFIG_MAC_PVG_MMIO', if_true: [files('apple-gfx-mmio.m'), pvg, metal]) | 217 | system_ss.add(when: 'CONFIG_MAC_PVG_MMIO', if_true: [files('apple-gfx-mmio.m'), pvg, metal]) |
218 | endif | 218 | endif |
219 | -- | 219 | -- |
220 | 2.39.5 (Apple Git-154) | 220 | 2.39.5 (Apple Git-154) |
221 | 221 | ||
222 | 222 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | 1 | This change wires up the PCI variant of the paravirtualised | |
2 | graphics device, mainly useful for x86-64 macOS guests, implemented | ||
3 | by macOS's ParavirtualizedGraphics.framework. It builds on code | ||
4 | shared with the vmapple/mmio variant of the PVG device. | ||
5 | |||
6 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
7 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
8 | --- | ||
9 | |||
10 | v4: | ||
11 | |||
12 | * Threading improvements analogous to those in common apple-gfx code | ||
13 | and mmio device variant. | ||
14 | * Smaller code review issues addressed. | ||
15 | |||
16 | v5: | ||
17 | |||
18 | * Minor error handling improvement. | ||
19 | |||
20 | v6: | ||
21 | |||
22 | * Removed an unused function parameter. | ||
23 | |||
24 | v9: | ||
25 | |||
26 | * Fixup of changed common call. | ||
27 | * Whitespace and comment formatting tweaks. | ||
28 | |||
29 | v11: | ||
30 | |||
31 | * Comment formatting fix. | ||
32 | |||
33 | hw/display/Kconfig | 4 + | ||
34 | hw/display/apple-gfx-pci.m | 150 +++++++++++++++++++++++++++++++++++++ | ||
35 | hw/display/meson.build | 1 + | ||
36 | 3 files changed, 155 insertions(+) | ||
37 | create mode 100644 hw/display/apple-gfx-pci.m | ||
38 | |||
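For orientation, a minimal invocation exercising the new device on a macOS host might look roughly like the sketch below. The firmware, disk and remaining devices needed to actually boot an x86-64 macOS guest are omitted here and depend on the guest image; HVF acceleration additionally assumes an Intel host, otherwise TCG is used in its place.

  qemu-system-x86_64 -machine q35 -accel hvf -display cocoa \
      -device apple-gfx-pci \
      ...
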
39 | diff --git a/hw/display/Kconfig b/hw/display/Kconfig | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/hw/display/Kconfig | ||
42 | +++ b/hw/display/Kconfig | ||
43 | @@ -XXX,XX +XXX,XX @@ config MAC_PVG_MMIO | ||
44 | bool | ||
45 | depends on MAC_PVG && AARCH64 | ||
46 | |||
47 | +config MAC_PVG_PCI | ||
48 | + bool | ||
49 | + depends on MAC_PVG && PCI | ||
50 | + default y if PCI_DEVICES | ||
51 | diff --git a/hw/display/apple-gfx-pci.m b/hw/display/apple-gfx-pci.m | ||
52 | new file mode 100644 | ||
53 | index XXXXXXX..XXXXXXX | ||
54 | --- /dev/null | ||
55 | +++ b/hw/display/apple-gfx-pci.m | ||
56 | @@ -XXX,XX +XXX,XX @@ | ||
57 | +/* | ||
58 | + * QEMU Apple ParavirtualizedGraphics.framework device, PCI variant | ||
59 | + * | ||
60 | + * Copyright © 2023-2024 Phil Dennis-Jordan | ||
61 | + * | ||
62 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
63 | + * | ||
64 | + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides | ||
65 | + * which implements 3d graphics passthrough to the host as well as a | ||
66 | + * proprietary guest communication channel to drive it. This device model | ||
67 | + * implements support to drive that library from within QEMU as a PCI device | ||
68 | + * aimed primarily at x86-64 macOS VMs. | ||
69 | + */ | ||
70 | + | ||
71 | +#include "apple-gfx.h" | ||
72 | +#include "hw/pci/pci_device.h" | ||
73 | +#include "hw/pci/msi.h" | ||
74 | +#include "qapi/error.h" | ||
75 | +#include "trace.h" | ||
76 | +#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> | ||
77 | + | ||
78 | +OBJECT_DECLARE_SIMPLE_TYPE(AppleGFXPCIState, APPLE_GFX_PCI) | ||
79 | + | ||
80 | +struct AppleGFXPCIState { | ||
81 | + PCIDevice parent_obj; | ||
82 | + | ||
83 | + AppleGFXState common; | ||
84 | +}; | ||
85 | + | ||
86 | +static const char* apple_gfx_pci_option_rom_path = NULL; | ||
87 | + | ||
88 | +static void apple_gfx_init_option_rom_path(void) | ||
89 | +{ | ||
90 | + NSURL *option_rom_url = PGCopyOptionROMURL(); | ||
91 | + const char *option_rom_path = option_rom_url.fileSystemRepresentation; | ||
92 | + apple_gfx_pci_option_rom_path = g_strdup(option_rom_path); | ||
93 | + [option_rom_url release]; | ||
94 | +} | ||
95 | + | ||
96 | +static void apple_gfx_pci_init(Object *obj) | ||
97 | +{ | ||
98 | + AppleGFXPCIState *s = APPLE_GFX_PCI(obj); | ||
99 | + | ||
100 | + if (!apple_gfx_pci_option_rom_path) { | ||
101 | + /* | ||
102 | + * The following is done on device not class init to avoid running | ||
103 | + * ObjC code before fork() in -daemonize mode. | ||
104 | + */ | ||
105 | + PCIDeviceClass *pci = PCI_DEVICE_CLASS(object_get_class(obj)); | ||
106 | + apple_gfx_init_option_rom_path(); | ||
107 | + pci->romfile = apple_gfx_pci_option_rom_path; | ||
108 | + } | ||
109 | + | ||
110 | + apple_gfx_common_init(obj, &s->common, TYPE_APPLE_GFX_PCI); | ||
111 | +} | ||
112 | + | ||
113 | +typedef struct AppleGFXPCIInterruptJob { | ||
114 | + PCIDevice *device; | ||
115 | + uint32_t vector; | ||
116 | +} AppleGFXPCIInterruptJob; | ||
117 | + | ||
118 | +static void apple_gfx_pci_raise_interrupt(void *opaque) | ||
119 | +{ | ||
120 | + AppleGFXPCIInterruptJob *job = opaque; | ||
121 | + | ||
122 | + if (msi_enabled(job->device)) { | ||
123 | + msi_notify(job->device, job->vector); | ||
124 | + } | ||
125 | + g_free(job); | ||
126 | +} | ||
127 | + | ||
128 | +static void apple_gfx_pci_interrupt(PCIDevice *dev, uint32_t vector) | ||
129 | +{ | ||
130 | + AppleGFXPCIInterruptJob *job; | ||
131 | + | ||
132 | + trace_apple_gfx_raise_irq(vector); | ||
133 | + job = g_malloc0(sizeof(*job)); | ||
134 | + job->device = dev; | ||
135 | + job->vector = vector; | ||
136 | + aio_bh_schedule_oneshot(qemu_get_aio_context(), | ||
137 | + apple_gfx_pci_raise_interrupt, job); | ||
138 | +} | ||
139 | + | ||
140 | +static void apple_gfx_pci_realize(PCIDevice *dev, Error **errp) | ||
141 | +{ | ||
142 | + AppleGFXPCIState *s = APPLE_GFX_PCI(dev); | ||
143 | + int ret; | ||
144 | + | ||
145 | + pci_register_bar(dev, PG_PCI_BAR_MMIO, | ||
146 | + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->common.iomem_gfx); | ||
147 | + | ||
148 | + ret = msi_init(dev, 0x0 /* config offset; 0 = find space */, | ||
149 | + PG_PCI_MAX_MSI_VECTORS, true /* msi64bit */, | ||
150 | + false /* msi_per_vector_mask */, errp); | ||
151 | + if (ret != 0) { | ||
152 | + return; | ||
153 | + } | ||
154 | + | ||
155 | + @autoreleasepool { | ||
156 | + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; | ||
157 | + desc.raiseInterrupt = ^(uint32_t vector) { | ||
158 | + apple_gfx_pci_interrupt(dev, vector); | ||
159 | + }; | ||
160 | + | ||
161 | + apple_gfx_common_realize(&s->common, DEVICE(dev), desc, errp); | ||
162 | + [desc release]; | ||
163 | + desc = nil; | ||
164 | + } | ||
165 | +} | ||
166 | + | ||
167 | +static void apple_gfx_pci_reset(Object *obj, ResetType type) | ||
168 | +{ | ||
169 | + AppleGFXPCIState *s = APPLE_GFX_PCI(obj); | ||
170 | + [s->common.pgdev reset]; | ||
171 | +} | ||
172 | + | ||
173 | +static void apple_gfx_pci_class_init(ObjectClass *klass, void *data) | ||
174 | +{ | ||
175 | + DeviceClass *dc = DEVICE_CLASS(klass); | ||
176 | + PCIDeviceClass *pci = PCI_DEVICE_CLASS(klass); | ||
177 | + ResettableClass *rc = RESETTABLE_CLASS(klass); | ||
178 | + | ||
179 | + rc->phases.hold = apple_gfx_pci_reset; | ||
180 | + dc->desc = "macOS Paravirtualized Graphics PCI Display Controller"; | ||
181 | + dc->hotpluggable = false; | ||
182 | + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); | ||
183 | + | ||
184 | + pci->vendor_id = PG_PCI_VENDOR_ID; | ||
185 | + pci->device_id = PG_PCI_DEVICE_ID; | ||
186 | + pci->class_id = PCI_CLASS_DISPLAY_OTHER; | ||
187 | + pci->realize = apple_gfx_pci_realize; | ||
188 | + | ||
189 | + /* TODO: Property for setting mode list */ | ||
190 | +} | ||
191 | + | ||
192 | +static TypeInfo apple_gfx_pci_types[] = { | ||
193 | + { | ||
194 | + .name = TYPE_APPLE_GFX_PCI, | ||
195 | + .parent = TYPE_PCI_DEVICE, | ||
196 | + .instance_size = sizeof(AppleGFXPCIState), | ||
197 | + .class_init = apple_gfx_pci_class_init, | ||
198 | + .instance_init = apple_gfx_pci_init, | ||
199 | + .interfaces = (InterfaceInfo[]) { | ||
200 | + { INTERFACE_PCIE_DEVICE }, | ||
201 | + { }, | ||
202 | + }, | ||
203 | + } | ||
204 | +}; | ||
205 | +DEFINE_TYPES(apple_gfx_pci_types) | ||
206 | + | ||
207 | diff --git a/hw/display/meson.build b/hw/display/meson.build | ||
208 | index XXXXXXX..XXXXXXX 100644 | ||
209 | --- a/hw/display/meson.build | ||
210 | +++ b/hw/display/meson.build | ||
211 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_ATI_VGA', if_true: [files('ati.c', 'ati_2d.c', 'ati_ | ||
212 | |||
213 | if host_os == 'darwin' | ||
214 | system_ss.add(when: 'CONFIG_MAC_PVG', if_true: [files('apple-gfx.m'), pvg, metal]) | ||
215 | + system_ss.add(when: 'CONFIG_MAC_PVG_PCI', if_true: [files('apple-gfx-pci.m'), pvg, metal]) | ||
216 | if cpu == 'aarch64' | ||
217 | system_ss.add(when: 'CONFIG_MAC_PVG_MMIO', if_true: [files('apple-gfx-mmio.m'), pvg, metal]) | ||
218 | endif | ||
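As a quick smoke test of the Kconfig and meson wiring above (a sketch, assuming a default-device build on a macOS host), the new device type should now appear in the device list:

  qemu-system-x86_64 -device help | grep apple-gfx
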
219 | -- | ||
220 | 2.39.5 (Apple Git-154) | ||
221 | |||
222 | diff view generated by jsdifflib |
1 | This change adds a property 'display_modes' on the graphics device | 1 | This change adds a property 'display_modes' on the graphics device |
---|---|---|---|
2 | which permits specifying a list of display modes (screen resolution | 2 | which permits specifying a list of display modes (screen resolution |
3 | and refresh rate). | 3 | and refresh rate). |
4 | 4 | ||
5 | The property is an array of a custom type to make the syntax slightly | 5 | The property is an array of a custom type to make the syntax slightly |
6 | less awkward to use, for example: | 6 | less awkward to use, for example: |
7 | 7 | ||
8 | -device '{"driver":"apple-gfx-pci", "display-modes":["1920x1080@60", "3840x2160@60"]}' | 8 | -device '{"driver":"apple-gfx-pci", "display-modes":["1920x1080@60", "3840x2160@60"]}' |
9 | 9 | ||
10 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 10 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
11 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 11 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
12 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 12 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
13 | --- | 13 | --- |
14 | 14 | ||
15 | v4: | 15 | v4: |
16 | 16 | ||
17 | * Switched to the native array property type, which recently gained | 17 | * Switched to the native array property type, which recently gained |
18 | command line support. | 18 | command line support. |
19 | * The property has also been added to the -mmio variant. | 19 | * The property has also been added to the -mmio variant. |
20 | * Tidied up the code a little. | 20 | * Tidied up the code a little. |
21 | 21 | ||
22 | v5: | 22 | v5: |
23 | 23 | ||
24 | * Better error handling and buffer management in property parsing and | 24 | * Better error handling and buffer management in property parsing and |
25 | output. | 25 | output. |
26 | 26 | ||
27 | v6: | 27 | v6: |
28 | 28 | ||
29 | * Switched to using NSMutableArray for the mode list to avoid need for | 29 | * Switched to using NSMutableArray for the mode list to avoid need for |
30 | allocating a temporary array - previously done with alloca. | 30 | allocating a temporary array - previously done with alloca. |
31 | 31 | ||
32 | v7: | 32 | v7: |
33 | 33 | ||
34 | * Simplified error handling in property parsing | 34 | * Simplified error handling in property parsing |
35 | 35 | ||
36 | v8: | 36 | v8: |
37 | 37 | ||
38 | * More consistent integer variable types. | 38 | * More consistent integer variable types. |
39 | 39 | ||
40 | v9: | 40 | v9: |
41 | 41 | ||
42 | * Re-ordered type definitions so we can drop a 'struct' keyword. | 42 | * Re-ordered type definitions so we can drop a 'struct' keyword. |
43 | 43 | ||
44 | hw/display/apple-gfx-mmio.m | 8 +++ | 44 | hw/display/apple-gfx-mmio.m | 8 +++ |
45 | hw/display/apple-gfx-pci.m | 9 ++- | 45 | hw/display/apple-gfx-pci.m | 9 ++- |
46 | hw/display/apple-gfx.h | 11 +++ | 46 | hw/display/apple-gfx.h | 11 +++ |
47 | hw/display/apple-gfx.m | 135 +++++++++++++++++++++++++++++++----- | 47 | hw/display/apple-gfx.m | 135 +++++++++++++++++++++++++++++++----- |
48 | hw/display/trace-events | 2 + | 48 | hw/display/trace-events | 2 + |
49 | 5 files changed, 145 insertions(+), 20 deletions(-) | 49 | 5 files changed, 145 insertions(+), 20 deletions(-) |
50 | 50 | ||
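As a usage note for the property introduced here: each mode string follows the <width>x<height>@<refresh-rate> pattern parsed further below, with all three values being positive decimal numbers of at most 16 bits, and if the property is omitted the device falls back to the built-in defaults of 1920x1080@60, 1440x1080@60 and 1280x1024@60. Restricting a device to a single 4K mode would, for example, look like:

  -device '{"driver":"apple-gfx-pci", "display-modes":["3840x2160@60"]}'
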
51 | diff --git a/hw/display/apple-gfx-mmio.m b/hw/display/apple-gfx-mmio.m | 51 | diff --git a/hw/display/apple-gfx-mmio.m b/hw/display/apple-gfx-mmio.m |
52 | index XXXXXXX..XXXXXXX 100644 | 52 | index XXXXXXX..XXXXXXX 100644 |
53 | --- a/hw/display/apple-gfx-mmio.m | 53 | --- a/hw/display/apple-gfx-mmio.m |
54 | +++ b/hw/display/apple-gfx-mmio.m | 54 | +++ b/hw/display/apple-gfx-mmio.m |
55 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_mmio_reset(Object *obj, ResetType type) | 55 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_mmio_reset(Object *obj, ResetType type) |
56 | [s->common.pgdev reset]; | 56 | [s->common.pgdev reset]; |
57 | } | 57 | } |
58 | 58 | ||
59 | +static Property apple_gfx_mmio_properties[] = { | 59 | +static Property apple_gfx_mmio_properties[] = { |
60 | + DEFINE_PROP_ARRAY("display-modes", AppleGFXMMIOState, | 60 | + DEFINE_PROP_ARRAY("display-modes", AppleGFXMMIOState, |
61 | + common.num_display_modes, common.display_modes, | 61 | + common.num_display_modes, common.display_modes, |
62 | + qdev_prop_display_mode, AppleGFXDisplayMode), | 62 | + qdev_prop_display_mode, AppleGFXDisplayMode), |
63 | + DEFINE_PROP_END_OF_LIST(), | 63 | + DEFINE_PROP_END_OF_LIST(), |
64 | +}; | 64 | +}; |
65 | 65 | ||
66 | static void apple_gfx_mmio_class_init(ObjectClass *klass, void *data) | 66 | static void apple_gfx_mmio_class_init(ObjectClass *klass, void *data) |
67 | { | 67 | { |
68 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_mmio_class_init(ObjectClass *klass, void *data) | 68 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_mmio_class_init(ObjectClass *klass, void *data) |
69 | rc->phases.hold = apple_gfx_mmio_reset; | 69 | rc->phases.hold = apple_gfx_mmio_reset; |
70 | dc->hotpluggable = false; | 70 | dc->hotpluggable = false; |
71 | dc->realize = apple_gfx_mmio_realize; | 71 | dc->realize = apple_gfx_mmio_realize; |
72 | + | 72 | + |
73 | + device_class_set_props(dc, apple_gfx_mmio_properties); | 73 | + device_class_set_props(dc, apple_gfx_mmio_properties); |
74 | } | 74 | } |
75 | 75 | ||
76 | static TypeInfo apple_gfx_mmio_types[] = { | 76 | static TypeInfo apple_gfx_mmio_types[] = { |
77 | diff --git a/hw/display/apple-gfx-pci.m b/hw/display/apple-gfx-pci.m | 77 | diff --git a/hw/display/apple-gfx-pci.m b/hw/display/apple-gfx-pci.m |
78 | index XXXXXXX..XXXXXXX 100644 | 78 | index XXXXXXX..XXXXXXX 100644 |
79 | --- a/hw/display/apple-gfx-pci.m | 79 | --- a/hw/display/apple-gfx-pci.m |
80 | +++ b/hw/display/apple-gfx-pci.m | 80 | +++ b/hw/display/apple-gfx-pci.m |
81 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_pci_reset(Object *obj, ResetType type) | 81 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_pci_reset(Object *obj, ResetType type) |
82 | [s->common.pgdev reset]; | 82 | [s->common.pgdev reset]; |
83 | } | 83 | } |
84 | 84 | ||
85 | +static Property apple_gfx_pci_properties[] = { | 85 | +static Property apple_gfx_pci_properties[] = { |
86 | + DEFINE_PROP_ARRAY("display-modes", AppleGFXPCIState, | 86 | + DEFINE_PROP_ARRAY("display-modes", AppleGFXPCIState, |
87 | + common.num_display_modes, common.display_modes, | 87 | + common.num_display_modes, common.display_modes, |
88 | + qdev_prop_display_mode, AppleGFXDisplayMode), | 88 | + qdev_prop_display_mode, AppleGFXDisplayMode), |
89 | + DEFINE_PROP_END_OF_LIST(), | 89 | + DEFINE_PROP_END_OF_LIST(), |
90 | +}; | 90 | +}; |
91 | + | 91 | + |
92 | static void apple_gfx_pci_class_init(ObjectClass *klass, void *data) | 92 | static void apple_gfx_pci_class_init(ObjectClass *klass, void *data) |
93 | { | 93 | { |
94 | DeviceClass *dc = DEVICE_CLASS(klass); | 94 | DeviceClass *dc = DEVICE_CLASS(klass); |
95 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_pci_class_init(ObjectClass *klass, void *data) | 95 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_pci_class_init(ObjectClass *klass, void *data) |
96 | pci->class_id = PCI_CLASS_DISPLAY_OTHER; | 96 | pci->class_id = PCI_CLASS_DISPLAY_OTHER; |
97 | pci->realize = apple_gfx_pci_realize; | 97 | pci->realize = apple_gfx_pci_realize; |
98 | 98 | ||
99 | - /* TODO: Property for setting mode list */ | 99 | - /* TODO: Property for setting mode list */ |
100 | + device_class_set_props(dc, apple_gfx_pci_properties); | 100 | + device_class_set_props(dc, apple_gfx_pci_properties); |
101 | } | 101 | } |
102 | 102 | ||
103 | static TypeInfo apple_gfx_pci_types[] = { | 103 | static TypeInfo apple_gfx_pci_types[] = { |
104 | diff --git a/hw/display/apple-gfx.h b/hw/display/apple-gfx.h | 104 | diff --git a/hw/display/apple-gfx.h b/hw/display/apple-gfx.h |
105 | index XXXXXXX..XXXXXXX 100644 | 105 | index XXXXXXX..XXXXXXX 100644 |
106 | --- a/hw/display/apple-gfx.h | 106 | --- a/hw/display/apple-gfx.h |
107 | +++ b/hw/display/apple-gfx.h | 107 | +++ b/hw/display/apple-gfx.h |
108 | @@ -XXX,XX +XXX,XX @@ | 108 | @@ -XXX,XX +XXX,XX @@ |
109 | #import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> | 109 | #import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> |
110 | #include "qemu/typedefs.h" | 110 | #include "qemu/typedefs.h" |
111 | #include "exec/memory.h" | 111 | #include "exec/memory.h" |
112 | +#include "hw/qdev-properties.h" | 112 | +#include "hw/qdev-properties.h" |
113 | #include "ui/surface.h" | 113 | #include "ui/surface.h" |
114 | 114 | ||
115 | @class PGDeviceDescriptor; | 115 | @class PGDeviceDescriptor; |
116 | @@ -XXX,XX +XXX,XX @@ | 116 | @@ -XXX,XX +XXX,XX @@ |
117 | 117 | ||
118 | typedef QTAILQ_HEAD(, PGTask_s) PGTaskList; | 118 | typedef QTAILQ_HEAD(, PGTask_s) PGTaskList; |
119 | 119 | ||
120 | +typedef struct AppleGFXDisplayMode { | 120 | +typedef struct AppleGFXDisplayMode { |
121 | + uint16_t width_px; | 121 | + uint16_t width_px; |
122 | + uint16_t height_px; | 122 | + uint16_t height_px; |
123 | + uint16_t refresh_rate_hz; | 123 | + uint16_t refresh_rate_hz; |
124 | +} AppleGFXDisplayMode; | 124 | +} AppleGFXDisplayMode; |
125 | + | 125 | + |
126 | typedef struct AppleGFXState { | 126 | typedef struct AppleGFXState { |
127 | /* Initialised on init/realize() */ | 127 | /* Initialised on init/realize() */ |
128 | MemoryRegion iomem_gfx; | 128 | MemoryRegion iomem_gfx; |
129 | @@ -XXX,XX +XXX,XX @@ typedef struct AppleGFXState { | 129 | @@ -XXX,XX +XXX,XX @@ typedef struct AppleGFXState { |
130 | QemuConsole *con; | 130 | QemuConsole *con; |
131 | id<MTLDevice> mtl; | 131 | id<MTLDevice> mtl; |
132 | id<MTLCommandQueue> mtl_queue; | 132 | id<MTLCommandQueue> mtl_queue; |
133 | + AppleGFXDisplayMode *display_modes; | 133 | + AppleGFXDisplayMode *display_modes; |
134 | + uint32_t num_display_modes; | 134 | + uint32_t num_display_modes; |
135 | 135 | ||
136 | /* List `tasks` is protected by task_mutex */ | 136 | /* List `tasks` is protected by task_mutex */ |
137 | QemuMutex task_mutex; | 137 | QemuMutex task_mutex; |
138 | @@ -XXX,XX +XXX,XX @@ void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, | 138 | @@ -XXX,XX +XXX,XX @@ void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, |
139 | uint64_t length, bool read_only, | 139 | uint64_t length, bool read_only, |
140 | MemoryRegion **mapping_in_region); | 140 | MemoryRegion **mapping_in_region); |
141 | 141 | ||
142 | +extern const PropertyInfo qdev_prop_display_mode; | 142 | +extern const PropertyInfo qdev_prop_display_mode; |
143 | + | 143 | + |
144 | #endif | 144 | #endif |
145 | 145 | ||
146 | diff --git a/hw/display/apple-gfx.m b/hw/display/apple-gfx.m | 146 | diff --git a/hw/display/apple-gfx.m b/hw/display/apple-gfx.m |
147 | index XXXXXXX..XXXXXXX 100644 | 147 | index XXXXXXX..XXXXXXX 100644 |
148 | --- a/hw/display/apple-gfx.m | 148 | --- a/hw/display/apple-gfx.m |
149 | +++ b/hw/display/apple-gfx.m | 149 | +++ b/hw/display/apple-gfx.m |
150 | @@ -XXX,XX +XXX,XX @@ | 150 | @@ -XXX,XX +XXX,XX @@ |
151 | #include "sysemu/dma.h" | 151 | #include "sysemu/dma.h" |
152 | #include "ui/console.h" | 152 | #include "ui/console.h" |
153 | 153 | ||
154 | -static const PGDisplayCoord_t apple_gfx_modes[] = { | 154 | -static const PGDisplayCoord_t apple_gfx_modes[] = { |
155 | - { .x = 1440, .y = 1080 }, | 155 | - { .x = 1440, .y = 1080 }, |
156 | - { .x = 1280, .y = 1024 }, | 156 | - { .x = 1280, .y = 1024 }, |
157 | +static const AppleGFXDisplayMode apple_gfx_default_modes[] = { | 157 | +static const AppleGFXDisplayMode apple_gfx_default_modes[] = { |
158 | + { 1920, 1080, 60 }, | 158 | + { 1920, 1080, 60 }, |
159 | + { 1440, 1080, 60 }, | 159 | + { 1440, 1080, 60 }, |
160 | + { 1280, 1024, 60 }, | 160 | + { 1280, 1024, 60 }, |
161 | }; | 161 | }; |
162 | 162 | ||
163 | static Error *apple_gfx_mig_blocker; | 163 | static Error *apple_gfx_mig_blocker; |
164 | @@ -XXX,XX +XXX,XX @@ static void new_frame_handler_bh(void *opaque) | 164 | @@ -XXX,XX +XXX,XX @@ static void new_frame_handler_bh(void *opaque) |
165 | return disp_desc; | 165 | return disp_desc; |
166 | } | 166 | } |
167 | 167 | ||
168 | -static NSArray<PGDisplayMode*>* apple_gfx_prepare_display_mode_array(void) | 168 | -static NSArray<PGDisplayMode*>* apple_gfx_prepare_display_mode_array(void) |
169 | +static NSArray<PGDisplayMode *> *apple_gfx_create_display_mode_array( | 169 | +static NSArray<PGDisplayMode *> *apple_gfx_create_display_mode_array( |
170 | + const AppleGFXDisplayMode display_modes[], uint32_t display_mode_count) | 170 | + const AppleGFXDisplayMode display_modes[], uint32_t display_mode_count) |
171 | { | 171 | { |
172 | - PGDisplayMode *modes[ARRAY_SIZE(apple_gfx_modes)]; | 172 | - PGDisplayMode *modes[ARRAY_SIZE(apple_gfx_modes)]; |
173 | - NSArray<PGDisplayMode*>* mode_array; | 173 | - NSArray<PGDisplayMode*>* mode_array; |
174 | - int i; | 174 | - int i; |
175 | - | 175 | - |
176 | - for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { | 176 | - for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { |
177 | - modes[i] = | 177 | - modes[i] = |
178 | - [[PGDisplayMode alloc] initWithSizeInPixels:apple_gfx_modes[i] refreshRateInHz:60.]; | 178 | - [[PGDisplayMode alloc] initWithSizeInPixels:apple_gfx_modes[i] refreshRateInHz:60.]; |
179 | - } | 179 | - } |
180 | - | 180 | - |
181 | - mode_array = [NSArray arrayWithObjects:modes count:ARRAY_SIZE(apple_gfx_modes)]; | 181 | - mode_array = [NSArray arrayWithObjects:modes count:ARRAY_SIZE(apple_gfx_modes)]; |
182 | - | 182 | - |
183 | - for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { | 183 | - for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { |
184 | - [modes[i] release]; | 184 | - [modes[i] release]; |
185 | - modes[i] = nil; | 185 | - modes[i] = nil; |
186 | + uint32_t i; | 186 | + uint32_t i; |
187 | + PGDisplayMode *mode_obj; | 187 | + PGDisplayMode *mode_obj; |
188 | + NSMutableArray<PGDisplayMode *> *mode_array = | 188 | + NSMutableArray<PGDisplayMode *> *mode_array = |
189 | + [[NSMutableArray alloc] initWithCapacity:display_mode_count]; | 189 | + [[NSMutableArray alloc] initWithCapacity:display_mode_count]; |
190 | + | 190 | + |
191 | + for (i = 0; i < display_mode_count; i++) { | 191 | + for (i = 0; i < display_mode_count; i++) { |
192 | + const AppleGFXDisplayMode *mode = &display_modes[i]; | 192 | + const AppleGFXDisplayMode *mode = &display_modes[i]; |
193 | + trace_apple_gfx_display_mode(i, mode->width_px, mode->height_px); | 193 | + trace_apple_gfx_display_mode(i, mode->width_px, mode->height_px); |
194 | + PGDisplayCoord_t mode_size = { mode->width_px, mode->height_px }; | 194 | + PGDisplayCoord_t mode_size = { mode->width_px, mode->height_px }; |
195 | + | 195 | + |
196 | + mode_obj = | 196 | + mode_obj = |
197 | + [[PGDisplayMode alloc] initWithSizeInPixels:mode_size | 197 | + [[PGDisplayMode alloc] initWithSizeInPixels:mode_size |
198 | + refreshRateInHz:mode->refresh_rate_hz]; | 198 | + refreshRateInHz:mode->refresh_rate_hz]; |
199 | + [mode_array addObject:mode_obj]; | 199 | + [mode_array addObject:mode_obj]; |
200 | + [mode_obj release]; | 200 | + [mode_obj release]; |
201 | } | 201 | } |
202 | 202 | ||
203 | return mode_array; | 203 | return mode_array; |
204 | @@ -XXX,XX +XXX,XX @@ bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, | 204 | @@ -XXX,XX +XXX,XX @@ bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, |
205 | PGDeviceDescriptor *desc, Error **errp) | 205 | PGDeviceDescriptor *desc, Error **errp) |
206 | { | 206 | { |
207 | PGDisplayDescriptor *disp_desc; | 207 | PGDisplayDescriptor *disp_desc; |
208 | + const AppleGFXDisplayMode *display_modes = apple_gfx_default_modes; | 208 | + const AppleGFXDisplayMode *display_modes = apple_gfx_default_modes; |
209 | + uint32_t num_display_modes = ARRAY_SIZE(apple_gfx_default_modes); | 209 | + uint32_t num_display_modes = ARRAY_SIZE(apple_gfx_default_modes); |
210 | + NSArray<PGDisplayMode *> *mode_array; | 210 | + NSArray<PGDisplayMode *> *mode_array; |
211 | 211 | ||
212 | if (apple_gfx_mig_blocker == NULL) { | 212 | if (apple_gfx_mig_blocker == NULL) { |
213 | error_setg(&apple_gfx_mig_blocker, | 213 | error_setg(&apple_gfx_mig_blocker, |
214 | @@ -XXX,XX +XXX,XX @@ bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, | 214 | @@ -XXX,XX +XXX,XX @@ bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, |
215 | port:0 | 215 | port:0 |
216 | serialNum:next_pgdisplay_serial_num++]; | 216 | serialNum:next_pgdisplay_serial_num++]; |
217 | [disp_desc release]; | 217 | [disp_desc release]; |
218 | - s->pgdisp.modeList = apple_gfx_prepare_display_mode_array(); | 218 | - s->pgdisp.modeList = apple_gfx_prepare_display_mode_array(); |
219 | + | 219 | + |
220 | + if (s->display_modes != NULL && s->num_display_modes > 0) { | 220 | + if (s->display_modes != NULL && s->num_display_modes > 0) { |
221 | + trace_apple_gfx_common_realize_modes_property(s->num_display_modes); | 221 | + trace_apple_gfx_common_realize_modes_property(s->num_display_modes); |
222 | + display_modes = s->display_modes; | 222 | + display_modes = s->display_modes; |
223 | + num_display_modes = s->num_display_modes; | 223 | + num_display_modes = s->num_display_modes; |
224 | + } | 224 | + } |
225 | + s->pgdisp.modeList = mode_array = | 225 | + s->pgdisp.modeList = mode_array = |
226 | + apple_gfx_create_display_mode_array(display_modes, num_display_modes); | 226 | + apple_gfx_create_display_mode_array(display_modes, num_display_modes); |
227 | + [mode_array release]; | 227 | + [mode_array release]; |
228 | 228 | ||
229 | s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s); | 229 | s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s); |
230 | return true; | 230 | return true; |
231 | } | 231 | } |
232 | + | 232 | + |
233 | +/* ------ Display mode list device property ------ */ | 233 | +/* ------ Display mode list device property ------ */ |
234 | + | 234 | + |
235 | +static void apple_gfx_get_display_mode(Object *obj, Visitor *v, | 235 | +static void apple_gfx_get_display_mode(Object *obj, Visitor *v, |
236 | + const char *name, void *opaque, | 236 | + const char *name, void *opaque, |
237 | + Error **errp) | 237 | + Error **errp) |
238 | +{ | 238 | +{ |
239 | + Property *prop = opaque; | 239 | + Property *prop = opaque; |
240 | + AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop); | 240 | + AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop); |
241 | + /* 3 uint16s (max 5 digits) + 2 separator characters + nul. */ | 241 | + /* 3 uint16s (max 5 digits) + 2 separator characters + nul. */ |
242 | + char buffer[5 * 3 + 2 + 1]; | 242 | + char buffer[5 * 3 + 2 + 1]; |
243 | + char *pos = buffer; | 243 | + char *pos = buffer; |
244 | + | 244 | + |
245 | + int rc = snprintf(buffer, sizeof(buffer), | 245 | + int rc = snprintf(buffer, sizeof(buffer), |
246 | + "%"PRIu16"x%"PRIu16"@%"PRIu16, | 246 | + "%"PRIu16"x%"PRIu16"@%"PRIu16, |
247 | + mode->width_px, mode->height_px, | 247 | + mode->width_px, mode->height_px, |
248 | + mode->refresh_rate_hz); | 248 | + mode->refresh_rate_hz); |
249 | + assert(rc < sizeof(buffer)); | 249 | + assert(rc < sizeof(buffer)); |
250 | + | 250 | + |
251 | + visit_type_str(v, name, &pos, errp); | 251 | + visit_type_str(v, name, &pos, errp); |
252 | +} | 252 | +} |
253 | + | 253 | + |
254 | +static void apple_gfx_set_display_mode(Object *obj, Visitor *v, | 254 | +static void apple_gfx_set_display_mode(Object *obj, Visitor *v, |
255 | + const char *name, void *opaque, | 255 | + const char *name, void *opaque, |
256 | + Error **errp) | 256 | + Error **errp) |
257 | +{ | 257 | +{ |
258 | + Property *prop = opaque; | 258 | + Property *prop = opaque; |
259 | + AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop); | 259 | + AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop); |
260 | + const char *endptr; | 260 | + const char *endptr; |
261 | + g_autofree char *str = NULL; | 261 | + g_autofree char *str = NULL; |
262 | + int ret; | 262 | + int ret; |
263 | + int val; | 263 | + int val; |
264 | + | 264 | + |
265 | + if (!visit_type_str(v, name, &str, errp)) { | 265 | + if (!visit_type_str(v, name, &str, errp)) { |
266 | + return; | 266 | + return; |
267 | + } | 267 | + } |
268 | + | 268 | + |
269 | + endptr = str; | 269 | + endptr = str; |
270 | + | 270 | + |
271 | + ret = qemu_strtoi(endptr, &endptr, 10, &val); | 271 | + ret = qemu_strtoi(endptr, &endptr, 10, &val); |
272 | + if (ret || val > UINT16_MAX || val <= 0) { | 272 | + if (ret || val > UINT16_MAX || val <= 0) { |
273 | + error_setg(errp, "width in '%s' must be a decimal integer number " | 273 | + error_setg(errp, "width in '%s' must be a decimal integer number " |
274 | + "of pixels in the range 1..65535", name); | 274 | + "of pixels in the range 1..65535", name); |
275 | + return; | 275 | + return; |
276 | + } | 276 | + } |
277 | + mode->width_px = val; | 277 | + mode->width_px = val; |
278 | + if (*endptr != 'x') { | 278 | + if (*endptr != 'x') { |
279 | + goto separator_error; | 279 | + goto separator_error; |
280 | + } | 280 | + } |
281 | + | 281 | + |
282 | + ret = qemu_strtoi(endptr + 1, &endptr, 10, &val); | 282 | + ret = qemu_strtoi(endptr + 1, &endptr, 10, &val); |
283 | + if (ret || val > UINT16_MAX || val <= 0) { | 283 | + if (ret || val > UINT16_MAX || val <= 0) { |
284 | + error_setg(errp, "height in '%s' must be a decimal integer number " | 284 | + error_setg(errp, "height in '%s' must be a decimal integer number " |
285 | + "of pixels in the range 1..65535", name); | 285 | + "of pixels in the range 1..65535", name); |
286 | + return; | 286 | + return; |
287 | + } | 287 | + } |
288 | + mode->height_px = val; | 288 | + mode->height_px = val; |
289 | + if (*endptr != '@') { | 289 | + if (*endptr != '@') { |
290 | + goto separator_error; | 290 | + goto separator_error; |
291 | + } | 291 | + } |
292 | + | 292 | + |
293 | + ret = qemu_strtoi(endptr + 1, &endptr, 10, &val); | 293 | + ret = qemu_strtoi(endptr + 1, &endptr, 10, &val); |
294 | + if (ret || val > UINT16_MAX || val <= 0) { | 294 | + if (ret || val > UINT16_MAX || val <= 0) { |
295 | + error_setg(errp, "refresh rate in '%s'" | 295 | + error_setg(errp, "refresh rate in '%s'" |
296 | + " must be a positive decimal integer (Hertz)", name); | 296 | + " must be a positive decimal integer (Hertz)", name); |
297 | + return; | 297 | + return; |
298 | + } | 298 | + } |
299 | + mode->refresh_rate_hz = val; | 299 | + mode->refresh_rate_hz = val; |
300 | + return; | 300 | + return; |
301 | + | 301 | + |
302 | +separator_error: | 302 | +separator_error: |
303 | + error_setg(errp, "Each display mode takes the format " | 303 | + error_setg(errp, "Each display mode takes the format " |
304 | + "'<width>x<height>@<rate>'"); | 304 | + "'<width>x<height>@<rate>'"); |
305 | +} | 305 | +} |
306 | + | 306 | + |
307 | +const PropertyInfo qdev_prop_display_mode = { | 307 | +const PropertyInfo qdev_prop_display_mode = { |
308 | + .name = "display_mode", | 308 | + .name = "display_mode", |
309 | + .description = | 309 | + .description = |
310 | + "Display mode in pixels and Hertz, as <width>x<height>@<refresh-rate> " | 310 | + "Display mode in pixels and Hertz, as <width>x<height>@<refresh-rate> " |
311 | + "Example: 3840x2160@60", | 311 | + "Example: 3840x2160@60", |
312 | + .get = apple_gfx_get_display_mode, | 312 | + .get = apple_gfx_get_display_mode, |
313 | + .set = apple_gfx_set_display_mode, | 313 | + .set = apple_gfx_set_display_mode, |
314 | +}; | 314 | +}; |
315 | diff --git a/hw/display/trace-events b/hw/display/trace-events | 315 | diff --git a/hw/display/trace-events b/hw/display/trace-events |
316 | index XXXXXXX..XXXXXXX 100644 | 316 | index XXXXXXX..XXXXXXX 100644 |
317 | --- a/hw/display/trace-events | 317 | --- a/hw/display/trace-events |
318 | +++ b/hw/display/trace-events | 318 | +++ b/hw/display/trace-events |
319 | @@ -XXX,XX +XXX,XX @@ apple_gfx_cursor_set(uint32_t bpp, uint64_t width, uint64_t height) "bpp=%d widt | 319 | @@ -XXX,XX +XXX,XX @@ apple_gfx_cursor_set(uint32_t bpp, uint64_t width, uint64_t height) "bpp=%d widt |
320 | apple_gfx_cursor_show(uint32_t show) "show=%d" | 320 | apple_gfx_cursor_show(uint32_t show) "show=%d" |
321 | apple_gfx_cursor_move(void) "" | 321 | apple_gfx_cursor_move(void) "" |
322 | apple_gfx_common_init(const char *device_name, size_t mmio_size) "device: %s; MMIO size: %zu bytes" | 322 | apple_gfx_common_init(const char *device_name, size_t mmio_size) "device: %s; MMIO size: %zu bytes" |
323 | +apple_gfx_common_realize_modes_property(uint32_t num_modes) "using %u modes supplied by 'display-modes' device property" | 323 | +apple_gfx_common_realize_modes_property(uint32_t num_modes) "using %u modes supplied by 'display-modes' device property" |
324 | +apple_gfx_display_mode(uint32_t mode_idx, uint16_t width_px, uint16_t height_px) "mode %2"PRIu32": %4"PRIu16"x%4"PRIu16 | 324 | +apple_gfx_display_mode(uint32_t mode_idx, uint16_t width_px, uint16_t height_px) "mode %2"PRIu32": %4"PRIu16"x%4"PRIu16 |
325 | 325 | ||
326 | # apple-gfx-mmio.m | 326 | # apple-gfx-mmio.m |
327 | apple_gfx_mmio_iosfc_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | 327 | apple_gfx_mmio_iosfc_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 |
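One way to observe the two new trace points at runtime is QEMU's -trace pattern option (a sketch; it assumes a trace backend such as 'log' is enabled in the build):

  qemu-system-x86_64 ... -device apple-gfx-pci -trace 'apple_gfx_*'

With a 'display-modes' property supplied, apple_gfx_common_realize_modes_property is logged once and apple_gfx_display_mode once per configured mode during realize.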
328 | -- | 328 | -- |
329 | 2.39.5 (Apple Git-154) | 329 | 2.39.5 (Apple Git-154) | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | 1 | This change adds a property 'display_modes' on the graphics device | |
2 | which permits specifying a list of display modes (screen resolution | ||
3 | and refresh rate). | ||
4 | |||
5 | The property is an array of a custom type to make the syntax slightly | ||
6 | less awkward to use, for example: | ||
7 | |||
8 | -device '{"driver":"apple-gfx-pci", "display-modes":["1920x1080@60", "3840x2160@60"]}' | ||
9 | |||
10 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
11 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
12 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
13 | --- | ||
14 | |||
15 | v4: | ||
16 | |||
17 | * Switched to the native array property type, which recently gained | ||
18 | command line support. | ||
19 | * The property has also been added to the -mmio variant. | ||
20 | * Tidied up the code a little. | ||
21 | |||
22 | v5: | ||
23 | |||
24 | * Better error handling and buffer management in property parsing and | ||
25 | output. | ||
26 | |||
27 | v6: | ||
28 | |||
29 | * Switched to using NSMutableArray for the mode list to avoid the need for | ||
30 | allocating a temporary array - previously done with alloca. | ||
31 | |||
32 | v7: | ||
33 | |||
34 | * Simplified error handling in property parsing | ||
35 | |||
36 | v8: | ||
37 | |||
38 | * More consistent integer variable types. | ||
39 | |||
40 | v9: | ||
41 | |||
42 | * Re-ordered type definitions so we can drop a 'struct' keyword. | ||
43 | |||
44 | hw/display/apple-gfx-mmio.m | 8 +++ | ||
45 | hw/display/apple-gfx-pci.m | 9 ++- | ||
46 | hw/display/apple-gfx.h | 11 +++ | ||
47 | hw/display/apple-gfx.m | 135 +++++++++++++++++++++++++++++++----- | ||
48 | hw/display/trace-events | 2 + | ||
49 | 5 files changed, 145 insertions(+), 20 deletions(-) | ||
50 | |||
51 | diff --git a/hw/display/apple-gfx-mmio.m b/hw/display/apple-gfx-mmio.m | ||
52 | index XXXXXXX..XXXXXXX 100644 | ||
53 | --- a/hw/display/apple-gfx-mmio.m | ||
54 | +++ b/hw/display/apple-gfx-mmio.m | ||
55 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_mmio_reset(Object *obj, ResetType type) | ||
56 | [s->common.pgdev reset]; | ||
57 | } | ||
58 | |||
59 | +static Property apple_gfx_mmio_properties[] = { | ||
60 | + DEFINE_PROP_ARRAY("display-modes", AppleGFXMMIOState, | ||
61 | + common.num_display_modes, common.display_modes, | ||
62 | + qdev_prop_display_mode, AppleGFXDisplayMode), | ||
63 | + DEFINE_PROP_END_OF_LIST(), | ||
64 | +}; | ||
65 | |||
66 | static void apple_gfx_mmio_class_init(ObjectClass *klass, void *data) | ||
67 | { | ||
68 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_mmio_class_init(ObjectClass *klass, void *data) | ||
69 | rc->phases.hold = apple_gfx_mmio_reset; | ||
70 | dc->hotpluggable = false; | ||
71 | dc->realize = apple_gfx_mmio_realize; | ||
72 | + | ||
73 | + device_class_set_props(dc, apple_gfx_mmio_properties); | ||
74 | } | ||
75 | |||
76 | static TypeInfo apple_gfx_mmio_types[] = { | ||
77 | diff --git a/hw/display/apple-gfx-pci.m b/hw/display/apple-gfx-pci.m | ||
78 | index XXXXXXX..XXXXXXX 100644 | ||
79 | --- a/hw/display/apple-gfx-pci.m | ||
80 | +++ b/hw/display/apple-gfx-pci.m | ||
81 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_pci_reset(Object *obj, ResetType type) | ||
82 | [s->common.pgdev reset]; | ||
83 | } | ||
84 | |||
85 | +static Property apple_gfx_pci_properties[] = { | ||
86 | + DEFINE_PROP_ARRAY("display-modes", AppleGFXPCIState, | ||
87 | + common.num_display_modes, common.display_modes, | ||
88 | + qdev_prop_display_mode, AppleGFXDisplayMode), | ||
89 | + DEFINE_PROP_END_OF_LIST(), | ||
90 | +}; | ||
91 | + | ||
92 | static void apple_gfx_pci_class_init(ObjectClass *klass, void *data) | ||
93 | { | ||
94 | DeviceClass *dc = DEVICE_CLASS(klass); | ||
95 | @@ -XXX,XX +XXX,XX @@ static void apple_gfx_pci_class_init(ObjectClass *klass, void *data) | ||
96 | pci->class_id = PCI_CLASS_DISPLAY_OTHER; | ||
97 | pci->realize = apple_gfx_pci_realize; | ||
98 | |||
99 | - /* TODO: Property for setting mode list */ | ||
100 | + device_class_set_props(dc, apple_gfx_pci_properties); | ||
101 | } | ||
102 | |||
103 | static TypeInfo apple_gfx_pci_types[] = { | ||
104 | diff --git a/hw/display/apple-gfx.h b/hw/display/apple-gfx.h | ||
105 | index XXXXXXX..XXXXXXX 100644 | ||
106 | --- a/hw/display/apple-gfx.h | ||
107 | +++ b/hw/display/apple-gfx.h | ||
108 | @@ -XXX,XX +XXX,XX @@ | ||
109 | #import <ParavirtualizedGraphics/ParavirtualizedGraphics.h> | ||
110 | #include "qemu/typedefs.h" | ||
111 | #include "exec/memory.h" | ||
112 | +#include "hw/qdev-properties.h" | ||
113 | #include "ui/surface.h" | ||
114 | |||
115 | @class PGDeviceDescriptor; | ||
116 | @@ -XXX,XX +XXX,XX @@ | ||
117 | |||
118 | typedef QTAILQ_HEAD(, PGTask_s) PGTaskList; | ||
119 | |||
120 | +typedef struct AppleGFXDisplayMode { | ||
121 | + uint16_t width_px; | ||
122 | + uint16_t height_px; | ||
123 | + uint16_t refresh_rate_hz; | ||
124 | +} AppleGFXDisplayMode; | ||
125 | + | ||
126 | typedef struct AppleGFXState { | ||
127 | /* Initialised on init/realize() */ | ||
128 | MemoryRegion iomem_gfx; | ||
129 | @@ -XXX,XX +XXX,XX @@ typedef struct AppleGFXState { | ||
130 | QemuConsole *con; | ||
131 | id<MTLDevice> mtl; | ||
132 | id<MTLCommandQueue> mtl_queue; | ||
133 | + AppleGFXDisplayMode *display_modes; | ||
134 | + uint32_t num_display_modes; | ||
135 | |||
136 | /* List `tasks` is protected by task_mutex */ | ||
137 | QemuMutex task_mutex; | ||
138 | @@ -XXX,XX +XXX,XX @@ void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, | ||
139 | uint64_t length, bool read_only, | ||
140 | MemoryRegion **mapping_in_region); | ||
141 | |||
142 | +extern const PropertyInfo qdev_prop_display_mode; | ||
143 | + | ||
144 | #endif | ||
145 | |||
146 | diff --git a/hw/display/apple-gfx.m b/hw/display/apple-gfx.m | ||
147 | index XXXXXXX..XXXXXXX 100644 | ||
148 | --- a/hw/display/apple-gfx.m | ||
149 | +++ b/hw/display/apple-gfx.m | ||
150 | @@ -XXX,XX +XXX,XX @@ | ||
151 | #include "sysemu/dma.h" | ||
152 | #include "ui/console.h" | ||
153 | |||
154 | -static const PGDisplayCoord_t apple_gfx_modes[] = { | ||
155 | - { .x = 1440, .y = 1080 }, | ||
156 | - { .x = 1280, .y = 1024 }, | ||
157 | +static const AppleGFXDisplayMode apple_gfx_default_modes[] = { | ||
158 | + { 1920, 1080, 60 }, | ||
159 | + { 1440, 1080, 60 }, | ||
160 | + { 1280, 1024, 60 }, | ||
161 | }; | ||
162 | |||
163 | static Error *apple_gfx_mig_blocker; | ||
164 | @@ -XXX,XX +XXX,XX @@ static void new_frame_handler_bh(void *opaque) | ||
165 | return disp_desc; | ||
166 | } | ||
167 | |||
168 | -static NSArray<PGDisplayMode*>* apple_gfx_prepare_display_mode_array(void) | ||
169 | +static NSArray<PGDisplayMode *> *apple_gfx_create_display_mode_array( | ||
170 | + const AppleGFXDisplayMode display_modes[], uint32_t display_mode_count) | ||
171 | { | ||
172 | - PGDisplayMode *modes[ARRAY_SIZE(apple_gfx_modes)]; | ||
173 | - NSArray<PGDisplayMode*>* mode_array; | ||
174 | - int i; | ||
175 | - | ||
176 | - for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { | ||
177 | - modes[i] = | ||
178 | - [[PGDisplayMode alloc] initWithSizeInPixels:apple_gfx_modes[i] refreshRateInHz:60.]; | ||
179 | - } | ||
180 | - | ||
181 | - mode_array = [NSArray arrayWithObjects:modes count:ARRAY_SIZE(apple_gfx_modes)]; | ||
182 | - | ||
183 | - for (i = 0; i < ARRAY_SIZE(apple_gfx_modes); i++) { | ||
184 | - [modes[i] release]; | ||
185 | - modes[i] = nil; | ||
186 | + uint32_t i; | ||
187 | + PGDisplayMode *mode_obj; | ||
188 | + NSMutableArray<PGDisplayMode *> *mode_array = | ||
189 | + [[NSMutableArray alloc] initWithCapacity:display_mode_count]; | ||
190 | + | ||
191 | + for (i = 0; i < display_mode_count; i++) { | ||
192 | + const AppleGFXDisplayMode *mode = &display_modes[i]; | ||
193 | + trace_apple_gfx_display_mode(i, mode->width_px, mode->height_px); | ||
194 | + PGDisplayCoord_t mode_size = { mode->width_px, mode->height_px }; | ||
195 | + | ||
196 | + mode_obj = | ||
197 | + [[PGDisplayMode alloc] initWithSizeInPixels:mode_size | ||
198 | + refreshRateInHz:mode->refresh_rate_hz]; | ||
199 | + [mode_array addObject:mode_obj]; | ||
200 | + [mode_obj release]; | ||
201 | } | ||
202 | |||
203 | return mode_array; | ||
204 | @@ -XXX,XX +XXX,XX @@ bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, | ||
205 | PGDeviceDescriptor *desc, Error **errp) | ||
206 | { | ||
207 | PGDisplayDescriptor *disp_desc; | ||
208 | + const AppleGFXDisplayMode *display_modes = apple_gfx_default_modes; | ||
209 | + uint32_t num_display_modes = ARRAY_SIZE(apple_gfx_default_modes); | ||
210 | + NSArray<PGDisplayMode *> *mode_array; | ||
211 | |||
212 | if (apple_gfx_mig_blocker == NULL) { | ||
213 | error_setg(&apple_gfx_mig_blocker, | ||
214 | @@ -XXX,XX +XXX,XX @@ bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, | ||
215 | port:0 | ||
216 | serialNum:next_pgdisplay_serial_num++]; | ||
217 | [disp_desc release]; | ||
218 | - s->pgdisp.modeList = apple_gfx_prepare_display_mode_array(); | ||
219 | + | ||
220 | + if (s->display_modes != NULL && s->num_display_modes > 0) { | ||
221 | + trace_apple_gfx_common_realize_modes_property(s->num_display_modes); | ||
222 | + display_modes = s->display_modes; | ||
223 | + num_display_modes = s->num_display_modes; | ||
224 | + } | ||
225 | + s->pgdisp.modeList = mode_array = | ||
226 | + apple_gfx_create_display_mode_array(display_modes, num_display_modes); | ||
227 | + [mode_array release]; | ||
228 | |||
229 | s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s); | ||
230 | return true; | ||
231 | } | ||
232 | + | ||
233 | +/* ------ Display mode list device property ------ */ | ||
234 | + | ||
235 | +static void apple_gfx_get_display_mode(Object *obj, Visitor *v, | ||
236 | + const char *name, void *opaque, | ||
237 | + Error **errp) | ||
238 | +{ | ||
239 | + Property *prop = opaque; | ||
240 | + AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop); | ||
241 | + /* 3 uint16s (max 5 digits) + 2 separator characters + nul. */ | ||
242 | + char buffer[5 * 3 + 2 + 1]; | ||
243 | + char *pos = buffer; | ||
244 | + | ||
245 | + int rc = snprintf(buffer, sizeof(buffer), | ||
246 | + "%"PRIu16"x%"PRIu16"@%"PRIu16, | ||
247 | + mode->width_px, mode->height_px, | ||
248 | + mode->refresh_rate_hz); | ||
249 | + assert(rc < sizeof(buffer)); | ||
250 | + | ||
251 | + visit_type_str(v, name, &pos, errp); | ||
252 | +} | ||
253 | + | ||
254 | +static void apple_gfx_set_display_mode(Object *obj, Visitor *v, | ||
255 | + const char *name, void *opaque, | ||
256 | + Error **errp) | ||
257 | +{ | ||
258 | + Property *prop = opaque; | ||
259 | + AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop); | ||
260 | + const char *endptr; | ||
261 | + g_autofree char *str = NULL; | ||
262 | + int ret; | ||
263 | + int val; | ||
264 | + | ||
265 | + if (!visit_type_str(v, name, &str, errp)) { | ||
266 | + return; | ||
267 | + } | ||
268 | + | ||
269 | + endptr = str; | ||
270 | + | ||
271 | + ret = qemu_strtoi(endptr, &endptr, 10, &val); | ||
272 | + if (ret || val > UINT16_MAX || val <= 0) { | ||
273 | + error_setg(errp, "width in '%s' must be a decimal integer number " | ||
274 | + "of pixels in the range 1..65535", name); | ||
275 | + return; | ||
276 | + } | ||
277 | + mode->width_px = val; | ||
278 | + if (*endptr != 'x') { | ||
279 | + goto separator_error; | ||
280 | + } | ||
281 | + | ||
282 | + ret = qemu_strtoi(endptr + 1, &endptr, 10, &val); | ||
283 | + if (ret || val > UINT16_MAX || val <= 0) { | ||
284 | + error_setg(errp, "height in '%s' must be a decimal integer number " | ||
285 | + "of pixels in the range 1..65535", name); | ||
286 | + return; | ||
287 | + } | ||
288 | + mode->height_px = val; | ||
289 | + if (*endptr != '@') { | ||
290 | + goto separator_error; | ||
291 | + } | ||
292 | + | ||
293 | + ret = qemu_strtoi(endptr + 1, &endptr, 10, &val); | ||
294 | + if (ret || val > UINT16_MAX || val <= 0) { | ||
295 | + error_setg(errp, "refresh rate in '%s'" | ||
296 | + " must be a positive decimal integer (Hertz)", name); | ||
297 | + return; | ||
298 | + } | ||
299 | + mode->refresh_rate_hz = val; | ||
300 | + return; | ||
301 | + | ||
302 | +separator_error: | ||
303 | + error_setg(errp, "Each display mode takes the format " | ||
304 | + "'<width>x<height>@<rate>'"); | ||
305 | +} | ||
306 | + | ||
307 | +const PropertyInfo qdev_prop_display_mode = { | ||
308 | + .name = "display_mode", | ||
309 | + .description = | ||
310 | + "Display mode in pixels and Hertz, as <width>x<height>@<refresh-rate> " | ||
311 | + "Example: 3840x2160@60", | ||
312 | + .get = apple_gfx_get_display_mode, | ||
313 | + .set = apple_gfx_set_display_mode, | ||
314 | +}; | ||
315 | diff --git a/hw/display/trace-events b/hw/display/trace-events | ||
316 | index XXXXXXX..XXXXXXX 100644 | ||
317 | --- a/hw/display/trace-events | ||
318 | +++ b/hw/display/trace-events | ||
319 | @@ -XXX,XX +XXX,XX @@ apple_gfx_cursor_set(uint32_t bpp, uint64_t width, uint64_t height) "bpp=%d widt | ||
320 | apple_gfx_cursor_show(uint32_t show) "show=%d" | ||
321 | apple_gfx_cursor_move(void) "" | ||
322 | apple_gfx_common_init(const char *device_name, size_t mmio_size) "device: %s; MMIO size: %zu bytes" | ||
323 | +apple_gfx_common_realize_modes_property(uint32_t num_modes) "using %u modes supplied by 'display-modes' device property" | ||
324 | +apple_gfx_display_mode(uint32_t mode_idx, uint16_t width_px, uint16_t height_px) "mode %2"PRIu32": %4"PRIu16"x%4"PRIu16 | ||
325 | |||
326 | # apple-gfx-mmio.m | ||
327 | apple_gfx_mmio_iosfc_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | ||
328 | -- | ||
329 | 2.39.5 (Apple Git-154) | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | I'm happy to take responsibility for the macOS PV graphics code. As | ||
2 | HVF patches don't seem to get much attention at the moment, I'm also | ||
3 | adding myself as designated reviewer for HVF and x86 HVF to try and | ||
4 | improve that. | ||
1 | 5 | ||
6 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
7 | Reviewed-by: Roman Bolshakov <rbolshakov@ddn.com> | ||
8 | --- | ||
9 | MAINTAINERS | 7 +++++++ | ||
10 | 1 file changed, 7 insertions(+) | ||
11 | |||
12 | diff --git a/MAINTAINERS b/MAINTAINERS | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/MAINTAINERS | ||
15 | +++ b/MAINTAINERS | ||
16 | @@ -XXX,XX +XXX,XX @@ F: target/arm/hvf/ | ||
17 | X86 HVF CPUs | ||
18 | M: Cameron Esfahani <dirty@apple.com> | ||
19 | M: Roman Bolshakov <rbolshakov@ddn.com> | ||
20 | +R: Phil Dennis-Jordan <phil@philjordan.eu> | ||
21 | W: https://wiki.qemu.org/Features/HVF | ||
22 | S: Maintained | ||
23 | F: target/i386/hvf/ | ||
24 | @@ -XXX,XX +XXX,XX @@ F: target/i386/hvf/ | ||
25 | HVF | ||
26 | M: Cameron Esfahani <dirty@apple.com> | ||
27 | M: Roman Bolshakov <rbolshakov@ddn.com> | ||
28 | +R: Phil Dennis-Jordan <phil@philjordan.eu> | ||
29 | W: https://wiki.qemu.org/Features/HVF | ||
30 | S: Maintained | ||
31 | F: accel/hvf/ | ||
32 | @@ -XXX,XX +XXX,XX @@ F: hw/display/edid* | ||
33 | F: include/hw/display/edid.h | ||
34 | F: qemu-edid.c | ||
35 | |||
36 | +macOS PV Graphics (apple-gfx) | ||
37 | +M: Phil Dennis-Jordan <phil@philjordan.eu> | ||
38 | +S: Maintained | ||
39 | +F: hw/display/apple-gfx* | ||
40 | + | ||
41 | PIIX4 South Bridge (i82371AB) | ||
42 | M: Hervé Poussineau <hpoussin@reactos.org> | ||
43 | M: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
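As an illustrative sanity check of the new entries, the usual MAINTAINERS tooling should now resolve the apple-gfx files to the maintainer added above, and the HVF paths should additionally list the new designated reviewer:

  $ scripts/get_maintainer.pl -f hw/display/apple-gfx.m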
44 | -- | ||
45 | 2.39.5 (Apple Git-154) | ||
46 | |||
47 | diff view generated by jsdifflib |
1 | I'm happy to take responsibility for the macOS PV graphics code. As | 1 | I'm happy to take responsibility for the macOS PV graphics code. As |
---|---|---|---|
2 | HVF patches don't seem to get much attention at the moment, I'm also | 2 | HVF patches don't seem to get much attention at the moment, I'm also |
3 | adding myself as designated reviewer for HVF and x86 HVF to try and | 3 | adding myself as designated reviewer for HVF and x86 HVF to try and |
4 | improve that. | 4 | improve that. |
5 | |||
6 | I anticipate that the resulting workload should be covered by the | ||
7 | funding I'm receiving for improving Qemu in combination with macOS. As | ||
8 | of right now this runs out at the end of 2024; I expect the workload on | ||
9 | apple-gfx should be relatively minor and manageable in my spare time | ||
10 | beyond that. I may have to remove myself from more general HVF duties | ||
11 | once the contract runs out if it's more than I can manage. | ||
12 | 5 | ||
13 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 6 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
14 | Reviewed-by: Roman Bolshakov <rbolshakov@ddn.com> | 7 | Reviewed-by: Roman Bolshakov <rbolshakov@ddn.com> |
15 | --- | 8 | --- |
16 | MAINTAINERS | 7 +++++++ | 9 | MAINTAINERS | 7 +++++++ |
... | ... | diff view generated by jsdifflib |
1 | From: Alexander Graf <graf@amazon.com> | 1 | From: Alexander Graf <graf@amazon.com> |
---|---|---|---|
2 | 2 | ||
3 | We will introduce a number of devices that are specific to the vmapple | 3 | We will introduce a number of devices that are specific to the vmapple |
4 | target machine. To keep them all tidily together, let's put them into | 4 | target machine. To keep them all tidily together, let's put them into |
5 | a single target directory. | 5 | a single target directory. |
6 | 6 | ||
7 | Signed-off-by: Alexander Graf <graf@amazon.com> | 7 | Signed-off-by: Alexander Graf <graf@amazon.com> |
8 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 8 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
9 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 9 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
10 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 10 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
11 | --- | 11 | --- |
12 | MAINTAINERS | 7 +++++++ | 12 | MAINTAINERS | 7 +++++++ |
13 | hw/Kconfig | 1 + | 13 | hw/Kconfig | 1 + |
14 | hw/meson.build | 1 + | 14 | hw/meson.build | 1 + |
15 | hw/vmapple/Kconfig | 1 + | 15 | hw/vmapple/Kconfig | 1 + |
16 | hw/vmapple/meson.build | 0 | 16 | hw/vmapple/meson.build | 0 |
17 | hw/vmapple/trace-events | 2 ++ | 17 | hw/vmapple/trace-events | 2 ++ |
18 | hw/vmapple/trace.h | 1 + | 18 | hw/vmapple/trace.h | 1 + |
19 | meson.build | 1 + | 19 | meson.build | 1 + |
20 | 8 files changed, 14 insertions(+) | 20 | 8 files changed, 14 insertions(+) |
21 | create mode 100644 hw/vmapple/Kconfig | 21 | create mode 100644 hw/vmapple/Kconfig |
22 | create mode 100644 hw/vmapple/meson.build | 22 | create mode 100644 hw/vmapple/meson.build |
23 | create mode 100644 hw/vmapple/trace-events | 23 | create mode 100644 hw/vmapple/trace-events |
24 | create mode 100644 hw/vmapple/trace.h | 24 | create mode 100644 hw/vmapple/trace.h |
25 | 25 | ||
26 | diff --git a/MAINTAINERS b/MAINTAINERS | 26 | diff --git a/MAINTAINERS b/MAINTAINERS |
27 | index XXXXXXX..XXXXXXX 100644 | 27 | index XXXXXXX..XXXXXXX 100644 |
28 | --- a/MAINTAINERS | 28 | --- a/MAINTAINERS |
29 | +++ b/MAINTAINERS | 29 | +++ b/MAINTAINERS |
30 | @@ -XXX,XX +XXX,XX @@ F: hw/hyperv/hv-balloon*.h | 30 | @@ -XXX,XX +XXX,XX @@ F: hw/hyperv/hv-balloon*.h |
31 | F: include/hw/hyperv/dynmem-proto.h | 31 | F: include/hw/hyperv/dynmem-proto.h |
32 | F: include/hw/hyperv/hv-balloon.h | 32 | F: include/hw/hyperv/hv-balloon.h |
33 | 33 | ||
34 | +VMapple | 34 | +VMapple |
35 | +M: Alexander Graf <agraf@csgraf.de> | 35 | +M: Alexander Graf <agraf@csgraf.de> |
36 | +R: Phil Dennis-Jordan <phil@philjordan.eu> | 36 | +R: Phil Dennis-Jordan <phil@philjordan.eu> |
37 | +S: Maintained | 37 | +S: Maintained |
38 | +F: hw/vmapple/* | 38 | +F: hw/vmapple/* |
39 | +F: include/hw/vmapple/* | 39 | +F: include/hw/vmapple/* |
40 | + | 40 | + |
41 | Subsystems | 41 | Subsystems |
42 | ---------- | 42 | ---------- |
43 | Overall Audio backends | 43 | Overall Audio backends |
44 | diff --git a/hw/Kconfig b/hw/Kconfig | 44 | diff --git a/hw/Kconfig b/hw/Kconfig |
45 | index XXXXXXX..XXXXXXX 100644 | 45 | index XXXXXXX..XXXXXXX 100644 |
46 | --- a/hw/Kconfig | 46 | --- a/hw/Kconfig |
47 | +++ b/hw/Kconfig | 47 | +++ b/hw/Kconfig |
48 | @@ -XXX,XX +XXX,XX @@ source ufs/Kconfig | 48 | @@ -XXX,XX +XXX,XX @@ source ufs/Kconfig |
49 | source usb/Kconfig | 49 | source usb/Kconfig |
50 | source virtio/Kconfig | 50 | source virtio/Kconfig |
51 | source vfio/Kconfig | 51 | source vfio/Kconfig |
52 | +source vmapple/Kconfig | 52 | +source vmapple/Kconfig |
53 | source xen/Kconfig | 53 | source xen/Kconfig |
54 | source watchdog/Kconfig | 54 | source watchdog/Kconfig |
55 | 55 | ||
56 | diff --git a/hw/meson.build b/hw/meson.build | 56 | diff --git a/hw/meson.build b/hw/meson.build |
57 | index XXXXXXX..XXXXXXX 100644 | 57 | index XXXXXXX..XXXXXXX 100644 |
58 | --- a/hw/meson.build | 58 | --- a/hw/meson.build |
59 | +++ b/hw/meson.build | 59 | +++ b/hw/meson.build |
60 | @@ -XXX,XX +XXX,XX @@ subdir('ufs') | 60 | @@ -XXX,XX +XXX,XX @@ subdir('ufs') |
61 | subdir('usb') | 61 | subdir('usb') |
62 | subdir('vfio') | 62 | subdir('vfio') |
63 | subdir('virtio') | 63 | subdir('virtio') |
64 | +subdir('vmapple') | 64 | +subdir('vmapple') |
65 | subdir('watchdog') | 65 | subdir('watchdog') |
66 | subdir('xen') | 66 | subdir('xen') |
67 | subdir('xenpv') | 67 | subdir('xenpv') |
68 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig | 68 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig |
69 | new file mode 100644 | 69 | new file mode 100644 |
70 | index XXXXXXX..XXXXXXX | 70 | index XXXXXXX..XXXXXXX |
71 | --- /dev/null | 71 | --- /dev/null |
72 | +++ b/hw/vmapple/Kconfig | 72 | +++ b/hw/vmapple/Kconfig |
73 | @@ -0,0 +1 @@ | 73 | @@ -0,0 +1 @@ |
74 | + | 74 | + |
75 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build | 75 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build |
76 | new file mode 100644 | 76 | new file mode 100644 |
77 | index XXXXXXX..XXXXXXX | 77 | index XXXXXXX..XXXXXXX |
78 | diff --git a/hw/vmapple/trace-events b/hw/vmapple/trace-events | 78 | diff --git a/hw/vmapple/trace-events b/hw/vmapple/trace-events |
79 | new file mode 100644 | 79 | new file mode 100644 |
80 | index XXXXXXX..XXXXXXX | 80 | index XXXXXXX..XXXXXXX |
81 | --- /dev/null | 81 | --- /dev/null |
82 | +++ b/hw/vmapple/trace-events | 82 | +++ b/hw/vmapple/trace-events |
83 | @@ -XXX,XX +XXX,XX @@ | 83 | @@ -XXX,XX +XXX,XX @@ |
84 | +# See docs/devel/tracing.rst for syntax documentation. | 84 | +# See docs/devel/tracing.rst for syntax documentation. |
85 | + | 85 | + |
86 | diff --git a/hw/vmapple/trace.h b/hw/vmapple/trace.h | 86 | diff --git a/hw/vmapple/trace.h b/hw/vmapple/trace.h |
87 | new file mode 100644 | 87 | new file mode 100644 |
88 | index XXXXXXX..XXXXXXX | 88 | index XXXXXXX..XXXXXXX |
89 | --- /dev/null | 89 | --- /dev/null |
90 | +++ b/hw/vmapple/trace.h | 90 | +++ b/hw/vmapple/trace.h |
91 | @@ -0,0 +1 @@ | 91 | @@ -0,0 +1 @@ |
92 | +#include "trace/trace-hw_vmapple.h" | 92 | +#include "trace/trace-hw_vmapple.h" |
93 | diff --git a/meson.build b/meson.build | 93 | diff --git a/meson.build b/meson.build |
94 | index XXXXXXX..XXXXXXX 100644 | 94 | index XXXXXXX..XXXXXXX 100644 |
95 | --- a/meson.build | 95 | --- a/meson.build |
96 | +++ b/meson.build | 96 | +++ b/meson.build |
97 | @@ -XXX,XX +XXX,XX @@ if have_system | 97 | @@ -XXX,XX +XXX,XX @@ if have_system |
98 | 'hw/usb', | 98 | 'hw/usb', |
99 | 'hw/vfio', | 99 | 'hw/vfio', |
100 | 'hw/virtio', | 100 | 'hw/virtio', |
101 | + 'hw/vmapple', | 101 | + 'hw/vmapple', |
102 | 'hw/watchdog', | 102 | 'hw/watchdog', |
103 | 'hw/xen', | 103 | 'hw/xen', |
104 | 'hw/gpio', | 104 | 'hw/gpio', |
105 | -- | 105 | -- |
106 | 2.39.5 (Apple Git-154) | 106 | 2.39.5 (Apple Git-154)
New patch | |||
---|---|---|---|
1 | From: Alexander Graf <graf@amazon.com> | ||
1 | 2 | ||
3 | We will introduce a number of devices that are specific to the vmapple | ||
4 | target machine. To keep them all tidily together, let's put them into | ||
5 | a single target directory. | ||
6 | |||
7 | Signed-off-by: Alexander Graf <graf@amazon.com> | ||
8 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
9 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
10 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
11 | --- | ||
12 | MAINTAINERS | 7 +++++++ | ||
13 | hw/Kconfig | 1 + | ||
14 | hw/meson.build | 1 + | ||
15 | hw/vmapple/Kconfig | 1 + | ||
16 | hw/vmapple/meson.build | 0 | ||
17 | hw/vmapple/trace-events | 2 ++ | ||
18 | hw/vmapple/trace.h | 1 + | ||
19 | meson.build | 1 + | ||
20 | 8 files changed, 14 insertions(+) | ||
21 | create mode 100644 hw/vmapple/Kconfig | ||
22 | create mode 100644 hw/vmapple/meson.build | ||
23 | create mode 100644 hw/vmapple/trace-events | ||
24 | create mode 100644 hw/vmapple/trace.h | ||
25 | |||
26 | diff --git a/MAINTAINERS b/MAINTAINERS | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/MAINTAINERS | ||
29 | +++ b/MAINTAINERS | ||
30 | @@ -XXX,XX +XXX,XX @@ F: hw/hyperv/hv-balloon*.h | ||
31 | F: include/hw/hyperv/dynmem-proto.h | ||
32 | F: include/hw/hyperv/hv-balloon.h | ||
33 | |||
34 | +VMapple | ||
35 | +M: Alexander Graf <agraf@csgraf.de> | ||
36 | +M: Phil Dennis-Jordan <phil@philjordan.eu> | ||
37 | +S: Maintained | ||
38 | +F: hw/vmapple/* | ||
39 | +F: include/hw/vmapple/* | ||
40 | + | ||
41 | Subsystems | ||
42 | ---------- | ||
43 | Overall Audio backends | ||
44 | diff --git a/hw/Kconfig b/hw/Kconfig | ||
45 | index XXXXXXX..XXXXXXX 100644 | ||
46 | --- a/hw/Kconfig | ||
47 | +++ b/hw/Kconfig | ||
48 | @@ -XXX,XX +XXX,XX @@ source ufs/Kconfig | ||
49 | source usb/Kconfig | ||
50 | source virtio/Kconfig | ||
51 | source vfio/Kconfig | ||
52 | +source vmapple/Kconfig | ||
53 | source xen/Kconfig | ||
54 | source watchdog/Kconfig | ||
55 | |||
56 | diff --git a/hw/meson.build b/hw/meson.build | ||
57 | index XXXXXXX..XXXXXXX 100644 | ||
58 | --- a/hw/meson.build | ||
59 | +++ b/hw/meson.build | ||
60 | @@ -XXX,XX +XXX,XX @@ subdir('ufs') | ||
61 | subdir('usb') | ||
62 | subdir('vfio') | ||
63 | subdir('virtio') | ||
64 | +subdir('vmapple') | ||
65 | subdir('watchdog') | ||
66 | subdir('xen') | ||
67 | subdir('xenpv') | ||
68 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig | ||
69 | new file mode 100644 | ||
70 | index XXXXXXX..XXXXXXX | ||
71 | --- /dev/null | ||
72 | +++ b/hw/vmapple/Kconfig | ||
73 | @@ -0,0 +1 @@ | ||
74 | + | ||
75 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build | ||
76 | new file mode 100644 | ||
77 | index XXXXXXX..XXXXXXX | ||
78 | diff --git a/hw/vmapple/trace-events b/hw/vmapple/trace-events | ||
79 | new file mode 100644 | ||
80 | index XXXXXXX..XXXXXXX | ||
81 | --- /dev/null | ||
82 | +++ b/hw/vmapple/trace-events | ||
83 | @@ -XXX,XX +XXX,XX @@ | ||
84 | +# See docs/devel/tracing.rst for syntax documentation. | ||
85 | + | ||
86 | diff --git a/hw/vmapple/trace.h b/hw/vmapple/trace.h | ||
87 | new file mode 100644 | ||
88 | index XXXXXXX..XXXXXXX | ||
89 | --- /dev/null | ||
90 | +++ b/hw/vmapple/trace.h | ||
91 | @@ -0,0 +1 @@ | ||
92 | +#include "trace/trace-hw_vmapple.h" | ||
93 | diff --git a/meson.build b/meson.build | ||
94 | index XXXXXXX..XXXXXXX 100644 | ||
95 | --- a/meson.build | ||
96 | +++ b/meson.build | ||
97 | @@ -XXX,XX +XXX,XX @@ if have_system | ||
98 | 'hw/usb', | ||
99 | 'hw/vfio', | ||
100 | 'hw/virtio', | ||
101 | + 'hw/vmapple', | ||
102 | 'hw/watchdog', | ||
103 | 'hw/xen', | ||
104 | 'hw/gpio', | ||
105 | -- | ||
106 | 2.39.5 (Apple Git-154)
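
The new directory above only carries build scaffolding: Kconfig and meson.build stubs plus the tracing plumbing. For orientation, a device source added to hw/vmapple/ later in the series would hook into that scaffolding roughly as sketched below; the file and type names are invented for illustration and are not part of this patch.

/* Hypothetical hw/vmapple/example.c -- illustration only, not in this series. */
#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "trace.h"   /* the per-directory shim added above */

#define TYPE_VMAPPLE_EXAMPLE "vmapple-example"
OBJECT_DECLARE_SIMPLE_TYPE(VMAppleExampleState, VMAPPLE_EXAMPLE)

struct VMAppleExampleState {
    SysBusDevice parent_obj;
};

static const TypeInfo vmapple_example_info = {
    .name          = TYPE_VMAPPLE_EXAMPLE,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VMAppleExampleState),
};

static void vmapple_example_register_types(void)
{
    type_register_static(&vmapple_example_info);
}

type_init(vmapple_example_register_types)

Such a file would then be listed via files() in hw/vmapple/meson.build and gated by a Kconfig symbol, mirroring how the other hw/ subdirectories are wired up.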
1 | From: Alexander Graf <graf@amazon.com> | 1 | From: Alexander Graf <graf@amazon.com> |
---|---|---|---|
2 | 2 | ||
3 | In addition to the ISA and PCI variants of pvpanic, let's add an MMIO | 3 | In addition to the ISA and PCI variants of pvpanic, let's add an MMIO |
4 | platform device that we can use in embedded arm environments. | 4 | platform device that we can use in embedded arm environments. |
5 | 5 | ||
6 | Signed-off-by: Alexander Graf <graf@amazon.com> | 6 | Signed-off-by: Alexander Graf <graf@amazon.com> |
7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
8 | Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 8 | Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
9 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 9 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
10 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 10 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
11 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 11 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
12 | --- | 12 | --- |
13 | 13 | ||
14 | v3: | 14 | v3: |
15 | * Rebased on upstream, updated a header path | 15 | * Rebased on upstream, updated a header path |
16 | 16 | ||
17 | hw/misc/Kconfig | 4 +++ | 17 | hw/misc/Kconfig | 4 +++ |
18 | hw/misc/meson.build | 1 + | 18 | hw/misc/meson.build | 1 + |
19 | hw/misc/pvpanic-mmio.c | 61 +++++++++++++++++++++++++++++++++++++++ | 19 | hw/misc/pvpanic-mmio.c | 61 +++++++++++++++++++++++++++++++++++++++ |
20 | include/hw/misc/pvpanic.h | 1 + | 20 | include/hw/misc/pvpanic.h | 1 + |
21 | 4 files changed, 67 insertions(+) | 21 | 4 files changed, 67 insertions(+) |
22 | create mode 100644 hw/misc/pvpanic-mmio.c | 22 | create mode 100644 hw/misc/pvpanic-mmio.c |
23 | 23 | ||
24 | diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig | 24 | diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig |
25 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/hw/misc/Kconfig | 26 | --- a/hw/misc/Kconfig |
27 | +++ b/hw/misc/Kconfig | 27 | +++ b/hw/misc/Kconfig |
28 | @@ -XXX,XX +XXX,XX @@ config PVPANIC_ISA | 28 | @@ -XXX,XX +XXX,XX @@ config PVPANIC_ISA |
29 | depends on ISA_BUS | 29 | depends on ISA_BUS |
30 | select PVPANIC_COMMON | 30 | select PVPANIC_COMMON |
31 | 31 | ||
32 | +config PVPANIC_MMIO | 32 | +config PVPANIC_MMIO |
33 | + bool | 33 | + bool |
34 | + select PVPANIC_COMMON | 34 | + select PVPANIC_COMMON |
35 | + | 35 | + |
36 | config AUX | 36 | config AUX |
37 | bool | 37 | bool |
38 | select I2C | 38 | select I2C |
39 | diff --git a/hw/misc/meson.build b/hw/misc/meson.build | 39 | diff --git a/hw/misc/meson.build b/hw/misc/meson.build |
40 | index XXXXXXX..XXXXXXX 100644 | 40 | index XXXXXXX..XXXXXXX 100644 |
41 | --- a/hw/misc/meson.build | 41 | --- a/hw/misc/meson.build |
42 | +++ b/hw/misc/meson.build | 42 | +++ b/hw/misc/meson.build |
43 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_ARMSSE_MHU', if_true: files('armsse-mhu.c')) | 43 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_ARMSSE_MHU', if_true: files('armsse-mhu.c')) |
44 | 44 | ||
45 | system_ss.add(when: 'CONFIG_PVPANIC_ISA', if_true: files('pvpanic-isa.c')) | 45 | system_ss.add(when: 'CONFIG_PVPANIC_ISA', if_true: files('pvpanic-isa.c')) |
46 | system_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c')) | 46 | system_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c')) |
47 | +system_ss.add(when: 'CONFIG_PVPANIC_MMIO', if_true: files('pvpanic-mmio.c')) | 47 | +system_ss.add(when: 'CONFIG_PVPANIC_MMIO', if_true: files('pvpanic-mmio.c')) |
48 | system_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c')) | 48 | system_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c')) |
49 | system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( | 49 | system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( |
50 | 'aspeed_hace.c', | 50 | 'aspeed_hace.c', |
51 | diff --git a/hw/misc/pvpanic-mmio.c b/hw/misc/pvpanic-mmio.c | 51 | diff --git a/hw/misc/pvpanic-mmio.c b/hw/misc/pvpanic-mmio.c |
52 | new file mode 100644 | 52 | new file mode 100644 |
53 | index XXXXXXX..XXXXXXX | 53 | index XXXXXXX..XXXXXXX |
54 | --- /dev/null | 54 | --- /dev/null |
55 | +++ b/hw/misc/pvpanic-mmio.c | 55 | +++ b/hw/misc/pvpanic-mmio.c |
56 | @@ -XXX,XX +XXX,XX @@ | 56 | @@ -XXX,XX +XXX,XX @@ |
57 | +/* | 57 | +/* |
58 | + * QEMU simulated pvpanic device (MMIO frontend) | 58 | + * QEMU simulated pvpanic device (MMIO frontend) |
59 | + * | 59 | + * |
60 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | 60 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
61 | + * | 61 | + * |
62 | + * SPDX-License-Identifier: GPL-2.0-or-later | 62 | + * SPDX-License-Identifier: GPL-2.0-or-later |
63 | + */ | 63 | + */ |
64 | + | 64 | + |
65 | +#include "qemu/osdep.h" | 65 | +#include "qemu/osdep.h" |
66 | + | 66 | + |
67 | +#include "hw/qdev-properties.h" | 67 | +#include "hw/qdev-properties.h" |
68 | +#include "hw/misc/pvpanic.h" | 68 | +#include "hw/misc/pvpanic.h" |
69 | +#include "hw/sysbus.h" | 69 | +#include "hw/sysbus.h" |
70 | +#include "standard-headers/misc/pvpanic.h" | 70 | +#include "standard-headers/misc/pvpanic.h" |
71 | + | 71 | + |
72 | +OBJECT_DECLARE_SIMPLE_TYPE(PVPanicMMIOState, PVPANIC_MMIO_DEVICE) | 72 | +OBJECT_DECLARE_SIMPLE_TYPE(PVPanicMMIOState, PVPANIC_MMIO_DEVICE) |
73 | + | 73 | + |
74 | +#define PVPANIC_MMIO_SIZE 0x2 | 74 | +#define PVPANIC_MMIO_SIZE 0x2 |
75 | + | 75 | + |
76 | +struct PVPanicMMIOState { | 76 | +struct PVPanicMMIOState { |
77 | + SysBusDevice parent_obj; | 77 | + SysBusDevice parent_obj; |
78 | + | 78 | + |
79 | + PVPanicState pvpanic; | 79 | + PVPanicState pvpanic; |
80 | +}; | 80 | +}; |
81 | + | 81 | + |
82 | +static void pvpanic_mmio_initfn(Object *obj) | 82 | +static void pvpanic_mmio_initfn(Object *obj) |
83 | +{ | 83 | +{ |
84 | + PVPanicMMIOState *s = PVPANIC_MMIO_DEVICE(obj); | 84 | + PVPanicMMIOState *s = PVPANIC_MMIO_DEVICE(obj); |
85 | + | 85 | + |
86 | + pvpanic_setup_io(&s->pvpanic, DEVICE(s), PVPANIC_MMIO_SIZE); | 86 | + pvpanic_setup_io(&s->pvpanic, DEVICE(s), PVPANIC_MMIO_SIZE); |
87 | + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->pvpanic.mr); | 87 | + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->pvpanic.mr); |
88 | +} | 88 | +} |
89 | + | 89 | + |
90 | +static Property pvpanic_mmio_properties[] = { | 90 | +static Property pvpanic_mmio_properties[] = { |
91 | + DEFINE_PROP_UINT8("events", PVPanicMMIOState, pvpanic.events, | 91 | + DEFINE_PROP_UINT8("events", PVPanicMMIOState, pvpanic.events, |
92 | + PVPANIC_PANICKED | PVPANIC_CRASH_LOADED), | 92 | + PVPANIC_PANICKED | PVPANIC_CRASH_LOADED), |
93 | + DEFINE_PROP_END_OF_LIST(), | 93 | + DEFINE_PROP_END_OF_LIST(), |
94 | +}; | 94 | +}; |
95 | + | 95 | + |
96 | +static void pvpanic_mmio_class_init(ObjectClass *klass, void *data) | 96 | +static void pvpanic_mmio_class_init(ObjectClass *klass, void *data) |
97 | +{ | 97 | +{ |
98 | + DeviceClass *dc = DEVICE_CLASS(klass); | 98 | + DeviceClass *dc = DEVICE_CLASS(klass); |
99 | + | 99 | + |
100 | + device_class_set_props(dc, pvpanic_mmio_properties); | 100 | + device_class_set_props(dc, pvpanic_mmio_properties); |
101 | + set_bit(DEVICE_CATEGORY_MISC, dc->categories); | 101 | + set_bit(DEVICE_CATEGORY_MISC, dc->categories); |
102 | +} | 102 | +} |
103 | + | 103 | + |
104 | +static const TypeInfo pvpanic_mmio_info = { | 104 | +static const TypeInfo pvpanic_mmio_info = { |
105 | + .name = TYPE_PVPANIC_MMIO_DEVICE, | 105 | + .name = TYPE_PVPANIC_MMIO_DEVICE, |
106 | + .parent = TYPE_SYS_BUS_DEVICE, | 106 | + .parent = TYPE_SYS_BUS_DEVICE, |
107 | + .instance_size = sizeof(PVPanicMMIOState), | 107 | + .instance_size = sizeof(PVPanicMMIOState), |
108 | + .instance_init = pvpanic_mmio_initfn, | 108 | + .instance_init = pvpanic_mmio_initfn, |
109 | + .class_init = pvpanic_mmio_class_init, | 109 | + .class_init = pvpanic_mmio_class_init, |
110 | +}; | 110 | +}; |
111 | + | 111 | + |
112 | +static void pvpanic_register_types(void) | 112 | +static void pvpanic_register_types(void) |
113 | +{ | 113 | +{ |
114 | + type_register_static(&pvpanic_mmio_info); | 114 | + type_register_static(&pvpanic_mmio_info); |
115 | +} | 115 | +} |
116 | + | 116 | + |
117 | +type_init(pvpanic_register_types) | 117 | +type_init(pvpanic_register_types) |
118 | diff --git a/include/hw/misc/pvpanic.h b/include/hw/misc/pvpanic.h | 118 | diff --git a/include/hw/misc/pvpanic.h b/include/hw/misc/pvpanic.h |
119 | index XXXXXXX..XXXXXXX 100644 | 119 | index XXXXXXX..XXXXXXX 100644 |
120 | --- a/include/hw/misc/pvpanic.h | 120 | --- a/include/hw/misc/pvpanic.h |
121 | +++ b/include/hw/misc/pvpanic.h | 121 | +++ b/include/hw/misc/pvpanic.h |
122 | @@ -XXX,XX +XXX,XX @@ | 122 | @@ -XXX,XX +XXX,XX @@ |
123 | 123 | ||
124 | #define TYPE_PVPANIC_ISA_DEVICE "pvpanic" | 124 | #define TYPE_PVPANIC_ISA_DEVICE "pvpanic" |
125 | #define TYPE_PVPANIC_PCI_DEVICE "pvpanic-pci" | 125 | #define TYPE_PVPANIC_PCI_DEVICE "pvpanic-pci" |
126 | +#define TYPE_PVPANIC_MMIO_DEVICE "pvpanic-mmio" | 126 | +#define TYPE_PVPANIC_MMIO_DEVICE "pvpanic-mmio" |
127 | 127 | ||
128 | #define PVPANIC_IOPORT_PROP "ioport" | 128 | #define PVPANIC_IOPORT_PROP "ioport" |
129 | 129 | ||
130 | -- | 130 | -- |
131 | 2.39.5 (Apple Git-154) | 131 | 2.39.5 (Apple Git-154) |
132 | 132 | ||
New patch | |||
---|---|---|---|
1 | From: Alexander Graf <graf@amazon.com> | ||
1 | 2 | ||
3 | In addition to the ISA and PCI variants of pvpanic, let's add an MMIO | ||
4 | platform device that we can use in embedded arm environments. | ||
5 | |||
6 | Signed-off-by: Alexander Graf <graf@amazon.com> | ||
7 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
8 | Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
9 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
10 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
11 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
12 | --- | ||
13 | |||
14 | v3: | ||
15 | * Rebased on upstream, updated a header path | ||
16 | |||
17 | hw/misc/Kconfig | 4 +++ | ||
18 | hw/misc/meson.build | 1 + | ||
19 | hw/misc/pvpanic-mmio.c | 61 +++++++++++++++++++++++++++++++++++++++ | ||
20 | include/hw/misc/pvpanic.h | 1 + | ||
21 | 4 files changed, 67 insertions(+) | ||
22 | create mode 100644 hw/misc/pvpanic-mmio.c | ||
23 | |||
24 | diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig | ||
25 | index XXXXXXX..XXXXXXX 100644 | ||
26 | --- a/hw/misc/Kconfig | ||
27 | +++ b/hw/misc/Kconfig | ||
28 | @@ -XXX,XX +XXX,XX @@ config PVPANIC_ISA | ||
29 | depends on ISA_BUS | ||
30 | select PVPANIC_COMMON | ||
31 | |||
32 | +config PVPANIC_MMIO | ||
33 | + bool | ||
34 | + select PVPANIC_COMMON | ||
35 | + | ||
36 | config AUX | ||
37 | bool | ||
38 | select I2C | ||
39 | diff --git a/hw/misc/meson.build b/hw/misc/meson.build | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/hw/misc/meson.build | ||
42 | +++ b/hw/misc/meson.build | ||
43 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_ARMSSE_MHU', if_true: files('armsse-mhu.c')) | ||
44 | |||
45 | system_ss.add(when: 'CONFIG_PVPANIC_ISA', if_true: files('pvpanic-isa.c')) | ||
46 | system_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c')) | ||
47 | +system_ss.add(when: 'CONFIG_PVPANIC_MMIO', if_true: files('pvpanic-mmio.c')) | ||
48 | system_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c')) | ||
49 | system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( | ||
50 | 'aspeed_hace.c', | ||
51 | diff --git a/hw/misc/pvpanic-mmio.c b/hw/misc/pvpanic-mmio.c | ||
52 | new file mode 100644 | ||
53 | index XXXXXXX..XXXXXXX | ||
54 | --- /dev/null | ||
55 | +++ b/hw/misc/pvpanic-mmio.c | ||
56 | @@ -XXX,XX +XXX,XX @@ | ||
57 | +/* | ||
58 | + * QEMU simulated pvpanic device (MMIO frontend) | ||
59 | + * | ||
60 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | ||
61 | + * | ||
62 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
63 | + */ | ||
64 | + | ||
65 | +#include "qemu/osdep.h" | ||
66 | + | ||
67 | +#include "hw/qdev-properties.h" | ||
68 | +#include "hw/misc/pvpanic.h" | ||
69 | +#include "hw/sysbus.h" | ||
70 | +#include "standard-headers/misc/pvpanic.h" | ||
71 | + | ||
72 | +OBJECT_DECLARE_SIMPLE_TYPE(PVPanicMMIOState, PVPANIC_MMIO_DEVICE) | ||
73 | + | ||
74 | +#define PVPANIC_MMIO_SIZE 0x2 | ||
75 | + | ||
76 | +struct PVPanicMMIOState { | ||
77 | + SysBusDevice parent_obj; | ||
78 | + | ||
79 | + PVPanicState pvpanic; | ||
80 | +}; | ||
81 | + | ||
82 | +static void pvpanic_mmio_initfn(Object *obj) | ||
83 | +{ | ||
84 | + PVPanicMMIOState *s = PVPANIC_MMIO_DEVICE(obj); | ||
85 | + | ||
86 | + pvpanic_setup_io(&s->pvpanic, DEVICE(s), PVPANIC_MMIO_SIZE); | ||
87 | + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->pvpanic.mr); | ||
88 | +} | ||
89 | + | ||
90 | +static Property pvpanic_mmio_properties[] = { | ||
91 | + DEFINE_PROP_UINT8("events", PVPanicMMIOState, pvpanic.events, | ||
92 | + PVPANIC_PANICKED | PVPANIC_CRASH_LOADED), | ||
93 | + DEFINE_PROP_END_OF_LIST(), | ||
94 | +}; | ||
95 | + | ||
96 | +static void pvpanic_mmio_class_init(ObjectClass *klass, void *data) | ||
97 | +{ | ||
98 | + DeviceClass *dc = DEVICE_CLASS(klass); | ||
99 | + | ||
100 | + device_class_set_props(dc, pvpanic_mmio_properties); | ||
101 | + set_bit(DEVICE_CATEGORY_MISC, dc->categories); | ||
102 | +} | ||
103 | + | ||
104 | +static const TypeInfo pvpanic_mmio_info = { | ||
105 | + .name = TYPE_PVPANIC_MMIO_DEVICE, | ||
106 | + .parent = TYPE_SYS_BUS_DEVICE, | ||
107 | + .instance_size = sizeof(PVPanicMMIOState), | ||
108 | + .instance_init = pvpanic_mmio_initfn, | ||
109 | + .class_init = pvpanic_mmio_class_init, | ||
110 | +}; | ||
111 | + | ||
112 | +static void pvpanic_register_types(void) | ||
113 | +{ | ||
114 | + type_register_static(&pvpanic_mmio_info); | ||
115 | +} | ||
116 | + | ||
117 | +type_init(pvpanic_register_types) | ||
118 | diff --git a/include/hw/misc/pvpanic.h b/include/hw/misc/pvpanic.h | ||
119 | index XXXXXXX..XXXXXXX 100644 | ||
120 | --- a/include/hw/misc/pvpanic.h | ||
121 | +++ b/include/hw/misc/pvpanic.h | ||
122 | @@ -XXX,XX +XXX,XX @@ | ||
123 | |||
124 | #define TYPE_PVPANIC_ISA_DEVICE "pvpanic" | ||
125 | #define TYPE_PVPANIC_PCI_DEVICE "pvpanic-pci" | ||
126 | +#define TYPE_PVPANIC_MMIO_DEVICE "pvpanic-mmio" | ||
127 | |||
128 | #define PVPANIC_IOPORT_PROP "ioport" | ||
129 | |||
130 | -- | ||
131 | 2.39.5 (Apple Git-154) | ||
132 | |||
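
As a usage note for the device above: pvpanic-mmio is a plain sysbus device exposing a single two-byte register and no interrupt line, so a board model only has to map it into the guest's physical address space. A minimal sketch, with a made-up base address (this is not board code from this series):

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/misc/pvpanic.h"

#define EXAMPLE_PVPANIC_MMIO_BASE 0x0a000000ULL   /* hypothetical guest address */

static void example_board_add_pvpanic(void)
{
    /* No IRQ to connect; the guest reports panic events by writing the register. */
    sysbus_create_simple(TYPE_PVPANIC_MMIO_DEVICE,
                         EXAMPLE_PVPANIC_MMIO_BASE, NULL);
}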
1 | From: Alexander Graf <graf@amazon.com> | 1 | From: Alexander Graf <graf@amazon.com> |
---|---|---|---|
2 | 2 | ||
3 | MacOS unconditionally disables interrupts of the physical timer on boot | 3 | MacOS unconditionally disables interrupts of the physical timer on boot |
4 | and then continues to use the virtual one. We don't really want to support | 4 | and then continues to use the virtual one. We don't really want to support |
5 | a full physical timer emulation, so let's just ignore those writes. | 5 | a full physical timer emulation, so let's just ignore those writes. |
6 | 6 | ||
7 | Signed-off-by: Alexander Graf <graf@amazon.com> | 7 | Signed-off-by: Alexander Graf <graf@amazon.com> |
8 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 8 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
9 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 9 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
10 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 10 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
11 | --- | 11 | --- |
12 | target/arm/hvf/hvf.c | 9 +++++++++ | 12 | target/arm/hvf/hvf.c | 9 +++++++++ |
13 | 1 file changed, 9 insertions(+) | 13 | 1 file changed, 9 insertions(+) |
14 | 14 | ||
15 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c | 15 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c |
16 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/hvf/hvf.c | 17 | --- a/target/arm/hvf/hvf.c |
18 | +++ b/target/arm/hvf/hvf.c | 18 | +++ b/target/arm/hvf/hvf.c |
19 | @@ -XXX,XX +XXX,XX @@ | 19 | @@ -XXX,XX +XXX,XX @@ |
20 | 20 | ||
21 | #include "qemu/osdep.h" | 21 | #include "qemu/osdep.h" |
22 | #include "qemu/error-report.h" | 22 | #include "qemu/error-report.h" |
23 | +#include "qemu/log.h" | 23 | +#include "qemu/log.h" |
24 | 24 | ||
25 | #include "sysemu/runstate.h" | 25 | #include "sysemu/runstate.h" |
26 | #include "sysemu/hvf.h" | 26 | #include "sysemu/hvf.h" |
27 | @@ -XXX,XX +XXX,XX @@ void hvf_arm_init_debug(void) | 27 | @@ -XXX,XX +XXX,XX @@ void hvf_arm_init_debug(void) |
28 | #define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4) | 28 | #define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4) |
29 | #define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4) | 29 | #define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4) |
30 | #define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1) | 30 | #define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1) |
31 | +#define SYSREG_CNTP_CTL_EL0 SYSREG(3, 3, 14, 2, 1) | 31 | +#define SYSREG_CNTP_CTL_EL0 SYSREG(3, 3, 14, 2, 1) |
32 | #define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0) | 32 | #define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0) |
33 | #define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0) | 33 | #define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0) |
34 | #define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1) | 34 | #define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1) |
35 | @@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) | 35 | @@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) |
36 | case SYSREG_OSLAR_EL1: | 36 | case SYSREG_OSLAR_EL1: |
37 | env->cp15.oslsr_el1 = val & 1; | 37 | env->cp15.oslsr_el1 = val & 1; |
38 | return 0; | 38 | return 0; |
39 | + case SYSREG_CNTP_CTL_EL0: | 39 | + case SYSREG_CNTP_CTL_EL0: |
40 | + /* | 40 | + /* |
41 | + * Guests should not rely on the physical counter, but macOS emits | 41 | + * Guests should not rely on the physical counter, but macOS emits |
42 | + * disable writes to it. Let it do so, but ignore the requests. | 42 | + * disable writes to it. Let it do so, but ignore the requests. |
43 | + */ | 43 | + */ |
44 | + qemu_log_mask(LOG_UNIMP, "Unsupported write to CNTP_CTL_EL0\n"); | 44 | + qemu_log_mask(LOG_UNIMP, "Unsupported write to CNTP_CTL_EL0\n"); |
45 | + return 0; | 45 | + return 0; |
46 | case SYSREG_OSDLR_EL1: | 46 | case SYSREG_OSDLR_EL1: |
47 | /* Dummy register */ | 47 | /* Dummy register */ |
48 | return 0; | 48 | return 0; |
49 | -- | 49 | -- |
50 | 2.39.5 (Apple Git-154) | 50 | 2.39.5 (Apple Git-154)
New patch | |||
---|---|---|---|
1 | From: Alexander Graf <graf@amazon.com> | ||
1 | 2 | ||
3 | MacOS unconditionally disables interrupts of the physical timer on boot | ||
4 | and then continues to use the virtual one. We don't really want to support | ||
5 | a full physical timer emulation, so let's just ignore those writes. | ||
6 | |||
7 | Signed-off-by: Alexander Graf <graf@amazon.com> | ||
8 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
9 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
10 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
11 | --- | ||
12 | target/arm/hvf/hvf.c | 9 +++++++++ | ||
13 | 1 file changed, 9 insertions(+) | ||
14 | |||
15 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/hvf/hvf.c | ||
18 | +++ b/target/arm/hvf/hvf.c | ||
19 | @@ -XXX,XX +XXX,XX @@ | ||
20 | |||
21 | #include "qemu/osdep.h" | ||
22 | #include "qemu/error-report.h" | ||
23 | +#include "qemu/log.h" | ||
24 | |||
25 | #include "sysemu/runstate.h" | ||
26 | #include "sysemu/hvf.h" | ||
27 | @@ -XXX,XX +XXX,XX @@ void hvf_arm_init_debug(void) | ||
28 | #define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4) | ||
29 | #define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4) | ||
30 | #define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1) | ||
31 | +#define SYSREG_CNTP_CTL_EL0 SYSREG(3, 3, 14, 2, 1) | ||
32 | #define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0) | ||
33 | #define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0) | ||
34 | #define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1) | ||
35 | @@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) | ||
36 | case SYSREG_OSLAR_EL1: | ||
37 | env->cp15.oslsr_el1 = val & 1; | ||
38 | return 0; | ||
39 | + case SYSREG_CNTP_CTL_EL0: | ||
40 | + /* | ||
41 | + * Guests should not rely on the physical counter, but macOS emits | ||
42 | + * disable writes to it. Let it do so, but ignore the requests. | ||
43 | + */ | ||
44 | + qemu_log_mask(LOG_UNIMP, "Unsupported write to CNTP_CTL_EL0\n"); | ||
45 | + return 0; | ||
46 | case SYSREG_OSDLR_EL1: | ||
47 | /* Dummy register */ | ||
48 | return 0; | ||
49 | -- | ||
50 | 2.39.5 (Apple Git-154)
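
For reference, the access this handler absorbs is an ordinary system-register write from the guest. Something along the lines of the guest-side fragment below (an illustration, not QEMU code, and not necessarily the exact value macOS writes) is now treated as a no-op instead of being reported as an unhandled sysreg access:

/* Guest-side illustration: turn the EL0 physical timer off. */
static inline void guest_disable_phys_timer(void)
{
    unsigned long ctl = 0;   /* ENABLE = 0, IMASK = 0 */
    __asm__ volatile("msr cntp_ctl_el0, %0" : : "r"(ctl));
}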
1 | From: Alexander Graf <graf@amazon.com> | 1 | From: Alexander Graf <graf@amazon.com> |
---|---|---|---|
2 | 2 | ||
3 | Some boards such as vmapple don't do real legacy PCI IRQ swizzling. | 3 | Some boards such as vmapple don't do real legacy PCI IRQ swizzling. |
4 | Instead, they just keep allocating more board IRQ lines for each new | 4 | Instead, they just keep allocating more board IRQ lines for each new |
5 | legacy IRQ. Let's support that mode by giving instantiators a new | 5 | legacy IRQ. Let's support that mode by giving instantiators a new |
6 | "num-irqs" property they can use to support more than 4 legacy IRQ lines. | 6 | "num-irqs" property they can use to support more than 4 legacy IRQ lines.
7 | In this mode, GPEX will export more IRQ lines, one for each device. | 7 | In this mode, GPEX will export more IRQ lines, one for each device. |
8 | 8 | ||
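
To make the new mode concrete, the sketch below shows how a board could ask for one INTx line per slot instead of the usual four swizzled pins. The IRQ numbers, helper name and interrupt controller handle are assumptions for illustration, not code from this series; the property itself is spelled "num-irqs" in the DEFINE_PROP_UINT8() added further down, and the MMIO/ECAM mappings are omitted for brevity.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/qdev-properties.h"
#include "hw/pci-host/gpex.h"

#define EXAMPLE_PCIE_IRQ_BASE 32   /* hypothetical interrupt controller input */
#define EXAMPLE_PCIE_NUM_IRQS 16   /* one legacy IRQ line per device slot */

static void example_board_create_gpex(DeviceState *intc)
{
    DeviceState *dev = qdev_new(TYPE_GPEX_HOST);
    int i;

    /* Must be set before realize, which sizes the IRQ array accordingly. */
    qdev_prop_set_uint8(dev, "num-irqs", EXAMPLE_PCIE_NUM_IRQS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    for (i = 0; i < EXAMPLE_PCIE_NUM_IRQS; i++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
                           qdev_get_gpio_in(intc, EXAMPLE_PCIE_IRQ_BASE + i));
        gpex_set_irq_num(GPEX_HOST(dev), i, EXAMPLE_PCIE_IRQ_BASE + i);
    }
}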
9 | Signed-off-by: Alexander Graf <graf@amazon.com> | 9 | Signed-off-by: Alexander Graf <graf@amazon.com> |
10 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 10 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
11 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 11 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
12 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 12 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
13 | --- | 13 | --- |
14 | 14 | ||
15 | v4: | 15 | v4: |
16 | 16 | ||
17 | * Turned pair of IRQ arrays into array of structs. | 17 | * Turned pair of IRQ arrays into array of structs. |
18 | * Simplified swizzling logic selection. | 18 | * Simplified swizzling logic selection. |
19 | 19 | ||
20 | v12: | 20 | v12: |
21 | 21 | ||
22 | * Fixed uses of deleted GPEX_NUM_IRQS constant that have been | 22 | * Fixed uses of deleted GPEX_NUM_IRQS constant that have been |
23 | added to QEMU since this patch was originally written. | 23 | added to QEMU since this patch was originally written. |
24 | 24 | ||
25 | hw/arm/sbsa-ref.c | 2 +- | 25 | hw/arm/sbsa-ref.c | 2 +- |
26 | hw/arm/virt.c | 2 +- | 26 | hw/arm/virt.c | 2 +- |
27 | hw/i386/microvm.c | 2 +- | 27 | hw/i386/microvm.c | 2 +- |
28 | hw/loongarch/virt.c | 12 +++++------ | 28 | hw/loongarch/virt.c | 12 +++++------ |
29 | hw/mips/loongson3_virt.c | 2 +- | 29 | hw/mips/loongson3_virt.c | 2 +- |
30 | hw/openrisc/virt.c | 12 +++++------ | 30 | hw/openrisc/virt.c | 12 +++++------ |
31 | hw/pci-host/gpex.c | 43 ++++++++++++++++++++++++++++++-------- | 31 | hw/pci-host/gpex.c | 43 ++++++++++++++++++++++++++++++-------- |
32 | hw/riscv/virt.c | 12 +++++------ | 32 | hw/riscv/virt.c | 12 +++++------ |
33 | hw/xen/xen-pvh-common.c | 2 +- | 33 | hw/xen/xen-pvh-common.c | 2 +- |
34 | hw/xtensa/virt.c | 2 +- | 34 | hw/xtensa/virt.c | 2 +- |
35 | include/hw/pci-host/gpex.h | 7 +++---- | 35 | include/hw/pci-host/gpex.h | 7 +++---- |
36 | 11 files changed, 61 insertions(+), 37 deletions(-) | 36 | 11 files changed, 61 insertions(+), 37 deletions(-) |
37 | 37 | ||
38 | diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c | 38 | diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c |
39 | index XXXXXXX..XXXXXXX 100644 | 39 | index XXXXXXX..XXXXXXX 100644 |
40 | --- a/hw/arm/sbsa-ref.c | 40 | --- a/hw/arm/sbsa-ref.c |
41 | +++ b/hw/arm/sbsa-ref.c | 41 | +++ b/hw/arm/sbsa-ref.c |
42 | @@ -XXX,XX +XXX,XX @@ static void create_pcie(SBSAMachineState *sms) | 42 | @@ -XXX,XX +XXX,XX @@ static void create_pcie(SBSAMachineState *sms) |
43 | /* Map IO port space */ | 43 | /* Map IO port space */ |
44 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); | 44 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); |
45 | 45 | ||
46 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | 46 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { |
47 | + for (i = 0; i < PCI_NUM_PINS; i++) { | 47 | + for (i = 0; i < PCI_NUM_PINS; i++) { |
48 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, | 48 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, |
49 | qdev_get_gpio_in(sms->gic, irq + i)); | 49 | qdev_get_gpio_in(sms->gic, irq + i)); |
50 | gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); | 50 | gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); |
51 | diff --git a/hw/arm/virt.c b/hw/arm/virt.c | 51 | diff --git a/hw/arm/virt.c b/hw/arm/virt.c |
52 | index XXXXXXX..XXXXXXX 100644 | 52 | index XXXXXXX..XXXXXXX 100644 |
53 | --- a/hw/arm/virt.c | 53 | --- a/hw/arm/virt.c |
54 | +++ b/hw/arm/virt.c | 54 | +++ b/hw/arm/virt.c |
55 | @@ -XXX,XX +XXX,XX @@ static void create_pcie(VirtMachineState *vms) | 55 | @@ -XXX,XX +XXX,XX @@ static void create_pcie(VirtMachineState *vms) |
56 | /* Map IO port space */ | 56 | /* Map IO port space */ |
57 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); | 57 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); |
58 | 58 | ||
59 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | 59 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { |
60 | + for (i = 0; i < PCI_NUM_PINS; i++) { | 60 | + for (i = 0; i < PCI_NUM_PINS; i++) { |
61 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, | 61 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, |
62 | qdev_get_gpio_in(vms->gic, irq + i)); | 62 | qdev_get_gpio_in(vms->gic, irq + i)); |
63 | gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); | 63 | gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); |
64 | diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c | 64 | diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c |
65 | index XXXXXXX..XXXXXXX 100644 | 65 | index XXXXXXX..XXXXXXX 100644 |
66 | --- a/hw/i386/microvm.c | 66 | --- a/hw/i386/microvm.c |
67 | +++ b/hw/i386/microvm.c | 67 | +++ b/hw/i386/microvm.c |
68 | @@ -XXX,XX +XXX,XX @@ static void create_gpex(MicrovmMachineState *mms) | 68 | @@ -XXX,XX +XXX,XX @@ static void create_gpex(MicrovmMachineState *mms) |
69 | mms->gpex.mmio64.base, mmio64_alias); | 69 | mms->gpex.mmio64.base, mmio64_alias); |
70 | } | 70 | } |
71 | 71 | ||
72 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | 72 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { |
73 | + for (i = 0; i < PCI_NUM_PINS; i++) { | 73 | + for (i = 0; i < PCI_NUM_PINS; i++) { |
74 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, | 74 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, |
75 | x86ms->gsi[mms->gpex.irq + i]); | 75 | x86ms->gsi[mms->gpex.irq + i]); |
76 | } | 76 | } |
77 | diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c | 77 | diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c |
78 | index XXXXXXX..XXXXXXX 100644 | 78 | index XXXXXXX..XXXXXXX 100644 |
79 | --- a/hw/loongarch/virt.c | 79 | --- a/hw/loongarch/virt.c |
80 | +++ b/hw/loongarch/virt.c | 80 | +++ b/hw/loongarch/virt.c |
81 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, | 81 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, |
82 | { | 82 | { |
83 | int pin, dev; | 83 | int pin, dev; |
84 | uint32_t irq_map_stride = 0; | 84 | uint32_t irq_map_stride = 0; |
85 | - uint32_t full_irq_map[GPEX_NUM_IRQS *GPEX_NUM_IRQS * 10] = {}; | 85 | - uint32_t full_irq_map[GPEX_NUM_IRQS *GPEX_NUM_IRQS * 10] = {}; |
86 | + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * 10] = {}; | 86 | + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * 10] = {}; |
87 | uint32_t *irq_map = full_irq_map; | 87 | uint32_t *irq_map = full_irq_map; |
88 | const MachineState *ms = MACHINE(lvms); | 88 | const MachineState *ms = MACHINE(lvms); |
89 | 89 | ||
90 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, | 90 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, |
91 | * to wrap to any number of devices. | 91 | * to wrap to any number of devices. |
92 | */ | 92 | */ |
93 | 93 | ||
94 | - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { | 94 | - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { |
95 | + for (dev = 0; dev < PCI_NUM_PINS; dev++) { | 95 | + for (dev = 0; dev < PCI_NUM_PINS; dev++) { |
96 | int devfn = dev * 0x8; | 96 | int devfn = dev * 0x8; |
97 | 97 | ||
98 | - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { | 98 | - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { |
99 | - int irq_nr = 16 + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); | 99 | - int irq_nr = 16 + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); |
100 | + for (pin = 0; pin < PCI_NUM_PINS; pin++) { | 100 | + for (pin = 0; pin < PCI_NUM_PINS; pin++) { |
101 | + int irq_nr = 16 + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); | 101 | + int irq_nr = 16 + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); |
102 | int i = 0; | 102 | int i = 0; |
103 | 103 | ||
104 | /* Fill PCI address cells */ | 104 | /* Fill PCI address cells */ |
105 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, | 105 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, |
106 | 106 | ||
107 | 107 | ||
108 | qemu_fdt_setprop(ms->fdt, nodename, "interrupt-map", full_irq_map, | 108 | qemu_fdt_setprop(ms->fdt, nodename, "interrupt-map", full_irq_map, |
109 | - GPEX_NUM_IRQS * GPEX_NUM_IRQS * | 109 | - GPEX_NUM_IRQS * GPEX_NUM_IRQS * |
110 | + PCI_NUM_PINS * PCI_NUM_PINS * | 110 | + PCI_NUM_PINS * PCI_NUM_PINS * |
111 | irq_map_stride * sizeof(uint32_t)); | 111 | irq_map_stride * sizeof(uint32_t)); |
112 | qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupt-map-mask", | 112 | qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupt-map-mask", |
113 | 0x1800, 0, 0, 0x7); | 113 | 0x1800, 0, 0, 0x7); |
114 | @@ -XXX,XX +XXX,XX @@ static void virt_devices_init(DeviceState *pch_pic, | 114 | @@ -XXX,XX +XXX,XX @@ static void virt_devices_init(DeviceState *pch_pic, |
115 | memory_region_add_subregion(get_system_memory(), VIRT_PCI_IO_BASE, | 115 | memory_region_add_subregion(get_system_memory(), VIRT_PCI_IO_BASE, |
116 | pio_alias); | 116 | pio_alias); |
117 | 117 | ||
118 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | 118 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { |
119 | + for (i = 0; i < PCI_NUM_PINS; i++) { | 119 | + for (i = 0; i < PCI_NUM_PINS; i++) { |
120 | sysbus_connect_irq(d, i, | 120 | sysbus_connect_irq(d, i, |
121 | qdev_get_gpio_in(pch_pic, 16 + i)); | 121 | qdev_get_gpio_in(pch_pic, 16 + i)); |
122 | gpex_set_irq_num(GPEX_HOST(gpex_dev), i, 16 + i); | 122 | gpex_set_irq_num(GPEX_HOST(gpex_dev), i, 16 + i); |
123 | diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c | 123 | diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c |
124 | index XXXXXXX..XXXXXXX 100644 | 124 | index XXXXXXX..XXXXXXX 100644 |
125 | --- a/hw/mips/loongson3_virt.c | 125 | --- a/hw/mips/loongson3_virt.c |
126 | +++ b/hw/mips/loongson3_virt.c | 126 | +++ b/hw/mips/loongson3_virt.c |
127 | @@ -XXX,XX +XXX,XX @@ static inline void loongson3_virt_devices_init(MachineState *machine, | 127 | @@ -XXX,XX +XXX,XX @@ static inline void loongson3_virt_devices_init(MachineState *machine, |
128 | virt_memmap[VIRT_PCIE_PIO].base, s->pio_alias); | 128 | virt_memmap[VIRT_PCIE_PIO].base, s->pio_alias); |
129 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, virt_memmap[VIRT_PCIE_PIO].base); | 129 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, virt_memmap[VIRT_PCIE_PIO].base); |
130 | 130 | ||
131 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | 131 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { |
132 | + for (i = 0; i < PCI_NUM_PINS; i++) { | 132 | + for (i = 0; i < PCI_NUM_PINS; i++) { |
133 | irq = qdev_get_gpio_in(pic, PCIE_IRQ_BASE + i); | 133 | irq = qdev_get_gpio_in(pic, PCIE_IRQ_BASE + i); |
134 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); | 134 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); |
135 | gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ_BASE + i); | 135 | gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ_BASE + i); |
136 | diff --git a/hw/openrisc/virt.c b/hw/openrisc/virt.c | 136 | diff --git a/hw/openrisc/virt.c b/hw/openrisc/virt.c |
137 | index XXXXXXX..XXXXXXX 100644 | 137 | index XXXXXXX..XXXXXXX 100644 |
138 | --- a/hw/openrisc/virt.c | 138 | --- a/hw/openrisc/virt.c |
139 | +++ b/hw/openrisc/virt.c | 139 | +++ b/hw/openrisc/virt.c |
140 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, | 140 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, |
141 | { | 141 | { |
142 | int pin, dev; | 142 | int pin, dev; |
143 | uint32_t irq_map_stride = 0; | 143 | uint32_t irq_map_stride = 0; |
144 | - uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * 6] = {}; | 144 | - uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * 6] = {}; |
145 | + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * 6] = {}; | 145 | + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * 6] = {}; |
146 | uint32_t *irq_map = full_irq_map; | 146 | uint32_t *irq_map = full_irq_map; |
147 | 147 | ||
148 | /* | 148 | /* |
149 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, | 149 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, |
150 | * possible slot) seeing the interrupt-map-mask will allow the table | 150 | * possible slot) seeing the interrupt-map-mask will allow the table |
151 | * to wrap to any number of devices. | 151 | * to wrap to any number of devices. |
152 | */ | 152 | */ |
153 | - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { | 153 | - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { |
154 | + for (dev = 0; dev < PCI_NUM_PINS; dev++) { | 154 | + for (dev = 0; dev < PCI_NUM_PINS; dev++) { |
155 | int devfn = dev << 3; | 155 | int devfn = dev << 3; |
156 | 156 | ||
157 | - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { | 157 | - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { |
158 | - int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); | 158 | - int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); |
159 | + for (pin = 0; pin < PCI_NUM_PINS; pin++) { | 159 | + for (pin = 0; pin < PCI_NUM_PINS; pin++) { |
160 | + int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); | 160 | + int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); |
161 | int i = 0; | 161 | int i = 0; |
162 | 162 | ||
163 | /* Fill PCI address cells */ | 163 | /* Fill PCI address cells */ |
164 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, | 164 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, |
165 | } | 165 | } |
166 | 166 | ||
167 | qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map, | 167 | qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map, |
168 | - GPEX_NUM_IRQS * GPEX_NUM_IRQS * | 168 | - GPEX_NUM_IRQS * GPEX_NUM_IRQS * |
169 | + PCI_NUM_PINS * PCI_NUM_PINS * | 169 | + PCI_NUM_PINS * PCI_NUM_PINS * |
170 | irq_map_stride * sizeof(uint32_t)); | 170 | irq_map_stride * sizeof(uint32_t)); |
171 | 171 | ||
172 | qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", | 172 | qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", |
173 | @@ -XXX,XX +XXX,XX @@ static void openrisc_virt_pcie_init(OR1KVirtState *state, | 173 | @@ -XXX,XX +XXX,XX @@ static void openrisc_virt_pcie_init(OR1KVirtState *state, |
174 | memory_region_add_subregion(get_system_memory(), pio_base, alias); | 174 | memory_region_add_subregion(get_system_memory(), pio_base, alias); |
175 | 175 | ||
176 | /* Connect IRQ lines. */ | 176 | /* Connect IRQ lines. */ |
177 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | 177 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { |
178 | + for (i = 0; i < PCI_NUM_PINS; i++) { | 178 | + for (i = 0; i < PCI_NUM_PINS; i++) { |
179 | pcie_irq = get_per_cpu_irq(cpus, num_cpus, irq_base + i); | 179 | pcie_irq = get_per_cpu_irq(cpus, num_cpus, irq_base + i); |
180 | 180 | ||
181 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pcie_irq); | 181 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pcie_irq); |
182 | diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c | 182 | diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c |
183 | index XXXXXXX..XXXXXXX 100644 | 183 | index XXXXXXX..XXXXXXX 100644 |
184 | --- a/hw/pci-host/gpex.c | 184 | --- a/hw/pci-host/gpex.c |
185 | +++ b/hw/pci-host/gpex.c | 185 | +++ b/hw/pci-host/gpex.c |
186 | @@ -XXX,XX +XXX,XX @@ | 186 | @@ -XXX,XX +XXX,XX @@ |
187 | #include "qemu/osdep.h" | 187 | #include "qemu/osdep.h" |
188 | #include "qapi/error.h" | 188 | #include "qapi/error.h" |
189 | #include "hw/irq.h" | 189 | #include "hw/irq.h" |
190 | +#include "hw/pci/pci_bus.h" | 190 | +#include "hw/pci/pci_bus.h" |
191 | #include "hw/pci-host/gpex.h" | 191 | #include "hw/pci-host/gpex.h" |
192 | #include "hw/qdev-properties.h" | 192 | #include "hw/qdev-properties.h" |
193 | #include "migration/vmstate.h" | 193 | #include "migration/vmstate.h" |
194 | @@ -XXX,XX +XXX,XX @@ | 194 | @@ -XXX,XX +XXX,XX @@ |
195 | * GPEX host | 195 | * GPEX host |
196 | */ | 196 | */ |
197 | 197 | ||
198 | +struct GPEXIrq { | 198 | +struct GPEXIrq { |
199 | + qemu_irq irq; | 199 | + qemu_irq irq; |
200 | + int irq_num; | 200 | + int irq_num; |
201 | +}; | 201 | +}; |
202 | + | 202 | + |
203 | static void gpex_set_irq(void *opaque, int irq_num, int level) | 203 | static void gpex_set_irq(void *opaque, int irq_num, int level) |
204 | { | 204 | { |
205 | GPEXHost *s = opaque; | 205 | GPEXHost *s = opaque; |
206 | 206 | ||
207 | - qemu_set_irq(s->irq[irq_num], level); | 207 | - qemu_set_irq(s->irq[irq_num], level); |
208 | + qemu_set_irq(s->irq[irq_num].irq, level); | 208 | + qemu_set_irq(s->irq[irq_num].irq, level); |
209 | } | 209 | } |
210 | 210 | ||
211 | int gpex_set_irq_num(GPEXHost *s, int index, int gsi) | 211 | int gpex_set_irq_num(GPEXHost *s, int index, int gsi) |
212 | { | 212 | { |
213 | - if (index >= GPEX_NUM_IRQS) { | 213 | - if (index >= GPEX_NUM_IRQS) { |
214 | + if (index >= s->num_irqs) { | 214 | + if (index >= s->num_irqs) { |
215 | return -EINVAL; | 215 | return -EINVAL; |
216 | } | 216 | } |
217 | 217 | ||
218 | - s->irq_num[index] = gsi; | 218 | - s->irq_num[index] = gsi; |
219 | + s->irq[index].irq_num = gsi; | 219 | + s->irq[index].irq_num = gsi; |
220 | return 0; | 220 | return 0; |
221 | } | 221 | } |
222 | 222 | ||
223 | @@ -XXX,XX +XXX,XX @@ static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin) | 223 | @@ -XXX,XX +XXX,XX @@ static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin) |
224 | { | 224 | { |
225 | PCIINTxRoute route; | 225 | PCIINTxRoute route; |
226 | GPEXHost *s = opaque; | 226 | GPEXHost *s = opaque; |
227 | - int gsi = s->irq_num[pin]; | 227 | - int gsi = s->irq_num[pin]; |
228 | + int gsi = s->irq[pin].irq_num; | 228 | + int gsi = s->irq[pin].irq_num; |
229 | 229 | ||
230 | route.irq = gsi; | 230 | route.irq = gsi; |
231 | if (gsi < 0) { | 231 | if (gsi < 0) { |
232 | @@ -XXX,XX +XXX,XX @@ static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin) | 232 | @@ -XXX,XX +XXX,XX @@ static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin) |
233 | return route; | 233 | return route; |
234 | } | 234 | } |
235 | 235 | ||
236 | +static int gpex_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) | 236 | +static int gpex_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) |
237 | +{ | 237 | +{ |
238 | + PCIBus *bus = pci_device_root_bus(pci_dev); | 238 | + PCIBus *bus = pci_device_root_bus(pci_dev); |
239 | + | 239 | + |
240 | + return (PCI_SLOT(pci_dev->devfn) + pin) % bus->nirq; | 240 | + return (PCI_SLOT(pci_dev->devfn) + pin) % bus->nirq; |
241 | +} | 241 | +} |
242 | + | 242 | + |
243 | static void gpex_host_realize(DeviceState *dev, Error **errp) | 243 | static void gpex_host_realize(DeviceState *dev, Error **errp) |
244 | { | 244 | { |
245 | PCIHostState *pci = PCI_HOST_BRIDGE(dev); | 245 | PCIHostState *pci = PCI_HOST_BRIDGE(dev); |
246 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_realize(DeviceState *dev, Error **errp) | 246 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_realize(DeviceState *dev, Error **errp) |
247 | PCIExpressHost *pex = PCIE_HOST_BRIDGE(dev); | 247 | PCIExpressHost *pex = PCIE_HOST_BRIDGE(dev); |
248 | int i; | 248 | int i; |
249 | 249 | ||
250 | + s->irq = g_malloc0_n(s->num_irqs, sizeof(*s->irq)); | 250 | + s->irq = g_malloc0_n(s->num_irqs, sizeof(*s->irq)); |
251 | + | 251 | + |
252 | pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX); | 252 | pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX); |
253 | sysbus_init_mmio(sbd, &pex->mmio); | 253 | sysbus_init_mmio(sbd, &pex->mmio); |
254 | 254 | ||
255 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_realize(DeviceState *dev, Error **errp) | 255 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_realize(DeviceState *dev, Error **errp) |
256 | sysbus_init_mmio(sbd, &s->io_ioport); | 256 | sysbus_init_mmio(sbd, &s->io_ioport); |
257 | } | 257 | } |
258 | 258 | ||
259 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | 259 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { |
260 | - sysbus_init_irq(sbd, &s->irq[i]); | 260 | - sysbus_init_irq(sbd, &s->irq[i]); |
261 | - s->irq_num[i] = -1; | 261 | - s->irq_num[i] = -1; |
262 | + for (i = 0; i < s->num_irqs; i++) { | 262 | + for (i = 0; i < s->num_irqs; i++) { |
263 | + sysbus_init_irq(sbd, &s->irq[i].irq); | 263 | + sysbus_init_irq(sbd, &s->irq[i].irq); |
264 | + s->irq[i].irq_num = -1; | 264 | + s->irq[i].irq_num = -1; |
265 | } | 265 | } |
266 | 266 | ||
267 | pci->bus = pci_register_root_bus(dev, "pcie.0", gpex_set_irq, | 267 | pci->bus = pci_register_root_bus(dev, "pcie.0", gpex_set_irq, |
268 | - pci_swizzle_map_irq_fn, s, &s->io_mmio, | 268 | - pci_swizzle_map_irq_fn, s, &s->io_mmio, |
269 | - &s->io_ioport, 0, 4, TYPE_PCIE_BUS); | 269 | - &s->io_ioport, 0, 4, TYPE_PCIE_BUS); |
270 | + gpex_swizzle_map_irq_fn, | 270 | + gpex_swizzle_map_irq_fn, |
271 | + s, &s->io_mmio, &s->io_ioport, 0, | 271 | + s, &s->io_mmio, &s->io_ioport, 0, |
272 | + s->num_irqs, TYPE_PCIE_BUS); | 272 | + s->num_irqs, TYPE_PCIE_BUS); |
273 | 273 | ||
274 | pci_bus_set_route_irq_fn(pci->bus, gpex_route_intx_pin_to_irq); | 274 | pci_bus_set_route_irq_fn(pci->bus, gpex_route_intx_pin_to_irq); |
275 | qdev_realize(DEVICE(&s->gpex_root), BUS(pci->bus), &error_fatal); | 275 | qdev_realize(DEVICE(&s->gpex_root), BUS(pci->bus), &error_fatal); |
276 | } | 276 | } |
277 | 277 | ||
278 | +static void gpex_host_unrealize(DeviceState *dev) | 278 | +static void gpex_host_unrealize(DeviceState *dev) |
279 | +{ | 279 | +{ |
280 | + GPEXHost *s = GPEX_HOST(dev); | 280 | + GPEXHost *s = GPEX_HOST(dev); |
281 | + | 281 | + |
282 | + g_free(s->irq); | 282 | + g_free(s->irq); |
283 | +} | 283 | +} |
284 | + | 284 | + |
285 | static const char *gpex_host_root_bus_path(PCIHostState *host_bridge, | 285 | static const char *gpex_host_root_bus_path(PCIHostState *host_bridge, |
286 | PCIBus *rootbus) | 286 | PCIBus *rootbus) |
287 | { | 287 | { |
288 | @@ -XXX,XX +XXX,XX @@ static Property gpex_host_properties[] = { | 288 | @@ -XXX,XX +XXX,XX @@ static Property gpex_host_properties[] = { |
289 | gpex_cfg.mmio64.base, 0), | 289 | gpex_cfg.mmio64.base, 0), |
290 | DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MMIO_SIZE, GPEXHost, | 290 | DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MMIO_SIZE, GPEXHost, |
291 | gpex_cfg.mmio64.size, 0), | 291 | gpex_cfg.mmio64.size, 0), |
292 | + DEFINE_PROP_UINT8("num-irqs", GPEXHost, num_irqs, PCI_NUM_PINS), | 292 | + DEFINE_PROP_UINT8("num-irqs", GPEXHost, num_irqs, PCI_NUM_PINS), |
293 | DEFINE_PROP_END_OF_LIST(), | 293 | DEFINE_PROP_END_OF_LIST(), |
294 | }; | 294 | }; |
295 | 295 | ||
296 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_class_init(ObjectClass *klass, void *data) | 296 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_class_init(ObjectClass *klass, void *data) |
297 | 297 | ||
298 | hc->root_bus_path = gpex_host_root_bus_path; | 298 | hc->root_bus_path = gpex_host_root_bus_path; |
299 | dc->realize = gpex_host_realize; | 299 | dc->realize = gpex_host_realize; |
300 | + dc->unrealize = gpex_host_unrealize; | 300 | + dc->unrealize = gpex_host_unrealize; |
301 | set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); | 301 | set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); |
302 | dc->fw_name = "pci"; | 302 | dc->fw_name = "pci"; |
303 | device_class_set_props(dc, gpex_host_properties); | 303 | device_class_set_props(dc, gpex_host_properties); |
304 | diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c | 304 | diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c |
305 | index XXXXXXX..XXXXXXX 100644 | 305 | index XXXXXXX..XXXXXXX 100644 |
306 | --- a/hw/riscv/virt.c | 306 | --- a/hw/riscv/virt.c |
307 | +++ b/hw/riscv/virt.c | 307 | +++ b/hw/riscv/virt.c |
308 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, | 308 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, |
309 | { | 309 | { |
310 | int pin, dev; | 310 | int pin, dev; |
311 | uint32_t irq_map_stride = 0; | 311 | uint32_t irq_map_stride = 0; |
312 | - uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * | 312 | - uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * |
313 | + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * | 313 | + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * |
314 | FDT_MAX_INT_MAP_WIDTH] = {}; | 314 | FDT_MAX_INT_MAP_WIDTH] = {}; |
315 | uint32_t *irq_map = full_irq_map; | 315 | uint32_t *irq_map = full_irq_map; |
316 | 316 | ||
317 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, | 317 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, |
318 | * possible slot) seeing the interrupt-map-mask will allow the table | 318 | * possible slot) seeing the interrupt-map-mask will allow the table |
319 | * to wrap to any number of devices. | 319 | * to wrap to any number of devices. |
320 | */ | 320 | */ |
321 | - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { | 321 | - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { |
322 | + for (dev = 0; dev < PCI_NUM_PINS; dev++) { | 322 | + for (dev = 0; dev < PCI_NUM_PINS; dev++) { |
323 | int devfn = dev * 0x8; | 323 | int devfn = dev * 0x8; |
324 | 324 | ||
325 | - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { | 325 | - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { |
326 | - int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); | 326 | - int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); |
327 | + for (pin = 0; pin < PCI_NUM_PINS; pin++) { | 327 | + for (pin = 0; pin < PCI_NUM_PINS; pin++) { |
328 | + int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); | 328 | + int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); |
329 | int i = 0; | 329 | int i = 0; |
330 | 330 | ||
331 | /* Fill PCI address cells */ | 331 | /* Fill PCI address cells */ |
332 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, | 332 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, |
333 | } | 333 | } |
334 | 334 | ||
335 | qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map, | 335 | qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map, |
336 | - GPEX_NUM_IRQS * GPEX_NUM_IRQS * | 336 | - GPEX_NUM_IRQS * GPEX_NUM_IRQS * |
337 | + PCI_NUM_PINS * PCI_NUM_PINS * | 337 | + PCI_NUM_PINS * PCI_NUM_PINS * |
338 | irq_map_stride * sizeof(uint32_t)); | 338 | irq_map_stride * sizeof(uint32_t)); |
339 | 339 | ||
340 | qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", | 340 | qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", |
341 | @@ -XXX,XX +XXX,XX @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, | 341 | @@ -XXX,XX +XXX,XX @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, |
342 | 342 | ||
343 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base); | 343 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base); |
344 | 344 | ||
345 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | 345 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { |
346 | + for (i = 0; i < PCI_NUM_PINS; i++) { | 346 | + for (i = 0; i < PCI_NUM_PINS; i++) { |
347 | irq = qdev_get_gpio_in(irqchip, PCIE_IRQ + i); | 347 | irq = qdev_get_gpio_in(irqchip, PCIE_IRQ + i); |
348 | 348 | ||
349 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); | 349 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); |
350 | diff --git a/hw/xen/xen-pvh-common.c b/hw/xen/xen-pvh-common.c | 350 | diff --git a/hw/xen/xen-pvh-common.c b/hw/xen/xen-pvh-common.c |
351 | index XXXXXXX..XXXXXXX 100644 | 351 | index XXXXXXX..XXXXXXX 100644 |
352 | --- a/hw/xen/xen-pvh-common.c | 352 | --- a/hw/xen/xen-pvh-common.c |
353 | +++ b/hw/xen/xen-pvh-common.c | 353 | +++ b/hw/xen/xen-pvh-common.c |
354 | @@ -XXX,XX +XXX,XX @@ static inline void xenpvh_gpex_init(XenPVHMachineState *s, | 354 | @@ -XXX,XX +XXX,XX @@ static inline void xenpvh_gpex_init(XenPVHMachineState *s, |
355 | */ | 355 | */ |
356 | assert(xpc->set_pci_intx_irq); | 356 | assert(xpc->set_pci_intx_irq); |
357 | 357 | ||
358 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | 358 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { |
359 | + for (i = 0; i < PCI_NUM_PINS; i++) { | 359 | + for (i = 0; i < PCI_NUM_PINS; i++) { |
360 | qemu_irq irq = qemu_allocate_irq(xpc->set_pci_intx_irq, s, i); | 360 | qemu_irq irq = qemu_allocate_irq(xpc->set_pci_intx_irq, s, i); |
361 | 361 | ||
362 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); | 362 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); |
363 | diff --git a/hw/xtensa/virt.c b/hw/xtensa/virt.c | 363 | diff --git a/hw/xtensa/virt.c b/hw/xtensa/virt.c |
364 | index XXXXXXX..XXXXXXX 100644 | 364 | index XXXXXXX..XXXXXXX 100644 |
365 | --- a/hw/xtensa/virt.c | 365 | --- a/hw/xtensa/virt.c |
366 | +++ b/hw/xtensa/virt.c | 366 | +++ b/hw/xtensa/virt.c |
367 | @@ -XXX,XX +XXX,XX @@ static void create_pcie(MachineState *ms, CPUXtensaState *env, int irq_base, | 367 | @@ -XXX,XX +XXX,XX @@ static void create_pcie(MachineState *ms, CPUXtensaState *env, int irq_base, |
368 | /* Connect IRQ lines. */ | 368 | /* Connect IRQ lines. */ |
369 | extints = xtensa_get_extints(env); | 369 | extints = xtensa_get_extints(env); |
370 | 370 | ||
371 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | 371 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { |
372 | + for (i = 0; i < PCI_NUM_PINS; i++) { | 372 | + for (i = 0; i < PCI_NUM_PINS; i++) { |
373 | void *q = extints[irq_base + i]; | 373 | void *q = extints[irq_base + i]; |
374 | 374 | ||
375 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, q); | 375 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, q); |
376 | diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h | 376 | diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h |
377 | index XXXXXXX..XXXXXXX 100644 | 377 | index XXXXXXX..XXXXXXX 100644 |
378 | --- a/include/hw/pci-host/gpex.h | 378 | --- a/include/hw/pci-host/gpex.h |
379 | +++ b/include/hw/pci-host/gpex.h | 379 | +++ b/include/hw/pci-host/gpex.h |
380 | @@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(GPEXHost, GPEX_HOST) | 380 | @@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(GPEXHost, GPEX_HOST) |
381 | #define TYPE_GPEX_ROOT_DEVICE "gpex-root" | 381 | #define TYPE_GPEX_ROOT_DEVICE "gpex-root" |
382 | OBJECT_DECLARE_SIMPLE_TYPE(GPEXRootState, GPEX_ROOT_DEVICE) | 382 | OBJECT_DECLARE_SIMPLE_TYPE(GPEXRootState, GPEX_ROOT_DEVICE) |
383 | 383 | ||
384 | -#define GPEX_NUM_IRQS 4 | 384 | -#define GPEX_NUM_IRQS 4 |
385 | - | 385 | - |
386 | struct GPEXRootState { | 386 | struct GPEXRootState { |
387 | /*< private >*/ | 387 | /*< private >*/ |
388 | PCIDevice parent_obj; | 388 | PCIDevice parent_obj; |
389 | @@ -XXX,XX +XXX,XX @@ struct GPEXConfig { | 389 | @@ -XXX,XX +XXX,XX @@ struct GPEXConfig { |
390 | PCIBus *bus; | 390 | PCIBus *bus; |
391 | }; | 391 | }; |
392 | 392 | ||
393 | +typedef struct GPEXIrq GPEXIrq; | 393 | +typedef struct GPEXIrq GPEXIrq; |
394 | struct GPEXHost { | 394 | struct GPEXHost { |
395 | /*< private >*/ | 395 | /*< private >*/ |
396 | PCIExpressHost parent_obj; | 396 | PCIExpressHost parent_obj; |
397 | @@ -XXX,XX +XXX,XX @@ struct GPEXHost { | 397 | @@ -XXX,XX +XXX,XX @@ struct GPEXHost { |
398 | MemoryRegion io_mmio; | 398 | MemoryRegion io_mmio; |
399 | MemoryRegion io_ioport_window; | 399 | MemoryRegion io_ioport_window; |
400 | MemoryRegion io_mmio_window; | 400 | MemoryRegion io_mmio_window; |
401 | - qemu_irq irq[GPEX_NUM_IRQS]; | 401 | - qemu_irq irq[GPEX_NUM_IRQS]; |
402 | - int irq_num[GPEX_NUM_IRQS]; | 402 | - int irq_num[GPEX_NUM_IRQS]; |
403 | + GPEXIrq *irq; | 403 | + GPEXIrq *irq; |
404 | + uint8_t num_irqs; | 404 | + uint8_t num_irqs; |
405 | 405 | ||
406 | bool allow_unmapped_accesses; | 406 | bool allow_unmapped_accesses; |
407 | 407 | ||
408 | -- | 408 | -- |
409 | 2.39.5 (Apple Git-154) | 409 | 2.39.5 (Apple Git-154) | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | 1 | From: Alexander Graf <graf@amazon.com> | |
2 | |||
3 | Some boards such as vmapple don't do real legacy PCI IRQ swizzling. | ||
4 | Instead, they just keep allocating more board IRQ lines for each new | ||
5 | legacy IRQ. Let's support that mode by giving instantiators a new | ||
6 | "nr_irqs" property they can use to support more than 4 legacy IRQ lines. | ||
7 | In this mode, GPEX will export more IRQ lines, one for each device. | ||
8 | |||
9 | Signed-off-by: Alexander Graf <graf@amazon.com> | ||
10 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
11 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
12 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
13 | --- | ||
14 | |||
15 | v4: | ||
16 | |||
17 | * Turned pair of IRQ arrays into array of structs. | ||
18 | * Simplified swizzling logic selection. | ||
19 | |||
20 | v12: | ||
21 | |||
22 | * Fixed uses of deleted GPEX_NUM_IRQS constant that have been | ||
23 | added to QEMU since this patch was originally written. | ||
24 | |||
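As a usage sketch only (not part of this patch): a board model that wants one
INTx line per device slot could set the new "num-irqs" property before
realizing the GPEX host, roughly as below. The property name, its PCI_NUM_PINS
default and gpex_set_irq_num() come from this patch; board_intc and
FIRST_PCIE_IRQ are made-up placeholders for whatever interrupt controller and
IRQ base a real board would use, and MMIO/PIO window mapping is omitted.

    DeviceState *dev = qdev_new(TYPE_GPEX_HOST);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    int i;

    /* Ask GPEX for 16 INTx lines instead of the default PCI_NUM_PINS (4). */
    qdev_prop_set_uint8(dev, "num-irqs", 16);
    sysbus_realize_and_unref(sbd, &error_fatal);

    for (i = 0; i < 16; i++) {
        /* Wire each GPEX output to its own board IRQ line. */
        sysbus_connect_irq(sbd, i,
                           qdev_get_gpio_in(board_intc, FIRST_PCIE_IRQ + i));
        gpex_set_irq_num(GPEX_HOST(dev), i, FIRST_PCIE_IRQ + i);
    }

Leaving the property at its default keeps today's behaviour, since the new
swizzle callback takes the interrupt pin modulo the bus's IRQ count.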
25 | hw/arm/sbsa-ref.c | 2 +- | ||
26 | hw/arm/virt.c | 2 +- | ||
27 | hw/i386/microvm.c | 2 +- | ||
28 | hw/loongarch/virt.c | 12 +++++------ | ||
29 | hw/mips/loongson3_virt.c | 2 +- | ||
30 | hw/openrisc/virt.c | 12 +++++------ | ||
31 | hw/pci-host/gpex.c | 43 ++++++++++++++++++++++++++++++-------- | ||
32 | hw/riscv/virt.c | 12 +++++------ | ||
33 | hw/xen/xen-pvh-common.c | 2 +- | ||
34 | hw/xtensa/virt.c | 2 +- | ||
35 | include/hw/pci-host/gpex.h | 7 +++---- | ||
36 | 11 files changed, 61 insertions(+), 37 deletions(-) | ||
37 | |||
38 | diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c | ||
39 | index XXXXXXX..XXXXXXX 100644 | ||
40 | --- a/hw/arm/sbsa-ref.c | ||
41 | +++ b/hw/arm/sbsa-ref.c | ||
42 | @@ -XXX,XX +XXX,XX @@ static void create_pcie(SBSAMachineState *sms) | ||
43 | /* Map IO port space */ | ||
44 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); | ||
45 | |||
46 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
47 | + for (i = 0; i < PCI_NUM_PINS; i++) { | ||
48 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, | ||
49 | qdev_get_gpio_in(sms->gic, irq + i)); | ||
50 | gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); | ||
51 | diff --git a/hw/arm/virt.c b/hw/arm/virt.c | ||
52 | index XXXXXXX..XXXXXXX 100644 | ||
53 | --- a/hw/arm/virt.c | ||
54 | +++ b/hw/arm/virt.c | ||
55 | @@ -XXX,XX +XXX,XX @@ static void create_pcie(VirtMachineState *vms) | ||
56 | /* Map IO port space */ | ||
57 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); | ||
58 | |||
59 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
60 | + for (i = 0; i < PCI_NUM_PINS; i++) { | ||
61 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, | ||
62 | qdev_get_gpio_in(vms->gic, irq + i)); | ||
63 | gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); | ||
64 | diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c | ||
65 | index XXXXXXX..XXXXXXX 100644 | ||
66 | --- a/hw/i386/microvm.c | ||
67 | +++ b/hw/i386/microvm.c | ||
68 | @@ -XXX,XX +XXX,XX @@ static void create_gpex(MicrovmMachineState *mms) | ||
69 | mms->gpex.mmio64.base, mmio64_alias); | ||
70 | } | ||
71 | |||
72 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
73 | + for (i = 0; i < PCI_NUM_PINS; i++) { | ||
74 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, | ||
75 | x86ms->gsi[mms->gpex.irq + i]); | ||
76 | } | ||
77 | diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c | ||
78 | index XXXXXXX..XXXXXXX 100644 | ||
79 | --- a/hw/loongarch/virt.c | ||
80 | +++ b/hw/loongarch/virt.c | ||
81 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, | ||
82 | { | ||
83 | int pin, dev; | ||
84 | uint32_t irq_map_stride = 0; | ||
85 | - uint32_t full_irq_map[GPEX_NUM_IRQS *GPEX_NUM_IRQS * 10] = {}; | ||
86 | + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * 10] = {}; | ||
87 | uint32_t *irq_map = full_irq_map; | ||
88 | const MachineState *ms = MACHINE(lvms); | ||
89 | |||
90 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, | ||
91 | * to wrap to any number of devices. | ||
92 | */ | ||
93 | |||
94 | - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { | ||
95 | + for (dev = 0; dev < PCI_NUM_PINS; dev++) { | ||
96 | int devfn = dev * 0x8; | ||
97 | |||
98 | - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { | ||
99 | - int irq_nr = 16 + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); | ||
100 | + for (pin = 0; pin < PCI_NUM_PINS; pin++) { | ||
101 | + int irq_nr = 16 + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); | ||
102 | int i = 0; | ||
103 | |||
104 | /* Fill PCI address cells */ | ||
105 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, | ||
106 | |||
107 | |||
108 | qemu_fdt_setprop(ms->fdt, nodename, "interrupt-map", full_irq_map, | ||
109 | - GPEX_NUM_IRQS * GPEX_NUM_IRQS * | ||
110 | + PCI_NUM_PINS * PCI_NUM_PINS * | ||
111 | irq_map_stride * sizeof(uint32_t)); | ||
112 | qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupt-map-mask", | ||
113 | 0x1800, 0, 0, 0x7); | ||
114 | @@ -XXX,XX +XXX,XX @@ static void virt_devices_init(DeviceState *pch_pic, | ||
115 | memory_region_add_subregion(get_system_memory(), VIRT_PCI_IO_BASE, | ||
116 | pio_alias); | ||
117 | |||
118 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
119 | + for (i = 0; i < PCI_NUM_PINS; i++) { | ||
120 | sysbus_connect_irq(d, i, | ||
121 | qdev_get_gpio_in(pch_pic, 16 + i)); | ||
122 | gpex_set_irq_num(GPEX_HOST(gpex_dev), i, 16 + i); | ||
123 | diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c | ||
124 | index XXXXXXX..XXXXXXX 100644 | ||
125 | --- a/hw/mips/loongson3_virt.c | ||
126 | +++ b/hw/mips/loongson3_virt.c | ||
127 | @@ -XXX,XX +XXX,XX @@ static inline void loongson3_virt_devices_init(MachineState *machine, | ||
128 | virt_memmap[VIRT_PCIE_PIO].base, s->pio_alias); | ||
129 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, virt_memmap[VIRT_PCIE_PIO].base); | ||
130 | |||
131 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
132 | + for (i = 0; i < PCI_NUM_PINS; i++) { | ||
133 | irq = qdev_get_gpio_in(pic, PCIE_IRQ_BASE + i); | ||
134 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); | ||
135 | gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ_BASE + i); | ||
136 | diff --git a/hw/openrisc/virt.c b/hw/openrisc/virt.c | ||
137 | index XXXXXXX..XXXXXXX 100644 | ||
138 | --- a/hw/openrisc/virt.c | ||
139 | +++ b/hw/openrisc/virt.c | ||
140 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, | ||
141 | { | ||
142 | int pin, dev; | ||
143 | uint32_t irq_map_stride = 0; | ||
144 | - uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * 6] = {}; | ||
145 | + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * 6] = {}; | ||
146 | uint32_t *irq_map = full_irq_map; | ||
147 | |||
148 | /* | ||
149 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, | ||
150 | * possible slot) seeing the interrupt-map-mask will allow the table | ||
151 | * to wrap to any number of devices. | ||
152 | */ | ||
153 | - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { | ||
154 | + for (dev = 0; dev < PCI_NUM_PINS; dev++) { | ||
155 | int devfn = dev << 3; | ||
156 | |||
157 | - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { | ||
158 | - int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); | ||
159 | + for (pin = 0; pin < PCI_NUM_PINS; pin++) { | ||
160 | + int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); | ||
161 | int i = 0; | ||
162 | |||
163 | /* Fill PCI address cells */ | ||
164 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, | ||
165 | } | ||
166 | |||
167 | qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map, | ||
168 | - GPEX_NUM_IRQS * GPEX_NUM_IRQS * | ||
169 | + PCI_NUM_PINS * PCI_NUM_PINS * | ||
170 | irq_map_stride * sizeof(uint32_t)); | ||
171 | |||
172 | qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", | ||
173 | @@ -XXX,XX +XXX,XX @@ static void openrisc_virt_pcie_init(OR1KVirtState *state, | ||
174 | memory_region_add_subregion(get_system_memory(), pio_base, alias); | ||
175 | |||
176 | /* Connect IRQ lines. */ | ||
177 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
178 | + for (i = 0; i < PCI_NUM_PINS; i++) { | ||
179 | pcie_irq = get_per_cpu_irq(cpus, num_cpus, irq_base + i); | ||
180 | |||
181 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pcie_irq); | ||
182 | diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c | ||
183 | index XXXXXXX..XXXXXXX 100644 | ||
184 | --- a/hw/pci-host/gpex.c | ||
185 | +++ b/hw/pci-host/gpex.c | ||
186 | @@ -XXX,XX +XXX,XX @@ | ||
187 | #include "qemu/osdep.h" | ||
188 | #include "qapi/error.h" | ||
189 | #include "hw/irq.h" | ||
190 | +#include "hw/pci/pci_bus.h" | ||
191 | #include "hw/pci-host/gpex.h" | ||
192 | #include "hw/qdev-properties.h" | ||
193 | #include "migration/vmstate.h" | ||
194 | @@ -XXX,XX +XXX,XX @@ | ||
195 | * GPEX host | ||
196 | */ | ||
197 | |||
198 | +struct GPEXIrq { | ||
199 | + qemu_irq irq; | ||
200 | + int irq_num; | ||
201 | +}; | ||
202 | + | ||
203 | static void gpex_set_irq(void *opaque, int irq_num, int level) | ||
204 | { | ||
205 | GPEXHost *s = opaque; | ||
206 | |||
207 | - qemu_set_irq(s->irq[irq_num], level); | ||
208 | + qemu_set_irq(s->irq[irq_num].irq, level); | ||
209 | } | ||
210 | |||
211 | int gpex_set_irq_num(GPEXHost *s, int index, int gsi) | ||
212 | { | ||
213 | - if (index >= GPEX_NUM_IRQS) { | ||
214 | + if (index >= s->num_irqs) { | ||
215 | return -EINVAL; | ||
216 | } | ||
217 | |||
218 | - s->irq_num[index] = gsi; | ||
219 | + s->irq[index].irq_num = gsi; | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | @@ -XXX,XX +XXX,XX @@ static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin) | ||
224 | { | ||
225 | PCIINTxRoute route; | ||
226 | GPEXHost *s = opaque; | ||
227 | - int gsi = s->irq_num[pin]; | ||
228 | + int gsi = s->irq[pin].irq_num; | ||
229 | |||
230 | route.irq = gsi; | ||
231 | if (gsi < 0) { | ||
232 | @@ -XXX,XX +XXX,XX @@ static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin) | ||
233 | return route; | ||
234 | } | ||
235 | |||
236 | +static int gpex_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) | ||
237 | +{ | ||
238 | + PCIBus *bus = pci_device_root_bus(pci_dev); | ||
239 | + | ||
240 | + return (PCI_SLOT(pci_dev->devfn) + pin) % bus->nirq; | ||
241 | +} | ||
242 | + | ||
243 | static void gpex_host_realize(DeviceState *dev, Error **errp) | ||
244 | { | ||
245 | PCIHostState *pci = PCI_HOST_BRIDGE(dev); | ||
246 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_realize(DeviceState *dev, Error **errp) | ||
247 | PCIExpressHost *pex = PCIE_HOST_BRIDGE(dev); | ||
248 | int i; | ||
249 | |||
250 | + s->irq = g_malloc0_n(s->num_irqs, sizeof(*s->irq)); | ||
251 | + | ||
252 | pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX); | ||
253 | sysbus_init_mmio(sbd, &pex->mmio); | ||
254 | |||
255 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_realize(DeviceState *dev, Error **errp) | ||
256 | sysbus_init_mmio(sbd, &s->io_ioport); | ||
257 | } | ||
258 | |||
259 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
260 | - sysbus_init_irq(sbd, &s->irq[i]); | ||
261 | - s->irq_num[i] = -1; | ||
262 | + for (i = 0; i < s->num_irqs; i++) { | ||
263 | + sysbus_init_irq(sbd, &s->irq[i].irq); | ||
264 | + s->irq[i].irq_num = -1; | ||
265 | } | ||
266 | |||
267 | pci->bus = pci_register_root_bus(dev, "pcie.0", gpex_set_irq, | ||
268 | - pci_swizzle_map_irq_fn, s, &s->io_mmio, | ||
269 | - &s->io_ioport, 0, 4, TYPE_PCIE_BUS); | ||
270 | + gpex_swizzle_map_irq_fn, | ||
271 | + s, &s->io_mmio, &s->io_ioport, 0, | ||
272 | + s->num_irqs, TYPE_PCIE_BUS); | ||
273 | |||
274 | pci_bus_set_route_irq_fn(pci->bus, gpex_route_intx_pin_to_irq); | ||
275 | qdev_realize(DEVICE(&s->gpex_root), BUS(pci->bus), &error_fatal); | ||
276 | } | ||
277 | |||
278 | +static void gpex_host_unrealize(DeviceState *dev) | ||
279 | +{ | ||
280 | + GPEXHost *s = GPEX_HOST(dev); | ||
281 | + | ||
282 | + g_free(s->irq); | ||
283 | +} | ||
284 | + | ||
285 | static const char *gpex_host_root_bus_path(PCIHostState *host_bridge, | ||
286 | PCIBus *rootbus) | ||
287 | { | ||
288 | @@ -XXX,XX +XXX,XX @@ static Property gpex_host_properties[] = { | ||
289 | gpex_cfg.mmio64.base, 0), | ||
290 | DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MMIO_SIZE, GPEXHost, | ||
291 | gpex_cfg.mmio64.size, 0), | ||
292 | + DEFINE_PROP_UINT8("num-irqs", GPEXHost, num_irqs, PCI_NUM_PINS), | ||
293 | DEFINE_PROP_END_OF_LIST(), | ||
294 | }; | ||
295 | |||
296 | @@ -XXX,XX +XXX,XX @@ static void gpex_host_class_init(ObjectClass *klass, void *data) | ||
297 | |||
298 | hc->root_bus_path = gpex_host_root_bus_path; | ||
299 | dc->realize = gpex_host_realize; | ||
300 | + dc->unrealize = gpex_host_unrealize; | ||
301 | set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); | ||
302 | dc->fw_name = "pci"; | ||
303 | device_class_set_props(dc, gpex_host_properties); | ||
304 | diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c | ||
305 | index XXXXXXX..XXXXXXX 100644 | ||
306 | --- a/hw/riscv/virt.c | ||
307 | +++ b/hw/riscv/virt.c | ||
308 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, | ||
309 | { | ||
310 | int pin, dev; | ||
311 | uint32_t irq_map_stride = 0; | ||
312 | - uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * | ||
313 | + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * | ||
314 | FDT_MAX_INT_MAP_WIDTH] = {}; | ||
315 | uint32_t *irq_map = full_irq_map; | ||
316 | |||
317 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, | ||
318 | * possible slot) seeing the interrupt-map-mask will allow the table | ||
319 | * to wrap to any number of devices. | ||
320 | */ | ||
321 | - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { | ||
322 | + for (dev = 0; dev < PCI_NUM_PINS; dev++) { | ||
323 | int devfn = dev * 0x8; | ||
324 | |||
325 | - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { | ||
326 | - int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); | ||
327 | + for (pin = 0; pin < PCI_NUM_PINS; pin++) { | ||
328 | + int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); | ||
329 | int i = 0; | ||
330 | |||
331 | /* Fill PCI address cells */ | ||
332 | @@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, | ||
333 | } | ||
334 | |||
335 | qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map, | ||
336 | - GPEX_NUM_IRQS * GPEX_NUM_IRQS * | ||
337 | + PCI_NUM_PINS * PCI_NUM_PINS * | ||
338 | irq_map_stride * sizeof(uint32_t)); | ||
339 | |||
340 | qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", | ||
341 | @@ -XXX,XX +XXX,XX @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, | ||
342 | |||
343 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base); | ||
344 | |||
345 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
346 | + for (i = 0; i < PCI_NUM_PINS; i++) { | ||
347 | irq = qdev_get_gpio_in(irqchip, PCIE_IRQ + i); | ||
348 | |||
349 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); | ||
350 | diff --git a/hw/xen/xen-pvh-common.c b/hw/xen/xen-pvh-common.c | ||
351 | index XXXXXXX..XXXXXXX 100644 | ||
352 | --- a/hw/xen/xen-pvh-common.c | ||
353 | +++ b/hw/xen/xen-pvh-common.c | ||
354 | @@ -XXX,XX +XXX,XX @@ static inline void xenpvh_gpex_init(XenPVHMachineState *s, | ||
355 | */ | ||
356 | assert(xpc->set_pci_intx_irq); | ||
357 | |||
358 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
359 | + for (i = 0; i < PCI_NUM_PINS; i++) { | ||
360 | qemu_irq irq = qemu_allocate_irq(xpc->set_pci_intx_irq, s, i); | ||
361 | |||
362 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); | ||
363 | diff --git a/hw/xtensa/virt.c b/hw/xtensa/virt.c | ||
364 | index XXXXXXX..XXXXXXX 100644 | ||
365 | --- a/hw/xtensa/virt.c | ||
366 | +++ b/hw/xtensa/virt.c | ||
367 | @@ -XXX,XX +XXX,XX @@ static void create_pcie(MachineState *ms, CPUXtensaState *env, int irq_base, | ||
368 | /* Connect IRQ lines. */ | ||
369 | extints = xtensa_get_extints(env); | ||
370 | |||
371 | - for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
372 | + for (i = 0; i < PCI_NUM_PINS; i++) { | ||
373 | void *q = extints[irq_base + i]; | ||
374 | |||
375 | sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, q); | ||
376 | diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h | ||
377 | index XXXXXXX..XXXXXXX 100644 | ||
378 | --- a/include/hw/pci-host/gpex.h | ||
379 | +++ b/include/hw/pci-host/gpex.h | ||
380 | @@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(GPEXHost, GPEX_HOST) | ||
381 | #define TYPE_GPEX_ROOT_DEVICE "gpex-root" | ||
382 | OBJECT_DECLARE_SIMPLE_TYPE(GPEXRootState, GPEX_ROOT_DEVICE) | ||
383 | |||
384 | -#define GPEX_NUM_IRQS 4 | ||
385 | - | ||
386 | struct GPEXRootState { | ||
387 | /*< private >*/ | ||
388 | PCIDevice parent_obj; | ||
389 | @@ -XXX,XX +XXX,XX @@ struct GPEXConfig { | ||
390 | PCIBus *bus; | ||
391 | }; | ||
392 | |||
393 | +typedef struct GPEXIrq GPEXIrq; | ||
394 | struct GPEXHost { | ||
395 | /*< private >*/ | ||
396 | PCIExpressHost parent_obj; | ||
397 | @@ -XXX,XX +XXX,XX @@ struct GPEXHost { | ||
398 | MemoryRegion io_mmio; | ||
399 | MemoryRegion io_ioport_window; | ||
400 | MemoryRegion io_mmio_window; | ||
401 | - qemu_irq irq[GPEX_NUM_IRQS]; | ||
402 | - int irq_num[GPEX_NUM_IRQS]; | ||
403 | + GPEXIrq *irq; | ||
404 | + uint8_t num_irqs; | ||
405 | |||
406 | bool allow_unmapped_accesses; | ||
407 | |||
408 | -- | ||
409 | 2.39.5 (Apple Git-154) | diff view generated by jsdifflib |
1 | From: Alexander Graf <graf@amazon.com> | 1 | From: Alexander Graf <graf@amazon.com> |
---|---|---|---|
2 | 2 | ||
3 | VMApple contains an "aes" engine device that it uses to encrypt and | 3 | VMApple contains an "aes" engine device that it uses to encrypt and |
4 | decrypt its nvram. It has trivial hard coded keys it uses for that | 4 | decrypt its nvram. It has trivial hard coded keys it uses for that |
5 | purpose. | 5 | purpose. |
6 | 6 | ||
7 | Add device emulation for this device model. | 7 | Add device emulation for this device model. |
8 | 8 | ||
9 | Signed-off-by: Alexander Graf <graf@amazon.com> | 9 | Signed-off-by: Alexander Graf <graf@amazon.com> |
10 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 10 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
11 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 11 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
12 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 12 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
13 | --- | 13 | --- |
14 | 14 | ||
15 | v3: | 15 | v3: |
16 | 16 | ||
17 | * Rebased on latest upstream and fixed minor breakages. | 17 | * Rebased on latest upstream and fixed minor breakages. |
18 | * Replaced legacy device reset method with Resettable method | 18 | * Replaced legacy device reset method with Resettable method |
19 | 19 | ||
20 | v4: | 20 | v4: |
21 | 21 | ||
22 | * Improved logging of unimplemented functions and guest errors. | 22 | * Improved logging of unimplemented functions and guest errors. |
23 | * Better adherence to naming and coding conventions. | 23 | * Better adherence to naming and coding conventions. |
24 | * Cleaner error handling and recovery, including using g_autoptr | 24 | * Cleaner error handling and recovery, including using g_autoptr |
25 | 25 | ||
26 | v5: | 26 | v5: |
27 | 27 | ||
28 | * More logging improvements | 28 | * More logging improvements |
29 | * Use xxx64_overflow() functions for hexdump buffer size calculations. | 29 | * Use xxx64_overflow() functions for hexdump buffer size calculations. |
30 | 30 | ||
31 | v7: | 31 | v7: |
32 | 32 | ||
33 | * Coding style tweaks. | 33 | * Coding style tweaks. |
34 | 34 | ||
35 | v8: | 35 | v8: |
36 | 36 | ||
37 | * Further improved logging of guest errors. | 37 | * Further improved logging of guest errors. |
38 | 38 | ||
39 | v9: | 39 | v9: |
40 | 40 | ||
41 | * Replaced a use of cpu_physical_memory_write with dma_memory_write. | 41 | * Replaced a use of cpu_physical_memory_write with dma_memory_write. |
42 | * Dropped unnecessary use of ternary operator for bool -> 0/1. | 42 | * Dropped unnecessary use of ternary operator for bool -> 0/1. |
43 | 43 | ||
44 | v10: | 44 | v10: |
45 | 45 | ||
46 | * Code style and comment improvements. | 46 | * Code style and comment improvements. |
47 | 47 | ||
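As an illustrative sketch only (not part of this patch): a machine model could
instantiate and wire up the AES device roughly as below. TYPE_APPLE_AES is
declared in the new include/hw/vmapple/vmapple.h, and the device state below
declares two register regions (iomem1/iomem2) and a single interrupt line. The
base addresses, board_intc and AES_IRQ are invented placeholders, and any
further wiring a real machine needs (such as the DMA address space the device
uses) is left out here.

    SysBusDevice *aes = SYS_BUS_DEVICE(qdev_new(TYPE_APPLE_AES));

    sysbus_realize_and_unref(aes, &error_fatal);

    /* Map the two register blocks and connect the interrupt line. */
    sysbus_mmio_map(aes, 0, 0x50000000);   /* placeholder "aes1" base */
    sysbus_mmio_map(aes, 1, 0x50008000);   /* placeholder "aes2" base */
    sysbus_connect_irq(aes, 0, qdev_get_gpio_in(board_intc, AES_IRQ));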
48 | hw/vmapple/Kconfig | 2 + | 48 | hw/vmapple/Kconfig | 2 + |
49 | hw/vmapple/aes.c | 581 +++++++++++++++++++++++++++++++++++ | 49 | hw/vmapple/aes.c | 581 +++++++++++++++++++++++++++++++++++ |
50 | hw/vmapple/meson.build | 1 + | 50 | hw/vmapple/meson.build | 1 + |
51 | hw/vmapple/trace-events | 14 + | 51 | hw/vmapple/trace-events | 14 + |
52 | include/hw/vmapple/vmapple.h | 17 + | 52 | include/hw/vmapple/vmapple.h | 17 + |
53 | include/qemu/cutils.h | 15 + | 53 | include/qemu/cutils.h | 15 + |
54 | util/hexdump.c | 18 ++ | 54 | util/hexdump.c | 18 ++ |
55 | 7 files changed, 648 insertions(+) | 55 | 7 files changed, 648 insertions(+) |
56 | create mode 100644 hw/vmapple/aes.c | 56 | create mode 100644 hw/vmapple/aes.c |
57 | create mode 100644 include/hw/vmapple/vmapple.h | 57 | create mode 100644 include/hw/vmapple/vmapple.h |
58 | 58 | ||
59 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig | 59 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig |
60 | index XXXXXXX..XXXXXXX 100644 | 60 | index XXXXXXX..XXXXXXX 100644 |
61 | --- a/hw/vmapple/Kconfig | 61 | --- a/hw/vmapple/Kconfig |
62 | +++ b/hw/vmapple/Kconfig | 62 | +++ b/hw/vmapple/Kconfig |
63 | @@ -1 +1,3 @@ | 63 | @@ -1 +1,3 @@ |
64 | +config VMAPPLE_AES | 64 | +config VMAPPLE_AES |
65 | + bool | 65 | + bool |
66 | 66 | ||
67 | diff --git a/hw/vmapple/aes.c b/hw/vmapple/aes.c | 67 | diff --git a/hw/vmapple/aes.c b/hw/vmapple/aes.c |
68 | new file mode 100644 | 68 | new file mode 100644 |
69 | index XXXXXXX..XXXXXXX | 69 | index XXXXXXX..XXXXXXX |
70 | --- /dev/null | 70 | --- /dev/null |
71 | +++ b/hw/vmapple/aes.c | 71 | +++ b/hw/vmapple/aes.c |
72 | @@ -XXX,XX +XXX,XX @@ | 72 | @@ -XXX,XX +XXX,XX @@ |
73 | +/* | 73 | +/* |
74 | + * QEMU Apple AES device emulation | 74 | + * QEMU Apple AES device emulation |
75 | + * | 75 | + * |
76 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | 76 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
77 | + * | 77 | + * |
78 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | 78 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. |
79 | + * See the COPYING file in the top-level directory. | 79 | + * See the COPYING file in the top-level directory. |
80 | + * | 80 | + * |
81 | + * SPDX-License-Identifier: GPL-2.0-or-later | 81 | + * SPDX-License-Identifier: GPL-2.0-or-later |
82 | + */ | 82 | + */ |
83 | + | 83 | + |
84 | +#include "qemu/osdep.h" | 84 | +#include "qemu/osdep.h" |
85 | +#include "trace.h" | 85 | +#include "trace.h" |
86 | +#include "crypto/hash.h" | 86 | +#include "crypto/hash.h" |
87 | +#include "crypto/aes.h" | 87 | +#include "crypto/aes.h" |
88 | +#include "crypto/cipher.h" | 88 | +#include "crypto/cipher.h" |
89 | +#include "hw/irq.h" | 89 | +#include "hw/irq.h" |
90 | +#include "hw/sysbus.h" | 90 | +#include "hw/sysbus.h" |
91 | +#include "hw/vmapple/vmapple.h" | 91 | +#include "hw/vmapple/vmapple.h" |
92 | +#include "migration/vmstate.h" | 92 | +#include "migration/vmstate.h" |
93 | +#include "qemu/cutils.h" | 93 | +#include "qemu/cutils.h" |
94 | +#include "qemu/log.h" | 94 | +#include "qemu/log.h" |
95 | +#include "qemu/module.h" | 95 | +#include "qemu/module.h" |
96 | +#include "sysemu/dma.h" | 96 | +#include "sysemu/dma.h" |
97 | + | 97 | + |
98 | +OBJECT_DECLARE_SIMPLE_TYPE(AESState, APPLE_AES) | 98 | +OBJECT_DECLARE_SIMPLE_TYPE(AESState, APPLE_AES) |
99 | + | 99 | + |
100 | +#define MAX_FIFO_SIZE 9 | 100 | +#define MAX_FIFO_SIZE 9 |
101 | + | 101 | + |
102 | +#define CMD_KEY 0x1 | 102 | +#define CMD_KEY 0x1 |
103 | +#define CMD_KEY_CONTEXT_SHIFT 27 | 103 | +#define CMD_KEY_CONTEXT_SHIFT 27 |
104 | +#define CMD_KEY_CONTEXT_MASK (0x1 << CMD_KEY_CONTEXT_SHIFT) | 104 | +#define CMD_KEY_CONTEXT_MASK (0x1 << CMD_KEY_CONTEXT_SHIFT) |
105 | +#define CMD_KEY_SELECT_MAX_IDX 0x7 | 105 | +#define CMD_KEY_SELECT_MAX_IDX 0x7 |
106 | +#define CMD_KEY_SELECT_SHIFT 24 | 106 | +#define CMD_KEY_SELECT_SHIFT 24 |
107 | +#define CMD_KEY_SELECT_MASK (CMD_KEY_SELECT_MAX_IDX << CMD_KEY_SELECT_SHIFT) | 107 | +#define CMD_KEY_SELECT_MASK (CMD_KEY_SELECT_MAX_IDX << CMD_KEY_SELECT_SHIFT) |
108 | +#define CMD_KEY_KEY_LEN_NUM 4u | 108 | +#define CMD_KEY_KEY_LEN_NUM 4u |
109 | +#define CMD_KEY_KEY_LEN_SHIFT 22 | 109 | +#define CMD_KEY_KEY_LEN_SHIFT 22 |
110 | +#define CMD_KEY_KEY_LEN_MASK ((CMD_KEY_KEY_LEN_NUM - 1u) << CMD_KEY_KEY_LEN_SHIFT) | 110 | +#define CMD_KEY_KEY_LEN_MASK ((CMD_KEY_KEY_LEN_NUM - 1u) << CMD_KEY_KEY_LEN_SHIFT) |
111 | +#define CMD_KEY_ENCRYPT_SHIFT 20 | 111 | +#define CMD_KEY_ENCRYPT_SHIFT 20 |
112 | +#define CMD_KEY_ENCRYPT_MASK (0x1 << CMD_KEY_ENCRYPT_SHIFT) | 112 | +#define CMD_KEY_ENCRYPT_MASK (0x1 << CMD_KEY_ENCRYPT_SHIFT) |
113 | +#define CMD_KEY_BLOCK_MODE_SHIFT 16 | 113 | +#define CMD_KEY_BLOCK_MODE_SHIFT 16 |
114 | +#define CMD_KEY_BLOCK_MODE_MASK (0x3 << CMD_KEY_BLOCK_MODE_SHIFT) | 114 | +#define CMD_KEY_BLOCK_MODE_MASK (0x3 << CMD_KEY_BLOCK_MODE_SHIFT) |
115 | +#define CMD_IV 0x2 | 115 | +#define CMD_IV 0x2 |
116 | +#define CMD_IV_CONTEXT_SHIFT 26 | 116 | +#define CMD_IV_CONTEXT_SHIFT 26 |
117 | +#define CMD_IV_CONTEXT_MASK (0x3 << CMD_KEY_CONTEXT_SHIFT) | 117 | +#define CMD_IV_CONTEXT_MASK (0x3 << CMD_KEY_CONTEXT_SHIFT) |
118 | +#define CMD_DSB 0x3 | 118 | +#define CMD_DSB 0x3 |
119 | +#define CMD_SKG 0x4 | 119 | +#define CMD_SKG 0x4 |
120 | +#define CMD_DATA 0x5 | 120 | +#define CMD_DATA 0x5 |
121 | +#define CMD_DATA_KEY_CTX_SHIFT 27 | 121 | +#define CMD_DATA_KEY_CTX_SHIFT 27 |
122 | +#define CMD_DATA_KEY_CTX_MASK (0x1 << CMD_DATA_KEY_CTX_SHIFT) | 122 | +#define CMD_DATA_KEY_CTX_MASK (0x1 << CMD_DATA_KEY_CTX_SHIFT) |
123 | +#define CMD_DATA_IV_CTX_SHIFT 25 | 123 | +#define CMD_DATA_IV_CTX_SHIFT 25 |
124 | +#define CMD_DATA_IV_CTX_MASK (0x3 << CMD_DATA_IV_CTX_SHIFT) | 124 | +#define CMD_DATA_IV_CTX_MASK (0x3 << CMD_DATA_IV_CTX_SHIFT) |
125 | +#define CMD_DATA_LEN_MASK 0xffffff | 125 | +#define CMD_DATA_LEN_MASK 0xffffff |
126 | +#define CMD_STORE_IV 0x6 | 126 | +#define CMD_STORE_IV 0x6 |
127 | +#define CMD_STORE_IV_ADDR_MASK 0xffffff | 127 | +#define CMD_STORE_IV_ADDR_MASK 0xffffff |
128 | +#define CMD_WRITE_REG 0x7 | 128 | +#define CMD_WRITE_REG 0x7 |
129 | +#define CMD_FLAG 0x8 | 129 | +#define CMD_FLAG 0x8 |
130 | +#define CMD_FLAG_STOP_MASK BIT(26) | 130 | +#define CMD_FLAG_STOP_MASK BIT(26) |
131 | +#define CMD_FLAG_RAISE_IRQ_MASK BIT(27) | 131 | +#define CMD_FLAG_RAISE_IRQ_MASK BIT(27) |
132 | +#define CMD_FLAG_INFO_MASK 0xff | 132 | +#define CMD_FLAG_INFO_MASK 0xff |
133 | +#define CMD_MAX 0x10 | 133 | +#define CMD_MAX 0x10 |
134 | + | 134 | + |
135 | +#define CMD_SHIFT 28 | 135 | +#define CMD_SHIFT 28 |
136 | + | 136 | + |
137 | +#define REG_STATUS 0xc | 137 | +#define REG_STATUS 0xc |
138 | +#define REG_STATUS_DMA_READ_RUNNING BIT(0) | 138 | +#define REG_STATUS_DMA_READ_RUNNING BIT(0) |
139 | +#define REG_STATUS_DMA_READ_PENDING BIT(1) | 139 | +#define REG_STATUS_DMA_READ_PENDING BIT(1) |
140 | +#define REG_STATUS_DMA_WRITE_RUNNING BIT(2) | 140 | +#define REG_STATUS_DMA_WRITE_RUNNING BIT(2) |
141 | +#define REG_STATUS_DMA_WRITE_PENDING BIT(3) | 141 | +#define REG_STATUS_DMA_WRITE_PENDING BIT(3) |
142 | +#define REG_STATUS_BUSY BIT(4) | 142 | +#define REG_STATUS_BUSY BIT(4) |
143 | +#define REG_STATUS_EXECUTING BIT(5) | 143 | +#define REG_STATUS_EXECUTING BIT(5) |
144 | +#define REG_STATUS_READY BIT(6) | 144 | +#define REG_STATUS_READY BIT(6) |
145 | +#define REG_STATUS_TEXT_DPA_SEEDED BIT(7) | 145 | +#define REG_STATUS_TEXT_DPA_SEEDED BIT(7) |
146 | +#define REG_STATUS_UNWRAP_DPA_SEEDED BIT(8) | 146 | +#define REG_STATUS_UNWRAP_DPA_SEEDED BIT(8) |
147 | + | 147 | + |
148 | +#define REG_IRQ_STATUS 0x18 | 148 | +#define REG_IRQ_STATUS 0x18 |
149 | +#define REG_IRQ_STATUS_INVALID_CMD BIT(2) | 149 | +#define REG_IRQ_STATUS_INVALID_CMD BIT(2) |
150 | +#define REG_IRQ_STATUS_FLAG BIT(5) | 150 | +#define REG_IRQ_STATUS_FLAG BIT(5) |
151 | +#define REG_IRQ_ENABLE 0x1c | 151 | +#define REG_IRQ_ENABLE 0x1c |
152 | +#define REG_WATERMARK 0x20 | 152 | +#define REG_WATERMARK 0x20 |
153 | +#define REG_Q_STATUS 0x24 | 153 | +#define REG_Q_STATUS 0x24 |
154 | +#define REG_FLAG_INFO 0x30 | 154 | +#define REG_FLAG_INFO 0x30 |
155 | +#define REG_FIFO 0x200 | 155 | +#define REG_FIFO 0x200 |
156 | + | 156 | + |
157 | +static const uint32_t key_lens[CMD_KEY_KEY_LEN_NUM] = { | 157 | +static const uint32_t key_lens[CMD_KEY_KEY_LEN_NUM] = { |
158 | + [0] = 16, | 158 | + [0] = 16, |
159 | + [1] = 24, | 159 | + [1] = 24, |
160 | + [2] = 32, | 160 | + [2] = 32, |
161 | + [3] = 64, | 161 | + [3] = 64, |
162 | +}; | 162 | +}; |
163 | + | 163 | + |
164 | +typedef struct Key { | 164 | +typedef struct Key { |
165 | + uint32_t key_len; | 165 | + uint32_t key_len; |
166 | + uint8_t key[32]; | 166 | + uint8_t key[32]; |
167 | +} Key; | 167 | +} Key; |
168 | + | 168 | + |
169 | +typedef struct IV { | 169 | +typedef struct IV { |
170 | + uint32_t iv[4]; | 170 | + uint32_t iv[4]; |
171 | +} IV; | 171 | +} IV; |
172 | + | 172 | + |
173 | +static Key builtin_keys[CMD_KEY_SELECT_MAX_IDX + 1] = { | 173 | +static Key builtin_keys[CMD_KEY_SELECT_MAX_IDX + 1] = { |
174 | + [1] = { | 174 | + [1] = { |
175 | + .key_len = 32, | 175 | + .key_len = 32, |
176 | + .key = { 0x1 }, | 176 | + .key = { 0x1 }, |
177 | + }, | 177 | + }, |
178 | + [2] = { | 178 | + [2] = { |
179 | + .key_len = 32, | 179 | + .key_len = 32, |
180 | + .key = { 0x2 }, | 180 | + .key = { 0x2 }, |
181 | + }, | 181 | + }, |
182 | + [3] = { | 182 | + [3] = { |
183 | + .key_len = 32, | 183 | + .key_len = 32, |
184 | + .key = { 0x3 }, | 184 | + .key = { 0x3 }, |
185 | + } | 185 | + } |
186 | +}; | 186 | +}; |
187 | + | 187 | + |
188 | +struct AESState { | 188 | +struct AESState { |
189 | + SysBusDevice parent_obj; | 189 | + SysBusDevice parent_obj; |
190 | + | 190 | + |
191 | + qemu_irq irq; | 191 | + qemu_irq irq; |
192 | + MemoryRegion iomem1; | 192 | + MemoryRegion iomem1; |
193 | + MemoryRegion iomem2; | 193 | + MemoryRegion iomem2; |
194 | + AddressSpace *as; | 194 | + AddressSpace *as; |
195 | + | 195 | + |
196 | + uint32_t status; | 196 | + uint32_t status; |
197 | + uint32_t q_status; | 197 | + uint32_t q_status; |
198 | + uint32_t irq_status; | 198 | + uint32_t irq_status; |
199 | + uint32_t irq_enable; | 199 | + uint32_t irq_enable; |
200 | + uint32_t watermark; | 200 | + uint32_t watermark; |
201 | + uint32_t flag_info; | 201 | + uint32_t flag_info; |
202 | + uint32_t fifo[MAX_FIFO_SIZE]; | 202 | + uint32_t fifo[MAX_FIFO_SIZE]; |
203 | + uint32_t fifo_idx; | 203 | + uint32_t fifo_idx; |
204 | + Key key[2]; | 204 | + Key key[2]; |
205 | + IV iv[4]; | 205 | + IV iv[4]; |
206 | + bool is_encrypt; | 206 | + bool is_encrypt; |
207 | + QCryptoCipherMode block_mode; | 207 | + QCryptoCipherMode block_mode; |
208 | +}; | 208 | +}; |
209 | + | 209 | + |
210 | +static void aes_update_irq(AESState *s) | 210 | +static void aes_update_irq(AESState *s) |
211 | +{ | 211 | +{ |
212 | + qemu_set_irq(s->irq, !!(s->irq_status & s->irq_enable)); | 212 | + qemu_set_irq(s->irq, !!(s->irq_status & s->irq_enable)); |
213 | +} | 213 | +} |
214 | + | 214 | + |
215 | +static uint64_t aes1_read(void *opaque, hwaddr offset, unsigned size) | 215 | +static uint64_t aes1_read(void *opaque, hwaddr offset, unsigned size) |
216 | +{ | 216 | +{ |
217 | + AESState *s = opaque; | 217 | + AESState *s = opaque; |
218 | + uint64_t res = 0; | 218 | + uint64_t res = 0; |
219 | + | 219 | + |
220 | + switch (offset) { | 220 | + switch (offset) { |
221 | + case REG_STATUS: | 221 | + case REG_STATUS: |
222 | + res = s->status; | 222 | + res = s->status; |
223 | + break; | 223 | + break; |
224 | + case REG_IRQ_STATUS: | 224 | + case REG_IRQ_STATUS: |
225 | + res = s->irq_status; | 225 | + res = s->irq_status; |
226 | + break; | 226 | + break; |
227 | + case REG_IRQ_ENABLE: | 227 | + case REG_IRQ_ENABLE: |
228 | + res = s->irq_enable; | 228 | + res = s->irq_enable; |
229 | + break; | 229 | + break; |
230 | + case REG_WATERMARK: | 230 | + case REG_WATERMARK: |
231 | + res = s->watermark; | 231 | + res = s->watermark; |
232 | + break; | 232 | + break; |
233 | + case REG_Q_STATUS: | 233 | + case REG_Q_STATUS: |
234 | + res = s->q_status; | 234 | + res = s->q_status; |
235 | + break; | 235 | + break; |
236 | + case REG_FLAG_INFO: | 236 | + case REG_FLAG_INFO: |
237 | + res = s->flag_info; | 237 | + res = s->flag_info; |
238 | + break; | 238 | + break; |
239 | + | 239 | + |
240 | + default: | 240 | + default: |
241 | + qemu_log_mask(LOG_UNIMP, "%s: Unknown AES MMIO offset %" PRIx64 "\n", | 241 | + qemu_log_mask(LOG_UNIMP, "%s: Unknown AES MMIO offset %" PRIx64 "\n", |
242 | + __func__, offset); | 242 | + __func__, offset); |
243 | + break; | 243 | + break; |
244 | + } | 244 | + } |
245 | + | 245 | + |
246 | + trace_aes_read(offset, res); | 246 | + trace_aes_read(offset, res); |
247 | + | 247 | + |
248 | + return res; | 248 | + return res; |
249 | +} | 249 | +} |
250 | + | 250 | + |
251 | +static void fifo_append(AESState *s, uint64_t val) | 251 | +static void fifo_append(AESState *s, uint64_t val) |
252 | +{ | 252 | +{ |
253 | + if (s->fifo_idx == MAX_FIFO_SIZE) { | 253 | + if (s->fifo_idx == MAX_FIFO_SIZE) { |
254 | + /* Exceeded the FIFO. Bail out */ | 254 | + /* Exceeded the FIFO. Bail out */ |
255 | + return; | 255 | + return; |
256 | + } | 256 | + } |
257 | + | 257 | + |
258 | + s->fifo[s->fifo_idx++] = val; | 258 | + s->fifo[s->fifo_idx++] = val; |
259 | +} | 259 | +} |
260 | + | 260 | + |
261 | +static bool has_payload(AESState *s, uint32_t elems) | 261 | +static bool has_payload(AESState *s, uint32_t elems) |
262 | +{ | 262 | +{ |
263 | + return s->fifo_idx >= elems + 1; | 263 | + return s->fifo_idx >= elems + 1; |
264 | +} | 264 | +} |
265 | + | 265 | + |
266 | +static bool cmd_key(AESState *s) | 266 | +static bool cmd_key(AESState *s) |
267 | +{ | 267 | +{ |
268 | + uint32_t cmd = s->fifo[0]; | 268 | + uint32_t cmd = s->fifo[0]; |
269 | + uint32_t key_select = (cmd & CMD_KEY_SELECT_MASK) >> CMD_KEY_SELECT_SHIFT; | 269 | + uint32_t key_select = (cmd & CMD_KEY_SELECT_MASK) >> CMD_KEY_SELECT_SHIFT; |
270 | + uint32_t ctxt = (cmd & CMD_KEY_CONTEXT_MASK) >> CMD_KEY_CONTEXT_SHIFT; | 270 | + uint32_t ctxt = (cmd & CMD_KEY_CONTEXT_MASK) >> CMD_KEY_CONTEXT_SHIFT; |
271 | + uint32_t key_len; | 271 | + uint32_t key_len; |
272 | + | 272 | + |
273 | + switch ((cmd & CMD_KEY_BLOCK_MODE_MASK) >> CMD_KEY_BLOCK_MODE_SHIFT) { | 273 | + switch ((cmd & CMD_KEY_BLOCK_MODE_MASK) >> CMD_KEY_BLOCK_MODE_SHIFT) { |
274 | + case 0: | 274 | + case 0: |
275 | + s->block_mode = QCRYPTO_CIPHER_MODE_ECB; | 275 | + s->block_mode = QCRYPTO_CIPHER_MODE_ECB; |
276 | + break; | 276 | + break; |
277 | + case 1: | 277 | + case 1: |
278 | + s->block_mode = QCRYPTO_CIPHER_MODE_CBC; | 278 | + s->block_mode = QCRYPTO_CIPHER_MODE_CBC; |
279 | + break; | 279 | + break; |
280 | + default: | 280 | + default: |
281 | + return false; | 281 | + return false; |
282 | + } | 282 | + } |
283 | + | 283 | + |
284 | + s->is_encrypt = cmd & CMD_KEY_ENCRYPT_MASK; | 284 | + s->is_encrypt = cmd & CMD_KEY_ENCRYPT_MASK; |
285 | + key_len = key_lens[(cmd & CMD_KEY_KEY_LEN_MASK) >> CMD_KEY_KEY_LEN_SHIFT]; | 285 | + key_len = key_lens[(cmd & CMD_KEY_KEY_LEN_MASK) >> CMD_KEY_KEY_LEN_SHIFT]; |
286 | + | 286 | + |
287 | + if (key_select) { | 287 | + if (key_select) { |
288 | + trace_aes_cmd_key_select_builtin(ctxt, key_select, | 288 | + trace_aes_cmd_key_select_builtin(ctxt, key_select, |
289 | + s->is_encrypt ? "en" : "de", | 289 | + s->is_encrypt ? "en" : "de", |
290 | + QCryptoCipherMode_str(s->block_mode)); | 290 | + QCryptoCipherMode_str(s->block_mode)); |
291 | + s->key[ctxt] = builtin_keys[key_select]; | 291 | + s->key[ctxt] = builtin_keys[key_select]; |
292 | + } else { | 292 | + } else { |
293 | + trace_aes_cmd_key_select_new(ctxt, key_len, | 293 | + trace_aes_cmd_key_select_new(ctxt, key_len, |
294 | + s->is_encrypt ? "en" : "de", | 294 | + s->is_encrypt ? "en" : "de", |
295 | + QCryptoCipherMode_str(s->block_mode)); | 295 | + QCryptoCipherMode_str(s->block_mode)); |
296 | + if (key_len > sizeof(s->key[ctxt].key)) { | 296 | + if (key_len > sizeof(s->key[ctxt].key)) { |
297 | + return false; | 297 | + return false; |
298 | + } | 298 | + } |
299 | + if (!has_payload(s, key_len / sizeof(uint32_t))) { | 299 | + if (!has_payload(s, key_len / sizeof(uint32_t))) { |
300 | + /* wait for payload */ | 300 | + /* wait for payload */ |
301 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); | 301 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); |
302 | + return false; | 302 | + return false; |
303 | + } | 303 | + } |
304 | + memcpy(&s->key[ctxt].key, &s->fifo[1], key_len); | 304 | + memcpy(&s->key[ctxt].key, &s->fifo[1], key_len); |
305 | + s->key[ctxt].key_len = key_len; | 305 | + s->key[ctxt].key_len = key_len; |
306 | + } | 306 | + } |
307 | + | 307 | + |
308 | + return true; | 308 | + return true; |
309 | +} | 309 | +} |
310 | + | 310 | + |
311 | +static bool cmd_iv(AESState *s) | 311 | +static bool cmd_iv(AESState *s) |
312 | +{ | 312 | +{ |
313 | + uint32_t cmd = s->fifo[0]; | 313 | + uint32_t cmd = s->fifo[0]; |
314 | + uint32_t ctxt = (cmd & CMD_IV_CONTEXT_MASK) >> CMD_IV_CONTEXT_SHIFT; | 314 | + uint32_t ctxt = (cmd & CMD_IV_CONTEXT_MASK) >> CMD_IV_CONTEXT_SHIFT; |
315 | + | 315 | + |
316 | + if (!has_payload(s, 4)) { | 316 | + if (!has_payload(s, 4)) { |
317 | + /* wait for payload */ | 317 | + /* wait for payload */ |
318 | + return false; | 318 | + return false; |
319 | + } | 319 | + } |
320 | + memcpy(&s->iv[ctxt].iv, &s->fifo[1], sizeof(s->iv[ctxt].iv)); | 320 | + memcpy(&s->iv[ctxt].iv, &s->fifo[1], sizeof(s->iv[ctxt].iv)); |
321 | + trace_aes_cmd_iv(ctxt, s->fifo[1], s->fifo[2], s->fifo[3], s->fifo[4]); | 321 | + trace_aes_cmd_iv(ctxt, s->fifo[1], s->fifo[2], s->fifo[3], s->fifo[4]); |
322 | + | 322 | + |
323 | + return true; | 323 | + return true; |
324 | +} | 324 | +} |
325 | + | 325 | + |
326 | +static void dump_data(const char *desc, const void *p, size_t len) | 326 | +static void dump_data(const char *desc, const void *p, size_t len) |
327 | +{ | 327 | +{ |
328 | + static const size_t MAX_LEN = 0x1000; | 328 | + static const size_t MAX_LEN = 0x1000; |
329 | + char hex[MAX_LEN * 2 + 1] = ""; | 329 | + char hex[MAX_LEN * 2 + 1] = ""; |
330 | + | 330 | + |
331 | + if (len > MAX_LEN) { | 331 | + if (len > MAX_LEN) { |
332 | + return; | 332 | + return; |
333 | + } | 333 | + } |
334 | + | 334 | + |
335 | + qemu_hexdump_to_buffer(hex, sizeof(hex), p, len); | 335 | + qemu_hexdump_to_buffer(hex, sizeof(hex), p, len); |
336 | + trace_aes_dump_data(desc, hex); | 336 | + trace_aes_dump_data(desc, hex); |
337 | +} | 337 | +} |
338 | + | 338 | + |
339 | +static bool cmd_data(AESState *s) | 339 | +static bool cmd_data(AESState *s) |
340 | +{ | 340 | +{ |
341 | + uint32_t cmd = s->fifo[0]; | 341 | + uint32_t cmd = s->fifo[0]; |
342 | + uint32_t ctxt_iv = 0; | 342 | + uint32_t ctxt_iv = 0; |
343 | + uint32_t ctxt_key = (cmd & CMD_DATA_KEY_CTX_MASK) >> CMD_DATA_KEY_CTX_SHIFT; | 343 | + uint32_t ctxt_key = (cmd & CMD_DATA_KEY_CTX_MASK) >> CMD_DATA_KEY_CTX_SHIFT; |
344 | + uint32_t len = cmd & CMD_DATA_LEN_MASK; | 344 | + uint32_t len = cmd & CMD_DATA_LEN_MASK; |
345 | + uint64_t src_addr = s->fifo[2]; | 345 | + uint64_t src_addr = s->fifo[2]; |
346 | + uint64_t dst_addr = s->fifo[3]; | 346 | + uint64_t dst_addr = s->fifo[3]; |
347 | + QCryptoCipherAlgo alg; | 347 | + QCryptoCipherAlgo alg; |
348 | + g_autoptr(QCryptoCipher) cipher = NULL; | 348 | + g_autoptr(QCryptoCipher) cipher = NULL; |
349 | + g_autoptr(GByteArray) src = NULL; | 349 | + g_autoptr(GByteArray) src = NULL; |
350 | + g_autoptr(GByteArray) dst = NULL; | 350 | + g_autoptr(GByteArray) dst = NULL; |
351 | + MemTxResult r; | 351 | + MemTxResult r; |
352 | + | 352 | + |
353 | + src_addr |= ((uint64_t)s->fifo[1] << 16) & 0xffff00000000ULL; | 353 | + src_addr |= ((uint64_t)s->fifo[1] << 16) & 0xffff00000000ULL; |
354 | + dst_addr |= ((uint64_t)s->fifo[1] << 32) & 0xffff00000000ULL; | 354 | + dst_addr |= ((uint64_t)s->fifo[1] << 32) & 0xffff00000000ULL; |
355 | + | 355 | + |
356 | + trace_aes_cmd_data(ctxt_key, ctxt_iv, src_addr, dst_addr, len); | 356 | + trace_aes_cmd_data(ctxt_key, ctxt_iv, src_addr, dst_addr, len); |
357 | + | 357 | + |
358 | + if (!has_payload(s, 3)) { | 358 | + if (!has_payload(s, 3)) { |
359 | + /* wait for payload */ | 359 | + /* wait for payload */ |
360 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); | 360 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); |
361 | + return false; | 361 | + return false; |
362 | + } | 362 | + } |
363 | + | 363 | + |
364 | + if (ctxt_key >= ARRAY_SIZE(s->key) || | 364 | + if (ctxt_key >= ARRAY_SIZE(s->key) || |
365 | + ctxt_iv >= ARRAY_SIZE(s->iv)) { | 365 | + ctxt_iv >= ARRAY_SIZE(s->iv)) { |
366 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid key or iv\n", __func__); | 366 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid key or iv\n", __func__); |
367 | + return false; | 367 | + return false; |
368 | + } | 368 | + } |
369 | + | 369 | + |
370 | + src = g_byte_array_sized_new(len); | 370 | + src = g_byte_array_sized_new(len); |
371 | + g_byte_array_set_size(src, len); | 371 | + g_byte_array_set_size(src, len); |
372 | + dst = g_byte_array_sized_new(len); | 372 | + dst = g_byte_array_sized_new(len); |
373 | + g_byte_array_set_size(dst, len); | 373 | + g_byte_array_set_size(dst, len); |
374 | + | 374 | + |
375 | + r = dma_memory_read(s->as, src_addr, src->data, len, MEMTXATTRS_UNSPECIFIED); | 375 | + r = dma_memory_read(s->as, src_addr, src->data, len, MEMTXATTRS_UNSPECIFIED); |
376 | + if (r != MEMTX_OK) { | 376 | + if (r != MEMTX_OK) { |
377 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA read of %"PRIu32" bytes " | 377 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA read of %"PRIu32" bytes " |
378 | + "from 0x%"PRIx64" failed. (r=%d)\n", | 378 | + "from 0x%"PRIx64" failed. (r=%d)\n", |
379 | + __func__, len, src_addr, r); | 379 | + __func__, len, src_addr, r); |
380 | + return false; | 380 | + return false; |
381 | + } | 381 | + } |
382 | + | 382 | + |
383 | + dump_data("cmd_data(): src_data=", src->data, len); | 383 | + dump_data("cmd_data(): src_data=", src->data, len); |
384 | + | 384 | + |
385 | + switch (s->key[ctxt_key].key_len) { | 385 | + switch (s->key[ctxt_key].key_len) { |
386 | + case 128 / 8: | 386 | + case 128 / 8: |
387 | + alg = QCRYPTO_CIPHER_ALGO_AES_128; | 387 | + alg = QCRYPTO_CIPHER_ALGO_AES_128; |
388 | + break; | 388 | + break; |
389 | + case 192 / 8: | 389 | + case 192 / 8: |
390 | + alg = QCRYPTO_CIPHER_ALGO_AES_192; | 390 | + alg = QCRYPTO_CIPHER_ALGO_AES_192; |
391 | + break; | 391 | + break; |
392 | + case 256 / 8: | 392 | + case 256 / 8: |
393 | + alg = QCRYPTO_CIPHER_ALGO_AES_256; | 393 | + alg = QCRYPTO_CIPHER_ALGO_AES_256; |
394 | + break; | 394 | + break; |
395 | + default: | 395 | + default: |
396 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid key length\n", __func__); | 396 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid key length\n", __func__); |
397 | + return false; | 397 | + return false; |
398 | + } | 398 | + } |
399 | + cipher = qcrypto_cipher_new(alg, s->block_mode, | 399 | + cipher = qcrypto_cipher_new(alg, s->block_mode, |
400 | + s->key[ctxt_key].key, | 400 | + s->key[ctxt_key].key, |
401 | + s->key[ctxt_key].key_len, NULL); | 401 | + s->key[ctxt_key].key_len, NULL); |
402 | + if (!cipher) { | 402 | + if (!cipher) { |
403 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to create cipher object\n", | 403 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to create cipher object\n", |
404 | + __func__); | 404 | + __func__); |
405 | + return false; | 405 | + return false; |
406 | + } | 406 | + } |
407 | + if (s->block_mode != QCRYPTO_CIPHER_MODE_ECB) { | 407 | + if (s->block_mode != QCRYPTO_CIPHER_MODE_ECB) { |
408 | + if (qcrypto_cipher_setiv(cipher, (void *)s->iv[ctxt_iv].iv, | 408 | + if (qcrypto_cipher_setiv(cipher, (void *)s->iv[ctxt_iv].iv, |
409 | + sizeof(s->iv[ctxt_iv].iv), NULL) != 0) { | 409 | + sizeof(s->iv[ctxt_iv].iv), NULL) != 0) { |
410 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to set IV\n", __func__); | 410 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to set IV\n", __func__); |
411 | + return false; | 411 | + return false; |
412 | + } | 412 | + } |
413 | + } | 413 | + } |
414 | + if (s->is_encrypt) { | 414 | + if (s->is_encrypt) { |
415 | + if (qcrypto_cipher_encrypt(cipher, src->data, dst->data, len, NULL) != 0) { | 415 | + if (qcrypto_cipher_encrypt(cipher, src->data, dst->data, len, NULL) != 0) { |
416 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Encryption failed\n", __func__); | 416 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Encryption failed\n", __func__); |
417 | + return false; | 417 | + return false; |
418 | + } | 418 | + } |
419 | + } else { | 419 | + } else { |
420 | + if (qcrypto_cipher_decrypt(cipher, src->data, dst->data, len, NULL) != 0) { | 420 | + if (qcrypto_cipher_decrypt(cipher, src->data, dst->data, len, NULL) != 0) { |
421 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Decryption failed\n", __func__); | 421 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Decryption failed\n", __func__); |
422 | + return false; | 422 | + return false; |
423 | + } | 423 | + } |
424 | + } | 424 | + } |
425 | + | 425 | + |
426 | + dump_data("cmd_data(): dst_data=", dst->data, len); | 426 | + dump_data("cmd_data(): dst_data=", dst->data, len); |
427 | + r = dma_memory_write(s->as, dst_addr, dst->data, len, MEMTXATTRS_UNSPECIFIED); | 427 | + r = dma_memory_write(s->as, dst_addr, dst->data, len, MEMTXATTRS_UNSPECIFIED); |
428 | + if (r != MEMTX_OK) { | 428 | + if (r != MEMTX_OK) { |
429 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA write of %"PRIu32" bytes " | 429 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA write of %"PRIu32" bytes " |
430 | + "to 0x%"PRIx64" failed. (r=%d)\n", | 430 | + "to 0x%"PRIx64" failed. (r=%d)\n", |
431 | + __func__, len, dst_addr, r); | 431 | + __func__, len, dst_addr, r); |
432 | + return false; | 432 | + return false; |
433 | + } | 433 | + } |
434 | + | 434 | + |
435 | + return true; | 435 | + return true; |
436 | +} | 436 | +} |
437 | + | 437 | + |
438 | +static bool cmd_store_iv(AESState *s) | 438 | +static bool cmd_store_iv(AESState *s) |
439 | +{ | 439 | +{ |
440 | + uint32_t cmd = s->fifo[0]; | 440 | + uint32_t cmd = s->fifo[0]; |
441 | + uint32_t ctxt = (cmd & CMD_IV_CONTEXT_MASK) >> CMD_IV_CONTEXT_SHIFT; | 441 | + uint32_t ctxt = (cmd & CMD_IV_CONTEXT_MASK) >> CMD_IV_CONTEXT_SHIFT; |
442 | + uint64_t addr = s->fifo[1]; | 442 | + uint64_t addr = s->fifo[1]; |
443 | + MemTxResult dma_result; | 443 | + MemTxResult dma_result; |
444 | + | 444 | + |
445 | + if (!has_payload(s, 1)) { | 445 | + if (!has_payload(s, 1)) { |
446 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); | 446 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); |
447 | + return false; | 447 | + return false; |
448 | + } | 448 | + } |
449 | + | 449 | + |
450 | + if (ctxt >= ARRAY_SIZE(s->iv)) { | 450 | + if (ctxt >= ARRAY_SIZE(s->iv)) { |
451 | + qemu_log_mask(LOG_GUEST_ERROR, | 451 | + qemu_log_mask(LOG_GUEST_ERROR, |
452 | + "%s: Invalid context. ctxt = %u, allowed: 0..%zu\n", | 452 | + "%s: Invalid context. ctxt = %u, allowed: 0..%zu\n", |
453 | + __func__, ctxt, ARRAY_SIZE(s->iv) - 1); | 453 | + __func__, ctxt, ARRAY_SIZE(s->iv) - 1); |
454 | + return false; | 454 | + return false; |
455 | + } | 455 | + } |
456 | + | 456 | + |
457 | + addr |= ((uint64_t)cmd << 32) & 0xff00000000ULL; | 457 | + addr |= ((uint64_t)cmd << 32) & 0xff00000000ULL; |
458 | + dma_result = dma_memory_write(&address_space_memory, addr, | 458 | + dma_result = dma_memory_write(&address_space_memory, addr, |
459 | + &s->iv[ctxt].iv, sizeof(s->iv[ctxt].iv), | 459 | + &s->iv[ctxt].iv, sizeof(s->iv[ctxt].iv), |
460 | + MEMTXATTRS_UNSPECIFIED); | 460 | + MEMTXATTRS_UNSPECIFIED); |
461 | + | 461 | + |
462 | + trace_aes_cmd_store_iv(ctxt, addr, s->iv[ctxt].iv[0], s->iv[ctxt].iv[1], | 462 | + trace_aes_cmd_store_iv(ctxt, addr, s->iv[ctxt].iv[0], s->iv[ctxt].iv[1], |
463 | + s->iv[ctxt].iv[2], s->iv[ctxt].iv[3]); | 463 | + s->iv[ctxt].iv[2], s->iv[ctxt].iv[3]); |
464 | + | 464 | + |
465 | + return dma_result == MEMTX_OK; | 465 | + return dma_result == MEMTX_OK; |
466 | +} | 466 | +} |
467 | + | 467 | + |
468 | +static bool cmd_flag(AESState *s) | 468 | +static bool cmd_flag(AESState *s) |
469 | +{ | 469 | +{ |
470 | + uint32_t cmd = s->fifo[0]; | 470 | + uint32_t cmd = s->fifo[0]; |
471 | + uint32_t raise_irq = cmd & CMD_FLAG_RAISE_IRQ_MASK; | 471 | + uint32_t raise_irq = cmd & CMD_FLAG_RAISE_IRQ_MASK; |
472 | + | 472 | + |
473 | + /* We always process data when it's coming in, so fire an IRQ immediately */ | 473 | + /* We always process data when it's coming in, so fire an IRQ immediately */ |
474 | + if (raise_irq) { | 474 | + if (raise_irq) { |
475 | + s->irq_status |= REG_IRQ_STATUS_FLAG; | 475 | + s->irq_status |= REG_IRQ_STATUS_FLAG; |
476 | + } | 476 | + } |
477 | + | 477 | + |
478 | + s->flag_info = cmd & CMD_FLAG_INFO_MASK; | 478 | + s->flag_info = cmd & CMD_FLAG_INFO_MASK; |
479 | + | 479 | + |
480 | + trace_aes_cmd_flag(!!raise_irq, s->flag_info); | 480 | + trace_aes_cmd_flag(!!raise_irq, s->flag_info); |
481 | + | 481 | + |
482 | + return true; | 482 | + return true; |
483 | +} | 483 | +} |
484 | + | 484 | + |
485 | +static void fifo_process(AESState *s) | 485 | +static void fifo_process(AESState *s) |
486 | +{ | 486 | +{ |
487 | + uint32_t cmd = s->fifo[0] >> CMD_SHIFT; | 487 | + uint32_t cmd = s->fifo[0] >> CMD_SHIFT; |
488 | + bool success = false; | 488 | + bool success = false; |
489 | + | 489 | + |
490 | + if (!s->fifo_idx) { | 490 | + if (!s->fifo_idx) { |
491 | + return; | 491 | + return; |
492 | + } | 492 | + } |
493 | + | 493 | + |
494 | + switch (cmd) { | 494 | + switch (cmd) { |
495 | + case CMD_KEY: | 495 | + case CMD_KEY: |
496 | + success = cmd_key(s); | 496 | + success = cmd_key(s); |
497 | + break; | 497 | + break; |
498 | + case CMD_IV: | 498 | + case CMD_IV: |
499 | + success = cmd_iv(s); | 499 | + success = cmd_iv(s); |
500 | + break; | 500 | + break; |
501 | + case CMD_DATA: | 501 | + case CMD_DATA: |
502 | + success = cmd_data(s); | 502 | + success = cmd_data(s); |
503 | + break; | 503 | + break; |
504 | + case CMD_STORE_IV: | 504 | + case CMD_STORE_IV: |
505 | + success = cmd_store_iv(s); | 505 | + success = cmd_store_iv(s); |
506 | + break; | 506 | + break; |
507 | + case CMD_FLAG: | 507 | + case CMD_FLAG: |
508 | + success = cmd_flag(s); | 508 | + success = cmd_flag(s); |
509 | + break; | 509 | + break; |
510 | + default: | 510 | + default: |
511 | + s->irq_status |= REG_IRQ_STATUS_INVALID_CMD; | 511 | + s->irq_status |= REG_IRQ_STATUS_INVALID_CMD; |
512 | + break; | 512 | + break; |
513 | + } | 513 | + } |
514 | + | 514 | + |
515 | + if (success) { | 515 | + if (success) { |
516 | + s->fifo_idx = 0; | 516 | + s->fifo_idx = 0; |
517 | + } | 517 | + } |
518 | + | 518 | + |
519 | + trace_aes_fifo_process(cmd, success); | 519 | + trace_aes_fifo_process(cmd, success); |
520 | +} | 520 | +} |
521 | + | 521 | + |
522 | +static void aes1_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) | 522 | +static void aes1_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) |
523 | +{ | 523 | +{ |
524 | + AESState *s = opaque; | 524 | + AESState *s = opaque; |
525 | + | 525 | + |
526 | + trace_aes_write(offset, val); | 526 | + trace_aes_write(offset, val); |
527 | + | 527 | + |
528 | + switch (offset) { | 528 | + switch (offset) { |
529 | + case REG_IRQ_STATUS: | 529 | + case REG_IRQ_STATUS: |
530 | + s->irq_status &= ~val; | 530 | + s->irq_status &= ~val; |
531 | + break; | 531 | + break; |
532 | + case REG_IRQ_ENABLE: | 532 | + case REG_IRQ_ENABLE: |
533 | + s->irq_enable = val; | 533 | + s->irq_enable = val; |
534 | + break; | 534 | + break; |
535 | + case REG_FIFO: | 535 | + case REG_FIFO: |
536 | + fifo_append(s, val); | 536 | + fifo_append(s, val); |
537 | + fifo_process(s); | 537 | + fifo_process(s); |
538 | + break; | 538 | + break; |
539 | + default: | 539 | + default: |
540 | + qemu_log_mask(LOG_UNIMP, | 540 | + qemu_log_mask(LOG_UNIMP, |
541 | + "%s: Unknown AES MMIO offset %"PRIx64", data %"PRIx64"\n", | 541 | + "%s: Unknown AES MMIO offset %"PRIx64", data %"PRIx64"\n", |
542 | + __func__, offset, val); | 542 | + __func__, offset, val); |
543 | + return; | 543 | + return; |
544 | + } | 544 | + } |
545 | + | 545 | + |
546 | + aes_update_irq(s); | 546 | + aes_update_irq(s); |
547 | +} | 547 | +} |
548 | + | 548 | + |
549 | +static const MemoryRegionOps aes1_ops = { | 549 | +static const MemoryRegionOps aes1_ops = { |
550 | + .read = aes1_read, | 550 | + .read = aes1_read, |
551 | + .write = aes1_write, | 551 | + .write = aes1_write, |
552 | + .endianness = DEVICE_NATIVE_ENDIAN, | 552 | + .endianness = DEVICE_NATIVE_ENDIAN, |
553 | + .valid = { | 553 | + .valid = { |
554 | + .min_access_size = 4, | 554 | + .min_access_size = 4, |
555 | + .max_access_size = 8, | 555 | + .max_access_size = 8, |
556 | + }, | 556 | + }, |
557 | + .impl = { | 557 | + .impl = { |
558 | + .min_access_size = 4, | 558 | + .min_access_size = 4, |
559 | + .max_access_size = 4, | 559 | + .max_access_size = 4, |
560 | + }, | 560 | + }, |
561 | +}; | 561 | +}; |
562 | + | 562 | + |
563 | +static uint64_t aes2_read(void *opaque, hwaddr offset, unsigned size) | 563 | +static uint64_t aes2_read(void *opaque, hwaddr offset, unsigned size) |
564 | +{ | 564 | +{ |
565 | + uint64_t res = 0; | 565 | + uint64_t res = 0; |
566 | + | 566 | + |
567 | + switch (offset) { | 567 | + switch (offset) { |
568 | + case 0: | 568 | + case 0: |
569 | + res = 0; | 569 | + res = 0; |
570 | + break; | 570 | + break; |
571 | + default: | 571 | + default: |
572 | + qemu_log_mask(LOG_UNIMP, | 572 | + qemu_log_mask(LOG_UNIMP, |
573 | + "%s: Unknown AES MMIO 2 offset %"PRIx64"\n", | 573 | + "%s: Unknown AES MMIO 2 offset %"PRIx64"\n", |
574 | + __func__, offset); | 574 | + __func__, offset); |
575 | + break; | 575 | + break; |
576 | + } | 576 | + } |
577 | + | 577 | + |
578 | + trace_aes_2_read(offset, res); | 578 | + trace_aes_2_read(offset, res); |
579 | + | 579 | + |
580 | + return res; | 580 | + return res; |
581 | +} | 581 | +} |
582 | + | 582 | + |
583 | +static void aes2_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) | 583 | +static void aes2_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) |
584 | +{ | 584 | +{ |
585 | + trace_aes_2_write(offset, val); | 585 | + trace_aes_2_write(offset, val); |
586 | + | 586 | + |
587 | + switch (offset) { | 587 | + switch (offset) { |
588 | + default: | 588 | + default: |
589 | + qemu_log_mask(LOG_UNIMP, | 589 | + qemu_log_mask(LOG_UNIMP, |
590 | + "%s: Unknown AES MMIO 2 offset %"PRIx64", data %"PRIx64"\n", | 590 | + "%s: Unknown AES MMIO 2 offset %"PRIx64", data %"PRIx64"\n", |
591 | + __func__, offset, val); | 591 | + __func__, offset, val); |
592 | + return; | 592 | + return; |
593 | + } | 593 | + } |
594 | +} | 594 | +} |
595 | + | 595 | + |
596 | +static const MemoryRegionOps aes2_ops = { | 596 | +static const MemoryRegionOps aes2_ops = { |
597 | + .read = aes2_read, | 597 | + .read = aes2_read, |
598 | + .write = aes2_write, | 598 | + .write = aes2_write, |
599 | + .endianness = DEVICE_NATIVE_ENDIAN, | 599 | + .endianness = DEVICE_NATIVE_ENDIAN, |
600 | + .valid = { | 600 | + .valid = { |
601 | + .min_access_size = 4, | 601 | + .min_access_size = 4, |
602 | + .max_access_size = 8, | 602 | + .max_access_size = 8, |
603 | + }, | 603 | + }, |
604 | + .impl = { | 604 | + .impl = { |
605 | + .min_access_size = 4, | 605 | + .min_access_size = 4, |
606 | + .max_access_size = 4, | 606 | + .max_access_size = 4, |
607 | + }, | 607 | + }, |
608 | +}; | 608 | +}; |
609 | + | 609 | + |
610 | +static void aes_reset(Object *obj, ResetType type) | 610 | +static void aes_reset(Object *obj, ResetType type) |
611 | +{ | 611 | +{ |
612 | + AESState *s = APPLE_AES(obj); | 612 | + AESState *s = APPLE_AES(obj); |
613 | + | 613 | + |
614 | + s->status = 0x3f80; | 614 | + s->status = 0x3f80; |
615 | + s->q_status = 2; | 615 | + s->q_status = 2; |
616 | + s->irq_status = 0; | 616 | + s->irq_status = 0; |
617 | + s->irq_enable = 0; | 617 | + s->irq_enable = 0; |
618 | + s->watermark = 0; | 618 | + s->watermark = 0; |
619 | +} | 619 | +} |
620 | + | 620 | + |
621 | +static void aes_init(Object *obj) | 621 | +static void aes_init(Object *obj) |
622 | +{ | 622 | +{ |
623 | + AESState *s = APPLE_AES(obj); | 623 | + AESState *s = APPLE_AES(obj); |
624 | + | 624 | + |
625 | + memory_region_init_io(&s->iomem1, obj, &aes1_ops, s, TYPE_APPLE_AES, 0x4000); | 625 | + memory_region_init_io(&s->iomem1, obj, &aes1_ops, s, TYPE_APPLE_AES, 0x4000); |
626 | + memory_region_init_io(&s->iomem2, obj, &aes2_ops, s, TYPE_APPLE_AES, 0x4000); | 626 | + memory_region_init_io(&s->iomem2, obj, &aes2_ops, s, TYPE_APPLE_AES, 0x4000); |
627 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem1); | 627 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem1); |
628 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem2); | 628 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem2); |
629 | + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq); | 629 | + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq); |
630 | + s->as = &address_space_memory; | 630 | + s->as = &address_space_memory; |
631 | +} | 631 | +} |
632 | + | 632 | + |
633 | +static void aes_class_init(ObjectClass *klass, void *data) | 633 | +static void aes_class_init(ObjectClass *klass, void *data) |
634 | +{ | 634 | +{ |
635 | + ResettableClass *rc = RESETTABLE_CLASS(klass); | 635 | + ResettableClass *rc = RESETTABLE_CLASS(klass); |
636 | + | 636 | + |
637 | + rc->phases.hold = aes_reset; | 637 | + rc->phases.hold = aes_reset; |
638 | +} | 638 | +} |
639 | + | 639 | + |
640 | +static const TypeInfo aes_info = { | 640 | +static const TypeInfo aes_info = { |
641 | + .name = TYPE_APPLE_AES, | 641 | + .name = TYPE_APPLE_AES, |
642 | + .parent = TYPE_SYS_BUS_DEVICE, | 642 | + .parent = TYPE_SYS_BUS_DEVICE, |
643 | + .instance_size = sizeof(AESState), | 643 | + .instance_size = sizeof(AESState), |
644 | + .class_init = aes_class_init, | 644 | + .class_init = aes_class_init, |
645 | + .instance_init = aes_init, | 645 | + .instance_init = aes_init, |
646 | +}; | 646 | +}; |
647 | + | 647 | + |
648 | +static void aes_register_types(void) | 648 | +static void aes_register_types(void) |
649 | +{ | 649 | +{ |
650 | + type_register_static(&aes_info); | 650 | + type_register_static(&aes_info); |
651 | +} | 651 | +} |
652 | + | 652 | + |
653 | +type_init(aes_register_types) | 653 | +type_init(aes_register_types) |
654 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build | 654 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build |
655 | index XXXXXXX..XXXXXXX 100644 | 655 | index XXXXXXX..XXXXXXX 100644 |
656 | --- a/hw/vmapple/meson.build | 656 | --- a/hw/vmapple/meson.build |
657 | +++ b/hw/vmapple/meson.build | 657 | +++ b/hw/vmapple/meson.build |
658 | @@ -0,0 +1 @@ | 658 | @@ -0,0 +1 @@ |
659 | +system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) | 659 | +system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) |
660 | diff --git a/hw/vmapple/trace-events b/hw/vmapple/trace-events | 660 | diff --git a/hw/vmapple/trace-events b/hw/vmapple/trace-events |
661 | index XXXXXXX..XXXXXXX 100644 | 661 | index XXXXXXX..XXXXXXX 100644 |
662 | --- a/hw/vmapple/trace-events | 662 | --- a/hw/vmapple/trace-events |
663 | +++ b/hw/vmapple/trace-events | 663 | +++ b/hw/vmapple/trace-events |
664 | @@ -XXX,XX +XXX,XX @@ | 664 | @@ -XXX,XX +XXX,XX @@ |
665 | # See docs/devel/tracing.rst for syntax documentation. | 665 | # See docs/devel/tracing.rst for syntax documentation. |
666 | 666 | ||
667 | +# aes.c | 667 | +# aes.c |
668 | +aes_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | 668 | +aes_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 |
669 | +aes_cmd_key_select_builtin(uint32_t ctx, uint32_t key_id, const char *direction, const char *cipher) "[%d] Selecting builtin key %d to %scrypt with %s" | 669 | +aes_cmd_key_select_builtin(uint32_t ctx, uint32_t key_id, const char *direction, const char *cipher) "[%d] Selecting builtin key %d to %scrypt with %s" |
670 | +aes_cmd_key_select_new(uint32_t ctx, uint32_t key_len, const char *direction, const char *cipher) "[%d] Selecting new key size=%d to %scrypt with %s" | 670 | +aes_cmd_key_select_new(uint32_t ctx, uint32_t key_len, const char *direction, const char *cipher) "[%d] Selecting new key size=%d to %scrypt with %s" |
671 | +aes_cmd_iv(uint32_t ctx, uint32_t iv0, uint32_t iv1, uint32_t iv2, uint32_t iv3) "[%d] 0x%08x 0x%08x 0x%08x 0x%08x" | 671 | +aes_cmd_iv(uint32_t ctx, uint32_t iv0, uint32_t iv1, uint32_t iv2, uint32_t iv3) "[%d] 0x%08x 0x%08x 0x%08x 0x%08x" |
672 | +aes_cmd_data(uint32_t key, uint32_t iv, uint64_t src, uint64_t dst, uint32_t len) "[key=%d iv=%d] src=0x%"PRIx64" dst=0x%"PRIx64" len=0x%x" | 672 | +aes_cmd_data(uint32_t key, uint32_t iv, uint64_t src, uint64_t dst, uint32_t len) "[key=%d iv=%d] src=0x%"PRIx64" dst=0x%"PRIx64" len=0x%x" |
673 | +aes_cmd_store_iv(uint32_t ctx, uint64_t addr, uint32_t iv0, uint32_t iv1, uint32_t iv2, uint32_t iv3) "[%d] addr=0x%"PRIx64" -> 0x%08x 0x%08x 0x%08x 0x%08x" | 673 | +aes_cmd_store_iv(uint32_t ctx, uint64_t addr, uint32_t iv0, uint32_t iv1, uint32_t iv2, uint32_t iv3) "[%d] addr=0x%"PRIx64" -> 0x%08x 0x%08x 0x%08x 0x%08x"
674 | +aes_cmd_flag(uint32_t raise, uint32_t flag_info) "raise=%d flag_info=0x%x" | 674 | +aes_cmd_flag(uint32_t raise, uint32_t flag_info) "raise=%d flag_info=0x%x" |
675 | +aes_fifo_process(uint32_t cmd, bool success) "cmd=%d success=%d" | 675 | +aes_fifo_process(uint32_t cmd, bool success) "cmd=%d success=%d" |
676 | +aes_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 | 676 | +aes_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 |
677 | +aes_2_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | 677 | +aes_2_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 |
678 | +aes_2_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 | 678 | +aes_2_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 |
679 | +aes_dump_data(const char *desc, const char *hex) "%s%s" | 679 | +aes_dump_data(const char *desc, const char *hex) "%s%s" |
680 | + | 680 | + |
681 | diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h | 681 | diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h |
682 | new file mode 100644 | 682 | new file mode 100644 |
683 | index XXXXXXX..XXXXXXX | 683 | index XXXXXXX..XXXXXXX |
684 | --- /dev/null | 684 | --- /dev/null |
685 | +++ b/include/hw/vmapple/vmapple.h | 685 | +++ b/include/hw/vmapple/vmapple.h |
686 | @@ -XXX,XX +XXX,XX @@ | 686 | @@ -XXX,XX +XXX,XX @@ |
687 | +/* | 687 | +/* |
688 | + * Devices specific to the VMApple machine type | 688 | + * Devices specific to the VMApple machine type |
689 | + * | 689 | + * |
690 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | 690 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
691 | + * | 691 | + * |
692 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | 692 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. |
693 | + * See the COPYING file in the top-level directory. | 693 | + * See the COPYING file in the top-level directory. |
694 | + * | 694 | + * |
695 | + * SPDX-License-Identifier: GPL-2.0-or-later | 695 | + * SPDX-License-Identifier: GPL-2.0-or-later |
696 | + */ | 696 | + */ |
697 | + | 697 | + |
698 | +#ifndef HW_VMAPPLE_VMAPPLE_H | 698 | +#ifndef HW_VMAPPLE_VMAPPLE_H |
699 | +#define HW_VMAPPLE_VMAPPLE_H | 699 | +#define HW_VMAPPLE_VMAPPLE_H |
700 | + | 700 | + |
701 | +#define TYPE_APPLE_AES "apple-aes" | 701 | +#define TYPE_APPLE_AES "apple-aes" |
702 | + | 702 | + |
703 | +#endif /* HW_VMAPPLE_VMAPPLE_H */ | 703 | +#endif /* HW_VMAPPLE_VMAPPLE_H */ |
704 | diff --git a/include/qemu/cutils.h b/include/qemu/cutils.h | 704 | diff --git a/include/qemu/cutils.h b/include/qemu/cutils.h |
705 | index XXXXXXX..XXXXXXX 100644 | 705 | index XXXXXXX..XXXXXXX 100644 |
706 | --- a/include/qemu/cutils.h | 706 | --- a/include/qemu/cutils.h |
707 | +++ b/include/qemu/cutils.h | 707 | +++ b/include/qemu/cutils.h |
708 | @@ -XXX,XX +XXX,XX @@ GString *qemu_hexdump_line(GString *str, const void *buf, size_t len, | 708 | @@ -XXX,XX +XXX,XX @@ GString *qemu_hexdump_line(GString *str, const void *buf, size_t len, |
709 | void qemu_hexdump(FILE *fp, const char *prefix, | 709 | void qemu_hexdump(FILE *fp, const char *prefix, |
710 | const void *bufptr, size_t size); | 710 | const void *bufptr, size_t size); |
711 | 711 | ||
712 | +/** | 712 | +/** |
713 | + * qemu_hexdump_to_buffer: | 713 | + * qemu_hexdump_to_buffer: |
714 | + * @buffer: output string buffer | 714 | + * @buffer: output string buffer |
715 | + * @buffer_size: amount of available space in buffer. Must be at least | 715 | + * @buffer_size: amount of available space in buffer. Must be at least |
716 | + * data_size*2+1. | 716 | + * data_size*2+1. |
717 | + * @data: input bytes | 717 | + * @data: input bytes |
718 | + * @data_size: number of bytes in data | 718 | + * @data_size: number of bytes in data |
719 | + * | 719 | + * |
720 | + * Converts the @data_size bytes in @data into hex digit pairs, writing them to | 720 | + * Converts the @data_size bytes in @data into hex digit pairs, writing them to |
721 | + * @buffer. Finally, a nul terminating character is written; @buffer therefore | 721 | + * @buffer. Finally, a nul terminating character is written; @buffer therefore |
722 | + * needs space for (data_size*2+1) chars. | 722 | + * needs space for (data_size*2+1) chars. |
723 | + */ | 723 | + */ |
724 | +void qemu_hexdump_to_buffer(char *restrict buffer, size_t buffer_size, | 724 | +void qemu_hexdump_to_buffer(char *restrict buffer, size_t buffer_size, |
725 | + const uint8_t *restrict data, size_t data_size); | 725 | + const uint8_t *restrict data, size_t data_size); |
726 | + | 726 | + |
727 | #endif | 727 | #endif |
728 | diff --git a/util/hexdump.c b/util/hexdump.c | 728 | diff --git a/util/hexdump.c b/util/hexdump.c |
729 | index XXXXXXX..XXXXXXX 100644 | 729 | index XXXXXXX..XXXXXXX 100644 |
730 | --- a/util/hexdump.c | 730 | --- a/util/hexdump.c |
731 | +++ b/util/hexdump.c | 731 | +++ b/util/hexdump.c |
732 | @@ -XXX,XX +XXX,XX @@ | 732 | @@ -XXX,XX +XXX,XX @@ |
733 | 733 | ||
734 | #include "qemu/osdep.h" | 734 | #include "qemu/osdep.h" |
735 | #include "qemu/cutils.h" | 735 | #include "qemu/cutils.h" |
736 | +#include "qemu/host-utils.h" | 736 | +#include "qemu/host-utils.h" |
737 | 737 | ||
738 | static inline char hexdump_nibble(unsigned x) | 738 | static inline char hexdump_nibble(unsigned x) |
739 | { | 739 | { |
740 | @@ -XXX,XX +XXX,XX @@ void qemu_hexdump(FILE *fp, const char *prefix, | 740 | @@ -XXX,XX +XXX,XX @@ void qemu_hexdump(FILE *fp, const char *prefix, |
741 | } | 741 | } |
742 | 742 | ||
743 | } | 743 | } |
744 | + | 744 | + |
745 | +void qemu_hexdump_to_buffer(char *restrict buffer, size_t buffer_size, | 745 | +void qemu_hexdump_to_buffer(char *restrict buffer, size_t buffer_size, |
746 | + const uint8_t *restrict data, size_t data_size) | 746 | + const uint8_t *restrict data, size_t data_size) |
747 | +{ | 747 | +{ |
748 | + size_t i; | 748 | + size_t i; |
749 | + uint64_t required_buffer_size; | 749 | + uint64_t required_buffer_size; |
750 | + bool overflow = umul64_overflow(data_size, 2, &required_buffer_size); | 750 | + bool overflow = umul64_overflow(data_size, 2, &required_buffer_size); |
751 | + overflow |= uadd64_overflow(required_buffer_size, 1, &required_buffer_size); | 751 | + overflow |= uadd64_overflow(required_buffer_size, 1, &required_buffer_size); |
752 | + assert(!overflow && buffer_size >= required_buffer_size); | 752 | + assert(!overflow && buffer_size >= required_buffer_size); |
753 | + | 753 | + |
754 | + for (i = 0; i < data_size; i++) { | 754 | + for (i = 0; i < data_size; i++) { |
755 | + uint8_t val = data[i]; | 755 | + uint8_t val = data[i]; |
756 | + *(buffer++) = hexdump_nibble(val >> 4); | 756 | + *(buffer++) = hexdump_nibble(val >> 4); |
757 | + *(buffer++) = hexdump_nibble(val & 0xf); | 757 | + *(buffer++) = hexdump_nibble(val & 0xf); |
758 | + } | 758 | + } |
759 | + *buffer = '\0'; | 759 | + *buffer = '\0'; |
760 | +} | 760 | +} |
761 | -- | 761 | -- |
762 | 2.39.5 (Apple Git-154) | 762 | 2.39.5 (Apple Git-154) |
763 | 763 | ||
New patch | |||
---|---|---|---|
1 | 1 | From: Alexander Graf <graf@amazon.com> | |
2 | |||
3 | VMApple contains an "aes" engine device that it uses to encrypt and | ||
4 | decrypt its nvram. It uses trivial, hard-coded keys for that | ||
5 | purpose. | ||
6 | |||
7 | Add device emulation for this device model. | ||
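For orientation, here is a rough sketch of the guest-visible programming model as this emulation interprets it. The register and bit-field names are the ones defined in the code below; the MMIO base, the writel() accessor and the iv[] buffer are placeholders, and the sequence is illustrative rather than taken from any real guest driver:

    /* Hypothetical guest-side sequence (sketch, not from iBoot). */
    writel(base + REG_IRQ_ENABLE, REG_IRQ_STATUS_FLAG);        /* unmask "flag" IRQ */

    /* Select built-in key 2 for AES-256-CBC decryption. */
    writel(base + REG_FIFO, ((uint32_t)CMD_KEY << CMD_SHIFT) |
                            (2u << CMD_KEY_SELECT_SHIFT) |
                            (2u << CMD_KEY_KEY_LEN_SHIFT) |     /* 256-bit key */
                            (1u << CMD_KEY_BLOCK_MODE_SHIFT));  /* CBC */

    /* Load an IV: the command word is followed by four 32-bit IV words. */
    writel(base + REG_FIFO, (uint32_t)CMD_IV << CMD_SHIFT);
    for (int i = 0; i < 4; i++) {
        writel(base + REG_FIFO, iv[i]);
    }

    /* Ask for an interrupt; the handler acks it by writing REG_IRQ_STATUS. */
    writel(base + REG_FIFO, ((uint32_t)CMD_FLAG << CMD_SHIFT) |
                            CMD_FLAG_RAISE_IRQ_MASK);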
8 | |||
9 | Signed-off-by: Alexander Graf <graf@amazon.com> | ||
10 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
11 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
12 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
13 | --- | ||
14 | |||
15 | v3: | ||
16 | |||
17 | * Rebased on latest upstream and fixed minor breakages. | ||
18 | * Replaced legacy device reset method with Resettable method | ||
19 | |||
20 | v4: | ||
21 | |||
22 | * Improved logging of unimplemented functions and guest errors. | ||
23 | * Better adherence to naming and coding conventions. | ||
24 | * Cleaner error handling and recovery, including using g_autoptr | ||
25 | |||
26 | v5: | ||
27 | |||
28 | * More logging improvements | ||
29 | * Use xxx64_overflow() functions for hexdump buffer size calculations. | ||
30 | |||
31 | v7: | ||
32 | |||
33 | * Coding style tweaks. | ||
34 | |||
35 | v8: | ||
36 | |||
37 | * Further improved logging of guest errors. | ||
38 | |||
39 | v9: | ||
40 | |||
41 | * Replaced a use of cpu_physical_memory_write with dma_memory_write. | ||
42 | * Dropped unnecessary use of ternary operator for bool -> 0/1. | ||
43 | |||
44 | v10: | ||
45 | |||
46 | * Code style and comment improvements. | ||
47 | |||
48 | hw/vmapple/Kconfig | 2 + | ||
49 | hw/vmapple/aes.c | 581 +++++++++++++++++++++++++++++++++++ | ||
50 | hw/vmapple/meson.build | 1 + | ||
51 | hw/vmapple/trace-events | 14 + | ||
52 | include/hw/vmapple/vmapple.h | 17 + | ||
53 | include/qemu/cutils.h | 15 + | ||
54 | util/hexdump.c | 18 ++ | ||
55 | 7 files changed, 648 insertions(+) | ||
56 | create mode 100644 hw/vmapple/aes.c | ||
57 | create mode 100644 include/hw/vmapple/vmapple.h | ||
58 | |||
59 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/hw/vmapple/Kconfig | ||
62 | +++ b/hw/vmapple/Kconfig | ||
63 | @@ -1 +1,3 @@ | ||
64 | +config VMAPPLE_AES | ||
65 | + bool | ||
66 | |||
67 | diff --git a/hw/vmapple/aes.c b/hw/vmapple/aes.c | ||
68 | new file mode 100644 | ||
69 | index XXXXXXX..XXXXXXX | ||
70 | --- /dev/null | ||
71 | +++ b/hw/vmapple/aes.c | ||
72 | @@ -XXX,XX +XXX,XX @@ | ||
73 | +/* | ||
74 | + * QEMU Apple AES device emulation | ||
75 | + * | ||
76 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | ||
77 | + * | ||
78 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
79 | + * See the COPYING file in the top-level directory. | ||
80 | + * | ||
81 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
82 | + */ | ||
83 | + | ||
84 | +#include "qemu/osdep.h" | ||
85 | +#include "trace.h" | ||
86 | +#include "crypto/hash.h" | ||
87 | +#include "crypto/aes.h" | ||
88 | +#include "crypto/cipher.h" | ||
89 | +#include "hw/irq.h" | ||
90 | +#include "hw/sysbus.h" | ||
91 | +#include "hw/vmapple/vmapple.h" | ||
92 | +#include "migration/vmstate.h" | ||
93 | +#include "qemu/cutils.h" | ||
94 | +#include "qemu/log.h" | ||
95 | +#include "qemu/module.h" | ||
96 | +#include "sysemu/dma.h" | ||
97 | + | ||
98 | +OBJECT_DECLARE_SIMPLE_TYPE(AESState, APPLE_AES) | ||
99 | + | ||
100 | +#define MAX_FIFO_SIZE 9 | ||
101 | + | ||
102 | +#define CMD_KEY 0x1 | ||
103 | +#define CMD_KEY_CONTEXT_SHIFT 27 | ||
104 | +#define CMD_KEY_CONTEXT_MASK (0x1 << CMD_KEY_CONTEXT_SHIFT) | ||
105 | +#define CMD_KEY_SELECT_MAX_IDX 0x7 | ||
106 | +#define CMD_KEY_SELECT_SHIFT 24 | ||
107 | +#define CMD_KEY_SELECT_MASK (CMD_KEY_SELECT_MAX_IDX << CMD_KEY_SELECT_SHIFT) | ||
108 | +#define CMD_KEY_KEY_LEN_NUM 4u | ||
109 | +#define CMD_KEY_KEY_LEN_SHIFT 22 | ||
110 | +#define CMD_KEY_KEY_LEN_MASK ((CMD_KEY_KEY_LEN_NUM - 1u) << CMD_KEY_KEY_LEN_SHIFT) | ||
111 | +#define CMD_KEY_ENCRYPT_SHIFT 20 | ||
112 | +#define CMD_KEY_ENCRYPT_MASK (0x1 << CMD_KEY_ENCRYPT_SHIFT) | ||
113 | +#define CMD_KEY_BLOCK_MODE_SHIFT 16 | ||
114 | +#define CMD_KEY_BLOCK_MODE_MASK (0x3 << CMD_KEY_BLOCK_MODE_SHIFT) | ||
115 | +#define CMD_IV 0x2 | ||
116 | +#define CMD_IV_CONTEXT_SHIFT 26 | ||
117 | +#define CMD_IV_CONTEXT_MASK (0x3 << CMD_KEY_CONTEXT_SHIFT) | ||
118 | +#define CMD_DSB 0x3 | ||
119 | +#define CMD_SKG 0x4 | ||
120 | +#define CMD_DATA 0x5 | ||
121 | +#define CMD_DATA_KEY_CTX_SHIFT 27 | ||
122 | +#define CMD_DATA_KEY_CTX_MASK (0x1 << CMD_DATA_KEY_CTX_SHIFT) | ||
123 | +#define CMD_DATA_IV_CTX_SHIFT 25 | ||
124 | +#define CMD_DATA_IV_CTX_MASK (0x3 << CMD_DATA_IV_CTX_SHIFT) | ||
125 | +#define CMD_DATA_LEN_MASK 0xffffff | ||
126 | +#define CMD_STORE_IV 0x6 | ||
127 | +#define CMD_STORE_IV_ADDR_MASK 0xffffff | ||
128 | +#define CMD_WRITE_REG 0x7 | ||
129 | +#define CMD_FLAG 0x8 | ||
130 | +#define CMD_FLAG_STOP_MASK BIT(26) | ||
131 | +#define CMD_FLAG_RAISE_IRQ_MASK BIT(27) | ||
132 | +#define CMD_FLAG_INFO_MASK 0xff | ||
133 | +#define CMD_MAX 0x10 | ||
134 | + | ||
135 | +#define CMD_SHIFT 28 | ||
136 | + | ||
137 | +#define REG_STATUS 0xc | ||
138 | +#define REG_STATUS_DMA_READ_RUNNING BIT(0) | ||
139 | +#define REG_STATUS_DMA_READ_PENDING BIT(1) | ||
140 | +#define REG_STATUS_DMA_WRITE_RUNNING BIT(2) | ||
141 | +#define REG_STATUS_DMA_WRITE_PENDING BIT(3) | ||
142 | +#define REG_STATUS_BUSY BIT(4) | ||
143 | +#define REG_STATUS_EXECUTING BIT(5) | ||
144 | +#define REG_STATUS_READY BIT(6) | ||
145 | +#define REG_STATUS_TEXT_DPA_SEEDED BIT(7) | ||
146 | +#define REG_STATUS_UNWRAP_DPA_SEEDED BIT(8) | ||
147 | + | ||
148 | +#define REG_IRQ_STATUS 0x18 | ||
149 | +#define REG_IRQ_STATUS_INVALID_CMD BIT(2) | ||
150 | +#define REG_IRQ_STATUS_FLAG BIT(5) | ||
151 | +#define REG_IRQ_ENABLE 0x1c | ||
152 | +#define REG_WATERMARK 0x20 | ||
153 | +#define REG_Q_STATUS 0x24 | ||
154 | +#define REG_FLAG_INFO 0x30 | ||
155 | +#define REG_FIFO 0x200 | ||
156 | + | ||
157 | +static const uint32_t key_lens[CMD_KEY_KEY_LEN_NUM] = { | ||
158 | + [0] = 16, | ||
159 | + [1] = 24, | ||
160 | + [2] = 32, | ||
161 | + [3] = 64, | ||
162 | +}; | ||
163 | + | ||
164 | +typedef struct Key { | ||
165 | + uint32_t key_len; | ||
166 | + uint8_t key[32]; | ||
167 | +} Key; | ||
168 | + | ||
169 | +typedef struct IV { | ||
170 | + uint32_t iv[4]; | ||
171 | +} IV; | ||
172 | + | ||
173 | +static Key builtin_keys[CMD_KEY_SELECT_MAX_IDX + 1] = { | ||
174 | + [1] = { | ||
175 | + .key_len = 32, | ||
176 | + .key = { 0x1 }, | ||
177 | + }, | ||
178 | + [2] = { | ||
179 | + .key_len = 32, | ||
180 | + .key = { 0x2 }, | ||
181 | + }, | ||
182 | + [3] = { | ||
183 | + .key_len = 32, | ||
184 | + .key = { 0x3 }, | ||
185 | + } | ||
186 | +}; | ||
187 | + | ||
188 | +struct AESState { | ||
189 | + SysBusDevice parent_obj; | ||
190 | + | ||
191 | + qemu_irq irq; | ||
192 | + MemoryRegion iomem1; | ||
193 | + MemoryRegion iomem2; | ||
194 | + AddressSpace *as; | ||
195 | + | ||
196 | + uint32_t status; | ||
197 | + uint32_t q_status; | ||
198 | + uint32_t irq_status; | ||
199 | + uint32_t irq_enable; | ||
200 | + uint32_t watermark; | ||
201 | + uint32_t flag_info; | ||
202 | + uint32_t fifo[MAX_FIFO_SIZE]; | ||
203 | + uint32_t fifo_idx; | ||
204 | + Key key[2]; | ||
205 | + IV iv[4]; | ||
206 | + bool is_encrypt; | ||
207 | + QCryptoCipherMode block_mode; | ||
208 | +}; | ||
209 | + | ||
210 | +static void aes_update_irq(AESState *s) | ||
211 | +{ | ||
212 | + qemu_set_irq(s->irq, !!(s->irq_status & s->irq_enable)); | ||
213 | +} | ||
214 | + | ||
215 | +static uint64_t aes1_read(void *opaque, hwaddr offset, unsigned size) | ||
216 | +{ | ||
217 | + AESState *s = opaque; | ||
218 | + uint64_t res = 0; | ||
219 | + | ||
220 | + switch (offset) { | ||
221 | + case REG_STATUS: | ||
222 | + res = s->status; | ||
223 | + break; | ||
224 | + case REG_IRQ_STATUS: | ||
225 | + res = s->irq_status; | ||
226 | + break; | ||
227 | + case REG_IRQ_ENABLE: | ||
228 | + res = s->irq_enable; | ||
229 | + break; | ||
230 | + case REG_WATERMARK: | ||
231 | + res = s->watermark; | ||
232 | + break; | ||
233 | + case REG_Q_STATUS: | ||
234 | + res = s->q_status; | ||
235 | + break; | ||
236 | + case REG_FLAG_INFO: | ||
237 | + res = s->flag_info; | ||
238 | + break; | ||
239 | + | ||
240 | + default: | ||
241 | + qemu_log_mask(LOG_UNIMP, "%s: Unknown AES MMIO offset %" PRIx64 "\n", | ||
242 | + __func__, offset); | ||
243 | + break; | ||
244 | + } | ||
245 | + | ||
246 | + trace_aes_read(offset, res); | ||
247 | + | ||
248 | + return res; | ||
249 | +} | ||
250 | + | ||
251 | +static void fifo_append(AESState *s, uint64_t val) | ||
252 | +{ | ||
253 | + if (s->fifo_idx == MAX_FIFO_SIZE) { | ||
254 | + /* Exceeded the FIFO. Bail out */ | ||
255 | + return; | ||
256 | + } | ||
257 | + | ||
258 | + s->fifo[s->fifo_idx++] = val; | ||
259 | +} | ||
260 | + | ||
261 | +static bool has_payload(AESState *s, uint32_t elems) | ||
262 | +{ | ||
263 | + return s->fifo_idx >= elems + 1; | ||
264 | +} | ||
265 | + | ||
266 | +static bool cmd_key(AESState *s) | ||
267 | +{ | ||
268 | + uint32_t cmd = s->fifo[0]; | ||
269 | + uint32_t key_select = (cmd & CMD_KEY_SELECT_MASK) >> CMD_KEY_SELECT_SHIFT; | ||
270 | + uint32_t ctxt = (cmd & CMD_KEY_CONTEXT_MASK) >> CMD_KEY_CONTEXT_SHIFT; | ||
271 | + uint32_t key_len; | ||
272 | + | ||
273 | + switch ((cmd & CMD_KEY_BLOCK_MODE_MASK) >> CMD_KEY_BLOCK_MODE_SHIFT) { | ||
274 | + case 0: | ||
275 | + s->block_mode = QCRYPTO_CIPHER_MODE_ECB; | ||
276 | + break; | ||
277 | + case 1: | ||
278 | + s->block_mode = QCRYPTO_CIPHER_MODE_CBC; | ||
279 | + break; | ||
280 | + default: | ||
281 | + return false; | ||
282 | + } | ||
283 | + | ||
284 | + s->is_encrypt = cmd & CMD_KEY_ENCRYPT_MASK; | ||
285 | + key_len = key_lens[(cmd & CMD_KEY_KEY_LEN_MASK) >> CMD_KEY_KEY_LEN_SHIFT]; | ||
286 | + | ||
287 | + if (key_select) { | ||
288 | + trace_aes_cmd_key_select_builtin(ctxt, key_select, | ||
289 | + s->is_encrypt ? "en" : "de", | ||
290 | + QCryptoCipherMode_str(s->block_mode)); | ||
291 | + s->key[ctxt] = builtin_keys[key_select]; | ||
292 | + } else { | ||
293 | + trace_aes_cmd_key_select_new(ctxt, key_len, | ||
294 | + s->is_encrypt ? "en" : "de", | ||
295 | + QCryptoCipherMode_str(s->block_mode)); | ||
296 | + if (key_len > sizeof(s->key[ctxt].key)) { | ||
297 | + return false; | ||
298 | + } | ||
299 | + if (!has_payload(s, key_len / sizeof(uint32_t))) { | ||
300 | + /* wait for payload */ | ||
301 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); | ||
302 | + return false; | ||
303 | + } | ||
304 | + memcpy(&s->key[ctxt].key, &s->fifo[1], key_len); | ||
305 | + s->key[ctxt].key_len = key_len; | ||
306 | + } | ||
307 | + | ||
308 | + return true; | ||
309 | +} | ||
310 | + | ||
311 | +static bool cmd_iv(AESState *s) | ||
312 | +{ | ||
313 | + uint32_t cmd = s->fifo[0]; | ||
314 | + uint32_t ctxt = (cmd & CMD_IV_CONTEXT_MASK) >> CMD_IV_CONTEXT_SHIFT; | ||
315 | + | ||
316 | + if (!has_payload(s, 4)) { | ||
317 | + /* wait for payload */ | ||
318 | + return false; | ||
319 | + } | ||
320 | + memcpy(&s->iv[ctxt].iv, &s->fifo[1], sizeof(s->iv[ctxt].iv)); | ||
321 | + trace_aes_cmd_iv(ctxt, s->fifo[1], s->fifo[2], s->fifo[3], s->fifo[4]); | ||
322 | + | ||
323 | + return true; | ||
324 | +} | ||
325 | + | ||
326 | +static void dump_data(const char *desc, const void *p, size_t len) | ||
327 | +{ | ||
328 | + static const size_t MAX_LEN = 0x1000; | ||
329 | + char hex[MAX_LEN * 2 + 1] = ""; | ||
330 | + | ||
331 | + if (len > MAX_LEN) { | ||
332 | + return; | ||
333 | + } | ||
334 | + | ||
335 | + qemu_hexdump_to_buffer(hex, sizeof(hex), p, len); | ||
336 | + trace_aes_dump_data(desc, hex); | ||
337 | +} | ||
338 | + | ||
339 | +static bool cmd_data(AESState *s) | ||
340 | +{ | ||
341 | + uint32_t cmd = s->fifo[0]; | ||
342 | + uint32_t ctxt_iv = 0; | ||
343 | + uint32_t ctxt_key = (cmd & CMD_DATA_KEY_CTX_MASK) >> CMD_DATA_KEY_CTX_SHIFT; | ||
344 | + uint32_t len = cmd & CMD_DATA_LEN_MASK; | ||
345 | + uint64_t src_addr = s->fifo[2]; | ||
346 | + uint64_t dst_addr = s->fifo[3]; | ||
347 | + QCryptoCipherAlgo alg; | ||
348 | + g_autoptr(QCryptoCipher) cipher = NULL; | ||
349 | + g_autoptr(GByteArray) src = NULL; | ||
350 | + g_autoptr(GByteArray) dst = NULL; | ||
351 | + MemTxResult r; | ||
352 | + | ||
353 | + src_addr |= ((uint64_t)s->fifo[1] << 16) & 0xffff00000000ULL; | ||
354 | + dst_addr |= ((uint64_t)s->fifo[1] << 32) & 0xffff00000000ULL; | ||
355 | + | ||
356 | + trace_aes_cmd_data(ctxt_key, ctxt_iv, src_addr, dst_addr, len); | ||
357 | + | ||
358 | + if (!has_payload(s, 3)) { | ||
359 | + /* wait for payload */ | ||
360 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); | ||
361 | + return false; | ||
362 | + } | ||
363 | + | ||
364 | + if (ctxt_key >= ARRAY_SIZE(s->key) || | ||
365 | + ctxt_iv >= ARRAY_SIZE(s->iv)) { | ||
366 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid key or iv\n", __func__); | ||
367 | + return false; | ||
368 | + } | ||
369 | + | ||
370 | + src = g_byte_array_sized_new(len); | ||
371 | + g_byte_array_set_size(src, len); | ||
372 | + dst = g_byte_array_sized_new(len); | ||
373 | + g_byte_array_set_size(dst, len); | ||
374 | + | ||
375 | + r = dma_memory_read(s->as, src_addr, src->data, len, MEMTXATTRS_UNSPECIFIED); | ||
376 | + if (r != MEMTX_OK) { | ||
377 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA read of %"PRIu32" bytes " | ||
378 | + "from 0x%"PRIx64" failed. (r=%d)\n", | ||
379 | + __func__, len, src_addr, r); | ||
380 | + return false; | ||
381 | + } | ||
382 | + | ||
383 | + dump_data("cmd_data(): src_data=", src->data, len); | ||
384 | + | ||
385 | + switch (s->key[ctxt_key].key_len) { | ||
386 | + case 128 / 8: | ||
387 | + alg = QCRYPTO_CIPHER_ALGO_AES_128; | ||
388 | + break; | ||
389 | + case 192 / 8: | ||
390 | + alg = QCRYPTO_CIPHER_ALGO_AES_192; | ||
391 | + break; | ||
392 | + case 256 / 8: | ||
393 | + alg = QCRYPTO_CIPHER_ALGO_AES_256; | ||
394 | + break; | ||
395 | + default: | ||
396 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid key length\n", __func__); | ||
397 | + return false; | ||
398 | + } | ||
399 | + cipher = qcrypto_cipher_new(alg, s->block_mode, | ||
400 | + s->key[ctxt_key].key, | ||
401 | + s->key[ctxt_key].key_len, NULL); | ||
402 | + if (!cipher) { | ||
403 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to create cipher object\n", | ||
404 | + __func__); | ||
405 | + return false; | ||
406 | + } | ||
407 | + if (s->block_mode != QCRYPTO_CIPHER_MODE_ECB) { | ||
408 | + if (qcrypto_cipher_setiv(cipher, (void *)s->iv[ctxt_iv].iv, | ||
409 | + sizeof(s->iv[ctxt_iv].iv), NULL) != 0) { | ||
410 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to set IV\n", __func__); | ||
411 | + return false; | ||
412 | + } | ||
413 | + } | ||
414 | + if (s->is_encrypt) { | ||
415 | + if (qcrypto_cipher_encrypt(cipher, src->data, dst->data, len, NULL) != 0) { | ||
416 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Encryption failed\n", __func__); | ||
417 | + return false; | ||
418 | + } | ||
419 | + } else { | ||
420 | + if (qcrypto_cipher_decrypt(cipher, src->data, dst->data, len, NULL) != 0) { | ||
421 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: Decryption failed\n", __func__); | ||
422 | + return false; | ||
423 | + } | ||
424 | + } | ||
425 | + | ||
426 | + dump_data("cmd_data(): dst_data=", dst->data, len); | ||
427 | + r = dma_memory_write(s->as, dst_addr, dst->data, len, MEMTXATTRS_UNSPECIFIED); | ||
428 | + if (r != MEMTX_OK) { | ||
429 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA write of %"PRIu32" bytes " | ||
430 | + "to 0x%"PRIx64" failed. (r=%d)\n", | ||
431 | +                      __func__, len, dst_addr, r); | ||
432 | + return false; | ||
433 | + } | ||
434 | + | ||
435 | + return true; | ||
436 | +} | ||
437 | + | ||
438 | +static bool cmd_store_iv(AESState *s) | ||
439 | +{ | ||
440 | + uint32_t cmd = s->fifo[0]; | ||
441 | + uint32_t ctxt = (cmd & CMD_IV_CONTEXT_MASK) >> CMD_IV_CONTEXT_SHIFT; | ||
442 | + uint64_t addr = s->fifo[1]; | ||
443 | + MemTxResult dma_result; | ||
444 | + | ||
445 | + if (!has_payload(s, 1)) { | ||
446 | + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); | ||
447 | + return false; | ||
448 | + } | ||
449 | + | ||
450 | + if (ctxt >= ARRAY_SIZE(s->iv)) { | ||
451 | + qemu_log_mask(LOG_GUEST_ERROR, | ||
452 | + "%s: Invalid context. ctxt = %u, allowed: 0..%zu\n", | ||
453 | + __func__, ctxt, ARRAY_SIZE(s->iv) - 1); | ||
454 | + return false; | ||
455 | + } | ||
456 | + | ||
457 | + addr |= ((uint64_t)cmd << 32) & 0xff00000000ULL; | ||
458 | + dma_result = dma_memory_write(&address_space_memory, addr, | ||
459 | + &s->iv[ctxt].iv, sizeof(s->iv[ctxt].iv), | ||
460 | + MEMTXATTRS_UNSPECIFIED); | ||
461 | + | ||
462 | + trace_aes_cmd_store_iv(ctxt, addr, s->iv[ctxt].iv[0], s->iv[ctxt].iv[1], | ||
463 | + s->iv[ctxt].iv[2], s->iv[ctxt].iv[3]); | ||
464 | + | ||
465 | + return dma_result == MEMTX_OK; | ||
466 | +} | ||
467 | + | ||
468 | +static bool cmd_flag(AESState *s) | ||
469 | +{ | ||
470 | + uint32_t cmd = s->fifo[0]; | ||
471 | + uint32_t raise_irq = cmd & CMD_FLAG_RAISE_IRQ_MASK; | ||
472 | + | ||
473 | + /* We always process data when it's coming in, so fire an IRQ immediately */ | ||
474 | + if (raise_irq) { | ||
475 | + s->irq_status |= REG_IRQ_STATUS_FLAG; | ||
476 | + } | ||
477 | + | ||
478 | + s->flag_info = cmd & CMD_FLAG_INFO_MASK; | ||
479 | + | ||
480 | + trace_aes_cmd_flag(!!raise_irq, s->flag_info); | ||
481 | + | ||
482 | + return true; | ||
483 | +} | ||
484 | + | ||
485 | +static void fifo_process(AESState *s) | ||
486 | +{ | ||
487 | + uint32_t cmd = s->fifo[0] >> CMD_SHIFT; | ||
488 | + bool success = false; | ||
489 | + | ||
490 | + if (!s->fifo_idx) { | ||
491 | + return; | ||
492 | + } | ||
493 | + | ||
494 | + switch (cmd) { | ||
495 | + case CMD_KEY: | ||
496 | + success = cmd_key(s); | ||
497 | + break; | ||
498 | + case CMD_IV: | ||
499 | + success = cmd_iv(s); | ||
500 | + break; | ||
501 | + case CMD_DATA: | ||
502 | + success = cmd_data(s); | ||
503 | + break; | ||
504 | + case CMD_STORE_IV: | ||
505 | + success = cmd_store_iv(s); | ||
506 | + break; | ||
507 | + case CMD_FLAG: | ||
508 | + success = cmd_flag(s); | ||
509 | + break; | ||
510 | + default: | ||
511 | + s->irq_status |= REG_IRQ_STATUS_INVALID_CMD; | ||
512 | + break; | ||
513 | + } | ||
514 | + | ||
515 | + if (success) { | ||
516 | + s->fifo_idx = 0; | ||
517 | + } | ||
518 | + | ||
519 | + trace_aes_fifo_process(cmd, success); | ||
520 | +} | ||
521 | + | ||
522 | +static void aes1_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) | ||
523 | +{ | ||
524 | + AESState *s = opaque; | ||
525 | + | ||
526 | + trace_aes_write(offset, val); | ||
527 | + | ||
528 | + switch (offset) { | ||
529 | + case REG_IRQ_STATUS: | ||
530 | + s->irq_status &= ~val; | ||
531 | + break; | ||
532 | + case REG_IRQ_ENABLE: | ||
533 | + s->irq_enable = val; | ||
534 | + break; | ||
535 | + case REG_FIFO: | ||
536 | + fifo_append(s, val); | ||
537 | + fifo_process(s); | ||
538 | + break; | ||
539 | + default: | ||
540 | + qemu_log_mask(LOG_UNIMP, | ||
541 | + "%s: Unknown AES MMIO offset %"PRIx64", data %"PRIx64"\n", | ||
542 | + __func__, offset, val); | ||
543 | + return; | ||
544 | + } | ||
545 | + | ||
546 | + aes_update_irq(s); | ||
547 | +} | ||
548 | + | ||
549 | +static const MemoryRegionOps aes1_ops = { | ||
550 | + .read = aes1_read, | ||
551 | + .write = aes1_write, | ||
552 | + .endianness = DEVICE_NATIVE_ENDIAN, | ||
553 | + .valid = { | ||
554 | + .min_access_size = 4, | ||
555 | + .max_access_size = 8, | ||
556 | + }, | ||
557 | + .impl = { | ||
558 | + .min_access_size = 4, | ||
559 | + .max_access_size = 4, | ||
560 | + }, | ||
561 | +}; | ||
562 | + | ||
563 | +static uint64_t aes2_read(void *opaque, hwaddr offset, unsigned size) | ||
564 | +{ | ||
565 | + uint64_t res = 0; | ||
566 | + | ||
567 | + switch (offset) { | ||
568 | + case 0: | ||
569 | + res = 0; | ||
570 | + break; | ||
571 | + default: | ||
572 | + qemu_log_mask(LOG_UNIMP, | ||
573 | + "%s: Unknown AES MMIO 2 offset %"PRIx64"\n", | ||
574 | + __func__, offset); | ||
575 | + break; | ||
576 | + } | ||
577 | + | ||
578 | + trace_aes_2_read(offset, res); | ||
579 | + | ||
580 | + return res; | ||
581 | +} | ||
582 | + | ||
583 | +static void aes2_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) | ||
584 | +{ | ||
585 | + trace_aes_2_write(offset, val); | ||
586 | + | ||
587 | + switch (offset) { | ||
588 | + default: | ||
589 | + qemu_log_mask(LOG_UNIMP, | ||
590 | + "%s: Unknown AES MMIO 2 offset %"PRIx64", data %"PRIx64"\n", | ||
591 | + __func__, offset, val); | ||
592 | + return; | ||
593 | + } | ||
594 | +} | ||
595 | + | ||
596 | +static const MemoryRegionOps aes2_ops = { | ||
597 | + .read = aes2_read, | ||
598 | + .write = aes2_write, | ||
599 | + .endianness = DEVICE_NATIVE_ENDIAN, | ||
600 | + .valid = { | ||
601 | + .min_access_size = 4, | ||
602 | + .max_access_size = 8, | ||
603 | + }, | ||
604 | + .impl = { | ||
605 | + .min_access_size = 4, | ||
606 | + .max_access_size = 4, | ||
607 | + }, | ||
608 | +}; | ||
609 | + | ||
610 | +static void aes_reset(Object *obj, ResetType type) | ||
611 | +{ | ||
612 | + AESState *s = APPLE_AES(obj); | ||
613 | + | ||
614 | + s->status = 0x3f80; | ||
615 | + s->q_status = 2; | ||
616 | + s->irq_status = 0; | ||
617 | + s->irq_enable = 0; | ||
618 | + s->watermark = 0; | ||
619 | +} | ||
620 | + | ||
621 | +static void aes_init(Object *obj) | ||
622 | +{ | ||
623 | + AESState *s = APPLE_AES(obj); | ||
624 | + | ||
625 | + memory_region_init_io(&s->iomem1, obj, &aes1_ops, s, TYPE_APPLE_AES, 0x4000); | ||
626 | + memory_region_init_io(&s->iomem2, obj, &aes2_ops, s, TYPE_APPLE_AES, 0x4000); | ||
627 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem1); | ||
628 | + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem2); | ||
629 | + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq); | ||
630 | + s->as = &address_space_memory; | ||
631 | +} | ||
632 | + | ||
633 | +static void aes_class_init(ObjectClass *klass, void *data) | ||
634 | +{ | ||
635 | + ResettableClass *rc = RESETTABLE_CLASS(klass); | ||
636 | + | ||
637 | + rc->phases.hold = aes_reset; | ||
638 | +} | ||
639 | + | ||
640 | +static const TypeInfo aes_info = { | ||
641 | + .name = TYPE_APPLE_AES, | ||
642 | + .parent = TYPE_SYS_BUS_DEVICE, | ||
643 | + .instance_size = sizeof(AESState), | ||
644 | + .class_init = aes_class_init, | ||
645 | + .instance_init = aes_init, | ||
646 | +}; | ||
647 | + | ||
648 | +static void aes_register_types(void) | ||
649 | +{ | ||
650 | + type_register_static(&aes_info); | ||
651 | +} | ||
652 | + | ||
653 | +type_init(aes_register_types) | ||
654 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build | ||
655 | index XXXXXXX..XXXXXXX 100644 | ||
656 | --- a/hw/vmapple/meson.build | ||
657 | +++ b/hw/vmapple/meson.build | ||
658 | @@ -0,0 +1 @@ | ||
659 | +system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) | ||
660 | diff --git a/hw/vmapple/trace-events b/hw/vmapple/trace-events | ||
661 | index XXXXXXX..XXXXXXX 100644 | ||
662 | --- a/hw/vmapple/trace-events | ||
663 | +++ b/hw/vmapple/trace-events | ||
664 | @@ -XXX,XX +XXX,XX @@ | ||
665 | # See docs/devel/tracing.rst for syntax documentation. | ||
666 | |||
667 | +# aes.c | ||
668 | +aes_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | ||
669 | +aes_cmd_key_select_builtin(uint32_t ctx, uint32_t key_id, const char *direction, const char *cipher) "[%d] Selecting builtin key %d to %scrypt with %s" | ||
670 | +aes_cmd_key_select_new(uint32_t ctx, uint32_t key_len, const char *direction, const char *cipher) "[%d] Selecting new key size=%d to %scrypt with %s" | ||
671 | +aes_cmd_iv(uint32_t ctx, uint32_t iv0, uint32_t iv1, uint32_t iv2, uint32_t iv3) "[%d] 0x%08x 0x%08x 0x%08x 0x%08x" | ||
672 | +aes_cmd_data(uint32_t key, uint32_t iv, uint64_t src, uint64_t dst, uint32_t len) "[key=%d iv=%d] src=0x%"PRIx64" dst=0x%"PRIx64" len=0x%x" | ||
673 | +aes_cmd_store_iv(uint32_t ctx, uint64_t addr, uint32_t iv0, uint32_t iv1, uint32_t iv2, uint32_t iv3) "[%d] addr=0x%"PRIx64" -> 0x%08x 0x%08x 0x%08x 0x%08x" | ||
674 | +aes_cmd_flag(uint32_t raise, uint32_t flag_info) "raise=%d flag_info=0x%x" | ||
675 | +aes_fifo_process(uint32_t cmd, bool success) "cmd=%d success=%d" | ||
676 | +aes_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 | ||
677 | +aes_2_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | ||
678 | +aes_2_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 | ||
679 | +aes_dump_data(const char *desc, const char *hex) "%s%s" | ||
680 | + | ||
681 | diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h | ||
682 | new file mode 100644 | ||
683 | index XXXXXXX..XXXXXXX | ||
684 | --- /dev/null | ||
685 | +++ b/include/hw/vmapple/vmapple.h | ||
686 | @@ -XXX,XX +XXX,XX @@ | ||
687 | +/* | ||
688 | + * Devices specific to the VMApple machine type | ||
689 | + * | ||
690 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | ||
691 | + * | ||
692 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
693 | + * See the COPYING file in the top-level directory. | ||
694 | + * | ||
695 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
696 | + */ | ||
697 | + | ||
698 | +#ifndef HW_VMAPPLE_VMAPPLE_H | ||
699 | +#define HW_VMAPPLE_VMAPPLE_H | ||
700 | + | ||
701 | +#define TYPE_APPLE_AES "apple-aes" | ||
702 | + | ||
703 | +#endif /* HW_VMAPPLE_VMAPPLE_H */ | ||
704 | diff --git a/include/qemu/cutils.h b/include/qemu/cutils.h | ||
705 | index XXXXXXX..XXXXXXX 100644 | ||
706 | --- a/include/qemu/cutils.h | ||
707 | +++ b/include/qemu/cutils.h | ||
708 | @@ -XXX,XX +XXX,XX @@ GString *qemu_hexdump_line(GString *str, const void *buf, size_t len, | ||
709 | void qemu_hexdump(FILE *fp, const char *prefix, | ||
710 | const void *bufptr, size_t size); | ||
711 | |||
712 | +/** | ||
713 | + * qemu_hexdump_to_buffer: | ||
714 | + * @buffer: output string buffer | ||
715 | + * @buffer_size: amount of available space in buffer. Must be at least | ||
716 | + * data_size*2+1. | ||
717 | + * @data: input bytes | ||
718 | + * @data_size: number of bytes in data | ||
719 | + * | ||
720 | + * Converts the @data_size bytes in @data into hex digit pairs, writing them to | ||
721 | + * @buffer. Finally, a nul terminating character is written; @buffer therefore | ||
722 | + * needs space for (data_size*2+1) chars. | ||
723 | + */ | ||
724 | +void qemu_hexdump_to_buffer(char *restrict buffer, size_t buffer_size, | ||
725 | + const uint8_t *restrict data, size_t data_size); | ||
726 | + | ||
727 | #endif | ||
728 | diff --git a/util/hexdump.c b/util/hexdump.c | ||
729 | index XXXXXXX..XXXXXXX 100644 | ||
730 | --- a/util/hexdump.c | ||
731 | +++ b/util/hexdump.c | ||
732 | @@ -XXX,XX +XXX,XX @@ | ||
733 | |||
734 | #include "qemu/osdep.h" | ||
735 | #include "qemu/cutils.h" | ||
736 | +#include "qemu/host-utils.h" | ||
737 | |||
738 | static inline char hexdump_nibble(unsigned x) | ||
739 | { | ||
740 | @@ -XXX,XX +XXX,XX @@ void qemu_hexdump(FILE *fp, const char *prefix, | ||
741 | } | ||
742 | |||
743 | } | ||
744 | + | ||
745 | +void qemu_hexdump_to_buffer(char *restrict buffer, size_t buffer_size, | ||
746 | + const uint8_t *restrict data, size_t data_size) | ||
747 | +{ | ||
748 | + size_t i; | ||
749 | + uint64_t required_buffer_size; | ||
750 | + bool overflow = umul64_overflow(data_size, 2, &required_buffer_size); | ||
751 | + overflow |= uadd64_overflow(required_buffer_size, 1, &required_buffer_size); | ||
752 | + assert(!overflow && buffer_size >= required_buffer_size); | ||
753 | + | ||
754 | + for (i = 0; i < data_size; i++) { | ||
755 | + uint8_t val = data[i]; | ||
756 | + *(buffer++) = hexdump_nibble(val >> 4); | ||
757 | + *(buffer++) = hexdump_nibble(val & 0xf); | ||
758 | + } | ||
759 | + *buffer = '\0'; | ||
760 | +} | ||
761 | -- | ||
762 | 2.39.5 (Apple Git-154) | ||
763 | |||
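As a usage note on the qemu_hexdump_to_buffer() helper added to cutils.h above: within this patch it is only called from dump_data() in aes.c, but a minimal standalone caller (a sketch, with the buffer sized per the documented data_size*2+1 contract) would look like:

    uint8_t data[4] = { 0xde, 0xad, 0xbe, 0xef };
    char hex[sizeof(data) * 2 + 1];    /* two hex digits per byte plus NUL */

    qemu_hexdump_to_buffer(hex, sizeof(hex), data, sizeof(data));
    /* hex now holds the eight hex digits for the four bytes, NUL-terminated */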
1 | From: Alexander Graf <graf@amazon.com> | 1 | From: Alexander Graf <graf@amazon.com> |
---|---|---|---|
2 | 2 | ||
3 | The VMApple machine exposes AUX and ROOT block devices (as well as USB OTG | 3 | The VMApple machine exposes AUX and ROOT block devices (as well as USB OTG |
4 | emulation) via virtio-pci, and additionally via a special, simple backdoor | 4 | emulation) via virtio-pci, and additionally via a special, simple backdoor
5 | platform device. | 5 | platform device.
6 | 6 | ||
7 | This patch implements this backdoor platform device to the best of my | 7 | This patch implements this backdoor platform device to the best of my |
8 | understanding. I left out any USB OTG parts; they're only needed for | 8 | understanding. I left out any USB OTG parts; they're only needed for |
9 | guest recovery and I don't understand the protocol yet. | 9 | guest recovery and I don't understand the protocol yet. |
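To make the request flow concrete, the sketch below shows what a single 512-byte read request looks like in guest memory, pieced together from the VblkReq/VblkSector structures and the vblk_cmd() handler in this patch (all descriptor fields are little-endian). The gpa_of() helper is a placeholder for however the guest obtains a guest-physical address; how the descriptor's own address is handed to the device is left to the register write path and not repeated here.

    VblkSector sector = { .sector = 0 };    /* read starting at sector 0 */
    uint8_t    payload[512];
    uint8_t    result;                      /* receives VBLK_RET_SUCCESS/FAILED */

    VblkReq req = {
        .sector = { .addr = gpa_of(&sector),  .len = sizeof(sector) },
        .data   = { .addr = gpa_of(&payload), .len = sizeof(payload),
                    .flags = VBLK_DATA_FLAGS_READ },
        .retval = { .addr = gpa_of(&result),  .len = sizeof(result) },
    };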
10 | 10 | ||
11 | Signed-off-by: Alexander Graf <graf@amazon.com> | 11 | Signed-off-by: Alexander Graf <graf@amazon.com> |
12 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 12 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
13 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 13 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
14 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 14 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
15 | --- | 15 | --- |
16 | 16 | ||
17 | v4: | 17 | v4: |
18 | 18 | ||
19 | * Moved most header code to .c, rest to vmapple.h | 19 | * Moved most header code to .c, rest to vmapple.h |
20 | * Better compliance with coding, naming, and formatting conventions. | 20 | * Better compliance with coding, naming, and formatting conventions. |
21 | 21 | ||
22 | v8: | 22 | v8: |
23 | 23 | ||
24 | * Replaced uses of cpu_physical_memory_read with dma_memory_read. | 24 | * Replaced uses of cpu_physical_memory_read with dma_memory_read. |
25 | * Replaced an instance of g_free with g_autofree. | 25 | * Replaced an instance of g_free with g_autofree. |
26 | 26 | ||
27 | v9: | 27 | v9: |
28 | 28 | ||
29 | * Replaced uses of cpu_physical_memory_write with dma_memory_write. | 29 | * Replaced uses of cpu_physical_memory_write with dma_memory_write. |
30 | 30 | ||
31 | hw/vmapple/Kconfig | 3 + | 31 | hw/vmapple/Kconfig | 3 + |
32 | hw/vmapple/bdif.c | 275 +++++++++++++++++++++++++++++++++++ | 32 | hw/vmapple/bdif.c | 275 +++++++++++++++++++++++++++++++++++ |
33 | hw/vmapple/meson.build | 1 + | 33 | hw/vmapple/meson.build | 1 + |
34 | hw/vmapple/trace-events | 5 + | 34 | hw/vmapple/trace-events | 5 + |
35 | include/hw/vmapple/vmapple.h | 2 + | 35 | include/hw/vmapple/vmapple.h | 2 + |
36 | 5 files changed, 286 insertions(+) | 36 | 5 files changed, 286 insertions(+) |
37 | create mode 100644 hw/vmapple/bdif.c | 37 | create mode 100644 hw/vmapple/bdif.c |
38 | 38 | ||
39 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig | 39 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig |
40 | index XXXXXXX..XXXXXXX 100644 | 40 | index XXXXXXX..XXXXXXX 100644 |
41 | --- a/hw/vmapple/Kconfig | 41 | --- a/hw/vmapple/Kconfig |
42 | +++ b/hw/vmapple/Kconfig | 42 | +++ b/hw/vmapple/Kconfig |
43 | @@ -XXX,XX +XXX,XX @@ | 43 | @@ -XXX,XX +XXX,XX @@ |
44 | config VMAPPLE_AES | 44 | config VMAPPLE_AES |
45 | bool | 45 | bool |
46 | 46 | ||
47 | +config VMAPPLE_BDIF | 47 | +config VMAPPLE_BDIF |
48 | + bool | 48 | + bool |
49 | + | 49 | + |
50 | diff --git a/hw/vmapple/bdif.c b/hw/vmapple/bdif.c | 50 | diff --git a/hw/vmapple/bdif.c b/hw/vmapple/bdif.c |
51 | new file mode 100644 | 51 | new file mode 100644 |
52 | index XXXXXXX..XXXXXXX | 52 | index XXXXXXX..XXXXXXX |
53 | --- /dev/null | 53 | --- /dev/null |
54 | +++ b/hw/vmapple/bdif.c | 54 | +++ b/hw/vmapple/bdif.c |
55 | @@ -XXX,XX +XXX,XX @@ | 55 | @@ -XXX,XX +XXX,XX @@ |
56 | +/* | 56 | +/* |
57 | + * VMApple Backdoor Interface | 57 | + * VMApple Backdoor Interface |
58 | + * | 58 | + * |
59 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | 59 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
60 | + * | 60 | + * |
61 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | 61 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. |
62 | + * See the COPYING file in the top-level directory. | 62 | + * See the COPYING file in the top-level directory. |
63 | + * | 63 | + * |
64 | + * SPDX-License-Identifier: GPL-2.0-or-later | 64 | + * SPDX-License-Identifier: GPL-2.0-or-later |
65 | + */ | 65 | + */ |
66 | + | 66 | + |
67 | +#include "qemu/osdep.h" | 67 | +#include "qemu/osdep.h" |
68 | +#include "qemu/units.h" | 68 | +#include "qemu/units.h" |
69 | +#include "qemu/log.h" | 69 | +#include "qemu/log.h" |
70 | +#include "qemu/module.h" | 70 | +#include "qemu/module.h" |
71 | +#include "trace.h" | 71 | +#include "trace.h" |
72 | +#include "hw/vmapple/vmapple.h" | 72 | +#include "hw/vmapple/vmapple.h" |
73 | +#include "hw/sysbus.h" | 73 | +#include "hw/sysbus.h" |
74 | +#include "hw/block/block.h" | 74 | +#include "hw/block/block.h" |
75 | +#include "qapi/error.h" | 75 | +#include "qapi/error.h" |
76 | +#include "sysemu/block-backend.h" | 76 | +#include "sysemu/block-backend.h" |
77 | +#include "sysemu/dma.h" | 77 | +#include "sysemu/dma.h" |
78 | + | 78 | + |
79 | +OBJECT_DECLARE_SIMPLE_TYPE(VMAppleBdifState, VMAPPLE_BDIF) | 79 | +OBJECT_DECLARE_SIMPLE_TYPE(VMAppleBdifState, VMAPPLE_BDIF) |
80 | + | 80 | + |
81 | +struct VMAppleBdifState { | 81 | +struct VMAppleBdifState { |
82 | + SysBusDevice parent_obj; | 82 | + SysBusDevice parent_obj; |
83 | + | 83 | + |
84 | + BlockBackend *aux; | 84 | + BlockBackend *aux; |
85 | + BlockBackend *root; | 85 | + BlockBackend *root; |
86 | + MemoryRegion mmio; | 86 | + MemoryRegion mmio; |
87 | +}; | 87 | +}; |
88 | + | 88 | + |
89 | +#define VMAPPLE_BDIF_SIZE 0x00200000 | 89 | +#define VMAPPLE_BDIF_SIZE 0x00200000 |
90 | + | 90 | + |
91 | +#define REG_DEVID_MASK 0xffff0000 | 91 | +#define REG_DEVID_MASK 0xffff0000 |
92 | +#define DEVID_ROOT 0x00000000 | 92 | +#define DEVID_ROOT 0x00000000 |
93 | +#define DEVID_AUX 0x00010000 | 93 | +#define DEVID_AUX 0x00010000 |
94 | +#define DEVID_USB 0x00100000 | 94 | +#define DEVID_USB 0x00100000 |
95 | + | 95 | + |
96 | +#define REG_STATUS 0x0 | 96 | +#define REG_STATUS 0x0 |
97 | +#define REG_STATUS_ACTIVE BIT(0) | 97 | +#define REG_STATUS_ACTIVE BIT(0) |
98 | +#define REG_CFG 0x4 | 98 | +#define REG_CFG 0x4 |
99 | +#define REG_CFG_ACTIVE BIT(1) | 99 | +#define REG_CFG_ACTIVE BIT(1) |
100 | +#define REG_UNK1 0x8 | 100 | +#define REG_UNK1 0x8 |
101 | +#define REG_BUSY 0x10 | 101 | +#define REG_BUSY 0x10 |
102 | +#define REG_BUSY_READY BIT(0) | 102 | +#define REG_BUSY_READY BIT(0) |
103 | +#define REG_UNK2 0x400 | 103 | +#define REG_UNK2 0x400 |
104 | +#define REG_CMD 0x408 | 104 | +#define REG_CMD 0x408 |
105 | +#define REG_NEXT_DEVICE 0x420 | 105 | +#define REG_NEXT_DEVICE 0x420 |
106 | +#define REG_UNK3 0x434 | 106 | +#define REG_UNK3 0x434 |
107 | + | 107 | + |
108 | +typedef struct VblkSector { | 108 | +typedef struct VblkSector { |
109 | + uint32_t pad; | 109 | + uint32_t pad; |
110 | + uint32_t pad2; | 110 | + uint32_t pad2; |
111 | + uint32_t sector; | 111 | + uint32_t sector; |
112 | + uint32_t pad3; | 112 | + uint32_t pad3; |
113 | +} VblkSector; | 113 | +} VblkSector; |
114 | + | 114 | + |
115 | +typedef struct VblkReqCmd { | 115 | +typedef struct VblkReqCmd { |
116 | + uint64_t addr; | 116 | + uint64_t addr; |
117 | + uint32_t len; | 117 | + uint32_t len; |
118 | + uint32_t flags; | 118 | + uint32_t flags; |
119 | +} VblkReqCmd; | 119 | +} VblkReqCmd; |
120 | + | 120 | + |
121 | +typedef struct VblkReq { | 121 | +typedef struct VblkReq { |
122 | + VblkReqCmd sector; | 122 | + VblkReqCmd sector; |
123 | + VblkReqCmd data; | 123 | + VblkReqCmd data; |
124 | + VblkReqCmd retval; | 124 | + VblkReqCmd retval; |
125 | +} VblkReq; | 125 | +} VblkReq; |
126 | + | 126 | + |
127 | +#define VBLK_DATA_FLAGS_READ 0x00030001 | 127 | +#define VBLK_DATA_FLAGS_READ 0x00030001 |
128 | +#define VBLK_DATA_FLAGS_WRITE 0x00010001 | 128 | +#define VBLK_DATA_FLAGS_WRITE 0x00010001 |
129 | + | 129 | + |
130 | +#define VBLK_RET_SUCCESS 0 | 130 | +#define VBLK_RET_SUCCESS 0 |
131 | +#define VBLK_RET_FAILED 1 | 131 | +#define VBLK_RET_FAILED 1 |
132 | + | 132 | + |
133 | +static uint64_t bdif_read(void *opaque, hwaddr offset, unsigned size) | 133 | +static uint64_t bdif_read(void *opaque, hwaddr offset, unsigned size) |
134 | +{ | 134 | +{ |
135 | + uint64_t ret = -1; | 135 | + uint64_t ret = -1; |
136 | + uint64_t devid = offset & REG_DEVID_MASK; | 136 | + uint64_t devid = offset & REG_DEVID_MASK; |
137 | + | 137 | + |
138 | + switch (offset & ~REG_DEVID_MASK) { | 138 | + switch (offset & ~REG_DEVID_MASK) { |
139 | + case REG_STATUS: | 139 | + case REG_STATUS: |
140 | + ret = REG_STATUS_ACTIVE; | 140 | + ret = REG_STATUS_ACTIVE; |
141 | + break; | 141 | + break; |
142 | + case REG_CFG: | 142 | + case REG_CFG: |
143 | + ret = REG_CFG_ACTIVE; | 143 | + ret = REG_CFG_ACTIVE; |
144 | + break; | 144 | + break; |
145 | + case REG_UNK1: | 145 | + case REG_UNK1: |
146 | + ret = 0x420; | 146 | + ret = 0x420; |
147 | + break; | 147 | + break; |
148 | + case REG_BUSY: | 148 | + case REG_BUSY: |
149 | + ret = REG_BUSY_READY; | 149 | + ret = REG_BUSY_READY; |
150 | + break; | 150 | + break; |
151 | + case REG_UNK2: | 151 | + case REG_UNK2: |
152 | + ret = 0x1; | 152 | + ret = 0x1; |
153 | + break; | 153 | + break; |
154 | + case REG_UNK3: | 154 | + case REG_UNK3: |
155 | + ret = 0x0; | 155 | + ret = 0x0; |
156 | + break; | 156 | + break; |
157 | + case REG_NEXT_DEVICE: | 157 | + case REG_NEXT_DEVICE: |
158 | + switch (devid) { | 158 | + switch (devid) { |
159 | + case DEVID_ROOT: | 159 | + case DEVID_ROOT: |
160 | + ret = 0x8000000; | 160 | + ret = 0x8000000; |
161 | + break; | 161 | + break; |
162 | + case DEVID_AUX: | 162 | + case DEVID_AUX: |
163 | + ret = 0x10000; | 163 | + ret = 0x10000; |
164 | + break; | 164 | + break; |
165 | + } | 165 | + } |
166 | + break; | 166 | + break; |
167 | + } | 167 | + } |
168 | + | 168 | + |
169 | + trace_bdif_read(offset, size, ret); | 169 | + trace_bdif_read(offset, size, ret); |
170 | + return ret; | 170 | + return ret; |
171 | +} | 171 | +} |
172 | + | 172 | + |
173 | +static void le2cpu_sector(VblkSector *sector) | 173 | +static void le2cpu_sector(VblkSector *sector) |
174 | +{ | 174 | +{ |
175 | + sector->sector = le32_to_cpu(sector->sector); | 175 | + sector->sector = le32_to_cpu(sector->sector); |
176 | +} | 176 | +} |
177 | + | 177 | + |
178 | +static void le2cpu_reqcmd(VblkReqCmd *cmd) | 178 | +static void le2cpu_reqcmd(VblkReqCmd *cmd) |
179 | +{ | 179 | +{ |
180 | + cmd->addr = le64_to_cpu(cmd->addr); | 180 | + cmd->addr = le64_to_cpu(cmd->addr); |
181 | + cmd->len = le32_to_cpu(cmd->len); | 181 | + cmd->len = le32_to_cpu(cmd->len); |
182 | + cmd->flags = le32_to_cpu(cmd->flags); | 182 | + cmd->flags = le32_to_cpu(cmd->flags); |
183 | +} | 183 | +} |
184 | + | 184 | + |
185 | +static void le2cpu_req(VblkReq *req) | 185 | +static void le2cpu_req(VblkReq *req) |
186 | +{ | 186 | +{ |
187 | + le2cpu_reqcmd(&req->sector); | 187 | + le2cpu_reqcmd(&req->sector); |
188 | + le2cpu_reqcmd(&req->data); | 188 | + le2cpu_reqcmd(&req->data); |
189 | + le2cpu_reqcmd(&req->retval); | 189 | + le2cpu_reqcmd(&req->retval); |
190 | +} | 190 | +} |
191 | + | 191 | + |
192 | +static void vblk_cmd(uint64_t devid, BlockBackend *blk, uint64_t gp_addr, | 192 | +static void vblk_cmd(uint64_t devid, BlockBackend *blk, uint64_t gp_addr, |
193 | + uint64_t static_off) | 193 | + uint64_t static_off) |
194 | +{ | 194 | +{ |
195 | + VblkReq req; | 195 | + VblkReq req; |
196 | + VblkSector sector; | 196 | + VblkSector sector; |
197 | + uint64_t off = 0; | 197 | + uint64_t off = 0; |
198 | + g_autofree char *buf = NULL; | 198 | + g_autofree char *buf = NULL; |
199 | + uint8_t ret = VBLK_RET_FAILED; | 199 | + uint8_t ret = VBLK_RET_FAILED; |
200 | + int r; | 200 | + int r; |
201 | + MemTxResult dma_result; | 201 | + MemTxResult dma_result; |
202 | + | 202 | + |
203 | + dma_result = dma_memory_read(&address_space_memory, gp_addr, | 203 | + dma_result = dma_memory_read(&address_space_memory, gp_addr, |
204 | + &req, sizeof(req), MEMTXATTRS_UNSPECIFIED); | 204 | + &req, sizeof(req), MEMTXATTRS_UNSPECIFIED); |
205 | + if (dma_result != MEMTX_OK) { | 205 | + if (dma_result != MEMTX_OK) { |
206 | + goto out; | 206 | + goto out; |
207 | + } | 207 | + } |
208 | + | 208 | + |
209 | + le2cpu_req(&req); | 209 | + le2cpu_req(&req); |
210 | + | 210 | + |
211 | + if (req.sector.len != sizeof(sector)) { | 211 | + if (req.sector.len != sizeof(sector)) { |
212 | + goto out; | 212 | + goto out; |
213 | + } | 213 | + } |
214 | + | 214 | + |
215 | + /* Read the vblk command */ | 215 | + /* Read the vblk command */ |
216 | + dma_result = dma_memory_read(&address_space_memory, req.sector.addr, | 216 | + dma_result = dma_memory_read(&address_space_memory, req.sector.addr, |
217 | + &sector, sizeof(sector), | 217 | + &sector, sizeof(sector), |
218 | + MEMTXATTRS_UNSPECIFIED); | 218 | + MEMTXATTRS_UNSPECIFIED); |
219 | + if (dma_result != MEMTX_OK) { | 219 | + if (dma_result != MEMTX_OK) { |
220 | + goto out; | 220 | + goto out; |
221 | + } | 221 | + } |
222 | + le2cpu_sector(&sector); | 222 | + le2cpu_sector(&sector); |
223 | + | 223 | + |
224 | + off = sector.sector * 512ULL + static_off; | 224 | + off = sector.sector * 512ULL + static_off; |
225 | + | 225 | + |
226 | + /* Sanity check that we're not allocating bogus sizes */ | 226 | + /* Sanity check that we're not allocating bogus sizes */ |
227 | + if (req.data.len > 128 * MiB) { | 227 | + if (req.data.len > 128 * MiB) { |
228 | + goto out; | 228 | + goto out; |
229 | + } | 229 | + } |
230 | + | 230 | + |
231 | + buf = g_malloc0(req.data.len); | 231 | + buf = g_malloc0(req.data.len); |
232 | + switch (req.data.flags) { | 232 | + switch (req.data.flags) { |
233 | + case VBLK_DATA_FLAGS_READ: | 233 | + case VBLK_DATA_FLAGS_READ: |
234 | + r = blk_pread(blk, off, req.data.len, buf, 0); | 234 | + r = blk_pread(blk, off, req.data.len, buf, 0); |
235 | + trace_bdif_vblk_read(devid == DEVID_AUX ? "aux" : "root", | 235 | + trace_bdif_vblk_read(devid == DEVID_AUX ? "aux" : "root", |
236 | + req.data.addr, off, req.data.len, r); | 236 | + req.data.addr, off, req.data.len, r); |
237 | + if (r < 0) { | 237 | + if (r < 0) { |
238 | + goto out; | 238 | + goto out; |
239 | + } | 239 | + } |
240 | + dma_result = dma_memory_write(&address_space_memory, req.data.addr, buf, | 240 | + dma_result = dma_memory_write(&address_space_memory, req.data.addr, buf, |
241 | + req.data.len, MEMTXATTRS_UNSPECIFIED); | 241 | + req.data.len, MEMTXATTRS_UNSPECIFIED); |
242 | + if (dma_result == MEMTX_OK) { | 242 | + if (dma_result == MEMTX_OK) { |
243 | + ret = VBLK_RET_SUCCESS; | 243 | + ret = VBLK_RET_SUCCESS; |
244 | + } | 244 | + } |
245 | + break; | 245 | + break; |
246 | + case VBLK_DATA_FLAGS_WRITE: | 246 | + case VBLK_DATA_FLAGS_WRITE: |
247 | + /* Not needed, iBoot only reads */ | 247 | + /* Not needed, iBoot only reads */ |
248 | + break; | 248 | + break; |
249 | + default: | 249 | + default: |
250 | + break; | 250 | + break; |
251 | + } | 251 | + } |
252 | + | 252 | + |
253 | +out: | 253 | +out: |
254 | + dma_memory_write(&address_space_memory, req.retval.addr, &ret, 1, | 254 | + dma_memory_write(&address_space_memory, req.retval.addr, &ret, 1, |
255 | + MEMTXATTRS_UNSPECIFIED); | 255 | + MEMTXATTRS_UNSPECIFIED); |
256 | +} | 256 | +} |
257 | + | 257 | + |
258 | +static void bdif_write(void *opaque, hwaddr offset, | 258 | +static void bdif_write(void *opaque, hwaddr offset, |
259 | + uint64_t value, unsigned size) | 259 | + uint64_t value, unsigned size) |
260 | +{ | 260 | +{ |
261 | + VMAppleBdifState *s = opaque; | 261 | + VMAppleBdifState *s = opaque; |
262 | + uint64_t devid = (offset & REG_DEVID_MASK); | 262 | + uint64_t devid = (offset & REG_DEVID_MASK); |
263 | + | 263 | + |
264 | + trace_bdif_write(offset, size, value); | 264 | + trace_bdif_write(offset, size, value); |
265 | + | 265 | + |
266 | + switch (offset & ~REG_DEVID_MASK) { | 266 | + switch (offset & ~REG_DEVID_MASK) { |
267 | + case REG_CMD: | 267 | + case REG_CMD: |
268 | + switch (devid) { | 268 | + switch (devid) { |
269 | + case DEVID_ROOT: | 269 | + case DEVID_ROOT: |
270 | + vblk_cmd(devid, s->root, value, 0x0); | 270 | + vblk_cmd(devid, s->root, value, 0x0); |
271 | + break; | 271 | + break; |
272 | + case DEVID_AUX: | 272 | + case DEVID_AUX: |
273 | + vblk_cmd(devid, s->aux, value, 0x0); | 273 | + vblk_cmd(devid, s->aux, value, 0x0); |
274 | + break; | 274 | + break; |
275 | + } | 275 | + } |
276 | + break; | 276 | + break; |
277 | + } | 277 | + } |
278 | +} | 278 | +} |
279 | + | 279 | + |
280 | +static const MemoryRegionOps bdif_ops = { | 280 | +static const MemoryRegionOps bdif_ops = { |
281 | + .read = bdif_read, | 281 | + .read = bdif_read, |
282 | + .write = bdif_write, | 282 | + .write = bdif_write, |
283 | + .endianness = DEVICE_NATIVE_ENDIAN, | 283 | + .endianness = DEVICE_NATIVE_ENDIAN, |
284 | + .valid = { | 284 | + .valid = { |
285 | + .min_access_size = 1, | 285 | + .min_access_size = 1, |
286 | + .max_access_size = 8, | 286 | + .max_access_size = 8, |
287 | + }, | 287 | + }, |
288 | + .impl = { | 288 | + .impl = { |
289 | + .min_access_size = 1, | 289 | + .min_access_size = 1, |
290 | + .max_access_size = 8, | 290 | + .max_access_size = 8, |
291 | + }, | 291 | + }, |
292 | +}; | 292 | +}; |
293 | + | 293 | + |
294 | +static void bdif_init(Object *obj) | 294 | +static void bdif_init(Object *obj) |
295 | +{ | 295 | +{ |
296 | + VMAppleBdifState *s = VMAPPLE_BDIF(obj); | 296 | + VMAppleBdifState *s = VMAPPLE_BDIF(obj); |
297 | + | 297 | + |
298 | + memory_region_init_io(&s->mmio, obj, &bdif_ops, obj, | 298 | + memory_region_init_io(&s->mmio, obj, &bdif_ops, obj, |
299 | + "VMApple Backdoor Interface", VMAPPLE_BDIF_SIZE); | 299 | + "VMApple Backdoor Interface", VMAPPLE_BDIF_SIZE); |
300 | + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); | 300 | + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); |
301 | +} | 301 | +} |
302 | + | 302 | + |
303 | +static Property bdif_properties[] = { | 303 | +static Property bdif_properties[] = { |
304 | + DEFINE_PROP_DRIVE("aux", VMAppleBdifState, aux), | 304 | + DEFINE_PROP_DRIVE("aux", VMAppleBdifState, aux), |
305 | + DEFINE_PROP_DRIVE("root", VMAppleBdifState, root), | 305 | + DEFINE_PROP_DRIVE("root", VMAppleBdifState, root), |
306 | + DEFINE_PROP_END_OF_LIST(), | 306 | + DEFINE_PROP_END_OF_LIST(), |
307 | +}; | 307 | +}; |
308 | + | 308 | + |
309 | +static void bdif_class_init(ObjectClass *klass, void *data) | 309 | +static void bdif_class_init(ObjectClass *klass, void *data) |
310 | +{ | 310 | +{ |
311 | + DeviceClass *dc = DEVICE_CLASS(klass); | 311 | + DeviceClass *dc = DEVICE_CLASS(klass); |
312 | + | 312 | + |
313 | + dc->desc = "VMApple Backdoor Interface"; | 313 | + dc->desc = "VMApple Backdoor Interface"; |
314 | + device_class_set_props(dc, bdif_properties); | 314 | + device_class_set_props(dc, bdif_properties); |
315 | +} | 315 | +} |
316 | + | 316 | + |
317 | +static const TypeInfo bdif_info = { | 317 | +static const TypeInfo bdif_info = { |
318 | + .name = TYPE_VMAPPLE_BDIF, | 318 | + .name = TYPE_VMAPPLE_BDIF, |
319 | + .parent = TYPE_SYS_BUS_DEVICE, | 319 | + .parent = TYPE_SYS_BUS_DEVICE, |
320 | + .instance_size = sizeof(VMAppleBdifState), | 320 | + .instance_size = sizeof(VMAppleBdifState), |
321 | + .instance_init = bdif_init, | 321 | + .instance_init = bdif_init, |
322 | + .class_init = bdif_class_init, | 322 | + .class_init = bdif_class_init, |
323 | +}; | 323 | +}; |
324 | + | 324 | + |
325 | +static void bdif_register_types(void) | 325 | +static void bdif_register_types(void) |
326 | +{ | 326 | +{ |
327 | + type_register_static(&bdif_info); | 327 | + type_register_static(&bdif_info); |
328 | +} | 328 | +} |
329 | + | 329 | + |
330 | +type_init(bdif_register_types) | 330 | +type_init(bdif_register_types) |
331 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build | 331 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build |
332 | index XXXXXXX..XXXXXXX 100644 | 332 | index XXXXXXX..XXXXXXX 100644 |
333 | --- a/hw/vmapple/meson.build | 333 | --- a/hw/vmapple/meson.build |
334 | +++ b/hw/vmapple/meson.build | 334 | +++ b/hw/vmapple/meson.build |
335 | @@ -1 +1,2 @@ | 335 | @@ -1 +1,2 @@ |
336 | system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) | 336 | system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) |
337 | +system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c')) | 337 | +system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c')) |
338 | diff --git a/hw/vmapple/trace-events b/hw/vmapple/trace-events | 338 | diff --git a/hw/vmapple/trace-events b/hw/vmapple/trace-events |
339 | index XXXXXXX..XXXXXXX 100644 | 339 | index XXXXXXX..XXXXXXX 100644 |
340 | --- a/hw/vmapple/trace-events | 340 | --- a/hw/vmapple/trace-events |
341 | +++ b/hw/vmapple/trace-events | 341 | +++ b/hw/vmapple/trace-events |
342 | @@ -XXX,XX +XXX,XX @@ aes_2_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 | 342 | @@ -XXX,XX +XXX,XX @@ aes_2_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 |
343 | aes_2_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 | 343 | aes_2_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 |
344 | aes_dump_data(const char *desc, const char *hex) "%s%s" | 344 | aes_dump_data(const char *desc, const char *hex) "%s%s" |
345 | 345 | ||
346 | +# bdif.c | 346 | +# bdif.c |
347 | +bdif_read(uint64_t offset, uint32_t size, uint64_t value) "offset=0x%"PRIx64" size=0x%x value=0x%"PRIx64 | 347 | +bdif_read(uint64_t offset, uint32_t size, uint64_t value) "offset=0x%"PRIx64" size=0x%x value=0x%"PRIx64 |
348 | +bdif_write(uint64_t offset, uint32_t size, uint64_t value) "offset=0x%"PRIx64" size=0x%x value=0x%"PRIx64 | 348 | +bdif_write(uint64_t offset, uint32_t size, uint64_t value) "offset=0x%"PRIx64" size=0x%x value=0x%"PRIx64 |
349 | +bdif_vblk_read(const char *dev, uint64_t addr, uint64_t offset, uint32_t len, int r) "dev=%s addr=0x%"PRIx64" off=0x%"PRIx64" size=0x%x r=%d" | 349 | +bdif_vblk_read(const char *dev, uint64_t addr, uint64_t offset, uint32_t len, int r) "dev=%s addr=0x%"PRIx64" off=0x%"PRIx64" size=0x%x r=%d" |
350 | + | 350 | + |
351 | diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h | 351 | diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h |
352 | index XXXXXXX..XXXXXXX 100644 | 352 | index XXXXXXX..XXXXXXX 100644 |
353 | --- a/include/hw/vmapple/vmapple.h | 353 | --- a/include/hw/vmapple/vmapple.h |
354 | +++ b/include/hw/vmapple/vmapple.h | 354 | +++ b/include/hw/vmapple/vmapple.h |
355 | @@ -XXX,XX +XXX,XX @@ | 355 | @@ -XXX,XX +XXX,XX @@ |
356 | 356 | ||
357 | #define TYPE_APPLE_AES "apple-aes" | 357 | #define TYPE_APPLE_AES "apple-aes" |
358 | 358 | ||
359 | +#define TYPE_VMAPPLE_BDIF "vmapple-bdif" | 359 | +#define TYPE_VMAPPLE_BDIF "vmapple-bdif" |
360 | + | 360 | + |
361 | #endif /* HW_VMAPPLE_VMAPPLE_H */ | 361 | #endif /* HW_VMAPPLE_VMAPPLE_H */ |
362 | -- | 362 | -- |
363 | 2.39.5 (Apple Git-154) | 363 | 2.39.5 (Apple Git-154) |
364 | 364 | ||
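For readers who want to see the backdoor protocol from the other side, here is a rough guest-side sketch of a single read request as vblk_cmd() above would process it. It is illustrative only and not part of the patch: it assumes a little-endian AArch64 guest with flat (identity-mapped) memory, that bdif_base is wherever the machine maps the vmapple-bdif MMIO window, and that the structure layouts and register offsets mirror the definitions earlier in this patch.

    /* Guest-side sketch (illustrative, not from the patch): read one
     * 512-byte sector from the "root" device through the backdoor.
     * All request fields are little-endian, a no-op on this guest. */
    #include <stdint.h>

    struct vblk_req_cmd { uint64_t addr; uint32_t len; uint32_t flags; };
    struct vblk_req     { struct vblk_req_cmd sector, data, retval; };
    struct vblk_sector  { uint32_t pad, pad2, sector, pad3; };

    #define BDIF_ROOT_REG_CMD    0x408u       /* DEVID_ROOT | REG_CMD */
    #define VBLK_DATA_FLAGS_READ 0x00030001u

    static uint8_t bdif_read_root_sector(uintptr_t bdif_base,
                                         uint32_t sector_nr, void *dst)
    {
        static struct vblk_sector sec;
        static struct vblk_req req;
        static volatile uint8_t status;

        status     = 0xff; /* poison; the device overwrites it */
        sec.sector = sector_nr;
        req.sector = (struct vblk_req_cmd){ (uintptr_t)&sec, sizeof(sec), 0 };
        req.data   = (struct vblk_req_cmd){ (uintptr_t)dst, 512,
                                            VBLK_DATA_FLAGS_READ };
        req.retval = (struct vblk_req_cmd){ (uintptr_t)&status, 1, 0 };

        /* The device processes the request synchronously inside the MMIO
         * write handler (bdif_write -> vblk_cmd). */
        *(volatile uint64_t *)(bdif_base + BDIF_ROOT_REG_CMD) = (uintptr_t)&req;

        return status;   /* 0 = VBLK_RET_SUCCESS, 1 = VBLK_RET_FAILED */
    }

The new bdif_read/bdif_write/bdif_vblk_read trace points can be enabled at run time with -trace "bdif_*" to watch such requests go by.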
1 | From: Alexander Graf <graf@amazon.com> | 1 | From: Alexander Graf <graf@amazon.com> |
---|---|---|---|
2 | 2 | ||
3 | Instead of a device tree or other more standardized means, VMApple passes | 3 | Instead of a device tree or other more standardized means, VMApple passes |
4 | platform configuration to the first-stage boot loader in a binary-encoded | 4 | platform configuration to the first-stage boot loader in a binary-encoded |
5 | format that resides at a dedicated RAM region in physical address space. | 5 | format that resides at a dedicated RAM region in physical address space. |
6 | 6 | ||
7 | This patch models this configuration space as a qdev device which we can | 7 | This patch models this configuration space as a qdev device which we can |
8 | then map at the fixed location in the address space. That way, we can | 8 | then map at the fixed location in the address space. That way, we can |
9 | influence and annotate all configuration fields easily. | 9 | influence and annotate all configuration fields easily. |
10 | 10 | ||
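To make the intended use a little more concrete, here is a minimal sketch (not taken from this series) of how a machine's init code could instantiate the region, fill in a few of the properties defined below, and map it at its fixed address. The 0x50000000 base, the function name and the assumption that a MachineState pointer called machine is in scope are all made up for the example, as are the usual includes (hw/sysbus.h, hw/qdev-properties.h, qapi/error.h and this patch's vmapple.h); the real wiring belongs in whatever machine code consumes this device.

    /* Illustrative sketch only (not from the series): create the config
     * region, set a few of its properties and map it at an example base. */
    static void create_cfg_region(MachineState *machine)
    {
        DeviceState *cfg = qdev_new(TYPE_VMAPPLE_CFG);

        qdev_prop_set_uint32(cfg, "nr-cpus", machine->smp.cpus);
        qdev_prop_set_uint64(cfg, "ram-size", machine->ram_size);
        qdev_prop_set_string(cfg, "serial", "QEMU0001");

        sysbus_realize_and_unref(SYS_BUS_DEVICE(cfg), &error_fatal);
        sysbus_mmio_map(SYS_BUS_DEVICE(cfg), 0, 0x50000000ULL /* example base */);
    }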
11 | Signed-off-by: Alexander Graf <graf@amazon.com> | 11 | Signed-off-by: Alexander Graf <graf@amazon.com> |
12 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 12 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
13 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 13 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
14 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 14 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
15 | --- | 15 | --- |
16 | 16 | ||
17 | v3: | 17 | v3: |
18 | 18 | ||
19 | * Replaced legacy device reset method with Resettable method | 19 | * Replaced legacy device reset method with Resettable method |
20 | 20 | ||
21 | v4: | 21 | v4: |
22 | 22 | ||
23 | * Fixed initialisation of default values for properties | 23 | * Fixed initialisation of default values for properties |
24 | * Dropped superfluous endianness conversions | 24 | * Dropped superfluous endianness conversions |
25 | * Moved most header code to .c, device name #define goes in vmapple.h | 25 | * Moved most header code to .c, device name #define goes in vmapple.h |
26 | 26 | ||
27 | v5: | 27 | v5: |
28 | 28 | ||
29 | * Improved error reporting in case of string property buffer overflow. | 29 | * Improved error reporting in case of string property buffer overflow. |
30 | 30 | ||
31 | v7: | 31 | v7: |
32 | 32 | ||
33 | * Changed error messages for overrun of properties with | 33 | * Changed error messages for overrun of properties with |
34 | fixed-length strings to be more useful to users than developers. | 34 | fixed-length strings to be more useful to users than developers. |
35 | 35 | ||
36 | v8: | 36 | v8: |
37 | 37 | ||
38 | * Consistent parenthesising of macro arguments for better safety. | 38 | * Consistent parenthesising of macro arguments for better safety. |
39 | 39 | ||
40 | v10: | 40 | v10: |
41 | 41 | ||
42 | * Slightly tidier error reporting for overlong property values. | 42 | * Slightly tidier error reporting for overlong property values. |
43 | 43 | ||
44 | hw/vmapple/Kconfig | 3 + | 44 | hw/vmapple/Kconfig | 3 + |
45 | hw/vmapple/cfg.c | 196 +++++++++++++++++++++++++++++++++++ | 45 | hw/vmapple/cfg.c | 196 +++++++++++++++++++++++++++++++++++ |
46 | hw/vmapple/meson.build | 1 + | 46 | hw/vmapple/meson.build | 1 + |
47 | include/hw/vmapple/vmapple.h | 2 + | 47 | include/hw/vmapple/vmapple.h | 2 + |
48 | 4 files changed, 202 insertions(+) | 48 | 4 files changed, 202 insertions(+) |
49 | create mode 100644 hw/vmapple/cfg.c | 49 | create mode 100644 hw/vmapple/cfg.c |
50 | 50 | ||
51 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig | 51 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig |
52 | index XXXXXXX..XXXXXXX 100644 | 52 | index XXXXXXX..XXXXXXX 100644 |
53 | --- a/hw/vmapple/Kconfig | 53 | --- a/hw/vmapple/Kconfig |
54 | +++ b/hw/vmapple/Kconfig | 54 | +++ b/hw/vmapple/Kconfig |
55 | @@ -XXX,XX +XXX,XX @@ config VMAPPLE_AES | 55 | @@ -XXX,XX +XXX,XX @@ config VMAPPLE_AES |
56 | config VMAPPLE_BDIF | 56 | config VMAPPLE_BDIF |
57 | bool | 57 | bool |
58 | 58 | ||
59 | +config VMAPPLE_CFG | 59 | +config VMAPPLE_CFG |
60 | + bool | 60 | + bool |
61 | + | 61 | + |
62 | diff --git a/hw/vmapple/cfg.c b/hw/vmapple/cfg.c | 62 | diff --git a/hw/vmapple/cfg.c b/hw/vmapple/cfg.c |
63 | new file mode 100644 | 63 | new file mode 100644 |
64 | index XXXXXXX..XXXXXXX | 64 | index XXXXXXX..XXXXXXX |
65 | --- /dev/null | 65 | --- /dev/null |
66 | +++ b/hw/vmapple/cfg.c | 66 | +++ b/hw/vmapple/cfg.c |
67 | @@ -XXX,XX +XXX,XX @@ | 67 | @@ -XXX,XX +XXX,XX @@ |
68 | +/* | 68 | +/* |
69 | + * VMApple Configuration Region | 69 | + * VMApple Configuration Region |
70 | + * | 70 | + * |
71 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | 71 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
72 | + * | 72 | + * |
73 | + * SPDX-License-Identifier: GPL-2.0-or-later | 73 | + * SPDX-License-Identifier: GPL-2.0-or-later |
74 | + * | 74 | + * |
75 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | 75 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. |
76 | + * See the COPYING file in the top-level directory. | 76 | + * See the COPYING file in the top-level directory. |
77 | + */ | 77 | + */ |
78 | + | 78 | + |
79 | +#include "qemu/osdep.h" | 79 | +#include "qemu/osdep.h" |
80 | +#include "hw/vmapple/vmapple.h" | 80 | +#include "hw/vmapple/vmapple.h" |
81 | +#include "hw/sysbus.h" | 81 | +#include "hw/sysbus.h" |
82 | +#include "qemu/log.h" | 82 | +#include "qemu/log.h" |
83 | +#include "qemu/module.h" | 83 | +#include "qemu/module.h" |
84 | +#include "qapi/error.h" | 84 | +#include "qapi/error.h" |
85 | +#include "net/net.h" | 85 | +#include "net/net.h" |
86 | + | 86 | + |
87 | +OBJECT_DECLARE_SIMPLE_TYPE(VMAppleCfgState, VMAPPLE_CFG) | 87 | +OBJECT_DECLARE_SIMPLE_TYPE(VMAppleCfgState, VMAPPLE_CFG) |
88 | + | 88 | + |
89 | +#define VMAPPLE_CFG_SIZE 0x00010000 | 89 | +#define VMAPPLE_CFG_SIZE 0x00010000 |
90 | + | 90 | + |
91 | +typedef struct VMAppleCfg { | 91 | +typedef struct VMAppleCfg { |
92 | + uint32_t version; /* 0x000 */ | 92 | + uint32_t version; /* 0x000 */ |
93 | + uint32_t nr_cpus; /* 0x004 */ | 93 | + uint32_t nr_cpus; /* 0x004 */ |
94 | + uint32_t unk1; /* 0x008 */ | 94 | + uint32_t unk1; /* 0x008 */ |
95 | + uint32_t unk2; /* 0x00c */ | 95 | + uint32_t unk2; /* 0x00c */ |
96 | + uint32_t unk3; /* 0x010 */ | 96 | + uint32_t unk3; /* 0x010 */ |
97 | + uint32_t unk4; /* 0x014 */ | 97 | + uint32_t unk4; /* 0x014 */ |
98 | + uint64_t ecid; /* 0x018 */ | 98 | + uint64_t ecid; /* 0x018 */ |
99 | + uint64_t ram_size; /* 0x020 */ | 99 | + uint64_t ram_size; /* 0x020 */ |
100 | + uint32_t run_installer1; /* 0x028 */ | 100 | + uint32_t run_installer1; /* 0x028 */ |
101 | + uint32_t unk5; /* 0x02c */ | 101 | + uint32_t unk5; /* 0x02c */ |
102 | + uint32_t unk6; /* 0x030 */ | 102 | + uint32_t unk6; /* 0x030 */ |
103 | + uint32_t run_installer2; /* 0x034 */ | 103 | + uint32_t run_installer2; /* 0x034 */ |
104 | + uint32_t rnd; /* 0x038 */ | 104 | + uint32_t rnd; /* 0x038 */ |
105 | + uint32_t unk7; /* 0x03c */ | 105 | + uint32_t unk7; /* 0x03c */ |
106 | + MACAddr mac_en0; /* 0x040 */ | 106 | + MACAddr mac_en0; /* 0x040 */ |
107 | + uint8_t pad1[2]; | 107 | + uint8_t pad1[2]; |
108 | + MACAddr mac_en1; /* 0x048 */ | 108 | + MACAddr mac_en1; /* 0x048 */ |
109 | + uint8_t pad2[2]; | 109 | + uint8_t pad2[2]; |
110 | + MACAddr mac_wifi0; /* 0x050 */ | 110 | + MACAddr mac_wifi0; /* 0x050 */ |
111 | + uint8_t pad3[2]; | 111 | + uint8_t pad3[2]; |
112 | + MACAddr mac_bt0; /* 0x058 */ | 112 | + MACAddr mac_bt0; /* 0x058 */ |
113 | + uint8_t pad4[2]; | 113 | + uint8_t pad4[2]; |
114 | + uint8_t reserved[0xa0]; /* 0x060 */ | 114 | + uint8_t reserved[0xa0]; /* 0x060 */ |
115 | + uint32_t cpu_ids[0x80]; /* 0x100 */ | 115 | + uint32_t cpu_ids[0x80]; /* 0x100 */ |
116 | + uint8_t scratch[0x200]; /* 0x180 */ | 116 | + uint8_t scratch[0x200]; /* 0x180 */ |
117 | + char serial[32]; /* 0x380 */ | 117 | + char serial[32]; /* 0x380 */ |
118 | + char unk8[32]; /* 0x3a0 */ | 118 | + char unk8[32]; /* 0x3a0 */ |
119 | + char model[32]; /* 0x3c0 */ | 119 | + char model[32]; /* 0x3c0 */ |
120 | + uint8_t unk9[32]; /* 0x3e0 */ | 120 | + uint8_t unk9[32]; /* 0x3e0 */ |
121 | + uint32_t unk10; /* 0x400 */ | 121 | + uint32_t unk10; /* 0x400 */ |
122 | + char soc_name[32]; /* 0x404 */ | 122 | + char soc_name[32]; /* 0x404 */ |
123 | +} VMAppleCfg; | 123 | +} VMAppleCfg; |
124 | + | 124 | + |
125 | +struct VMAppleCfgState { | 125 | +struct VMAppleCfgState { |
126 | + SysBusDevice parent_obj; | 126 | + SysBusDevice parent_obj; |
127 | + VMAppleCfg cfg; | 127 | + VMAppleCfg cfg; |
128 | + | 128 | + |
129 | + MemoryRegion mem; | 129 | + MemoryRegion mem; |
130 | + char *serial; | 130 | + char *serial; |
131 | + char *model; | 131 | + char *model; |
132 | + char *soc_name; | 132 | + char *soc_name; |
133 | +}; | 133 | +}; |
134 | + | 134 | + |
135 | +static void vmapple_cfg_reset(Object *obj, ResetType type) | 135 | +static void vmapple_cfg_reset(Object *obj, ResetType type) |
136 | +{ | 136 | +{ |
137 | + VMAppleCfgState *s = VMAPPLE_CFG(obj); | 137 | + VMAppleCfgState *s = VMAPPLE_CFG(obj); |
138 | + VMAppleCfg *cfg; | 138 | + VMAppleCfg *cfg; |
139 | + | 139 | + |
140 | + cfg = memory_region_get_ram_ptr(&s->mem); | 140 | + cfg = memory_region_get_ram_ptr(&s->mem); |
141 | + memset(cfg, 0, VMAPPLE_CFG_SIZE); | 141 | + memset(cfg, 0, VMAPPLE_CFG_SIZE); |
142 | + *cfg = s->cfg; | 142 | + *cfg = s->cfg; |
143 | +} | 143 | +} |
144 | + | 144 | + |
145 | +static bool set_fixlen_property_or_error(char *restrict dst, | 145 | +static bool set_fixlen_property_or_error(char *restrict dst, |
146 | + const char *restrict src, | 146 | + const char *restrict src, |
147 | + size_t dst_size, Error **errp, | 147 | + size_t dst_size, Error **errp, |
148 | + const char *property_name) | 148 | + const char *property_name) |
149 | +{ | 149 | +{ |
150 | + ERRP_GUARD(); | 150 | + ERRP_GUARD(); |
151 | + size_t len; | 151 | + size_t len; |
152 | + | 152 | + |
153 | + len = g_strlcpy(dst, src, dst_size); | 153 | + len = g_strlcpy(dst, src, dst_size); |
154 | + if (len < dst_size) { /* len does not count nul terminator */ | 154 | + if (len < dst_size) { /* len does not count nul terminator */ |
155 | + return true; | 155 | + return true; |
156 | + } | 156 | + } |
157 | + | 157 | + |
158 | + error_setg(errp, "Provided value too long for property '%s'", property_name); | 158 | + error_setg(errp, "Provided value too long for property '%s'", property_name); |
159 | + error_append_hint(errp, "length (%zu) exceeds maximum of %zu\n", | 159 | + error_append_hint(errp, "length (%zu) exceeds maximum of %zu\n", |
160 | + len, dst_size - 1); | 160 | + len, dst_size - 1); |
161 | + return false; | 161 | + return false; |
162 | +} | 162 | +} |
163 | + | 163 | + |
164 | +#define set_fixlen_property_or_return(dst_array, src, errp, property_name) \ | 164 | +#define set_fixlen_property_or_return(dst_array, src, errp, property_name) \ |
165 | + do { \ | 165 | + do { \ |
166 | + if (!set_fixlen_property_or_error((dst_array), (src), \ | 166 | + if (!set_fixlen_property_or_error((dst_array), (src), \ |
167 | + ARRAY_SIZE(dst_array), \ | 167 | + ARRAY_SIZE(dst_array), \ |
168 | + (errp), (property_name))) { \ | 168 | + (errp), (property_name))) { \ |
169 | + return; \ | 169 | + return; \ |
170 | + } \ | 170 | + } \ |
171 | + } while (0) | 171 | + } while (0) |
172 | + | 172 | + |
173 | +static void vmapple_cfg_realize(DeviceState *dev, Error **errp) | 173 | +static void vmapple_cfg_realize(DeviceState *dev, Error **errp) |
174 | +{ | 174 | +{ |
175 | + VMAppleCfgState *s = VMAPPLE_CFG(dev); | 175 | + VMAppleCfgState *s = VMAPPLE_CFG(dev); |
176 | + uint32_t i; | 176 | + uint32_t i; |
177 | + | 177 | + |
178 | + if (!s->serial) { | 178 | + if (!s->serial) { |
179 | + s->serial = g_strdup("1234"); | 179 | + s->serial = g_strdup("1234"); |
180 | + } | 180 | + } |
181 | + if (!s->model) { | 181 | + if (!s->model) { |
182 | + s->model = g_strdup("VM0001"); | 182 | + s->model = g_strdup("VM0001"); |
183 | + } | 183 | + } |
184 | + if (!s->soc_name) { | 184 | + if (!s->soc_name) { |
185 | + s->soc_name = g_strdup("Apple M1 (Virtual)"); | 185 | + s->soc_name = g_strdup("Apple M1 (Virtual)"); |
186 | + } | 186 | + } |
187 | + | 187 | + |
188 | + set_fixlen_property_or_return(s->cfg.serial, s->serial, errp, "serial"); | 188 | + set_fixlen_property_or_return(s->cfg.serial, s->serial, errp, "serial"); |
189 | + set_fixlen_property_or_return(s->cfg.model, s->model, errp, "model"); | 189 | + set_fixlen_property_or_return(s->cfg.model, s->model, errp, "model"); |
190 | + set_fixlen_property_or_return(s->cfg.soc_name, s->soc_name, errp, "soc_name"); | 190 | + set_fixlen_property_or_return(s->cfg.soc_name, s->soc_name, errp, "soc_name"); |
191 | + set_fixlen_property_or_return(s->cfg.unk8, "D/A", errp, "unk8"); | 191 | + set_fixlen_property_or_return(s->cfg.unk8, "D/A", errp, "unk8"); |
192 | + s->cfg.version = 2; | 192 | + s->cfg.version = 2; |
193 | + s->cfg.unk1 = 1; | 193 | + s->cfg.unk1 = 1; |
194 | + s->cfg.unk2 = 1; | 194 | + s->cfg.unk2 = 1; |
195 | + s->cfg.unk3 = 0x20; | 195 | + s->cfg.unk3 = 0x20; |
196 | + s->cfg.unk4 = 0; | 196 | + s->cfg.unk4 = 0; |
197 | + s->cfg.unk5 = 1; | 197 | + s->cfg.unk5 = 1; |
198 | + s->cfg.unk6 = 1; | 198 | + s->cfg.unk6 = 1; |
199 | + s->cfg.unk7 = 0; | 199 | + s->cfg.unk7 = 0; |
200 | + s->cfg.unk10 = 1; | 200 | + s->cfg.unk10 = 1; |
201 | + | 201 | + |
202 | + if (s->cfg.nr_cpus > ARRAY_SIZE(s->cfg.cpu_ids)) { | 202 | + if (s->cfg.nr_cpus > ARRAY_SIZE(s->cfg.cpu_ids)) { |
203 | + error_setg(errp, | 203 | + error_setg(errp, |
204 | + "Failed to create %u CPUs, vmapple machine supports %zu max", | 204 | + "Failed to create %u CPUs, vmapple machine supports %zu max", |
205 | + s->cfg.nr_cpus, ARRAY_SIZE(s->cfg.cpu_ids)); | 205 | + s->cfg.nr_cpus, ARRAY_SIZE(s->cfg.cpu_ids)); |
206 | + return; | 206 | + return; |
207 | + } | 207 | + } |
208 | + for (i = 0; i < s->cfg.nr_cpus; i++) { | 208 | + for (i = 0; i < s->cfg.nr_cpus; i++) { |
209 | + s->cfg.cpu_ids[i] = i; | 209 | + s->cfg.cpu_ids[i] = i; |
210 | + } | 210 | + } |
211 | +} | 211 | +} |
212 | + | 212 | + |
213 | +static void vmapple_cfg_init(Object *obj) | 213 | +static void vmapple_cfg_init(Object *obj) |
214 | +{ | 214 | +{ |
215 | + VMAppleCfgState *s = VMAPPLE_CFG(obj); | 215 | + VMAppleCfgState *s = VMAPPLE_CFG(obj); |
216 | + | 216 | + |
217 | + memory_region_init_ram(&s->mem, obj, "VMApple Config", VMAPPLE_CFG_SIZE, | 217 | + memory_region_init_ram(&s->mem, obj, "VMApple Config", VMAPPLE_CFG_SIZE, |
218 | + &error_fatal); | 218 | + &error_fatal); |
219 | + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mem); | 219 | + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mem); |
220 | +} | 220 | +} |
221 | + | 221 | + |
222 | +static Property vmapple_cfg_properties[] = { | 222 | +static Property vmapple_cfg_properties[] = { |
223 | + DEFINE_PROP_UINT32("nr-cpus", VMAppleCfgState, cfg.nr_cpus, 1), | 223 | + DEFINE_PROP_UINT32("nr-cpus", VMAppleCfgState, cfg.nr_cpus, 1), |
224 | + DEFINE_PROP_UINT64("ecid", VMAppleCfgState, cfg.ecid, 0), | 224 | + DEFINE_PROP_UINT64("ecid", VMAppleCfgState, cfg.ecid, 0), |
225 | + DEFINE_PROP_UINT64("ram-size", VMAppleCfgState, cfg.ram_size, 0), | 225 | + DEFINE_PROP_UINT64("ram-size", VMAppleCfgState, cfg.ram_size, 0), |
226 | + DEFINE_PROP_UINT32("run_installer1", VMAppleCfgState, cfg.run_installer1, 0), | 226 | + DEFINE_PROP_UINT32("run_installer1", VMAppleCfgState, cfg.run_installer1, 0), |
227 | + DEFINE_PROP_UINT32("run_installer2", VMAppleCfgState, cfg.run_installer2, 0), | 227 | + DEFINE_PROP_UINT32("run_installer2", VMAppleCfgState, cfg.run_installer2, 0), |
228 | + DEFINE_PROP_UINT32("rnd", VMAppleCfgState, cfg.rnd, 0), | 228 | + DEFINE_PROP_UINT32("rnd", VMAppleCfgState, cfg.rnd, 0), |
229 | + DEFINE_PROP_MACADDR("mac-en0", VMAppleCfgState, cfg.mac_en0), | 229 | + DEFINE_PROP_MACADDR("mac-en0", VMAppleCfgState, cfg.mac_en0), |
230 | + DEFINE_PROP_MACADDR("mac-en1", VMAppleCfgState, cfg.mac_en1), | 230 | + DEFINE_PROP_MACADDR("mac-en1", VMAppleCfgState, cfg.mac_en1), |
231 | + DEFINE_PROP_MACADDR("mac-wifi0", VMAppleCfgState, cfg.mac_wifi0), | 231 | + DEFINE_PROP_MACADDR("mac-wifi0", VMAppleCfgState, cfg.mac_wifi0), |
232 | + DEFINE_PROP_MACADDR("mac-bt0", VMAppleCfgState, cfg.mac_bt0), | 232 | + DEFINE_PROP_MACADDR("mac-bt0", VMAppleCfgState, cfg.mac_bt0), |
233 | + DEFINE_PROP_STRING("serial", VMAppleCfgState, serial), | 233 | + DEFINE_PROP_STRING("serial", VMAppleCfgState, serial), |
234 | + DEFINE_PROP_STRING("model", VMAppleCfgState, model), | 234 | + DEFINE_PROP_STRING("model", VMAppleCfgState, model), |
235 | + DEFINE_PROP_STRING("soc_name", VMAppleCfgState, soc_name), | 235 | + DEFINE_PROP_STRING("soc_name", VMAppleCfgState, soc_name), |
236 | + DEFINE_PROP_END_OF_LIST(), | 236 | + DEFINE_PROP_END_OF_LIST(), |
237 | +}; | 237 | +}; |
238 | + | 238 | + |
239 | +static void vmapple_cfg_class_init(ObjectClass *klass, void *data) | 239 | +static void vmapple_cfg_class_init(ObjectClass *klass, void *data) |
240 | +{ | 240 | +{ |
241 | + DeviceClass *dc = DEVICE_CLASS(klass); | 241 | + DeviceClass *dc = DEVICE_CLASS(klass); |
242 | + ResettableClass *rc = RESETTABLE_CLASS(klass); | 242 | + ResettableClass *rc = RESETTABLE_CLASS(klass); |
243 | + | 243 | + |
244 | + dc->realize = vmapple_cfg_realize; | 244 | + dc->realize = vmapple_cfg_realize; |
245 | + dc->desc = "VMApple Configuration Region"; | 245 | + dc->desc = "VMApple Configuration Region"; |
246 | + device_class_set_props(dc, vmapple_cfg_properties); | 246 | + device_class_set_props(dc, vmapple_cfg_properties); |
247 | + rc->phases.hold = vmapple_cfg_reset; | 247 | + rc->phases.hold = vmapple_cfg_reset; |
248 | +} | 248 | +} |
249 | + | 249 | + |
250 | +static const TypeInfo vmapple_cfg_info = { | 250 | +static const TypeInfo vmapple_cfg_info = { |
251 | + .name = TYPE_VMAPPLE_CFG, | 251 | + .name = TYPE_VMAPPLE_CFG, |
252 | + .parent = TYPE_SYS_BUS_DEVICE, | 252 | + .parent = TYPE_SYS_BUS_DEVICE, |
253 | + .instance_size = sizeof(VMAppleCfgState), | 253 | + .instance_size = sizeof(VMAppleCfgState), |
254 | + .instance_init = vmapple_cfg_init, | 254 | + .instance_init = vmapple_cfg_init, |
255 | + .class_init = vmapple_cfg_class_init, | 255 | + .class_init = vmapple_cfg_class_init, |
256 | +}; | 256 | +}; |
257 | + | 257 | + |
258 | +static void vmapple_cfg_register_types(void) | 258 | +static void vmapple_cfg_register_types(void) |
259 | +{ | 259 | +{ |
260 | + type_register_static(&vmapple_cfg_info); | 260 | + type_register_static(&vmapple_cfg_info); |
261 | +} | 261 | +} |
262 | + | 262 | + |
263 | +type_init(vmapple_cfg_register_types) | 263 | +type_init(vmapple_cfg_register_types) |
264 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build | 264 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build |
265 | index XXXXXXX..XXXXXXX 100644 | 265 | index XXXXXXX..XXXXXXX 100644 |
266 | --- a/hw/vmapple/meson.build | 266 | --- a/hw/vmapple/meson.build |
267 | +++ b/hw/vmapple/meson.build | 267 | +++ b/hw/vmapple/meson.build |
268 | @@ -XXX,XX +XXX,XX @@ | 268 | @@ -XXX,XX +XXX,XX @@ |
269 | system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) | 269 | system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) |
270 | system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c')) | 270 | system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c')) |
271 | +system_ss.add(when: 'CONFIG_VMAPPLE_CFG', if_true: files('cfg.c')) | 271 | +system_ss.add(when: 'CONFIG_VMAPPLE_CFG', if_true: files('cfg.c')) |
272 | diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h | 272 | diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h |
273 | index XXXXXXX..XXXXXXX 100644 | 273 | index XXXXXXX..XXXXXXX 100644 |
274 | --- a/include/hw/vmapple/vmapple.h | 274 | --- a/include/hw/vmapple/vmapple.h |
275 | +++ b/include/hw/vmapple/vmapple.h | 275 | +++ b/include/hw/vmapple/vmapple.h |
276 | @@ -XXX,XX +XXX,XX @@ | 276 | @@ -XXX,XX +XXX,XX @@ |
277 | 277 | ||
278 | #define TYPE_VMAPPLE_BDIF "vmapple-bdif" | 278 | #define TYPE_VMAPPLE_BDIF "vmapple-bdif" |
279 | 279 | ||
280 | +#define TYPE_VMAPPLE_CFG "vmapple-cfg" | 280 | +#define TYPE_VMAPPLE_CFG "vmapple-cfg" |
281 | + | 281 | + |
282 | #endif /* HW_VMAPPLE_VMAPPLE_H */ | 282 | #endif /* HW_VMAPPLE_VMAPPLE_H */ |
283 | -- | 283 | -- |
284 | 2.39.5 (Apple Git-154) | 284 | 2.39.5 (Apple Git-154) |
285 | 285 | ||
1 | From: Alexander Graf <graf@amazon.com> | 1 | From: Alexander Graf <graf@amazon.com> |
---|---|---|---|
2 | 2 | ||
3 | Apple has its own virtio-blk PCI device ID, for which it deviates slightly | 3 | Apple has its own virtio-blk PCI device ID, for which it deviates slightly |
4 | from the official virtio-pci spec: it puts a new "apple type" | 4 | from the official virtio-pci spec: it puts a new "apple type" |
5 | field at a static offset in config space and introduces a new barrier | 5 | field at a static offset in config space and introduces a new barrier |
6 | command. | 6 | command. |
7 | 7 | ||
8 | This patch first creates a mechanism for virtio-blk downstream classes to | 8 | This patch first creates a mechanism for virtio-blk downstream classes to |
9 | handle unknown commands. It then creates such a downstream class and a new | 9 | handle unknown commands. It then creates such a downstream class and a new |
10 | vmapple-virtio-blk-pci class which support the additional apple type config | 10 | vmapple-virtio-blk-pci class which support the additional apple type config |
11 | identifier as well as the barrier command. | 11 | identifier as well as the barrier command. |
12 | 12 | ||
13 | The 'aux' or 'root' device type are selected using the 'variant' property. | 13 | The 'aux' or 'root' device type are selected using the 'variant' property. |
14 | 14 | ||
15 | Signed-off-by: Alexander Graf <graf@amazon.com> | 15 | Signed-off-by: Alexander Graf <graf@amazon.com> |
16 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 16 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
17 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 17 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
18 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 18 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
19 | --- | 19 | --- |
20 | 20 | ||
21 | v4: | 21 | v4: |
22 | 22 | ||
23 | * Use recommended object type declaration pattern. | 23 | * Use recommended object type declaration pattern. |
24 | * Correctly log unimplemented code paths. | 24 | * Correctly log unimplemented code paths. |
25 | * Most header code moved to .c, type name #defines moved to vmapple.h | 25 | * Most header code moved to .c, type name #defines moved to vmapple.h |
26 | 26 | ||
27 | v5: | 27 | v5: |
28 | 28 | ||
29 | * Corrected handling of potentially unaligned writes to virtio config area. | 29 | * Corrected handling of potentially unaligned writes to virtio config area. |
30 | * Simplified passing through device variant type to subobject. | 30 | * Simplified passing through device variant type to subobject. |
31 | 31 | ||
32 | v9: | 32 | v9: |
33 | 33 | ||
34 | * Correctly specify class_size for VMAppleVirtIOBlkClass | 34 | * Correctly specify class_size for VMAppleVirtIOBlkClass |
35 | 35 | ||
36 | v10: | 36 | v10: |
37 | 37 | ||
38 | * Folded v9 patch 16/16 into this one, changing the device type design to | 38 | * Folded v9 patch 16/16 into this one, changing the device type design to |
39 | provide a single device type with a variant property instead of 2 different | 39 | provide a single device type with a variant property instead of 2 different |
40 | subtypes for aux and root volumes. | 40 | subtypes for aux and root volumes. |
41 | * Tidied up error reporting for the variant property. | 41 | * Tidied up error reporting for the variant property. |
42 | 42 | ||
43 | hw/block/virtio-blk.c | 19 ++- | 43 | hw/block/virtio-blk.c | 19 ++- |
44 | hw/core/qdev-properties-system.c | 8 ++ | 44 | hw/core/qdev-properties-system.c | 8 ++ |
45 | hw/vmapple/Kconfig | 3 + | 45 | hw/vmapple/Kconfig | 3 + |
46 | hw/vmapple/meson.build | 1 + | 46 | hw/vmapple/meson.build | 1 + |
47 | hw/vmapple/virtio-blk.c | 205 ++++++++++++++++++++++++++++ | 47 | hw/vmapple/virtio-blk.c | 205 ++++++++++++++++++++++++++++ |
48 | include/hw/pci/pci_ids.h | 1 + | 48 | include/hw/pci/pci_ids.h | 1 + |
49 | include/hw/qdev-properties-system.h | 5 + | 49 | include/hw/qdev-properties-system.h | 5 + |
50 | include/hw/virtio/virtio-blk.h | 12 +- | 50 | include/hw/virtio/virtio-blk.h | 12 +- |
51 | include/hw/vmapple/vmapple.h | 2 + | 51 | include/hw/vmapple/vmapple.h | 2 + |
52 | qapi/virtio.json | 14 ++ | 52 | qapi/virtio.json | 14 ++ |
53 | 10 files changed, 265 insertions(+), 5 deletions(-) | 53 | 10 files changed, 265 insertions(+), 5 deletions(-) |
54 | create mode 100644 hw/vmapple/virtio-blk.c | 54 | create mode 100644 hw/vmapple/virtio-blk.c |
55 | 55 | ||
56 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | 56 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c |
57 | index XXXXXXX..XXXXXXX 100644 | 57 | index XXXXXXX..XXXXXXX 100644 |
58 | --- a/hw/block/virtio-blk.c | 58 | --- a/hw/block/virtio-blk.c |
59 | +++ b/hw/block/virtio-blk.c | 59 | +++ b/hw/block/virtio-blk.c |
60 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, | 60 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, |
61 | req->mr_next = NULL; | 61 | req->mr_next = NULL; |
62 | } | 62 | } |
63 | 63 | ||
64 | -static void virtio_blk_free_request(VirtIOBlockReq *req) | 64 | -static void virtio_blk_free_request(VirtIOBlockReq *req) |
65 | +void virtio_blk_free_request(VirtIOBlockReq *req) | 65 | +void virtio_blk_free_request(VirtIOBlockReq *req) |
66 | { | 66 | { |
67 | g_free(req); | 67 | g_free(req); |
68 | } | 68 | } |
69 | 69 | ||
70 | -static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) | 70 | -static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) |
71 | +void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) | 71 | +void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) |
72 | { | 72 | { |
73 | VirtIOBlock *s = req->dev; | 73 | VirtIOBlock *s = req->dev; |
74 | VirtIODevice *vdev = VIRTIO_DEVICE(s); | 74 | VirtIODevice *vdev = VIRTIO_DEVICE(s); |
75 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | 75 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) |
76 | break; | 76 | break; |
77 | } | 77 | } |
78 | default: | 78 | default: |
79 | - virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); | 79 | - virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); |
80 | - virtio_blk_free_request(req); | 80 | - virtio_blk_free_request(req); |
81 | + { | 81 | + { |
82 | + /* | 82 | + /* |
83 | + * Give subclasses a chance to handle unknown requests. This way the | 83 | + * Give subclasses a chance to handle unknown requests. This way the |
84 | + * class lookup is not in the hot path. | 84 | + * class lookup is not in the hot path. |
85 | + */ | 85 | + */ |
86 | + VirtIOBlkClass *vbk = VIRTIO_BLK_GET_CLASS(s); | 86 | + VirtIOBlkClass *vbk = VIRTIO_BLK_GET_CLASS(s); |
87 | + if (!vbk->handle_unknown_request || | 87 | + if (!vbk->handle_unknown_request || |
88 | + !vbk->handle_unknown_request(req, mrb, type)) { | 88 | + !vbk->handle_unknown_request(req, mrb, type)) { |
89 | + virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); | 89 | + virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); |
90 | + virtio_blk_free_request(req); | 90 | + virtio_blk_free_request(req); |
91 | + } | 91 | + } |
92 | + } | 92 | + } |
93 | } | 93 | } |
94 | return 0; | 94 | return 0; |
95 | } | 95 | } |
96 | @@ -XXX,XX +XXX,XX @@ static const TypeInfo virtio_blk_info = { | 96 | @@ -XXX,XX +XXX,XX @@ static const TypeInfo virtio_blk_info = { |
97 | .instance_size = sizeof(VirtIOBlock), | 97 | .instance_size = sizeof(VirtIOBlock), |
98 | .instance_init = virtio_blk_instance_init, | 98 | .instance_init = virtio_blk_instance_init, |
99 | .class_init = virtio_blk_class_init, | 99 | .class_init = virtio_blk_class_init, |
100 | + .class_size = sizeof(VirtIOBlkClass), | 100 | + .class_size = sizeof(VirtIOBlkClass), |
101 | }; | 101 | }; |
102 | 102 | ||
103 | static void virtio_register_types(void) | 103 | static void virtio_register_types(void) |
104 | diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c | 104 | diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c |
105 | index XXXXXXX..XXXXXXX 100644 | 105 | index XXXXXXX..XXXXXXX 100644 |
106 | --- a/hw/core/qdev-properties-system.c | 106 | --- a/hw/core/qdev-properties-system.c |
107 | +++ b/hw/core/qdev-properties-system.c | 107 | +++ b/hw/core/qdev-properties-system.c |
108 | @@ -XXX,XX +XXX,XX @@ const PropertyInfo qdev_prop_iothread_vq_mapping_list = { | 108 | @@ -XXX,XX +XXX,XX @@ const PropertyInfo qdev_prop_iothread_vq_mapping_list = { |
109 | .set = set_iothread_vq_mapping_list, | 109 | .set = set_iothread_vq_mapping_list, |
110 | .release = release_iothread_vq_mapping_list, | 110 | .release = release_iothread_vq_mapping_list, |
111 | }; | 111 | }; |
112 | + | 112 | + |
113 | +const PropertyInfo qdev_prop_vmapple_virtio_blk_variant = { | 113 | +const PropertyInfo qdev_prop_vmapple_virtio_blk_variant = { |
114 | + .name = "VMAppleVirtioBlkVariant", | 114 | + .name = "VMAppleVirtioBlkVariant", |
115 | + .enum_table = &VMAppleVirtioBlkVariant_lookup, | 115 | + .enum_table = &VMAppleVirtioBlkVariant_lookup, |
116 | + .get = qdev_propinfo_get_enum, | 116 | + .get = qdev_propinfo_get_enum, |
117 | + .set = qdev_propinfo_set_enum, | 117 | + .set = qdev_propinfo_set_enum, |
118 | + .set_default_value = qdev_propinfo_set_default_value_enum, | 118 | + .set_default_value = qdev_propinfo_set_default_value_enum, |
119 | +}; | 119 | +}; |
120 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig | 120 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig |
121 | index XXXXXXX..XXXXXXX 100644 | 121 | index XXXXXXX..XXXXXXX 100644 |
122 | --- a/hw/vmapple/Kconfig | 122 | --- a/hw/vmapple/Kconfig |
123 | +++ b/hw/vmapple/Kconfig | 123 | +++ b/hw/vmapple/Kconfig |
124 | @@ -XXX,XX +XXX,XX @@ config VMAPPLE_BDIF | 124 | @@ -XXX,XX +XXX,XX @@ config VMAPPLE_BDIF |
125 | config VMAPPLE_CFG | 125 | config VMAPPLE_CFG |
126 | bool | 126 | bool |
127 | 127 | ||
128 | +config VMAPPLE_VIRTIO_BLK | 128 | +config VMAPPLE_VIRTIO_BLK |
129 | + bool | 129 | + bool |
130 | + | 130 | + |
131 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build | 131 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build |
132 | index XXXXXXX..XXXXXXX 100644 | 132 | index XXXXXXX..XXXXXXX 100644 |
133 | --- a/hw/vmapple/meson.build | 133 | --- a/hw/vmapple/meson.build |
134 | +++ b/hw/vmapple/meson.build | 134 | +++ b/hw/vmapple/meson.build |
135 | @@ -XXX,XX +XXX,XX @@ | 135 | @@ -XXX,XX +XXX,XX @@ |
136 | system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) | 136 | system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) |
137 | system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c')) | 137 | system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c')) |
138 | system_ss.add(when: 'CONFIG_VMAPPLE_CFG', if_true: files('cfg.c')) | 138 | system_ss.add(when: 'CONFIG_VMAPPLE_CFG', if_true: files('cfg.c')) |
139 | +system_ss.add(when: 'CONFIG_VMAPPLE_VIRTIO_BLK', if_true: files('virtio-blk.c')) | 139 | +system_ss.add(when: 'CONFIG_VMAPPLE_VIRTIO_BLK', if_true: files('virtio-blk.c')) |
140 | diff --git a/hw/vmapple/virtio-blk.c b/hw/vmapple/virtio-blk.c | 140 | diff --git a/hw/vmapple/virtio-blk.c b/hw/vmapple/virtio-blk.c |
141 | new file mode 100644 | 141 | new file mode 100644 |
142 | index XXXXXXX..XXXXXXX | 142 | index XXXXXXX..XXXXXXX |
143 | --- /dev/null | 143 | --- /dev/null |
144 | +++ b/hw/vmapple/virtio-blk.c | 144 | +++ b/hw/vmapple/virtio-blk.c |
145 | @@ -XXX,XX +XXX,XX @@ | 145 | @@ -XXX,XX +XXX,XX @@ |
146 | +/* | 146 | +/* |
147 | + * VMApple specific VirtIO Block implementation | 147 | + * VMApple specific VirtIO Block implementation |
148 | + * | 148 | + * |
149 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | 149 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
150 | + * | 150 | + * |
151 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | 151 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. |
152 | + * See the COPYING file in the top-level directory. | 152 | + * See the COPYING file in the top-level directory. |
153 | + * | 153 | + * |
154 | + * SPDX-License-Identifier: GPL-2.0-or-later | 154 | + * SPDX-License-Identifier: GPL-2.0-or-later |
155 | + * | 155 | + * |
156 | + * VMApple uses almost standard VirtIO Block, but with a few key differences: | 156 | + * VMApple uses almost standard VirtIO Block, but with a few key differences: |
157 | + * | 157 | + * |
158 | + * - Different PCI device/vendor ID | 158 | + * - Different PCI device/vendor ID |
159 | + * - An additional "type" identifier to differentiate AUX and Root volumes | 159 | + * - An additional "type" identifier to differentiate AUX and Root volumes |
160 | + * - An additional BARRIER command | 160 | + * - An additional BARRIER command |
161 | + */ | 161 | + */ |
162 | + | 162 | + |
163 | +#include "qemu/osdep.h" | 163 | +#include "qemu/osdep.h" |
164 | +#include "hw/vmapple/vmapple.h" | 164 | +#include "hw/vmapple/vmapple.h" |
165 | +#include "hw/virtio/virtio-blk.h" | 165 | +#include "hw/virtio/virtio-blk.h" |
166 | +#include "hw/virtio/virtio-pci.h" | 166 | +#include "hw/virtio/virtio-pci.h" |
167 | +#include "qemu/bswap.h" | 167 | +#include "qemu/bswap.h" |
168 | +#include "qemu/log.h" | 168 | +#include "qemu/log.h" |
169 | +#include "qemu/module.h" | 169 | +#include "qemu/module.h" |
170 | +#include "qapi/error.h" | 170 | +#include "qapi/error.h" |
171 | + | 171 | + |
172 | +#define TYPE_VMAPPLE_VIRTIO_BLK "vmapple-virtio-blk" | 172 | +#define TYPE_VMAPPLE_VIRTIO_BLK "vmapple-virtio-blk" |
173 | +OBJECT_DECLARE_TYPE(VMAppleVirtIOBlk, VMAppleVirtIOBlkClass, VMAPPLE_VIRTIO_BLK) | 173 | +OBJECT_DECLARE_TYPE(VMAppleVirtIOBlk, VMAppleVirtIOBlkClass, VMAPPLE_VIRTIO_BLK) |
174 | + | 174 | + |
175 | +typedef struct VMAppleVirtIOBlkClass { | 175 | +typedef struct VMAppleVirtIOBlkClass { |
176 | + VirtIOBlkClass parent; | 176 | + VirtIOBlkClass parent; |
177 | + | 177 | + |
178 | + void (*get_config)(VirtIODevice *vdev, uint8_t *config); | 178 | + void (*get_config)(VirtIODevice *vdev, uint8_t *config); |
179 | +} VMAppleVirtIOBlkClass; | 179 | +} VMAppleVirtIOBlkClass; |
180 | + | 180 | + |
181 | +typedef struct VMAppleVirtIOBlk { | 181 | +typedef struct VMAppleVirtIOBlk { |
182 | + VirtIOBlock parent_obj; | 182 | + VirtIOBlock parent_obj; |
183 | + | 183 | + |
184 | + uint32_t apple_type; | 184 | + uint32_t apple_type; |
185 | +} VMAppleVirtIOBlk; | 185 | +} VMAppleVirtIOBlk; |
186 | + | 186 | + |
187 | +/* | 187 | +/* |
188 | + * vmapple-virtio-blk-pci: This extends VirtioPCIProxy. | 188 | + * vmapple-virtio-blk-pci: This extends VirtioPCIProxy. |
189 | + */ | 189 | + */ |
190 | +OBJECT_DECLARE_SIMPLE_TYPE(VMAppleVirtIOBlkPCI, VMAPPLE_VIRTIO_BLK_PCI) | 190 | +OBJECT_DECLARE_SIMPLE_TYPE(VMAppleVirtIOBlkPCI, VMAPPLE_VIRTIO_BLK_PCI) |
191 | + | 191 | + |
192 | +#define VIRTIO_BLK_T_APPLE_BARRIER 0x10000 | 192 | +#define VIRTIO_BLK_T_APPLE_BARRIER 0x10000 |
193 | + | 193 | + |
194 | +static bool vmapple_virtio_blk_handle_unknown_request(VirtIOBlockReq *req, | 194 | +static bool vmapple_virtio_blk_handle_unknown_request(VirtIOBlockReq *req, |
195 | + MultiReqBuffer *mrb, | 195 | + MultiReqBuffer *mrb, |
196 | + uint32_t type) | 196 | + uint32_t type) |
197 | +{ | 197 | +{ |
198 | + switch (type) { | 198 | + switch (type) { |
199 | + case VIRTIO_BLK_T_APPLE_BARRIER: | 199 | + case VIRTIO_BLK_T_APPLE_BARRIER: |
200 | + qemu_log_mask(LOG_UNIMP, "%s: Barrier requests are currently no-ops\n", | 200 | + qemu_log_mask(LOG_UNIMP, "%s: Barrier requests are currently no-ops\n", |
201 | + __func__); | 201 | + __func__); |
202 | + virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); | 202 | + virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); |
203 | + virtio_blk_free_request(req); | 203 | + virtio_blk_free_request(req); |
204 | + return true; | 204 | + return true; |
205 | + default: | 205 | + default: |
206 | + return false; | 206 | + return false; |
207 | + } | 207 | + } |
208 | +} | 208 | +} |
209 | + | 209 | + |
210 | +/* | 210 | +/* |
211 | + * VMApple virtio-blk uses the same config format as normal virtio, with one | 211 | + * VMApple virtio-blk uses the same config format as normal virtio, with one |
212 | + * exception: It adds an "apple type" specifier at the same location that | 212 | + * exception: It adds an "apple type" specifier at the same location that |
213 | + * the spec reserves for max_secure_erase_sectors. Let's hook into the | 213 | + * the spec reserves for max_secure_erase_sectors. Let's hook into the |
214 | + * get_config code path here, run it as usual and then patch in the apple type. | 214 | + * get_config code path here, run it as usual and then patch in the apple type. |
215 | + */ | 215 | + */ |
216 | +static void vmapple_virtio_blk_get_config(VirtIODevice *vdev, uint8_t *config) | 216 | +static void vmapple_virtio_blk_get_config(VirtIODevice *vdev, uint8_t *config) |
217 | +{ | 217 | +{ |
218 | + VMAppleVirtIOBlk *dev = VMAPPLE_VIRTIO_BLK(vdev); | 218 | + VMAppleVirtIOBlk *dev = VMAPPLE_VIRTIO_BLK(vdev); |
219 | + VMAppleVirtIOBlkClass *vvbk = VMAPPLE_VIRTIO_BLK_GET_CLASS(dev); | 219 | + VMAppleVirtIOBlkClass *vvbk = VMAPPLE_VIRTIO_BLK_GET_CLASS(dev); |
220 | + struct virtio_blk_config *blkcfg = (struct virtio_blk_config *)config; | 220 | + struct virtio_blk_config *blkcfg = (struct virtio_blk_config *)config; |
221 | + | 221 | + |
222 | + vvbk->get_config(vdev, config); | 222 | + vvbk->get_config(vdev, config); |
223 | + | 223 | + |
224 | + g_assert(dev->parent_obj.config_size >= endof(struct virtio_blk_config, zoned)); | 224 | + g_assert(dev->parent_obj.config_size >= endof(struct virtio_blk_config, zoned)); |
225 | + | 225 | + |
226 | + /* Apple abuses the field for max_secure_erase_sectors as type id */ | 226 | + /* Apple abuses the field for max_secure_erase_sectors as type id */ |
227 | + stl_he_p(&blkcfg->max_secure_erase_sectors, dev->apple_type); | 227 | + stl_he_p(&blkcfg->max_secure_erase_sectors, dev->apple_type); |
228 | +} | 228 | +} |
229 | + | 229 | + |
230 | +static void vmapple_virtio_blk_class_init(ObjectClass *klass, void *data) | 230 | +static void vmapple_virtio_blk_class_init(ObjectClass *klass, void *data) |
231 | +{ | 231 | +{ |
232 | + VirtIOBlkClass *vbk = VIRTIO_BLK_CLASS(klass); | 232 | + VirtIOBlkClass *vbk = VIRTIO_BLK_CLASS(klass); |
233 | + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); | 233 | + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); |
234 | + VMAppleVirtIOBlkClass *vvbk = VMAPPLE_VIRTIO_BLK_CLASS(klass); | 234 | + VMAppleVirtIOBlkClass *vvbk = VMAPPLE_VIRTIO_BLK_CLASS(klass); |
235 | + | 235 | + |
236 | + vbk->handle_unknown_request = vmapple_virtio_blk_handle_unknown_request; | 236 | + vbk->handle_unknown_request = vmapple_virtio_blk_handle_unknown_request; |
237 | + vvbk->get_config = vdc->get_config; | 237 | + vvbk->get_config = vdc->get_config; |
238 | + vdc->get_config = vmapple_virtio_blk_get_config; | 238 | + vdc->get_config = vmapple_virtio_blk_get_config; |
239 | +} | 239 | +} |
240 | + | 240 | + |
241 | +static const TypeInfo vmapple_virtio_blk_info = { | 241 | +static const TypeInfo vmapple_virtio_blk_info = { |
242 | + .name = TYPE_VMAPPLE_VIRTIO_BLK, | 242 | + .name = TYPE_VMAPPLE_VIRTIO_BLK, |
243 | + .parent = TYPE_VIRTIO_BLK, | 243 | + .parent = TYPE_VIRTIO_BLK, |
244 | + .instance_size = sizeof(VMAppleVirtIOBlk), | 244 | + .instance_size = sizeof(VMAppleVirtIOBlk), |
245 | + .class_size = sizeof(VMAppleVirtIOBlkClass), | 245 | + .class_size = sizeof(VMAppleVirtIOBlkClass), |
246 | + .class_init = vmapple_virtio_blk_class_init, | 246 | + .class_init = vmapple_virtio_blk_class_init, |
247 | +}; | 247 | +}; |
248 | + | 248 | + |
249 | +/* PCI Devices */ | 249 | +/* PCI Devices */ |
250 | + | 250 | + |
251 | +struct VMAppleVirtIOBlkPCI { | 251 | +struct VMAppleVirtIOBlkPCI { |
252 | + VirtIOPCIProxy parent_obj; | 252 | + VirtIOPCIProxy parent_obj; |
253 | + VMAppleVirtIOBlk vdev; | 253 | + VMAppleVirtIOBlk vdev; |
254 | + VMAppleVirtioBlkVariant variant; | 254 | + VMAppleVirtioBlkVariant variant; |
255 | +}; | 255 | +}; |
256 | + | 256 | + |
257 | + | 257 | + |
258 | +static Property vmapple_virtio_blk_pci_properties[] = { | 258 | +static Property vmapple_virtio_blk_pci_properties[] = { |
259 | + DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), | 259 | + DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), |
260 | + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, | 260 | + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, |
261 | + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), | 261 | + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), |
262 | + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, | 262 | + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, |
263 | + DEV_NVECTORS_UNSPECIFIED), | 263 | + DEV_NVECTORS_UNSPECIFIED), |
264 | + DEFINE_PROP_VMAPPLE_VIRTIO_BLK_VARIANT("variant", VMAppleVirtIOBlkPCI, variant, | 264 | + DEFINE_PROP_VMAPPLE_VIRTIO_BLK_VARIANT("variant", VMAppleVirtIOBlkPCI, variant, |
265 | + VM_APPLE_VIRTIO_BLK_VARIANT_UNSPECIFIED), | 265 | + VM_APPLE_VIRTIO_BLK_VARIANT_UNSPECIFIED), |
266 | + DEFINE_PROP_END_OF_LIST(), | 266 | + DEFINE_PROP_END_OF_LIST(), |
267 | +}; | 267 | +}; |
268 | + | 268 | + |
269 | +static void vmapple_virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) | 269 | +static void vmapple_virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) |
270 | +{ | 270 | +{ |
271 | + ERRP_GUARD(); | 271 | + ERRP_GUARD(); |
272 | + VMAppleVirtIOBlkPCI *dev = VMAPPLE_VIRTIO_BLK_PCI(vpci_dev); | 272 | + VMAppleVirtIOBlkPCI *dev = VMAPPLE_VIRTIO_BLK_PCI(vpci_dev); |
273 | + DeviceState *vdev = DEVICE(&dev->vdev); | 273 | + DeviceState *vdev = DEVICE(&dev->vdev); |
274 | + VirtIOBlkConf *conf = &dev->vdev.parent_obj.conf; | 274 | + VirtIOBlkConf *conf = &dev->vdev.parent_obj.conf; |
275 | + | 275 | + |
276 | + if (dev->variant == VM_APPLE_VIRTIO_BLK_VARIANT_UNSPECIFIED) { | 276 | + if (dev->variant == VM_APPLE_VIRTIO_BLK_VARIANT_UNSPECIFIED) { |
277 | + error_setg(errp, "vmapple virtio block device variant unspecified"); | 277 | + error_setg(errp, "vmapple virtio block device variant unspecified"); |
278 | + error_append_hint(errp, | 278 | + error_append_hint(errp, |
279 | + "Variant property must be set to 'aux' or 'root'.\n" | 279 | + "Variant property must be set to 'aux' or 'root'.\n" |
280 | + "Use a regular virtio-blk-pci device instead when " | 280 | + "Use a regular virtio-blk-pci device instead when " |
281 | + "neither is applicaple.\n"); | 281 | + "neither is applicaple.\n"); |
282 | + return; | 282 | + return; |
283 | + } | 283 | + } |
284 | + | 284 | + |
285 | + if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) { | 285 | + if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) { |
286 | + conf->num_queues = virtio_pci_optimal_num_queues(0); | 286 | + conf->num_queues = virtio_pci_optimal_num_queues(0); |
287 | + } | 287 | + } |
288 | + | 288 | + |
289 | + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { | 289 | + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { |
290 | + vpci_dev->nvectors = conf->num_queues + 1; | 290 | + vpci_dev->nvectors = conf->num_queues + 1; |
291 | + } | 291 | + } |
292 | + | 292 | + |
293 | + /* | 293 | + /* |
294 | + * We don't support zones, but we need the additional config space size. | 294 | + * We don't support zones, but we need the additional config space size. |
295 | + * Let's just expose the feature so the rest of the virtio-blk logic | 295 | + * Let's just expose the feature so the rest of the virtio-blk logic |
296 | + * allocates enough space for us. The guest will ignore zones anyway. | 296 | + * allocates enough space for us. The guest will ignore zones anyway. |
297 | + */ | 297 | + */ |
298 | + virtio_add_feature(&dev->vdev.parent_obj.host_features, VIRTIO_BLK_F_ZONED); | 298 | + virtio_add_feature(&dev->vdev.parent_obj.host_features, VIRTIO_BLK_F_ZONED); |
299 | + /* Propagate the apple type down to the virtio-blk device */ | 299 | + /* Propagate the apple type down to the virtio-blk device */ |
300 | + dev->vdev.apple_type = dev->variant; | 300 | + dev->vdev.apple_type = dev->variant; |
301 | + /* and spawn the virtio-blk device */ | 301 | + /* and spawn the virtio-blk device */ |
302 | + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); | 302 | + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); |
303 | + | 303 | + |
304 | + /* | 304 | + /* |
305 | + * The virtio-pci machinery adjusts its vendor/device ID based on whether | 305 | + * The virtio-pci machinery adjusts its vendor/device ID based on whether |
306 | + * we support modern or legacy virtio. Let's patch it back to the Apple | 306 | + * we support modern or legacy virtio. Let's patch it back to the Apple |
307 | + * identifiers here. | 307 | + * identifiers here. |
308 | + */ | 308 | + */ |
309 | + pci_config_set_vendor_id(vpci_dev->pci_dev.config, PCI_VENDOR_ID_APPLE); | 309 | + pci_config_set_vendor_id(vpci_dev->pci_dev.config, PCI_VENDOR_ID_APPLE); |
310 | + pci_config_set_device_id(vpci_dev->pci_dev.config, | 310 | + pci_config_set_device_id(vpci_dev->pci_dev.config, |
311 | + PCI_DEVICE_ID_APPLE_VIRTIO_BLK); | 311 | + PCI_DEVICE_ID_APPLE_VIRTIO_BLK); |
312 | +} | 312 | +} |
313 | + | 313 | + |
314 | +static void vmapple_virtio_blk_pci_class_init(ObjectClass *klass, void *data) | 314 | +static void vmapple_virtio_blk_pci_class_init(ObjectClass *klass, void *data) |
315 | +{ | 315 | +{ |
316 | + DeviceClass *dc = DEVICE_CLASS(klass); | 316 | + DeviceClass *dc = DEVICE_CLASS(klass); |
317 | + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); | 317 | + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); |
318 | + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); | 318 | + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); |
319 | + | 319 | + |
320 | + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); | 320 | + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); |
321 | + device_class_set_props(dc, vmapple_virtio_blk_pci_properties); | 321 | + device_class_set_props(dc, vmapple_virtio_blk_pci_properties); |
322 | + k->realize = vmapple_virtio_blk_pci_realize; | 322 | + k->realize = vmapple_virtio_blk_pci_realize; |
323 | + pcidev_k->vendor_id = PCI_VENDOR_ID_APPLE; | 323 | + pcidev_k->vendor_id = PCI_VENDOR_ID_APPLE; |
324 | + pcidev_k->device_id = PCI_DEVICE_ID_APPLE_VIRTIO_BLK; | 324 | + pcidev_k->device_id = PCI_DEVICE_ID_APPLE_VIRTIO_BLK; |
325 | + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; | 325 | + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; |
326 | + pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; | 326 | + pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; |
327 | +} | 327 | +} |
328 | + | 328 | + |
329 | +static void vmapple_virtio_blk_pci_instance_init(Object *obj) | 329 | +static void vmapple_virtio_blk_pci_instance_init(Object *obj) |
330 | +{ | 330 | +{ |
331 | + VMAppleVirtIOBlkPCI *dev = VMAPPLE_VIRTIO_BLK_PCI(obj); | 331 | + VMAppleVirtIOBlkPCI *dev = VMAPPLE_VIRTIO_BLK_PCI(obj); |
332 | + | 332 | + |
333 | + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), | 333 | + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), |
334 | + TYPE_VMAPPLE_VIRTIO_BLK); | 334 | + TYPE_VMAPPLE_VIRTIO_BLK); |
335 | +} | 335 | +} |
336 | + | 336 | + |
337 | +static const VirtioPCIDeviceTypeInfo vmapple_virtio_blk_pci_info = { | 337 | +static const VirtioPCIDeviceTypeInfo vmapple_virtio_blk_pci_info = { |
338 | + .generic_name = TYPE_VMAPPLE_VIRTIO_BLK_PCI, | 338 | + .generic_name = TYPE_VMAPPLE_VIRTIO_BLK_PCI, |
339 | + .instance_size = sizeof(VMAppleVirtIOBlkPCI), | 339 | + .instance_size = sizeof(VMAppleVirtIOBlkPCI), |
340 | + .instance_init = vmapple_virtio_blk_pci_instance_init, | 340 | + .instance_init = vmapple_virtio_blk_pci_instance_init, |
341 | + .class_init = vmapple_virtio_blk_pci_class_init, | 341 | + .class_init = vmapple_virtio_blk_pci_class_init, |
342 | +}; | 342 | +}; |
343 | + | 343 | + |
344 | +static void vmapple_virtio_blk_register_types(void) | 344 | +static void vmapple_virtio_blk_register_types(void) |
345 | +{ | 345 | +{ |
346 | + type_register_static(&vmapple_virtio_blk_info); | 346 | + type_register_static(&vmapple_virtio_blk_info); |
347 | + virtio_pci_types_register(&vmapple_virtio_blk_pci_info); | 347 | + virtio_pci_types_register(&vmapple_virtio_blk_pci_info); |
348 | +} | 348 | +} |
349 | + | 349 | + |
350 | +type_init(vmapple_virtio_blk_register_types) | 350 | +type_init(vmapple_virtio_blk_register_types) |
351 | diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h | 351 | diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h |
352 | index XXXXXXX..XXXXXXX 100644 | 352 | index XXXXXXX..XXXXXXX 100644 |
353 | --- a/include/hw/pci/pci_ids.h | 353 | --- a/include/hw/pci/pci_ids.h |
354 | +++ b/include/hw/pci/pci_ids.h | 354 | +++ b/include/hw/pci/pci_ids.h |
355 | @@ -XXX,XX +XXX,XX @@ | 355 | @@ -XXX,XX +XXX,XX @@ |
356 | #define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020 | 356 | #define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020 |
357 | #define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b | 357 | #define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b |
358 | #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021 | 358 | #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021 |
359 | +#define PCI_DEVICE_ID_APPLE_VIRTIO_BLK 0x1a00 | 359 | +#define PCI_DEVICE_ID_APPLE_VIRTIO_BLK 0x1a00 |
360 | 360 | ||
361 | #define PCI_VENDOR_ID_SUN 0x108e | 361 | #define PCI_VENDOR_ID_SUN 0x108e |
362 | #define PCI_DEVICE_ID_SUN_EBUS 0x1000 | 362 | #define PCI_DEVICE_ID_SUN_EBUS 0x1000 |
363 | diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h | 363 | diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h |
364 | index XXXXXXX..XXXXXXX 100644 | 364 | index XXXXXXX..XXXXXXX 100644 |
365 | --- a/include/hw/qdev-properties-system.h | 365 | --- a/include/hw/qdev-properties-system.h |
366 | +++ b/include/hw/qdev-properties-system.h | 366 | +++ b/include/hw/qdev-properties-system.h |
367 | @@ -XXX,XX +XXX,XX @@ extern const PropertyInfo qdev_prop_pcie_link_speed; | 367 | @@ -XXX,XX +XXX,XX @@ extern const PropertyInfo qdev_prop_pcie_link_speed; |
368 | extern const PropertyInfo qdev_prop_pcie_link_width; | 368 | extern const PropertyInfo qdev_prop_pcie_link_width; |
369 | extern const PropertyInfo qdev_prop_cpus390entitlement; | 369 | extern const PropertyInfo qdev_prop_cpus390entitlement; |
370 | extern const PropertyInfo qdev_prop_iothread_vq_mapping_list; | 370 | extern const PropertyInfo qdev_prop_iothread_vq_mapping_list; |
371 | +extern const PropertyInfo qdev_prop_vmapple_virtio_blk_variant; | 371 | +extern const PropertyInfo qdev_prop_vmapple_virtio_blk_variant; |
372 | 372 | ||
373 | #define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \ | 373 | #define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \ |
374 | DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t) | 374 | DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t) |
375 | @@ -XXX,XX +XXX,XX @@ extern const PropertyInfo qdev_prop_iothread_vq_mapping_list; | 375 | @@ -XXX,XX +XXX,XX @@ extern const PropertyInfo qdev_prop_iothread_vq_mapping_list; |
376 | DEFINE_PROP(_name, _state, _field, qdev_prop_iothread_vq_mapping_list, \ | 376 | DEFINE_PROP(_name, _state, _field, qdev_prop_iothread_vq_mapping_list, \ |
377 | IOThreadVirtQueueMappingList *) | 377 | IOThreadVirtQueueMappingList *) |
378 | 378 | ||
379 | +#define DEFINE_PROP_VMAPPLE_VIRTIO_BLK_VARIANT(_n, _s, _f, _d) \ | 379 | +#define DEFINE_PROP_VMAPPLE_VIRTIO_BLK_VARIANT(_n, _s, _f, _d) \ |
380 | + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_vmapple_virtio_blk_variant, \ | 380 | + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_vmapple_virtio_blk_variant, \ |
381 | + VMAppleVirtioBlkVariant) | 381 | + VMAppleVirtioBlkVariant) |
382 | + | 382 | + |
383 | #endif | 383 | #endif |
384 | diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h | 384 | diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h |
385 | index XXXXXXX..XXXXXXX 100644 | 385 | index XXXXXXX..XXXXXXX 100644 |
386 | --- a/include/hw/virtio/virtio-blk.h | 386 | --- a/include/hw/virtio/virtio-blk.h |
387 | +++ b/include/hw/virtio/virtio-blk.h | 387 | +++ b/include/hw/virtio/virtio-blk.h |
388 | @@ -XXX,XX +XXX,XX @@ | 388 | @@ -XXX,XX +XXX,XX @@ |
389 | #include "qapi/qapi-types-virtio.h" | 389 | #include "qapi/qapi-types-virtio.h" |
390 | 390 | ||
391 | #define TYPE_VIRTIO_BLK "virtio-blk-device" | 391 | #define TYPE_VIRTIO_BLK "virtio-blk-device" |
392 | -OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK) | 392 | -OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK) |
393 | +OBJECT_DECLARE_TYPE(VirtIOBlock, VirtIOBlkClass, VIRTIO_BLK) | 393 | +OBJECT_DECLARE_TYPE(VirtIOBlock, VirtIOBlkClass, VIRTIO_BLK) |
394 | 394 | ||
395 | /* This is the last element of the write scatter-gather list */ | 395 | /* This is the last element of the write scatter-gather list */ |
396 | struct virtio_blk_inhdr | 396 | struct virtio_blk_inhdr |
397 | @@ -XXX,XX +XXX,XX @@ typedef struct MultiReqBuffer { | 397 | @@ -XXX,XX +XXX,XX @@ typedef struct MultiReqBuffer { |
398 | bool is_write; | 398 | bool is_write; |
399 | } MultiReqBuffer; | 399 | } MultiReqBuffer; |
400 | 400 | ||
401 | +typedef struct VirtIOBlkClass { | 401 | +typedef struct VirtIOBlkClass { |
402 | + /*< private >*/ | 402 | + /*< private >*/ |
403 | + VirtioDeviceClass parent; | 403 | + VirtioDeviceClass parent; |
404 | + /*< public >*/ | 404 | + /*< public >*/ |
405 | + bool (*handle_unknown_request)(VirtIOBlockReq *req, MultiReqBuffer *mrb, | 405 | + bool (*handle_unknown_request)(VirtIOBlockReq *req, MultiReqBuffer *mrb, |
406 | + uint32_t type); | 406 | + uint32_t type); |
407 | +} VirtIOBlkClass; | 407 | +} VirtIOBlkClass; |
408 | + | 408 | + |
409 | void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq); | 409 | void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq); |
410 | +void virtio_blk_free_request(VirtIOBlockReq *req); | 410 | +void virtio_blk_free_request(VirtIOBlockReq *req); |
411 | +void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status); | 411 | +void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status); |
412 | 412 | ||
413 | #endif | 413 | #endif |
414 | diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h | 414 | diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h |
415 | index XXXXXXX..XXXXXXX 100644 | 415 | index XXXXXXX..XXXXXXX 100644 |
416 | --- a/include/hw/vmapple/vmapple.h | 416 | --- a/include/hw/vmapple/vmapple.h |
417 | +++ b/include/hw/vmapple/vmapple.h | 417 | +++ b/include/hw/vmapple/vmapple.h |
418 | @@ -XXX,XX +XXX,XX @@ | 418 | @@ -XXX,XX +XXX,XX @@ |
419 | 419 | ||
420 | #define TYPE_VMAPPLE_CFG "vmapple-cfg" | 420 | #define TYPE_VMAPPLE_CFG "vmapple-cfg" |
421 | 421 | ||
422 | +#define TYPE_VMAPPLE_VIRTIO_BLK_PCI "vmapple-virtio-blk-pci" | 422 | +#define TYPE_VMAPPLE_VIRTIO_BLK_PCI "vmapple-virtio-blk-pci" |
423 | + | 423 | + |
424 | #endif /* HW_VMAPPLE_VMAPPLE_H */ | 424 | #endif /* HW_VMAPPLE_VMAPPLE_H */ |
425 | diff --git a/qapi/virtio.json b/qapi/virtio.json | 425 | diff --git a/qapi/virtio.json b/qapi/virtio.json |
426 | index XXXXXXX..XXXXXXX 100644 | 426 | index XXXXXXX..XXXXXXX 100644 |
427 | --- a/qapi/virtio.json | 427 | --- a/qapi/virtio.json |
428 | +++ b/qapi/virtio.json | 428 | +++ b/qapi/virtio.json |
429 | @@ -XXX,XX +XXX,XX @@ | 429 | @@ -XXX,XX +XXX,XX @@ |
430 | ## | 430 | ## |
431 | { 'enum': 'GranuleMode', | 431 | { 'enum': 'GranuleMode', |
432 | 'data': [ '4k', '8k', '16k', '64k', 'host' ] } | 432 | 'data': [ '4k', '8k', '16k', '64k', 'host' ] } |
433 | + | 433 | + |
434 | +## | 434 | +## |
435 | +# @VMAppleVirtioBlkVariant: | 435 | +# @VMAppleVirtioBlkVariant: |
436 | +# | 436 | +# |
437 | +# @unspecified: The default, not a valid setting. | 437 | +# @unspecified: The default, not a valid setting. |
438 | +# | 438 | +# |
439 | +# @root: Block device holding the root volume | 439 | +# @root: Block device holding the root volume |
440 | +# | 440 | +# |
441 | +# @aux: Block device holding auxiliary data required for boot | 441 | +# @aux: Block device holding auxiliary data required for boot |
442 | +# | 442 | +# |
443 | +# Since: 9.2 | 443 | +# Since: 9.2 |
444 | +## | 444 | +## |
445 | +{ 'enum': 'VMAppleVirtioBlkVariant', | 445 | +{ 'enum': 'VMAppleVirtioBlkVariant', |
446 | + 'data': [ 'unspecified', 'root', 'aux' ] } | 446 | + 'data': [ 'unspecified', 'root', 'aux' ] } |
447 | -- | 447 | -- |
448 | 2.39.5 (Apple Git-154) | 448 | 2.39.5 (Apple Git-154) |
449 | 449 | ||
450 | 450 | diff view generated by jsdifflib |
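
For illustration only: with this patch applied, the new block device could be instantiated on the command line along the following lines. The drive ids, image paths and elided machine options are placeholders, and the vmapple machine type added later in the series may create these devices itself rather than relying on -device.

    qemu-system-aarch64 [...] \
        -drive if=none,id=apple-root,file=root.img,format=raw \
        -device vmapple-virtio-blk-pci,variant=root,drive=apple-root \
        -drive if=none,id=apple-aux,file=aux.img,format=raw \
        -device vmapple-virtio-blk-pci,variant=aux,drive=apple-aux
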
1 | The virtio_blk_free_request() function has been a 1-liner forwarding | 1 | The virtio_blk_free_request() function has been a 1-liner forwarding |
---|---|---|---|
2 | to g_free() for a while now. We may as well call g_free() on the request | 2 | to g_free() for a while now. We may as well call g_free() on the request |
3 | pointer directly. | 3 | pointer directly. |
4 | 4 | ||
5 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 5 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
6 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 6 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
7 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 7 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
8 | --- | 8 | --- |
9 | hw/block/virtio-blk.c | 43 +++++++++++++++------------------- | 9 | hw/block/virtio-blk.c | 43 +++++++++++++++------------------- |
10 | hw/vmapple/virtio-blk.c | 2 +- | 10 | hw/vmapple/virtio-blk.c | 2 +- |
11 | include/hw/virtio/virtio-blk.h | 1 - | 11 | include/hw/virtio/virtio-blk.h | 1 - |
12 | 3 files changed, 20 insertions(+), 26 deletions(-) | 12 | 3 files changed, 20 insertions(+), 26 deletions(-) |
13 | 13 | ||
14 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | 14 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c |
15 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/hw/block/virtio-blk.c | 16 | --- a/hw/block/virtio-blk.c |
17 | +++ b/hw/block/virtio-blk.c | 17 | +++ b/hw/block/virtio-blk.c |
18 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, | 18 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, |
19 | req->mr_next = NULL; | 19 | req->mr_next = NULL; |
20 | } | 20 | } |
21 | 21 | ||
22 | -void virtio_blk_free_request(VirtIOBlockReq *req) | 22 | -void virtio_blk_free_request(VirtIOBlockReq *req) |
23 | -{ | 23 | -{ |
24 | - g_free(req); | 24 | - g_free(req); |
25 | -} | 25 | -} |
26 | - | 26 | - |
27 | void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) | 27 | void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) |
28 | { | 28 | { |
29 | VirtIOBlock *s = req->dev; | 29 | VirtIOBlock *s = req->dev; |
30 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, | 30 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, |
31 | if (acct_failed) { | 31 | if (acct_failed) { |
32 | block_acct_failed(blk_get_stats(s->blk), &req->acct); | 32 | block_acct_failed(blk_get_stats(s->blk), &req->acct); |
33 | } | 33 | } |
34 | - virtio_blk_free_request(req); | 34 | - virtio_blk_free_request(req); |
35 | + g_free(req); | 35 | + g_free(req); |
36 | } | 36 | } |
37 | 37 | ||
38 | blk_error_action(s->blk, action, is_read, error); | 38 | blk_error_action(s->blk, action, is_read, error); |
39 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_rw_complete(void *opaque, int ret) | 39 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_rw_complete(void *opaque, int ret) |
40 | 40 | ||
41 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); | 41 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); |
42 | block_acct_done(blk_get_stats(s->blk), &req->acct); | 42 | block_acct_done(blk_get_stats(s->blk), &req->acct); |
43 | - virtio_blk_free_request(req); | 43 | - virtio_blk_free_request(req); |
44 | + g_free(req); | 44 | + g_free(req); |
45 | } | 45 | } |
46 | } | 46 | } |
47 | 47 | ||
48 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_flush_complete(void *opaque, int ret) | 48 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_flush_complete(void *opaque, int ret) |
49 | 49 | ||
50 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); | 50 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); |
51 | block_acct_done(blk_get_stats(s->blk), &req->acct); | 51 | block_acct_done(blk_get_stats(s->blk), &req->acct); |
52 | - virtio_blk_free_request(req); | 52 | - virtio_blk_free_request(req); |
53 | + g_free(req); | 53 | + g_free(req); |
54 | } | 54 | } |
55 | 55 | ||
56 | static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) | 56 | static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) |
57 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) | 57 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) |
58 | if (is_write_zeroes) { | 58 | if (is_write_zeroes) { |
59 | block_acct_done(blk_get_stats(s->blk), &req->acct); | 59 | block_acct_done(blk_get_stats(s->blk), &req->acct); |
60 | } | 60 | } |
61 | - virtio_blk_free_request(req); | 61 | - virtio_blk_free_request(req); |
62 | + g_free(req); | 62 | + g_free(req); |
63 | } | 63 | } |
64 | 64 | ||
65 | static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq) | 65 | static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq) |
66 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req) | 66 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req) |
67 | 67 | ||
68 | fail: | 68 | fail: |
69 | virtio_blk_req_complete(req, status); | 69 | virtio_blk_req_complete(req, status); |
70 | - virtio_blk_free_request(req); | 70 | - virtio_blk_free_request(req); |
71 | + g_free(req); | 71 | + g_free(req); |
72 | } | 72 | } |
73 | 73 | ||
74 | static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb, | 74 | static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb, |
75 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_zone_report_complete(void *opaque, int ret) | 75 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_zone_report_complete(void *opaque, int ret) |
76 | 76 | ||
77 | out: | 77 | out: |
78 | virtio_blk_req_complete(req, err_status); | 78 | virtio_blk_req_complete(req, err_status); |
79 | - virtio_blk_free_request(req); | 79 | - virtio_blk_free_request(req); |
80 | + g_free(req); | 80 | + g_free(req); |
81 | g_free(data->zone_report_data.zones); | 81 | g_free(data->zone_report_data.zones); |
82 | g_free(data); | 82 | g_free(data); |
83 | } | 83 | } |
84 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_handle_zone_report(VirtIOBlockReq *req, | 84 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_handle_zone_report(VirtIOBlockReq *req, |
85 | return; | 85 | return; |
86 | out: | 86 | out: |
87 | virtio_blk_req_complete(req, err_status); | 87 | virtio_blk_req_complete(req, err_status); |
88 | - virtio_blk_free_request(req); | 88 | - virtio_blk_free_request(req); |
89 | + g_free(req); | 89 | + g_free(req); |
90 | } | 90 | } |
91 | 91 | ||
92 | static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) | 92 | static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) |
93 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) | 93 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) |
94 | } | 94 | } |
95 | 95 | ||
96 | virtio_blk_req_complete(req, err_status); | 96 | virtio_blk_req_complete(req, err_status); |
97 | - virtio_blk_free_request(req); | 97 | - virtio_blk_free_request(req); |
98 | + g_free(req); | 98 | + g_free(req); |
99 | } | 99 | } |
100 | 100 | ||
101 | static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) | 101 | static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) |
102 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) | 102 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) |
103 | return 0; | 103 | return 0; |
104 | out: | 104 | out: |
105 | virtio_blk_req_complete(req, err_status); | 105 | virtio_blk_req_complete(req, err_status); |
106 | - virtio_blk_free_request(req); | 106 | - virtio_blk_free_request(req); |
107 | + g_free(req); | 107 | + g_free(req); |
108 | return err_status; | 108 | return err_status; |
109 | } | 109 | } |
110 | 110 | ||
111 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_zone_append_complete(void *opaque, int ret) | 111 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_zone_append_complete(void *opaque, int ret) |
112 | 112 | ||
113 | out: | 113 | out: |
114 | virtio_blk_req_complete(req, err_status); | 114 | virtio_blk_req_complete(req, err_status); |
115 | - virtio_blk_free_request(req); | 115 | - virtio_blk_free_request(req); |
116 | + g_free(req); | 116 | + g_free(req); |
117 | g_free(data); | 117 | g_free(data); |
118 | } | 118 | } |
119 | 119 | ||
120 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, | 120 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, |
121 | 121 | ||
122 | out: | 122 | out: |
123 | virtio_blk_req_complete(req, err_status); | 123 | virtio_blk_req_complete(req, err_status); |
124 | - virtio_blk_free_request(req); | 124 | - virtio_blk_free_request(req); |
125 | + g_free(req); | 125 | + g_free(req); |
126 | return err_status; | 126 | return err_status; |
127 | } | 127 | } |
128 | 128 | ||
129 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | 129 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) |
130 | virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); | 130 | virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); |
131 | block_acct_invalid(blk_get_stats(s->blk), | 131 | block_acct_invalid(blk_get_stats(s->blk), |
132 | is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ); | 132 | is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ); |
133 | - virtio_blk_free_request(req); | 133 | - virtio_blk_free_request(req); |
134 | + g_free(req); | 134 | + g_free(req); |
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | 138 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) |
139 | VIRTIO_BLK_ID_BYTES)); | 139 | VIRTIO_BLK_ID_BYTES)); |
140 | iov_from_buf(in_iov, in_num, 0, serial, size); | 140 | iov_from_buf(in_iov, in_num, 0, serial, size); |
141 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); | 141 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); |
142 | - virtio_blk_free_request(req); | 142 | - virtio_blk_free_request(req); |
143 | + g_free(req); | 143 | + g_free(req); |
144 | break; | 144 | break; |
145 | } | 145 | } |
146 | case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT: | 146 | case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT: |
147 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | 147 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) |
148 | if (unlikely(!(type & VIRTIO_BLK_T_OUT) || | 148 | if (unlikely(!(type & VIRTIO_BLK_T_OUT) || |
149 | out_len > sizeof(dwz_hdr))) { | 149 | out_len > sizeof(dwz_hdr))) { |
150 | virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); | 150 | virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); |
151 | - virtio_blk_free_request(req); | 151 | - virtio_blk_free_request(req); |
152 | + g_free(req); | 152 | + g_free(req); |
153 | return 0; | 153 | return 0; |
154 | } | 154 | } |
155 | 155 | ||
156 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | 156 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) |
157 | is_write_zeroes); | 157 | is_write_zeroes); |
158 | if (err_status != VIRTIO_BLK_S_OK) { | 158 | if (err_status != VIRTIO_BLK_S_OK) { |
159 | virtio_blk_req_complete(req, err_status); | 159 | virtio_blk_req_complete(req, err_status); |
160 | - virtio_blk_free_request(req); | 160 | - virtio_blk_free_request(req); |
161 | + g_free(req); | 161 | + g_free(req); |
162 | } | 162 | } |
163 | 163 | ||
164 | break; | 164 | break; |
165 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | 165 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) |
166 | if (!vbk->handle_unknown_request || | 166 | if (!vbk->handle_unknown_request || |
167 | !vbk->handle_unknown_request(req, mrb, type)) { | 167 | !vbk->handle_unknown_request(req, mrb, type)) { |
168 | virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); | 168 | virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); |
169 | - virtio_blk_free_request(req); | 169 | - virtio_blk_free_request(req); |
170 | + g_free(req); | 170 | + g_free(req); |
171 | } | 171 | } |
172 | } | 172 | } |
173 | } | 173 | } |
174 | @@ -XXX,XX +XXX,XX @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) | 174 | @@ -XXX,XX +XXX,XX @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) |
175 | while ((req = virtio_blk_get_request(s, vq))) { | 175 | while ((req = virtio_blk_get_request(s, vq))) { |
176 | if (virtio_blk_handle_request(req, &mrb)) { | 176 | if (virtio_blk_handle_request(req, &mrb)) { |
177 | virtqueue_detach_element(req->vq, &req->elem, 0); | 177 | virtqueue_detach_element(req->vq, &req->elem, 0); |
178 | - virtio_blk_free_request(req); | 178 | - virtio_blk_free_request(req); |
179 | + g_free(req); | 179 | + g_free(req); |
180 | break; | 180 | break; |
181 | } | 181 | } |
182 | } | 182 | } |
183 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_dma_restart_bh(void *opaque) | 183 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_dma_restart_bh(void *opaque) |
184 | while (req) { | 184 | while (req) { |
185 | next = req->next; | 185 | next = req->next; |
186 | virtqueue_detach_element(req->vq, &req->elem, 0); | 186 | virtqueue_detach_element(req->vq, &req->elem, 0); |
187 | - virtio_blk_free_request(req); | 187 | - virtio_blk_free_request(req); |
188 | + g_free(req); | 188 | + g_free(req); |
189 | req = next; | 189 | req = next; |
190 | } | 190 | } |
191 | break; | 191 | break; |
192 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_reset(VirtIODevice *vdev) | 192 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_reset(VirtIODevice *vdev) |
193 | /* No other threads can access req->vq here */ | 193 | /* No other threads can access req->vq here */ |
194 | virtqueue_detach_element(req->vq, &req->elem, 0); | 194 | virtqueue_detach_element(req->vq, &req->elem, 0); |
195 | 195 | ||
196 | - virtio_blk_free_request(req); | 196 | - virtio_blk_free_request(req); |
197 | + g_free(req); | 197 | + g_free(req); |
198 | } | 198 | } |
199 | } | 199 | } |
200 | 200 | ||
201 | diff --git a/hw/vmapple/virtio-blk.c b/hw/vmapple/virtio-blk.c | 201 | diff --git a/hw/vmapple/virtio-blk.c b/hw/vmapple/virtio-blk.c |
202 | index XXXXXXX..XXXXXXX 100644 | 202 | index XXXXXXX..XXXXXXX 100644 |
203 | --- a/hw/vmapple/virtio-blk.c | 203 | --- a/hw/vmapple/virtio-blk.c |
204 | +++ b/hw/vmapple/virtio-blk.c | 204 | +++ b/hw/vmapple/virtio-blk.c |
205 | @@ -XXX,XX +XXX,XX @@ static bool vmapple_virtio_blk_handle_unknown_request(VirtIOBlockReq *req, | 205 | @@ -XXX,XX +XXX,XX @@ static bool vmapple_virtio_blk_handle_unknown_request(VirtIOBlockReq *req, |
206 | qemu_log_mask(LOG_UNIMP, "%s: Barrier requests are currently no-ops\n", | 206 | qemu_log_mask(LOG_UNIMP, "%s: Barrier requests are currently no-ops\n", |
207 | __func__); | 207 | __func__); |
208 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); | 208 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); |
209 | - virtio_blk_free_request(req); | 209 | - virtio_blk_free_request(req); |
210 | + g_free(req); | 210 | + g_free(req); |
211 | return true; | 211 | return true; |
212 | default: | 212 | default: |
213 | return false; | 213 | return false; |
214 | diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h | 214 | diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h |
215 | index XXXXXXX..XXXXXXX 100644 | 215 | index XXXXXXX..XXXXXXX 100644 |
216 | --- a/include/hw/virtio/virtio-blk.h | 216 | --- a/include/hw/virtio/virtio-blk.h |
217 | +++ b/include/hw/virtio/virtio-blk.h | 217 | +++ b/include/hw/virtio/virtio-blk.h |
218 | @@ -XXX,XX +XXX,XX @@ typedef struct VirtIOBlkClass { | 218 | @@ -XXX,XX +XXX,XX @@ typedef struct VirtIOBlkClass { |
219 | } VirtIOBlkClass; | 219 | } VirtIOBlkClass; |
220 | 220 | ||
221 | void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq); | 221 | void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq); |
222 | -void virtio_blk_free_request(VirtIOBlockReq *req); | 222 | -void virtio_blk_free_request(VirtIOBlockReq *req); |
223 | void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status); | 223 | void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status); |
224 | 224 | ||
225 | #endif | 225 | #endif |
226 | -- | 226 | -- |
227 | 2.39.5 (Apple Git-154) | 227 | 2.39.5 (Apple Git-154) | diff view generated by jsdifflib |
1 | From: Alexander Graf <graf@amazon.com> | 1 | From: Alexander Graf <graf@amazon.com> |
---|---|---|---|
2 | 2 | ||
3 | Apple defines a new "vmapple" machine type as part of its proprietary | 3 | Apple defines a new "vmapple" machine type as part of its proprietary |
4 | macOS Virtualization.Framework vmm. This machine type is similar to the | 4 | macOS Virtualization.Framework vmm. This machine type is similar to the |
5 | virt one, but with subtle differences in base devices, a few special | 5 | virt one, but with subtle differences in base devices, a few special |
6 | vmapple device additions and a vastly different boot chain. | 6 | vmapple device additions and a vastly different boot chain. |
7 | 7 | ||
8 | This patch reimplements this machine type in QEMU. To use it, you | 8 | This patch reimplements this machine type in QEMU. To use it, you |
9 | have to have a readily installed version of macOS for VMApple, | 9 | have to have a readily installed version of macOS for VMApple, |
10 | run on macOS with -accel hvf, pass the Virtualization.Framework | 10 | run on macOS with -accel hvf, pass the Virtualization.Framework |
11 | boot rom (AVPBooter) in via -bios, pass the aux and root volume as pflash | 11 | boot rom (AVPBooter) in via -bios, pass the aux and root volume as pflash |
12 | and pass aux and root volume as virtio drives. In addition, you also | 12 | and pass aux and root volume as virtio drives. In addition, you also |
13 | need to find the machine UUID and pass that as -M vmapple,uuid= parameter: | 13 | need to find the machine UUID and pass that as -M vmapple,uuid= parameter: |
14 | 14 | ||
15 | $ qemu-system-aarch64 -accel hvf -M vmapple,uuid=0x1234 -m 4G \ | 15 | $ qemu-system-aarch64 -accel hvf -M vmapple,uuid=0x1234 -m 4G \ |
16 | -bios /System/Library/Frameworks/Virtualization.framework/Versions/A/Resources/AVPBooter.vmapple2.bin | 16 | -bios /System/Library/Frameworks/Virtualization.framework/Versions/A/Resources/AVPBooter.vmapple2.bin |
17 | -drive file=aux,if=pflash,format=raw \ | 17 | -drive file=aux,if=pflash,format=raw \ |
18 | -drive file=root,if=pflash,format=raw \ | 18 | -drive file=root,if=pflash,format=raw \ |
19 | -drive file=aux,if=none,id=aux,format=raw \ | 19 | -drive file=aux,if=none,id=aux,format=raw \ |
20 | -device vmapple-virtio-blk-pci,variant=aux,drive=aux \ | 20 | -device vmapple-virtio-blk-pci,variant=aux,drive=aux \ |
21 | -drive file=root,if=none,id=root,format=raw \ | 21 | -drive file=root,if=none,id=root,format=raw \ |
22 | -device vmapple-virtio-blk-pci,variant=root,drive=root | 22 | -device vmapple-virtio-blk-pci,variant=root,drive=root |
23 | 23 | ||
24 | With all these in place, you should be able to see macOS booting | 24 | With all these in place, you should be able to see macOS booting |
25 | successfully. | 25 | successfully. |
26 | 26 | ||
27 | Known issues: | 27 | Known issues: |
28 | - Keyboard and mouse/tablet input is laggy. The reason is a quirk/bug | 28 | - Keyboard and mouse/tablet input is laggy. The reason is a quirk/bug |
29 | in macOS's XHCI driver when using pin-based interrupts instead of | 29 | in macOS's XHCI driver when using pin-based interrupts instead of |
30 | MSI-X. A workaround is in the works. | 30 | MSI-X. A workaround is in the works. |
31 | - Currently only macOS 12 guests are supported. The boot process for | 31 | - Currently only macOS 12 guests are supported. The boot process for |
32 | 13+ will need further investigation and adjustment. | 32 | 13+ will need further investigation and adjustment. |
33 | 33 | ||
34 | Signed-off-by: Alexander Graf <graf@amazon.com> | 34 | Signed-off-by: Alexander Graf <graf@amazon.com> |
35 | Co-authored-by: Phil Dennis-Jordan <phil@philjordan.eu> | 35 | Co-authored-by: Phil Dennis-Jordan <phil@philjordan.eu> |
36 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | 36 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> |
37 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 37 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
38 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | 38 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> |
39 | --- | 39 | --- |
40 | 40 | ||
41 | v3: | 41 | v3: |
42 | * Rebased on latest upstream, updated affinity and NIC creation | 42 | * Rebased on latest upstream, updated affinity and NIC creation |
43 | API usage | 43 | API usage |
44 | * Included Apple-variant virtio-blk in build dependency | 44 | * Included Apple-variant virtio-blk in build dependency |
45 | * Updated API usage for setting 'redist-region-count' array-typed property on GIC. | 45 | * Updated API usage for setting 'redist-region-count' array-typed property on GIC. |
46 | * Switched from virtio HID devices (for which macOS 12 does not contain | 46 | * Switched from virtio HID devices (for which macOS 12 does not contain |
47 | drivers) to an XHCI USB controller and USB HID devices. | 47 | drivers) to an XHCI USB controller and USB HID devices. |
48 | 48 | ||
49 | v4: | 49 | v4: |
50 | * Fixups for v4 changes to the other patches in the set. | 50 | * Fixups for v4 changes to the other patches in the set. |
51 | * Corrected the assert macro to use | 51 | * Corrected the assert macro to use |
52 | * Removed superfluous endian conversions corresponding to cfg's. | 52 | * Removed superfluous endian conversions corresponding to cfg's. |
53 | * Init error handling improvement. | 53 | * Init error handling improvement. |
54 | * No need to select CPU type on TCG, as only HVF is supported. | 54 | * No need to select CPU type on TCG, as only HVF is supported. |
55 | * Machine type version bumped to 9.2 | 55 | * Machine type version bumped to 9.2 |
56 | * #include order improved | 56 | * #include order improved |
57 | 57 | ||
58 | v5: | 58 | v5: |
59 | * Fixed memory reservation for ecam alias region. | 59 | * Fixed memory reservation for ecam alias region. |
60 | * Better error handling setting properties on devices. | 60 | * Better error handling setting properties on devices. |
61 | * Simplified the machine ECID/UUID extraction script and actually created a | 61 | * Simplified the machine ECID/UUID extraction script and actually created a |
62 | file for it rather than quoting its code in documentation. | 62 | file for it rather than quoting its code in documentation. |
63 | 63 | ||
64 | v7: | 64 | v7: |
65 | * Tiny error handling fix, un-inlined function. | 65 | * Tiny error handling fix, un-inlined function. |
66 | 66 | ||
67 | v8: | 67 | v8: |
68 | * Use object_property_add_uint64_ptr rather than defining custom UUID | 68 | * Use object_property_add_uint64_ptr rather than defining custom UUID |
69 | property get/set functions. | 69 | property get/set functions. |
70 | 70 | ||
71 | v9: | 71 | v9: |
72 | * Documentation improvements | 72 | * Documentation improvements |
73 | * Fixed variable name and struct field used during pvpanic device creation. | 73 | * Fixed variable name and struct field used during pvpanic device creation. |
74 | 74 | ||
75 | v10: | 75 | v10: |
76 | * Documentation fixup for changed virtio-blk device type. | 76 | * Documentation fixup for changed virtio-blk device type. |
77 | * Small improvements to shell commands in documentation. | 77 | * Small improvements to shell commands in documentation. |
78 | * Improved propagation of errors during cfg device instantiation. | 78 | * Improved propagation of errors during cfg device instantiation. |
79 | 79 | ||
80 | v11: | 80 | v11: |
81 | * Quoted more strings in the documentation's shell script code. | 81 | * Quoted more strings in the documentation's shell script code. |
82 | 82 | ||
83 | v13: | 83 | v13: |
84 | * Bumped the machine type version from 9.2 to 10.0. | 84 | * Bumped the machine type version from 9.2 to 10.0. |
85 | 85 | ||
86 | MAINTAINERS | 1 + | 86 | MAINTAINERS | 1 + |
87 | contrib/vmapple/uuid.sh | 9 + | 87 | contrib/vmapple/uuid.sh | 9 + |
88 | docs/system/arm/vmapple.rst | 63 ++++ | 88 | docs/system/arm/vmapple.rst | 63 ++++ |
89 | docs/system/target-arm.rst | 1 + | 89 | docs/system/target-arm.rst | 1 + |
90 | hw/vmapple/Kconfig | 20 ++ | 90 | hw/vmapple/Kconfig | 20 ++ |
91 | hw/vmapple/meson.build | 1 + | 91 | hw/vmapple/meson.build | 1 + |
92 | hw/vmapple/vmapple.c | 646 ++++++++++++++++++++++++++++++++++++ | 92 | hw/vmapple/vmapple.c | 646 ++++++++++++++++++++++++++++++++++++ |
93 | 7 files changed, 741 insertions(+) | 93 | 7 files changed, 741 insertions(+) |
94 | create mode 100755 contrib/vmapple/uuid.sh | 94 | create mode 100755 contrib/vmapple/uuid.sh |
95 | create mode 100644 docs/system/arm/vmapple.rst | 95 | create mode 100644 docs/system/arm/vmapple.rst |
96 | create mode 100644 hw/vmapple/vmapple.c | 96 | create mode 100644 hw/vmapple/vmapple.c |
97 | 97 | ||
98 | diff --git a/MAINTAINERS b/MAINTAINERS | 98 | diff --git a/MAINTAINERS b/MAINTAINERS |
99 | index XXXXXXX..XXXXXXX 100644 | 99 | index XXXXXXX..XXXXXXX 100644 |
100 | --- a/MAINTAINERS | 100 | --- a/MAINTAINERS |
101 | +++ b/MAINTAINERS | 101 | +++ b/MAINTAINERS |
102 | @@ -XXX,XX +XXX,XX @@ R: Phil Dennis-Jordan <phil@philjordan.eu> | 102 | @@ -XXX,XX +XXX,XX @@ R: Phil Dennis-Jordan <phil@philjordan.eu> |
103 | S: Maintained | 103 | S: Maintained |
104 | F: hw/vmapple/* | 104 | F: hw/vmapple/* |
105 | F: include/hw/vmapple/* | 105 | F: include/hw/vmapple/* |
106 | +F: docs/system/arm/vmapple.rst | 106 | +F: docs/system/arm/vmapple.rst |
107 | 107 | ||
108 | Subsystems | 108 | Subsystems |
109 | ---------- | 109 | ---------- |
110 | diff --git a/contrib/vmapple/uuid.sh b/contrib/vmapple/uuid.sh | 110 | diff --git a/contrib/vmapple/uuid.sh b/contrib/vmapple/uuid.sh |
111 | new file mode 100755 | 111 | new file mode 100755 |
112 | index XXXXXXX..XXXXXXX | 112 | index XXXXXXX..XXXXXXX |
113 | --- /dev/null | 113 | --- /dev/null |
114 | +++ b/contrib/vmapple/uuid.sh | 114 | +++ b/contrib/vmapple/uuid.sh |
115 | @@ -XXX,XX +XXX,XX @@ | 115 | @@ -XXX,XX +XXX,XX @@ |
116 | +#!/bin/sh | 116 | +#!/bin/sh |
117 | +# Used for converting a guest provisioned using Virtualization.framework | 117 | +# Used for converting a guest provisioned using Virtualization.framework |
118 | +# for use with the QEMU 'vmapple' aarch64 machine type. | 118 | +# for use with the QEMU 'vmapple' aarch64 machine type. |
119 | +# | 119 | +# |
120 | +# Extracts the Machine UUID from Virtualization.framework VM JSON file. | 120 | +# Extracts the Machine UUID from Virtualization.framework VM JSON file. |
121 | +# (as produced by 'macosvm', passed as command line argument) | 121 | +# (as produced by 'macosvm', passed as command line argument) |
122 | + | 122 | + |
123 | +plutil -extract machineId raw "$1" | base64 -d | plutil -extract ECID raw - | 123 | +plutil -extract machineId raw "$1" | base64 -d | plutil -extract ECID raw - |
124 | + | 124 | + |
125 | diff --git a/docs/system/arm/vmapple.rst b/docs/system/arm/vmapple.rst | 125 | diff --git a/docs/system/arm/vmapple.rst b/docs/system/arm/vmapple.rst |
126 | new file mode 100644 | 126 | new file mode 100644 |
127 | index XXXXXXX..XXXXXXX | 127 | index XXXXXXX..XXXXXXX |
128 | --- /dev/null | 128 | --- /dev/null |
129 | +++ b/docs/system/arm/vmapple.rst | 129 | +++ b/docs/system/arm/vmapple.rst |
130 | @@ -XXX,XX +XXX,XX @@ | 130 | @@ -XXX,XX +XXX,XX @@ |
131 | +VMApple machine emulation | 131 | +VMApple machine emulation |
132 | +======================================================================================== | 132 | +======================================================================================== |
133 | + | 133 | + |
134 | +VMApple is the device model that the macOS built-in hypervisor called "Virtualization.framework" | 134 | +VMApple is the device model that the macOS built-in hypervisor called "Virtualization.framework" |
135 | +exposes to Apple Silicon macOS guests. The "vmapple" machine model in QEMU implements the same | 135 | +exposes to Apple Silicon macOS guests. The "vmapple" machine model in QEMU implements the same |
136 | +device model, but does not use any code from Virtualization.Framework. | 136 | +device model, but does not use any code from Virtualization.Framework. |
137 | + | 137 | + |
138 | +Prerequisites | 138 | +Prerequisites |
139 | +------------- | 139 | +------------- |
140 | + | 140 | + |
141 | +To run the vmapple machine model, you need to | 141 | +To run the vmapple machine model, you need to |
142 | + | 142 | + |
143 | + * Run on Apple Silicon | 143 | + * Run on Apple Silicon |
144 | + * Run on macOS 12.0 or above | 144 | + * Run on macOS 12.0 or above |
145 | + * Have an already installed copy of a Virtualization.Framework macOS 12 virtual | 145 | + * Have an already installed copy of a Virtualization.Framework macOS 12 virtual |
146 | + machine. Note that newer versions than 12.x are currently NOT supported on | 146 | + machine. Note that newer versions than 12.x are currently NOT supported on |
147 | + the guest side. I will assume that you installed it using the | 147 | + the guest side. I will assume that you installed it using the |
148 | + `macosvm <https://github.com/s-u/macosvm>` CLI. | 148 | + `macosvm <https://github.com/s-u/macosvm>` CLI. |
149 | + | 149 | + |
150 | +First, we need to extract the UUID from the virtual machine that you installed. You can do this | 150 | +First, we need to extract the UUID from the virtual machine that you installed. You can do this |
151 | +by running the shell script in contrib/vmapple/uuid.sh on the macosvm.json file. | 151 | +by running the shell script in contrib/vmapple/uuid.sh on the macosvm.json file. |
152 | + | 152 | + |
153 | +.. code-block:: bash | 153 | +.. code-block:: bash |
154 | + :caption: uuid.sh script to extract the UUID from a macosvm.json file | 154 | + :caption: uuid.sh script to extract the UUID from a macosvm.json file |
155 | + | 155 | + |
156 | + $ contrib/vmapple/uuid.sh "path/to/macosvm.json" | 156 | + $ contrib/vmapple/uuid.sh "path/to/macosvm.json" |
157 | + | 157 | + |
158 | +Now we also need to trim the aux partition. It contains metadata that we can just discard: | 158 | +Now we also need to trim the aux partition. It contains metadata that we can just discard: |
159 | + | 159 | + |
160 | +.. code-block:: bash | 160 | +.. code-block:: bash |
161 | + :caption: Command to trim the aux file | 161 | + :caption: Command to trim the aux file |
162 | + | 162 | + |
163 | + $ dd if="aux.img" of="aux.img.trimmed" bs=$(( 0x4000 )) skip=1 | 163 | + $ dd if="aux.img" of="aux.img.trimmed" bs=$(( 0x4000 )) skip=1 |
164 | + | 164 | + |
165 | +How to run | 165 | +How to run |
166 | +---------- | 166 | +---------- |
167 | + | 167 | + |
168 | +Then, we can launch QEMU with the Virtualization.Framework pre-boot environment and the readily | 168 | +Then, we can launch QEMU with the Virtualization.Framework pre-boot environment and the readily |
169 | +installed target disk images. I recommend to port forward the VM's ssh and vnc ports to the host | 169 | +installed target disk images. I recommend to port forward the VM's ssh and vnc ports to the host |
170 | +to get better interactive access into the target system: | 170 | +to get better interactive access into the target system: |
171 | + | 171 | + |
172 | +.. code-block:: bash | 172 | +.. code-block:: bash |
173 | + :caption: Example execution command line | 173 | + :caption: Example execution command line |
174 | + | 174 | + |
175 | + $ UUID="$(contrib/vmapple/uuid.sh 'macosvm.json')" | 175 | + $ UUID="$(contrib/vmapple/uuid.sh 'macosvm.json')" |
176 | + $ AVPBOOTER="/System/Library/Frameworks/Virtualization.framework/Resources/AVPBooter.vmapple2.bin" | 176 | + $ AVPBOOTER="/System/Library/Frameworks/Virtualization.framework/Resources/AVPBooter.vmapple2.bin" |
177 | + $ AUX="aux.img.trimmed" | 177 | + $ AUX="aux.img.trimmed" |
178 | + $ DISK="disk.img" | 178 | + $ DISK="disk.img" |
179 | + $ qemu-system-aarch64 \ | 179 | + $ qemu-system-aarch64 \ |
180 | + -serial mon:stdio \ | 180 | + -serial mon:stdio \ |
181 | + -m 4G \ | 181 | + -m 4G \ |
182 | + -accel hvf \ | 182 | + -accel hvf \ |
183 | + -M vmapple,uuid="$UUID" \ | 183 | + -M vmapple,uuid="$UUID" \ |
184 | + -bios "$AVPBOOTER" \ | 184 | + -bios "$AVPBOOTER" \ |
185 | + -drive file="$AUX",if=pflash,format=raw \ | 185 | + -drive file="$AUX",if=pflash,format=raw \ |
186 | + -drive file="$DISK",if=pflash,format=raw \ | 186 | + -drive file="$DISK",if=pflash,format=raw \ |
187 | + -drive file="$AUX",if=none,id=aux,format=raw \ | 187 | + -drive file="$AUX",if=none,id=aux,format=raw \ |
188 | + -drive file="$DISK",if=none,id=root,format=raw \ | 188 | + -drive file="$DISK",if=none,id=root,format=raw \ |
189 | + -device vmapple-virtio-blk-pci,variant=aux,drive=aux \ | 189 | + -device vmapple-virtio-blk-pci,variant=aux,drive=aux \ |
190 | + -device vmapple-virtio-blk-pci,variant=root,drive=root \ | 190 | + -device vmapple-virtio-blk-pci,variant=root,drive=root \ |
191 | + -netdev user,id=net0,ipv6=off,hostfwd=tcp::2222-:22,hostfwd=tcp::5901-:5900 \ | 191 | + -netdev user,id=net0,ipv6=off,hostfwd=tcp::2222-:22,hostfwd=tcp::5901-:5900 \ |
192 | + -device virtio-net-pci,netdev=net0 | 192 | + -device virtio-net-pci,netdev=net0 |
193 | + | 193 | + |
194 | diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst | 194 | diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst |
195 | index XXXXXXX..XXXXXXX 100644 | 195 | index XXXXXXX..XXXXXXX 100644 |
196 | --- a/docs/system/target-arm.rst | 196 | --- a/docs/system/target-arm.rst |
197 | +++ b/docs/system/target-arm.rst | 197 | +++ b/docs/system/target-arm.rst |
198 | @@ -XXX,XX +XXX,XX @@ Board-specific documentation | 198 | @@ -XXX,XX +XXX,XX @@ Board-specific documentation |
199 | arm/stellaris | 199 | arm/stellaris |
200 | arm/stm32 | 200 | arm/stm32 |
201 | arm/virt | 201 | arm/virt |
202 | + arm/vmapple | 202 | + arm/vmapple |
203 | arm/xenpvh | 203 | arm/xenpvh |
204 | arm/xlnx-versal-virt | 204 | arm/xlnx-versal-virt |
205 | arm/xlnx-zynq | 205 | arm/xlnx-zynq |
206 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig | 206 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig |
207 | index XXXXXXX..XXXXXXX 100644 | 207 | index XXXXXXX..XXXXXXX 100644 |
208 | --- a/hw/vmapple/Kconfig | 208 | --- a/hw/vmapple/Kconfig |
209 | +++ b/hw/vmapple/Kconfig | 209 | +++ b/hw/vmapple/Kconfig |
210 | @@ -XXX,XX +XXX,XX @@ config VMAPPLE_CFG | 210 | @@ -XXX,XX +XXX,XX @@ config VMAPPLE_CFG |
211 | config VMAPPLE_VIRTIO_BLK | 211 | config VMAPPLE_VIRTIO_BLK |
212 | bool | 212 | bool |
213 | 213 | ||
214 | +config VMAPPLE | 214 | +config VMAPPLE |
215 | + bool | 215 | + bool |
216 | + depends on ARM | 216 | + depends on ARM |
217 | + depends on HVF | 217 | + depends on HVF |
218 | + default y if ARM | 218 | + default y if ARM |
219 | + imply PCI_DEVICES | 219 | + imply PCI_DEVICES |
220 | + select ARM_GIC | 220 | + select ARM_GIC |
221 | + select PLATFORM_BUS | 221 | + select PLATFORM_BUS |
222 | + select PCI_EXPRESS | 222 | + select PCI_EXPRESS |
223 | + select PCI_EXPRESS_GENERIC_BRIDGE | 223 | + select PCI_EXPRESS_GENERIC_BRIDGE |
224 | + select PL011 # UART | 224 | + select PL011 # UART |
225 | + select PL031 # RTC | 225 | + select PL031 # RTC |
226 | + select PL061 # GPIO | 226 | + select PL061 # GPIO |
227 | + select GPIO_PWR | 227 | + select GPIO_PWR |
228 | + select PVPANIC_MMIO | 228 | + select PVPANIC_MMIO |
229 | + select VMAPPLE_AES | 229 | + select VMAPPLE_AES |
230 | + select VMAPPLE_BDIF | 230 | + select VMAPPLE_BDIF |
231 | + select VMAPPLE_CFG | 231 | + select VMAPPLE_CFG |
232 | + select MAC_PVG_MMIO | 232 | + select MAC_PVG_MMIO |
233 | + select VMAPPLE_VIRTIO_BLK | 233 | + select VMAPPLE_VIRTIO_BLK |
234 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build | 234 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build |
235 | index XXXXXXX..XXXXXXX 100644 | 235 | index XXXXXXX..XXXXXXX 100644 |
236 | --- a/hw/vmapple/meson.build | 236 | --- a/hw/vmapple/meson.build |
237 | +++ b/hw/vmapple/meson.build | 237 | +++ b/hw/vmapple/meson.build |
238 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) | 238 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) |
239 | system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c')) | 239 | system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c')) |
240 | system_ss.add(when: 'CONFIG_VMAPPLE_CFG', if_true: files('cfg.c')) | 240 | system_ss.add(when: 'CONFIG_VMAPPLE_CFG', if_true: files('cfg.c')) |
241 | system_ss.add(when: 'CONFIG_VMAPPLE_VIRTIO_BLK', if_true: files('virtio-blk.c')) | 241 | system_ss.add(when: 'CONFIG_VMAPPLE_VIRTIO_BLK', if_true: files('virtio-blk.c')) |
242 | +specific_ss.add(when: 'CONFIG_VMAPPLE', if_true: files('vmapple.c')) | 242 | +specific_ss.add(when: 'CONFIG_VMAPPLE', if_true: files('vmapple.c')) |
243 | diff --git a/hw/vmapple/vmapple.c b/hw/vmapple/vmapple.c | 243 | diff --git a/hw/vmapple/vmapple.c b/hw/vmapple/vmapple.c |
244 | new file mode 100644 | 244 | new file mode 100644 |
245 | index XXXXXXX..XXXXXXX | 245 | index XXXXXXX..XXXXXXX |
246 | --- /dev/null | 246 | --- /dev/null |
247 | +++ b/hw/vmapple/vmapple.c | 247 | +++ b/hw/vmapple/vmapple.c |
248 | @@ -XXX,XX +XXX,XX @@ | 248 | @@ -XXX,XX +XXX,XX @@ |
249 | +/* | 249 | +/* |
250 | + * VMApple machine emulation | 250 | + * VMApple machine emulation |
251 | + * | 251 | + * |
252 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | 252 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
253 | + * | 253 | + * |
254 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | 254 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. |
255 | + * See the COPYING file in the top-level directory. | 255 | + * See the COPYING file in the top-level directory. |
256 | + * | 256 | + * |
257 | + * SPDX-License-Identifier: GPL-2.0-or-later | 257 | + * SPDX-License-Identifier: GPL-2.0-or-later |
258 | + * | 258 | + * |
259 | + * VMApple is the device model that the macOS built-in hypervisor called | 259 | + * VMApple is the device model that the macOS built-in hypervisor called |
260 | + * "Virtualization.framework" exposes to Apple Silicon macOS guests. The | 260 | + * "Virtualization.framework" exposes to Apple Silicon macOS guests. The |
261 | + * machine model in this file implements the same device model in QEMU, but | 261 | + * machine model in this file implements the same device model in QEMU, but |
262 | + * does not use any code from Virtualization.Framework. | 262 | + * does not use any code from Virtualization.Framework. |
263 | + */ | 263 | + */ |
264 | + | 264 | + |
265 | +#include "qemu/osdep.h" | 265 | +#include "qemu/osdep.h" |
266 | +#include "qemu/bitops.h" | 266 | +#include "qemu/bitops.h" |
267 | +#include "qemu/datadir.h" | 267 | +#include "qemu/datadir.h" |
268 | +#include "qemu/error-report.h" | 268 | +#include "qemu/error-report.h" |
269 | +#include "qemu/guest-random.h" | 269 | +#include "qemu/guest-random.h" |
270 | +#include "qemu/help-texts.h" | 270 | +#include "qemu/help-texts.h" |
271 | +#include "qemu/log.h" | 271 | +#include "qemu/log.h" |
272 | +#include "qemu/module.h" | 272 | +#include "qemu/module.h" |
273 | +#include "qemu/option.h" | 273 | +#include "qemu/option.h" |
274 | +#include "qemu/units.h" | 274 | +#include "qemu/units.h" |
275 | +#include "monitor/qdev.h" | 275 | +#include "monitor/qdev.h" |
276 | +#include "hw/boards.h" | 276 | +#include "hw/boards.h" |
277 | +#include "hw/irq.h" | 277 | +#include "hw/irq.h" |
278 | +#include "hw/loader.h" | 278 | +#include "hw/loader.h" |
279 | +#include "hw/qdev-properties.h" | 279 | +#include "hw/qdev-properties.h" |
280 | +#include "hw/sysbus.h" | 280 | +#include "hw/sysbus.h" |
281 | +#include "hw/usb.h" | 281 | +#include "hw/usb.h" |
282 | +#include "hw/arm/boot.h" | 282 | +#include "hw/arm/boot.h" |
283 | +#include "hw/arm/primecell.h" | 283 | +#include "hw/arm/primecell.h" |
284 | +#include "hw/char/pl011.h" | 284 | +#include "hw/char/pl011.h" |
285 | +#include "hw/intc/arm_gic.h" | 285 | +#include "hw/intc/arm_gic.h" |
286 | +#include "hw/intc/arm_gicv3_common.h" | 286 | +#include "hw/intc/arm_gicv3_common.h" |
287 | +#include "hw/misc/pvpanic.h" | 287 | +#include "hw/misc/pvpanic.h" |
288 | +#include "hw/pci-host/gpex.h" | 288 | +#include "hw/pci-host/gpex.h" |
289 | +#include "hw/usb/xhci.h" | 289 | +#include "hw/usb/xhci.h" |
290 | +#include "hw/virtio/virtio-pci.h" | 290 | +#include "hw/virtio/virtio-pci.h" |
291 | +#include "hw/vmapple/vmapple.h" | 291 | +#include "hw/vmapple/vmapple.h" |
292 | +#include "net/net.h" | 292 | +#include "net/net.h" |
293 | +#include "qapi/error.h" | 293 | +#include "qapi/error.h" |
294 | +#include "qapi/qmp/qlist.h" | 294 | +#include "qapi/qmp/qlist.h" |
295 | +#include "qapi/visitor.h" | 295 | +#include "qapi/visitor.h" |
296 | +#include "qapi/qapi-visit-common.h" | 296 | +#include "qapi/qapi-visit-common.h" |
297 | +#include "standard-headers/linux/input.h" | 297 | +#include "standard-headers/linux/input.h" |
298 | +#include "sysemu/hvf.h" | 298 | +#include "sysemu/hvf.h" |
299 | +#include "sysemu/kvm.h" | 299 | +#include "sysemu/kvm.h" |
300 | +#include "sysemu/reset.h" | 300 | +#include "sysemu/reset.h" |
301 | +#include "sysemu/runstate.h" | 301 | +#include "sysemu/runstate.h" |
302 | +#include "sysemu/sysemu.h" | 302 | +#include "sysemu/sysemu.h" |
303 | +#include "target/arm/internals.h" | 303 | +#include "target/arm/internals.h" |
304 | +#include "target/arm/kvm_arm.h" | 304 | +#include "target/arm/kvm_arm.h" |
305 | + | 305 | + |
306 | +struct VMAppleMachineClass { | 306 | +struct VMAppleMachineClass { |
307 | + MachineClass parent; | 307 | + MachineClass parent; |
308 | +}; | 308 | +}; |
309 | + | 309 | + |
310 | +struct VMAppleMachineState { | 310 | +struct VMAppleMachineState { |
311 | + MachineState parent; | 311 | + MachineState parent; |
312 | + | 312 | + |
313 | + Notifier machine_done; | 313 | + Notifier machine_done; |
314 | + struct arm_boot_info bootinfo; | 314 | + struct arm_boot_info bootinfo; |
315 | + MemMapEntry *memmap; | 315 | + MemMapEntry *memmap; |
316 | + const int *irqmap; | 316 | + const int *irqmap; |
317 | + DeviceState *gic; | 317 | + DeviceState *gic; |
318 | + DeviceState *cfg; | 318 | + DeviceState *cfg; |
319 | + DeviceState *pvpanic; | 319 | + DeviceState *pvpanic; |
320 | + Notifier powerdown_notifier; | 320 | + Notifier powerdown_notifier; |
321 | + PCIBus *bus; | 321 | + PCIBus *bus; |
322 | + MemoryRegion fw_mr; | 322 | + MemoryRegion fw_mr; |
323 | + MemoryRegion ecam_alias; | 323 | + MemoryRegion ecam_alias; |
324 | + uint64_t uuid; | 324 | + uint64_t uuid; |
325 | +}; | 325 | +}; |
326 | + | 326 | + |
327 | +#define DEFINE_VMAPPLE_MACHINE_LATEST(major, minor, latest) \ | 327 | +#define DEFINE_VMAPPLE_MACHINE_LATEST(major, minor, latest) \ |
328 | + static void vmapple##major##_##minor##_class_init(ObjectClass *oc, \ | 328 | + static void vmapple##major##_##minor##_class_init(ObjectClass *oc, \ |
329 | + void *data) \ | 329 | + void *data) \ |
330 | + { \ | 330 | + { \ |
331 | + MachineClass *mc = MACHINE_CLASS(oc); \ | 331 | + MachineClass *mc = MACHINE_CLASS(oc); \ |
332 | + vmapple_machine_##major##_##minor##_options(mc); \ | 332 | + vmapple_machine_##major##_##minor##_options(mc); \ |
333 | + mc->desc = "QEMU " # major "." # minor " Apple Virtual Machine"; \ | 333 | + mc->desc = "QEMU " # major "." # minor " Apple Virtual Machine"; \ |
334 | + if (latest) { \ | 334 | + if (latest) { \ |
335 | + mc->alias = "vmapple"; \ | 335 | + mc->alias = "vmapple"; \ |
336 | + } \ | 336 | + } \ |
337 | + } \ | 337 | + } \ |
338 | + static const TypeInfo machvmapple##major##_##minor##_info = { \ | 338 | + static const TypeInfo machvmapple##major##_##minor##_info = { \ |
339 | + .name = MACHINE_TYPE_NAME("vmapple-" # major "." # minor), \ | 339 | + .name = MACHINE_TYPE_NAME("vmapple-" # major "." # minor), \ |
340 | + .parent = TYPE_VMAPPLE_MACHINE, \ | 340 | + .parent = TYPE_VMAPPLE_MACHINE, \ |
341 | + .class_init = vmapple##major##_##minor##_class_init, \ | 341 | + .class_init = vmapple##major##_##minor##_class_init, \ |
342 | + }; \ | 342 | + }; \ |
343 | + static void machvmapple_machine_##major##_##minor##_init(void) \ | 343 | + static void machvmapple_machine_##major##_##minor##_init(void) \ |
344 | + { \ | 344 | + { \ |
345 | + type_register_static(&machvmapple##major##_##minor##_info); \ | 345 | + type_register_static(&machvmapple##major##_##minor##_info); \ |
346 | + } \ | 346 | + } \ |
347 | + type_init(machvmapple_machine_##major##_##minor##_init); | 347 | + type_init(machvmapple_machine_##major##_##minor##_init); |
348 | + | 348 | + |
349 | +#define DEFINE_VMAPPLE_MACHINE_AS_LATEST(major, minor) \ | 349 | +#define DEFINE_VMAPPLE_MACHINE_AS_LATEST(major, minor) \ |
350 | + DEFINE_VMAPPLE_MACHINE_LATEST(major, minor, true) | 350 | + DEFINE_VMAPPLE_MACHINE_LATEST(major, minor, true) |
351 | +#define DEFINE_VMAPPLE_MACHINE(major, minor) \ | 351 | +#define DEFINE_VMAPPLE_MACHINE(major, minor) \ |
352 | + DEFINE_VMAPPLE_MACHINE_LATEST(major, minor, false) | 352 | + DEFINE_VMAPPLE_MACHINE_LATEST(major, minor, false) |
353 | + | 353 | + |
354 | +#define TYPE_VMAPPLE_MACHINE MACHINE_TYPE_NAME("vmapple") | 354 | +#define TYPE_VMAPPLE_MACHINE MACHINE_TYPE_NAME("vmapple") |
355 | +OBJECT_DECLARE_TYPE(VMAppleMachineState, VMAppleMachineClass, VMAPPLE_MACHINE) | 355 | +OBJECT_DECLARE_TYPE(VMAppleMachineState, VMAppleMachineClass, VMAPPLE_MACHINE) |
356 | + | 356 | + |
357 | +/* Number of external interrupt lines to configure the GIC with */ | 357 | +/* Number of external interrupt lines to configure the GIC with */ |
358 | +#define NUM_IRQS 256 | 358 | +#define NUM_IRQS 256 |
359 | + | 359 | + |
360 | +enum { | 360 | +enum { |
361 | + VMAPPLE_FIRMWARE, | 361 | + VMAPPLE_FIRMWARE, |
362 | + VMAPPLE_CONFIG, | 362 | + VMAPPLE_CONFIG, |
363 | + VMAPPLE_MEM, | 363 | + VMAPPLE_MEM, |
364 | + VMAPPLE_GIC_DIST, | 364 | + VMAPPLE_GIC_DIST, |
365 | + VMAPPLE_GIC_REDIST, | 365 | + VMAPPLE_GIC_REDIST, |
366 | + VMAPPLE_UART, | 366 | + VMAPPLE_UART, |
367 | + VMAPPLE_RTC, | 367 | + VMAPPLE_RTC, |
368 | + VMAPPLE_PCIE, | 368 | + VMAPPLE_PCIE, |
369 | + VMAPPLE_PCIE_MMIO, | 369 | + VMAPPLE_PCIE_MMIO, |
370 | + VMAPPLE_PCIE_ECAM, | 370 | + VMAPPLE_PCIE_ECAM, |
371 | + VMAPPLE_GPIO, | 371 | + VMAPPLE_GPIO, |
372 | + VMAPPLE_PVPANIC, | 372 | + VMAPPLE_PVPANIC, |
373 | + VMAPPLE_APV_GFX, | 373 | + VMAPPLE_APV_GFX, |
374 | + VMAPPLE_APV_IOSFC, | 374 | + VMAPPLE_APV_IOSFC, |
375 | + VMAPPLE_AES_1, | 375 | + VMAPPLE_AES_1, |
376 | + VMAPPLE_AES_2, | 376 | + VMAPPLE_AES_2, |
377 | + VMAPPLE_BDOOR, | 377 | + VMAPPLE_BDOOR, |
378 | + VMAPPLE_MEMMAP_LAST, | 378 | + VMAPPLE_MEMMAP_LAST, |
379 | +}; | 379 | +}; |
380 | + | 380 | + |
381 | +static MemMapEntry memmap[] = { | 381 | +static MemMapEntry memmap[] = { |
382 | + [VMAPPLE_FIRMWARE] = { 0x00100000, 0x00100000 }, | 382 | + [VMAPPLE_FIRMWARE] = { 0x00100000, 0x00100000 }, |
383 | + [VMAPPLE_CONFIG] = { 0x00400000, 0x00010000 }, | 383 | + [VMAPPLE_CONFIG] = { 0x00400000, 0x00010000 }, |
384 | + | 384 | + |
385 | + [VMAPPLE_GIC_DIST] = { 0x10000000, 0x00010000 }, | 385 | + [VMAPPLE_GIC_DIST] = { 0x10000000, 0x00010000 }, |
386 | + [VMAPPLE_GIC_REDIST] = { 0x10010000, 0x00400000 }, | 386 | + [VMAPPLE_GIC_REDIST] = { 0x10010000, 0x00400000 }, |
387 | + | 387 | + |
388 | + [VMAPPLE_UART] = { 0x20010000, 0x00010000 }, | 388 | + [VMAPPLE_UART] = { 0x20010000, 0x00010000 }, |
389 | + [VMAPPLE_RTC] = { 0x20050000, 0x00001000 }, | 389 | + [VMAPPLE_RTC] = { 0x20050000, 0x00001000 }, |
390 | + [VMAPPLE_GPIO] = { 0x20060000, 0x00001000 }, | 390 | + [VMAPPLE_GPIO] = { 0x20060000, 0x00001000 }, |
391 | + [VMAPPLE_PVPANIC] = { 0x20070000, 0x00000002 }, | 391 | + [VMAPPLE_PVPANIC] = { 0x20070000, 0x00000002 }, |
392 | + [VMAPPLE_BDOOR] = { 0x30000000, 0x00200000 }, | 392 | + [VMAPPLE_BDOOR] = { 0x30000000, 0x00200000 }, |
393 | + [VMAPPLE_APV_GFX] = { 0x30200000, 0x00010000 }, | 393 | + [VMAPPLE_APV_GFX] = { 0x30200000, 0x00010000 }, |
394 | + [VMAPPLE_APV_IOSFC] = { 0x30210000, 0x00010000 }, | 394 | + [VMAPPLE_APV_IOSFC] = { 0x30210000, 0x00010000 }, |
395 | + [VMAPPLE_AES_1] = { 0x30220000, 0x00004000 }, | 395 | + [VMAPPLE_AES_1] = { 0x30220000, 0x00004000 }, |
396 | + [VMAPPLE_AES_2] = { 0x30230000, 0x00004000 }, | 396 | + [VMAPPLE_AES_2] = { 0x30230000, 0x00004000 }, |
397 | + [VMAPPLE_PCIE_ECAM] = { 0x40000000, 0x10000000 }, | 397 | + [VMAPPLE_PCIE_ECAM] = { 0x40000000, 0x10000000 }, |
398 | + [VMAPPLE_PCIE_MMIO] = { 0x50000000, 0x1fff0000 }, | 398 | + [VMAPPLE_PCIE_MMIO] = { 0x50000000, 0x1fff0000 }, |
399 | + | 399 | + |
400 | + /* Actual RAM size depends on configuration */ | 400 | + /* Actual RAM size depends on configuration */ |
401 | + [VMAPPLE_MEM] = { 0x70000000ULL, GiB}, | 401 | + [VMAPPLE_MEM] = { 0x70000000ULL, GiB}, |
402 | +}; | 402 | +}; |
403 | + | 403 | + |
404 | +static const int irqmap[] = { | 404 | +static const int irqmap[] = { |
405 | + [VMAPPLE_UART] = 1, | 405 | + [VMAPPLE_UART] = 1, |
406 | + [VMAPPLE_RTC] = 2, | 406 | + [VMAPPLE_RTC] = 2, |
407 | + [VMAPPLE_GPIO] = 0x5, | 407 | + [VMAPPLE_GPIO] = 0x5, |
408 | + [VMAPPLE_APV_IOSFC] = 0x10, | 408 | + [VMAPPLE_APV_IOSFC] = 0x10, |
409 | + [VMAPPLE_APV_GFX] = 0x11, | 409 | + [VMAPPLE_APV_GFX] = 0x11, |
410 | + [VMAPPLE_AES_1] = 0x12, | 410 | + [VMAPPLE_AES_1] = 0x12, |
411 | + [VMAPPLE_PCIE] = 0x20, | 411 | + [VMAPPLE_PCIE] = 0x20, |
412 | +}; | 412 | +}; |
413 | + | 413 | + |
414 | +#define GPEX_NUM_IRQS 16 | 414 | +#define GPEX_NUM_IRQS 16 |
415 | + | 415 | + |
416 | +static void create_bdif(VMAppleMachineState *vms, MemoryRegion *mem) | 416 | +static void create_bdif(VMAppleMachineState *vms, MemoryRegion *mem) |
417 | +{ | 417 | +{ |
418 | + DeviceState *bdif; | 418 | + DeviceState *bdif; |
419 | + SysBusDevice *bdif_sb; | 419 | + SysBusDevice *bdif_sb; |
420 | + DriveInfo *di_aux = drive_get(IF_PFLASH, 0, 0); | 420 | + DriveInfo *di_aux = drive_get(IF_PFLASH, 0, 0); |
421 | + DriveInfo *di_root = drive_get(IF_PFLASH, 0, 1); | 421 | + DriveInfo *di_root = drive_get(IF_PFLASH, 0, 1); |
422 | + | 422 | + |
423 | + if (!di_aux) { | 423 | + if (!di_aux) { |
424 | + error_report("No AUX device. Please specify one as pflash drive."); | 424 | + error_report("No AUX device. Please specify one as pflash drive."); |
425 | + exit(1); | 425 | + exit(1); |
426 | + } | 426 | + } |
427 | + | 427 | + |
428 | + if (!di_root) { | 428 | + if (!di_root) { |
429 | + /* Fall back to the first IF_VIRTIO device as root device */ | 429 | + /* Fall back to the first IF_VIRTIO device as root device */ |
430 | + di_root = drive_get(IF_VIRTIO, 0, 0); | 430 | + di_root = drive_get(IF_VIRTIO, 0, 0); |
431 | + } | 431 | + } |
432 | + | 432 | + |
433 | + if (!di_root) { | 433 | + if (!di_root) { |
434 | + error_report("No root device. Please specify one as virtio drive."); | 434 | + error_report("No root device. Please specify one as virtio drive."); |
435 | + exit(1); | 435 | + exit(1); |
436 | + } | 436 | + } |
437 | + | 437 | + |
438 | + /* PV backdoor device */ | 438 | + /* PV backdoor device */ |
439 | + bdif = qdev_new(TYPE_VMAPPLE_BDIF); | 439 | + bdif = qdev_new(TYPE_VMAPPLE_BDIF); |
440 | + bdif_sb = SYS_BUS_DEVICE(bdif); | 440 | + bdif_sb = SYS_BUS_DEVICE(bdif); |
441 | + sysbus_mmio_map(bdif_sb, 0, vms->memmap[VMAPPLE_BDOOR].base); | 441 | + sysbus_mmio_map(bdif_sb, 0, vms->memmap[VMAPPLE_BDOOR].base); |
442 | + | 442 | + |
443 | + qdev_prop_set_drive(DEVICE(bdif), "aux", blk_by_legacy_dinfo(di_aux)); | 443 | + qdev_prop_set_drive(DEVICE(bdif), "aux", blk_by_legacy_dinfo(di_aux)); |
444 | + qdev_prop_set_drive(DEVICE(bdif), "root", blk_by_legacy_dinfo(di_root)); | 444 | + qdev_prop_set_drive(DEVICE(bdif), "root", blk_by_legacy_dinfo(di_root)); |
445 | + | 445 | + |
446 | + sysbus_realize_and_unref(bdif_sb, &error_fatal); | 446 | + sysbus_realize_and_unref(bdif_sb, &error_fatal); |
447 | +} | 447 | +} |
448 | + | 448 | + |
449 | +static void create_pvpanic(VMAppleMachineState *vms, MemoryRegion *mem) | 449 | +static void create_pvpanic(VMAppleMachineState *vms, MemoryRegion *mem) |
450 | +{ | 450 | +{ |
451 | + SysBusDevice *pvpanic; | 451 | + SysBusDevice *pvpanic; |
452 | + | 452 | + |
453 | + vms->pvpanic = qdev_new(TYPE_PVPANIC_MMIO_DEVICE); | 453 | + vms->pvpanic = qdev_new(TYPE_PVPANIC_MMIO_DEVICE); |
454 | + pvpanic = SYS_BUS_DEVICE(vms->pvpanic); | 454 | + pvpanic = SYS_BUS_DEVICE(vms->pvpanic); |
455 | + sysbus_mmio_map(pvpanic, 0, vms->memmap[VMAPPLE_PVPANIC].base); | 455 | + sysbus_mmio_map(pvpanic, 0, vms->memmap[VMAPPLE_PVPANIC].base); |
456 | + | 456 | + |
457 | + sysbus_realize_and_unref(pvpanic, &error_fatal); | 457 | + sysbus_realize_and_unref(pvpanic, &error_fatal); |
458 | +} | 458 | +} |
459 | + | 459 | + |
460 | +static bool create_cfg(VMAppleMachineState *vms, MemoryRegion *mem, | 460 | +static bool create_cfg(VMAppleMachineState *vms, MemoryRegion *mem, |
461 | + Error **errp) | 461 | + Error **errp) |
462 | +{ | 462 | +{ |
463 | + ERRP_GUARD(); | 463 | + ERRP_GUARD(); |
464 | + SysBusDevice *cfg; | 464 | + SysBusDevice *cfg; |
465 | + MachineState *machine = MACHINE(vms); | 465 | + MachineState *machine = MACHINE(vms); |
466 | + uint32_t rnd = 1; | 466 | + uint32_t rnd = 1; |
467 | + | 467 | + |
468 | + vms->cfg = qdev_new(TYPE_VMAPPLE_CFG); | 468 | + vms->cfg = qdev_new(TYPE_VMAPPLE_CFG); |
469 | + cfg = SYS_BUS_DEVICE(vms->cfg); | 469 | + cfg = SYS_BUS_DEVICE(vms->cfg); |
470 | + sysbus_mmio_map(cfg, 0, vms->memmap[VMAPPLE_CONFIG].base); | 470 | + sysbus_mmio_map(cfg, 0, vms->memmap[VMAPPLE_CONFIG].base); |
471 | + | 471 | + |
472 | + qemu_guest_getrandom_nofail(&rnd, sizeof(rnd)); | 472 | + qemu_guest_getrandom_nofail(&rnd, sizeof(rnd)); |
473 | + | 473 | + |
474 | + qdev_prop_set_uint32(vms->cfg, "nr-cpus", machine->smp.cpus); | 474 | + qdev_prop_set_uint32(vms->cfg, "nr-cpus", machine->smp.cpus); |
475 | + qdev_prop_set_uint64(vms->cfg, "ecid", vms->uuid); | 475 | + qdev_prop_set_uint64(vms->cfg, "ecid", vms->uuid); |
476 | + qdev_prop_set_uint64(vms->cfg, "ram-size", machine->ram_size); | 476 | + qdev_prop_set_uint64(vms->cfg, "ram-size", machine->ram_size); |
477 | + qdev_prop_set_uint32(vms->cfg, "rnd", rnd); | 477 | + qdev_prop_set_uint32(vms->cfg, "rnd", rnd); |
478 | + | 478 | + |
479 | + if (!sysbus_realize_and_unref(cfg, errp)) { | 479 | + if (!sysbus_realize_and_unref(cfg, errp)) { |
480 | + error_prepend(errp, "Error creating vmapple cfg device: "); | 480 | + error_prepend(errp, "Error creating vmapple cfg device: "); |
481 | + return false; | 481 | + return false; |
482 | + } | 482 | + } |
483 | + | 483 | + |
484 | + return true; | 484 | + return true; |
485 | +} | 485 | +} |
486 | + | 486 | + |
487 | +static void create_gfx(VMAppleMachineState *vms, MemoryRegion *mem) | 487 | +static void create_gfx(VMAppleMachineState *vms, MemoryRegion *mem) |
488 | +{ | 488 | +{ |
489 | + int irq_gfx = vms->irqmap[VMAPPLE_APV_GFX]; | 489 | + int irq_gfx = vms->irqmap[VMAPPLE_APV_GFX]; |
490 | + int irq_iosfc = vms->irqmap[VMAPPLE_APV_IOSFC]; | 490 | + int irq_iosfc = vms->irqmap[VMAPPLE_APV_IOSFC]; |
491 | + SysBusDevice *gfx; | 491 | + SysBusDevice *gfx; |
492 | + | 492 | + |
493 | + gfx = SYS_BUS_DEVICE(qdev_new("apple-gfx-mmio")); | 493 | + gfx = SYS_BUS_DEVICE(qdev_new("apple-gfx-mmio")); |
494 | + sysbus_mmio_map(gfx, 0, vms->memmap[VMAPPLE_APV_GFX].base); | 494 | + sysbus_mmio_map(gfx, 0, vms->memmap[VMAPPLE_APV_GFX].base); |
495 | + sysbus_mmio_map(gfx, 1, vms->memmap[VMAPPLE_APV_IOSFC].base); | 495 | + sysbus_mmio_map(gfx, 1, vms->memmap[VMAPPLE_APV_IOSFC].base); |
496 | + sysbus_connect_irq(gfx, 0, qdev_get_gpio_in(vms->gic, irq_gfx)); | 496 | + sysbus_connect_irq(gfx, 0, qdev_get_gpio_in(vms->gic, irq_gfx)); |
497 | + sysbus_connect_irq(gfx, 1, qdev_get_gpio_in(vms->gic, irq_iosfc)); | 497 | + sysbus_connect_irq(gfx, 1, qdev_get_gpio_in(vms->gic, irq_iosfc)); |
498 | + sysbus_realize_and_unref(gfx, &error_fatal); | 498 | + sysbus_realize_and_unref(gfx, &error_fatal); |
499 | +} | 499 | +} |
500 | + | 500 | + |
501 | +static void create_aes(VMAppleMachineState *vms, MemoryRegion *mem) | 501 | +static void create_aes(VMAppleMachineState *vms, MemoryRegion *mem) |
502 | +{ | 502 | +{ |
503 | + int irq = vms->irqmap[VMAPPLE_AES_1]; | 503 | + int irq = vms->irqmap[VMAPPLE_AES_1]; |
504 | + SysBusDevice *aes; | 504 | + SysBusDevice *aes; |
505 | + | 505 | + |
506 | + aes = SYS_BUS_DEVICE(qdev_new(TYPE_APPLE_AES)); | 506 | + aes = SYS_BUS_DEVICE(qdev_new(TYPE_APPLE_AES)); |
507 | + sysbus_mmio_map(aes, 0, vms->memmap[VMAPPLE_AES_1].base); | 507 | + sysbus_mmio_map(aes, 0, vms->memmap[VMAPPLE_AES_1].base); |
508 | + sysbus_mmio_map(aes, 1, vms->memmap[VMAPPLE_AES_2].base); | 508 | + sysbus_mmio_map(aes, 1, vms->memmap[VMAPPLE_AES_2].base); |
509 | + sysbus_connect_irq(aes, 0, qdev_get_gpio_in(vms->gic, irq)); | 509 | + sysbus_connect_irq(aes, 0, qdev_get_gpio_in(vms->gic, irq)); |
510 | + sysbus_realize_and_unref(aes, &error_fatal); | 510 | + sysbus_realize_and_unref(aes, &error_fatal); |
511 | +} | 511 | +} |
512 | + | 512 | + |
513 | +static int arm_gic_ppi_index(int cpu_nr, int ppi_index) | 513 | +static int arm_gic_ppi_index(int cpu_nr, int ppi_index) |
514 | +{ | 514 | +{ |
515 | + return NUM_IRQS + cpu_nr * GIC_INTERNAL + ppi_index; | 515 | + return NUM_IRQS + cpu_nr * GIC_INTERNAL + ppi_index; |
516 | +} | 516 | +} |
517 | + | 517 | + |
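As a quick worked example of the helper above (assuming GIC_INTERNAL is 32, i.e. the 32 internal interrupts per CPU interface that the GIC spec mandates, as the comment in create_gic() below notes): with NUM_IRQS = 256, the virtual timer PPI 27 of CPU 0 maps to GIC input 256 + 0 * 32 + 27 = 283, and the same PPI on CPU 1 maps to 256 + 1 * 32 + 27 = 315.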
518 | +static void create_gic(VMAppleMachineState *vms, MemoryRegion *mem) | 518 | +static void create_gic(VMAppleMachineState *vms, MemoryRegion *mem) |
519 | +{ | 519 | +{ |
520 | + MachineState *ms = MACHINE(vms); | 520 | + MachineState *ms = MACHINE(vms); |
521 | + /* We create a standalone GIC */ | 521 | + /* We create a standalone GIC */ |
522 | + SysBusDevice *gicbusdev; | 522 | + SysBusDevice *gicbusdev; |
523 | + QList *redist_region_count; | 523 | + QList *redist_region_count; |
524 | + int i; | 524 | + int i; |
525 | + unsigned int smp_cpus = ms->smp.cpus; | 525 | + unsigned int smp_cpus = ms->smp.cpus; |
526 | + | 526 | + |
527 | + vms->gic = qdev_new(gicv3_class_name()); | 527 | + vms->gic = qdev_new(gicv3_class_name()); |
528 | + qdev_prop_set_uint32(vms->gic, "revision", 3); | 528 | + qdev_prop_set_uint32(vms->gic, "revision", 3); |
529 | + qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus); | 529 | + qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus); |
530 | + /* | 530 | + /* |
531 | + * Note that the num-irq property counts both internal and external | 531 | + * Note that the num-irq property counts both internal and external |
532 | + * interrupts; there are always 32 of the former (mandated by GIC spec). | 532 | + * interrupts; there are always 32 of the former (mandated by GIC spec). |
533 | + */ | 533 | + */ |
534 | + qdev_prop_set_uint32(vms->gic, "num-irq", NUM_IRQS + 32); | 534 | + qdev_prop_set_uint32(vms->gic, "num-irq", NUM_IRQS + 32); |
535 | + | 535 | + |
536 | + uint32_t redist0_capacity = | 536 | + uint32_t redist0_capacity = |
537 | + vms->memmap[VMAPPLE_GIC_REDIST].size / GICV3_REDIST_SIZE; | 537 | + vms->memmap[VMAPPLE_GIC_REDIST].size / GICV3_REDIST_SIZE; |
538 | + uint32_t redist0_count = MIN(smp_cpus, redist0_capacity); | 538 | + uint32_t redist0_count = MIN(smp_cpus, redist0_capacity); |
539 | + | 539 | + |
540 | + redist_region_count = qlist_new(); | 540 | + redist_region_count = qlist_new(); |
541 | + qlist_append_int(redist_region_count, redist0_count); | 541 | + qlist_append_int(redist_region_count, redist0_count); |
542 | + qdev_prop_set_array(vms->gic, "redist-region-count", redist_region_count); | 542 | + qdev_prop_set_array(vms->gic, "redist-region-count", redist_region_count); |
543 | + | 543 | + |
544 | + gicbusdev = SYS_BUS_DEVICE(vms->gic); | 544 | + gicbusdev = SYS_BUS_DEVICE(vms->gic); |
545 | + sysbus_realize_and_unref(gicbusdev, &error_fatal); | 545 | + sysbus_realize_and_unref(gicbusdev, &error_fatal); |
546 | + sysbus_mmio_map(gicbusdev, 0, vms->memmap[VMAPPLE_GIC_DIST].base); | 546 | + sysbus_mmio_map(gicbusdev, 0, vms->memmap[VMAPPLE_GIC_DIST].base); |
547 | + sysbus_mmio_map(gicbusdev, 1, vms->memmap[VMAPPLE_GIC_REDIST].base); | 547 | + sysbus_mmio_map(gicbusdev, 1, vms->memmap[VMAPPLE_GIC_REDIST].base); |
548 | + | 548 | + |
549 | + /* | 549 | + /* |
550 | + * Wire the outputs from each CPU's generic timer and the GICv3 | 550 | + * Wire the outputs from each CPU's generic timer and the GICv3 |
551 | + * maintenance interrupt signal to the appropriate GIC PPI inputs, | 551 | + * maintenance interrupt signal to the appropriate GIC PPI inputs, |
552 | + * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs. | 552 | + * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs. |
553 | + */ | 553 | + */ |
554 | + for (i = 0; i < smp_cpus; i++) { | 554 | + for (i = 0; i < smp_cpus; i++) { |
555 | + DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); | 555 | + DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); |
556 | + | 556 | + |
557 | + /* Map the virt timer to PPI 27 */ | 557 | + /* Map the virt timer to PPI 27 */ |
558 | + qdev_connect_gpio_out(cpudev, GTIMER_VIRT, | 558 | + qdev_connect_gpio_out(cpudev, GTIMER_VIRT, |
559 | + qdev_get_gpio_in(vms->gic, | 559 | + qdev_get_gpio_in(vms->gic, |
560 | + arm_gic_ppi_index(i, 27))); | 560 | + arm_gic_ppi_index(i, 27))); |
561 | + | 561 | + |
562 | + /* Map the GIC IRQ and FIQ lines to CPU */ | 562 | + /* Map the GIC IRQ and FIQ lines to CPU */ |
563 | + sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); | 563 | + sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); |
564 | + sysbus_connect_irq(gicbusdev, i + smp_cpus, | 564 | + sysbus_connect_irq(gicbusdev, i + smp_cpus, |
565 | + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); | 565 | + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); |
566 | + } | 566 | + } |
567 | +} | 567 | +} |
568 | + | 568 | + |
569 | +static void create_uart(const VMAppleMachineState *vms, int uart, | 569 | +static void create_uart(const VMAppleMachineState *vms, int uart, |
570 | + MemoryRegion *mem, Chardev *chr) | 570 | + MemoryRegion *mem, Chardev *chr) |
571 | +{ | 571 | +{ |
572 | + hwaddr base = vms->memmap[uart].base; | 572 | + hwaddr base = vms->memmap[uart].base; |
573 | + int irq = vms->irqmap[uart]; | 573 | + int irq = vms->irqmap[uart]; |
574 | + DeviceState *dev = qdev_new(TYPE_PL011); | 574 | + DeviceState *dev = qdev_new(TYPE_PL011); |
575 | + SysBusDevice *s = SYS_BUS_DEVICE(dev); | 575 | + SysBusDevice *s = SYS_BUS_DEVICE(dev); |
576 | + | 576 | + |
577 | + qdev_prop_set_chr(dev, "chardev", chr); | 577 | + qdev_prop_set_chr(dev, "chardev", chr); |
578 | + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); | 578 | + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); |
579 | + memory_region_add_subregion(mem, base, | 579 | + memory_region_add_subregion(mem, base, |
580 | + sysbus_mmio_get_region(s, 0)); | 580 | + sysbus_mmio_get_region(s, 0)); |
581 | + sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq)); | 581 | + sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq)); |
582 | +} | 582 | +} |
583 | + | 583 | + |
584 | +static void create_rtc(const VMAppleMachineState *vms) | 584 | +static void create_rtc(const VMAppleMachineState *vms) |
585 | +{ | 585 | +{ |
586 | + hwaddr base = vms->memmap[VMAPPLE_RTC].base; | 586 | + hwaddr base = vms->memmap[VMAPPLE_RTC].base; |
587 | + int irq = vms->irqmap[VMAPPLE_RTC]; | 587 | + int irq = vms->irqmap[VMAPPLE_RTC]; |
588 | + | 588 | + |
589 | + sysbus_create_simple("pl031", base, qdev_get_gpio_in(vms->gic, irq)); | 589 | + sysbus_create_simple("pl031", base, qdev_get_gpio_in(vms->gic, irq)); |
590 | +} | 590 | +} |
591 | + | 591 | + |
592 | +static DeviceState *gpio_key_dev; | 592 | +static DeviceState *gpio_key_dev; |
593 | +static void vmapple_powerdown_req(Notifier *n, void *opaque) | 593 | +static void vmapple_powerdown_req(Notifier *n, void *opaque) |
594 | +{ | 594 | +{ |
595 | + /* use gpio Pin 3 for power button event */ | 595 | + /* use gpio Pin 3 for power button event */ |
596 | + qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1); | 596 | + qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1); |
597 | +} | 597 | +} |
598 | + | 598 | + |
599 | +static void create_gpio_devices(const VMAppleMachineState *vms, int gpio, | 599 | +static void create_gpio_devices(const VMAppleMachineState *vms, int gpio, |
600 | + MemoryRegion *mem) | 600 | + MemoryRegion *mem) |
601 | +{ | 601 | +{ |
602 | + DeviceState *pl061_dev; | 602 | + DeviceState *pl061_dev; |
603 | + hwaddr base = vms->memmap[gpio].base; | 603 | + hwaddr base = vms->memmap[gpio].base; |
604 | + int irq = vms->irqmap[gpio]; | 604 | + int irq = vms->irqmap[gpio]; |
605 | + SysBusDevice *s; | 605 | + SysBusDevice *s; |
606 | + | 606 | + |
607 | + pl061_dev = qdev_new("pl061"); | 607 | + pl061_dev = qdev_new("pl061"); |
608 | + /* Pull lines down to 0 if not driven by the PL061 */ | 608 | + /* Pull lines down to 0 if not driven by the PL061 */ |
609 | + qdev_prop_set_uint32(pl061_dev, "pullups", 0); | 609 | + qdev_prop_set_uint32(pl061_dev, "pullups", 0); |
610 | + qdev_prop_set_uint32(pl061_dev, "pulldowns", 0xff); | 610 | + qdev_prop_set_uint32(pl061_dev, "pulldowns", 0xff); |
611 | + s = SYS_BUS_DEVICE(pl061_dev); | 611 | + s = SYS_BUS_DEVICE(pl061_dev); |
612 | + sysbus_realize_and_unref(s, &error_fatal); | 612 | + sysbus_realize_and_unref(s, &error_fatal); |
613 | + memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0)); | 613 | + memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0)); |
614 | + sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq)); | 614 | + sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq)); |
615 | + gpio_key_dev = sysbus_create_simple("gpio-key", -1, | 615 | + gpio_key_dev = sysbus_create_simple("gpio-key", -1, |
616 | + qdev_get_gpio_in(pl061_dev, 3)); | 616 | + qdev_get_gpio_in(pl061_dev, 3)); |
617 | +} | 617 | +} |
618 | + | 618 | + |
619 | +static void vmapple_firmware_init(VMAppleMachineState *vms, | 619 | +static void vmapple_firmware_init(VMAppleMachineState *vms, |
620 | + MemoryRegion *sysmem) | 620 | + MemoryRegion *sysmem) |
621 | +{ | 621 | +{ |
622 | + hwaddr size = vms->memmap[VMAPPLE_FIRMWARE].size; | 622 | + hwaddr size = vms->memmap[VMAPPLE_FIRMWARE].size; |
623 | + hwaddr base = vms->memmap[VMAPPLE_FIRMWARE].base; | 623 | + hwaddr base = vms->memmap[VMAPPLE_FIRMWARE].base; |
624 | + const char *bios_name; | 624 | + const char *bios_name; |
625 | + int image_size; | 625 | + int image_size; |
626 | + char *fname; | 626 | + char *fname; |
627 | + | 627 | + |
628 | + bios_name = MACHINE(vms)->firmware; | 628 | + bios_name = MACHINE(vms)->firmware; |
629 | + if (!bios_name) { | 629 | + if (!bios_name) { |
630 | + error_report("No firmware specified"); | 630 | + error_report("No firmware specified"); |
631 | + exit(1); | 631 | + exit(1); |
632 | + } | 632 | + } |
633 | + | 633 | + |
634 | + fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); | 634 | + fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); |
635 | + if (!fname) { | 635 | + if (!fname) { |
636 | + error_report("Could not find ROM image '%s'", bios_name); | 636 | + error_report("Could not find ROM image '%s'", bios_name); |
637 | + exit(1); | 637 | + exit(1); |
638 | + } | 638 | + } |
639 | + | 639 | + |
640 | + memory_region_init_ram(&vms->fw_mr, NULL, "firmware", size, &error_fatal); | 640 | + memory_region_init_ram(&vms->fw_mr, NULL, "firmware", size, &error_fatal); |
641 | + image_size = load_image_mr(fname, &vms->fw_mr); | 641 | + image_size = load_image_mr(fname, &vms->fw_mr); |
642 | + | 642 | + |
643 | + g_free(fname); | 643 | + g_free(fname); |
644 | + if (image_size < 0) { | 644 | + if (image_size < 0) { |
645 | + error_report("Could not load ROM image '%s'", bios_name); | 645 | + error_report("Could not load ROM image '%s'", bios_name); |
646 | + exit(1); | 646 | + exit(1); |
647 | + } | 647 | + } |
648 | + | 648 | + |
649 | + memory_region_add_subregion(get_system_memory(), base, &vms->fw_mr); | 649 | + memory_region_add_subregion(get_system_memory(), base, &vms->fw_mr); |
650 | +} | 650 | +} |
651 | + | 651 | + |
652 | +static void create_pcie(VMAppleMachineState *vms) | 652 | +static void create_pcie(VMAppleMachineState *vms) |
653 | +{ | 653 | +{ |
654 | + hwaddr base_mmio = vms->memmap[VMAPPLE_PCIE_MMIO].base; | 654 | + hwaddr base_mmio = vms->memmap[VMAPPLE_PCIE_MMIO].base; |
655 | + hwaddr size_mmio = vms->memmap[VMAPPLE_PCIE_MMIO].size; | 655 | + hwaddr size_mmio = vms->memmap[VMAPPLE_PCIE_MMIO].size; |
656 | + hwaddr base_ecam = vms->memmap[VMAPPLE_PCIE_ECAM].base; | 656 | + hwaddr base_ecam = vms->memmap[VMAPPLE_PCIE_ECAM].base; |
657 | + hwaddr size_ecam = vms->memmap[VMAPPLE_PCIE_ECAM].size; | 657 | + hwaddr size_ecam = vms->memmap[VMAPPLE_PCIE_ECAM].size; |
658 | + int irq = vms->irqmap[VMAPPLE_PCIE]; | 658 | + int irq = vms->irqmap[VMAPPLE_PCIE]; |
659 | + MemoryRegion *mmio_alias; | 659 | + MemoryRegion *mmio_alias; |
660 | + MemoryRegion *mmio_reg; | 660 | + MemoryRegion *mmio_reg; |
661 | + MemoryRegion *ecam_reg; | 661 | + MemoryRegion *ecam_reg; |
662 | + DeviceState *dev; | 662 | + DeviceState *dev; |
663 | + int i; | 663 | + int i; |
664 | + PCIHostState *pci; | 664 | + PCIHostState *pci; |
665 | + DeviceState *usb_controller; | 665 | + DeviceState *usb_controller; |
666 | + USBBus *usb_bus; | 666 | + USBBus *usb_bus; |
667 | + | 667 | + |
668 | + dev = qdev_new(TYPE_GPEX_HOST); | 668 | + dev = qdev_new(TYPE_GPEX_HOST); |
669 | + qdev_prop_set_uint32(dev, "num-irqs", GPEX_NUM_IRQS); | 669 | + qdev_prop_set_uint32(dev, "num-irqs", GPEX_NUM_IRQS); |
670 | + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); | 670 | + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); |
671 | + | 671 | + |
672 | + /* Map only the first size_ecam bytes of ECAM space */ | 672 | + /* Map only the first size_ecam bytes of ECAM space */ |
673 | + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); | 673 | + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); |
674 | + memory_region_init_alias(&vms->ecam_alias, OBJECT(dev), "pcie-ecam", | 674 | + memory_region_init_alias(&vms->ecam_alias, OBJECT(dev), "pcie-ecam", |
675 | + ecam_reg, 0, size_ecam); | 675 | + ecam_reg, 0, size_ecam); |
676 | + memory_region_add_subregion(get_system_memory(), base_ecam, | 676 | + memory_region_add_subregion(get_system_memory(), base_ecam, |
677 | + &vms->ecam_alias); | 677 | + &vms->ecam_alias); |
678 | + | 678 | + |
679 | + /* | 679 | + /* |
680 | + * Map the MMIO window from [0x50000000-0x7fff0000] in PCI space into | 680 | + * Map the MMIO window from [0x50000000-0x7fff0000] in PCI space into |
681 | + * system address space at [0x50000000-0x7fff0000]. | 681 | + * system address space at [0x50000000-0x7fff0000]. |
682 | + */ | 682 | + */ |
683 | + mmio_alias = g_new0(MemoryRegion, 1); | 683 | + mmio_alias = g_new0(MemoryRegion, 1); |
684 | + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); | 684 | + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); |
685 | + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio", | 685 | + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio", |
686 | + mmio_reg, base_mmio, size_mmio); | 686 | + mmio_reg, base_mmio, size_mmio); |
687 | + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias); | 687 | + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias); |
688 | + | 688 | + |
689 | + for (i = 0; i < GPEX_NUM_IRQS; i++) { | 689 | + for (i = 0; i < GPEX_NUM_IRQS; i++) { |
690 | + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, | 690 | + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, |
691 | + qdev_get_gpio_in(vms->gic, irq + i)); | 691 | + qdev_get_gpio_in(vms->gic, irq + i)); |
692 | + gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); | 692 | + gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); |
693 | + } | 693 | + } |
694 | + | 694 | + |
695 | + pci = PCI_HOST_BRIDGE(dev); | 695 | + pci = PCI_HOST_BRIDGE(dev); |
696 | + vms->bus = pci->bus; | 696 | + vms->bus = pci->bus; |
697 | + g_assert(vms->bus); | 697 | + g_assert(vms->bus); |
698 | + | 698 | + |
699 | + while ((dev = qemu_create_nic_device("virtio-net-pci", true, NULL))) { | 699 | + while ((dev = qemu_create_nic_device("virtio-net-pci", true, NULL))) { |
700 | + qdev_realize_and_unref(dev, BUS(vms->bus), &error_fatal); | 700 | + qdev_realize_and_unref(dev, BUS(vms->bus), &error_fatal); |
701 | + } | 701 | + } |
702 | + | 702 | + |
703 | + usb_controller = qdev_new(TYPE_QEMU_XHCI); | 703 | + usb_controller = qdev_new(TYPE_QEMU_XHCI); |
704 | + qdev_realize_and_unref(usb_controller, BUS(pci->bus), &error_fatal); | 704 | + qdev_realize_and_unref(usb_controller, BUS(pci->bus), &error_fatal); |
705 | + | 705 | + |
706 | + usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS, | 706 | + usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS, |
707 | + &error_fatal)); | 707 | + &error_fatal)); |
708 | + usb_create_simple(usb_bus, "usb-kbd"); | 708 | + usb_create_simple(usb_bus, "usb-kbd"); |
709 | + usb_create_simple(usb_bus, "usb-tablet"); | 709 | + usb_create_simple(usb_bus, "usb-tablet"); |
710 | +} | 710 | +} |
711 | + | 711 | + |
712 | +static void vmapple_reset(void *opaque) | 712 | +static void vmapple_reset(void *opaque) |
713 | +{ | 713 | +{ |
714 | + VMAppleMachineState *vms = opaque; | 714 | + VMAppleMachineState *vms = opaque; |
715 | + hwaddr base = vms->memmap[VMAPPLE_FIRMWARE].base; | 715 | + hwaddr base = vms->memmap[VMAPPLE_FIRMWARE].base; |
716 | + | 716 | + |
717 | + cpu_set_pc(first_cpu, base); | 717 | + cpu_set_pc(first_cpu, base); |
718 | +} | 718 | +} |
719 | + | 719 | + |
720 | +static void mach_vmapple_init(MachineState *machine) | 720 | +static void mach_vmapple_init(MachineState *machine) |
721 | +{ | 721 | +{ |
722 | + VMAppleMachineState *vms = VMAPPLE_MACHINE(machine); | 722 | + VMAppleMachineState *vms = VMAPPLE_MACHINE(machine); |
723 | + MachineClass *mc = MACHINE_GET_CLASS(machine); | 723 | + MachineClass *mc = MACHINE_GET_CLASS(machine); |
724 | + const CPUArchIdList *possible_cpus; | 724 | + const CPUArchIdList *possible_cpus; |
725 | + MemoryRegion *sysmem = get_system_memory(); | 725 | + MemoryRegion *sysmem = get_system_memory(); |
726 | + int n; | 726 | + int n; |
727 | + unsigned int smp_cpus = machine->smp.cpus; | 727 | + unsigned int smp_cpus = machine->smp.cpus; |
728 | + unsigned int max_cpus = machine->smp.max_cpus; | 728 | + unsigned int max_cpus = machine->smp.max_cpus; |
729 | + | 729 | + |
730 | + vms->memmap = memmap; | 730 | + vms->memmap = memmap; |
731 | + machine->usb = true; | 731 | + machine->usb = true; |
732 | + | 732 | + |
733 | + possible_cpus = mc->possible_cpu_arch_ids(machine); | 733 | + possible_cpus = mc->possible_cpu_arch_ids(machine); |
734 | + assert(possible_cpus->len == max_cpus); | 734 | + assert(possible_cpus->len == max_cpus); |
735 | + for (n = 0; n < possible_cpus->len; n++) { | 735 | + for (n = 0; n < possible_cpus->len; n++) { |
736 | + Object *cpu; | 736 | + Object *cpu; |
737 | + CPUState *cs; | 737 | + CPUState *cs; |
738 | + | 738 | + |
739 | + if (n >= smp_cpus) { | 739 | + if (n >= smp_cpus) { |
740 | + break; | 740 | + break; |
741 | + } | 741 | + } |
742 | + | 742 | + |
743 | + cpu = object_new(possible_cpus->cpus[n].type); | 743 | + cpu = object_new(possible_cpus->cpus[n].type); |
744 | + object_property_set_int(cpu, "mp-affinity", | 744 | + object_property_set_int(cpu, "mp-affinity", |
745 | + possible_cpus->cpus[n].arch_id, &error_fatal); | 745 | + possible_cpus->cpus[n].arch_id, &error_fatal); |
746 | + | 746 | + |
747 | + cs = CPU(cpu); | 747 | + cs = CPU(cpu); |
748 | + cs->cpu_index = n; | 748 | + cs->cpu_index = n; |
749 | + | 749 | + |
750 | + numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpu), | 750 | + numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpu), |
751 | + &error_fatal); | 751 | + &error_fatal); |
752 | + | 752 | + |
753 | + if (object_property_find(cpu, "has_el3")) { | 753 | + if (object_property_find(cpu, "has_el3")) { |
754 | + object_property_set_bool(cpu, "has_el3", false, &error_fatal); | 754 | + object_property_set_bool(cpu, "has_el3", false, &error_fatal); |
755 | + } | 755 | + } |
756 | + if (object_property_find(cpu, "has_el2")) { | 756 | + if (object_property_find(cpu, "has_el2")) { |
757 | + object_property_set_bool(cpu, "has_el2", false, &error_fatal); | 757 | + object_property_set_bool(cpu, "has_el2", false, &error_fatal); |
758 | + } | 758 | + } |
759 | + object_property_set_int(cpu, "psci-conduit", QEMU_PSCI_CONDUIT_HVC, | 759 | + object_property_set_int(cpu, "psci-conduit", QEMU_PSCI_CONDUIT_HVC, |
760 | + &error_fatal); | 760 | + &error_fatal); |
761 | + | 761 | + |
762 | + /* Secondary CPUs start in PSCI powered-down state */ | 762 | + /* Secondary CPUs start in PSCI powered-down state */ |
763 | + if (n > 0) { | 763 | + if (n > 0) { |
764 | + object_property_set_bool(cpu, "start-powered-off", true, | 764 | + object_property_set_bool(cpu, "start-powered-off", true, |
765 | + &error_fatal); | 765 | + &error_fatal); |
766 | + } | 766 | + } |
767 | + | 767 | + |
768 | + object_property_set_link(cpu, "memory", OBJECT(sysmem), &error_abort); | 768 | + object_property_set_link(cpu, "memory", OBJECT(sysmem), &error_abort); |
769 | + qdev_realize(DEVICE(cpu), NULL, &error_fatal); | 769 | + qdev_realize(DEVICE(cpu), NULL, &error_fatal); |
770 | + object_unref(cpu); | 770 | + object_unref(cpu); |
771 | + } | 771 | + } |
772 | + | 772 | + |
773 | + memory_region_add_subregion(sysmem, vms->memmap[VMAPPLE_MEM].base, | 773 | + memory_region_add_subregion(sysmem, vms->memmap[VMAPPLE_MEM].base, |
774 | + machine->ram); | 774 | + machine->ram); |
775 | + | 775 | + |
776 | + create_gic(vms, sysmem); | 776 | + create_gic(vms, sysmem); |
777 | + create_bdif(vms, sysmem); | 777 | + create_bdif(vms, sysmem); |
778 | + create_pvpanic(vms, sysmem); | 778 | + create_pvpanic(vms, sysmem); |
779 | + create_aes(vms, sysmem); | 779 | + create_aes(vms, sysmem); |
780 | + create_gfx(vms, sysmem); | 780 | + create_gfx(vms, sysmem); |
781 | + create_uart(vms, VMAPPLE_UART, sysmem, serial_hd(0)); | 781 | + create_uart(vms, VMAPPLE_UART, sysmem, serial_hd(0)); |
782 | + create_rtc(vms); | 782 | + create_rtc(vms); |
783 | + create_pcie(vms); | 783 | + create_pcie(vms); |
784 | + | 784 | + |
785 | + create_gpio_devices(vms, VMAPPLE_GPIO, sysmem); | 785 | + create_gpio_devices(vms, VMAPPLE_GPIO, sysmem); |
786 | + | 786 | + |
787 | + vmapple_firmware_init(vms, sysmem); | 787 | + vmapple_firmware_init(vms, sysmem); |
788 | + create_cfg(vms, sysmem, &error_fatal); | 788 | + create_cfg(vms, sysmem, &error_fatal); |
789 | + | 789 | + |
790 | + /* connect powerdown request */ | 790 | + /* connect powerdown request */ |
791 | + vms->powerdown_notifier.notify = vmapple_powerdown_req; | 791 | + vms->powerdown_notifier.notify = vmapple_powerdown_req; |
792 | + qemu_register_powerdown_notifier(&vms->powerdown_notifier); | 792 | + qemu_register_powerdown_notifier(&vms->powerdown_notifier); |
793 | + | 793 | + |
794 | + vms->bootinfo.ram_size = machine->ram_size; | 794 | + vms->bootinfo.ram_size = machine->ram_size; |
795 | + vms->bootinfo.board_id = -1; | 795 | + vms->bootinfo.board_id = -1; |
796 | + vms->bootinfo.loader_start = vms->memmap[VMAPPLE_MEM].base; | 796 | + vms->bootinfo.loader_start = vms->memmap[VMAPPLE_MEM].base; |
797 | + vms->bootinfo.skip_dtb_autoload = true; | 797 | + vms->bootinfo.skip_dtb_autoload = true; |
798 | + vms->bootinfo.firmware_loaded = true; | 798 | + vms->bootinfo.firmware_loaded = true; |
799 | + arm_load_kernel(ARM_CPU(first_cpu), machine, &vms->bootinfo); | 799 | + arm_load_kernel(ARM_CPU(first_cpu), machine, &vms->bootinfo); |
800 | + | 800 | + |
801 | + qemu_register_reset(vmapple_reset, vms); | 801 | + qemu_register_reset(vmapple_reset, vms); |
802 | +} | 802 | +} |
803 | + | 803 | + |
804 | +static CpuInstanceProperties | 804 | +static CpuInstanceProperties |
805 | +vmapple_cpu_index_to_props(MachineState *ms, unsigned cpu_index) | 805 | +vmapple_cpu_index_to_props(MachineState *ms, unsigned cpu_index) |
806 | +{ | 806 | +{ |
807 | + MachineClass *mc = MACHINE_GET_CLASS(ms); | 807 | + MachineClass *mc = MACHINE_GET_CLASS(ms); |
808 | + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); | 808 | + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); |
809 | + | 809 | + |
810 | + assert(cpu_index < possible_cpus->len); | 810 | + assert(cpu_index < possible_cpus->len); |
811 | + return possible_cpus->cpus[cpu_index].props; | 811 | + return possible_cpus->cpus[cpu_index].props; |
812 | +} | 812 | +} |
813 | + | 813 | + |
814 | + | 814 | + |
815 | +static int64_t vmapple_get_default_cpu_node_id(const MachineState *ms, int idx) | 815 | +static int64_t vmapple_get_default_cpu_node_id(const MachineState *ms, int idx) |
816 | +{ | 816 | +{ |
817 | + return idx % ms->numa_state->num_nodes; | 817 | + return idx % ms->numa_state->num_nodes; |
818 | +} | 818 | +} |
819 | + | 819 | + |
820 | +static const CPUArchIdList *vmapple_possible_cpu_arch_ids(MachineState *ms) | 820 | +static const CPUArchIdList *vmapple_possible_cpu_arch_ids(MachineState *ms) |
821 | +{ | 821 | +{ |
822 | + int n; | 822 | + int n; |
823 | + unsigned int max_cpus = ms->smp.max_cpus; | 823 | + unsigned int max_cpus = ms->smp.max_cpus; |
824 | + | 824 | + |
825 | + if (ms->possible_cpus) { | 825 | + if (ms->possible_cpus) { |
826 | + assert(ms->possible_cpus->len == max_cpus); | 826 | + assert(ms->possible_cpus->len == max_cpus); |
827 | + return ms->possible_cpus; | 827 | + return ms->possible_cpus; |
828 | + } | 828 | + } |
829 | + | 829 | + |
830 | + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + | 830 | + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + |
831 | + sizeof(CPUArchId) * max_cpus); | 831 | + sizeof(CPUArchId) * max_cpus); |
832 | + ms->possible_cpus->len = max_cpus; | 832 | + ms->possible_cpus->len = max_cpus; |
833 | + for (n = 0; n < ms->possible_cpus->len; n++) { | 833 | + for (n = 0; n < ms->possible_cpus->len; n++) { |
834 | + ms->possible_cpus->cpus[n].type = ms->cpu_type; | 834 | + ms->possible_cpus->cpus[n].type = ms->cpu_type; |
835 | + ms->possible_cpus->cpus[n].arch_id = | 835 | + ms->possible_cpus->cpus[n].arch_id = |
836 | + arm_build_mp_affinity(n, GICV3_TARGETLIST_BITS); | 836 | + arm_build_mp_affinity(n, GICV3_TARGETLIST_BITS); |
837 | + ms->possible_cpus->cpus[n].props.has_thread_id = true; | 837 | + ms->possible_cpus->cpus[n].props.has_thread_id = true; |
838 | + ms->possible_cpus->cpus[n].props.thread_id = n; | 838 | + ms->possible_cpus->cpus[n].props.thread_id = n; |
839 | + } | 839 | + } |
840 | + return ms->possible_cpus; | 840 | + return ms->possible_cpus; |
841 | +} | 841 | +} |
842 | + | 842 | + |
843 | +static void vmapple_machine_class_init(ObjectClass *oc, void *data) | 843 | +static void vmapple_machine_class_init(ObjectClass *oc, void *data) |
844 | +{ | 844 | +{ |
845 | + MachineClass *mc = MACHINE_CLASS(oc); | 845 | + MachineClass *mc = MACHINE_CLASS(oc); |
846 | + | 846 | + |
847 | + mc->init = mach_vmapple_init; | 847 | + mc->init = mach_vmapple_init; |
848 | + mc->max_cpus = 32; | 848 | + mc->max_cpus = 32; |
849 | + mc->block_default_type = IF_VIRTIO; | 849 | + mc->block_default_type = IF_VIRTIO; |
850 | + mc->no_cdrom = 1; | 850 | + mc->no_cdrom = 1; |
851 | + mc->pci_allow_0_address = true; | 851 | + mc->pci_allow_0_address = true; |
852 | + mc->minimum_page_bits = 12; | 852 | + mc->minimum_page_bits = 12; |
853 | + mc->possible_cpu_arch_ids = vmapple_possible_cpu_arch_ids; | 853 | + mc->possible_cpu_arch_ids = vmapple_possible_cpu_arch_ids; |
854 | + mc->cpu_index_to_instance_props = vmapple_cpu_index_to_props; | 854 | + mc->cpu_index_to_instance_props = vmapple_cpu_index_to_props; |
855 | + mc->default_cpu_type = ARM_CPU_TYPE_NAME("host"); | 855 | + mc->default_cpu_type = ARM_CPU_TYPE_NAME("host"); |
856 | + mc->get_default_cpu_node_id = vmapple_get_default_cpu_node_id; | 856 | + mc->get_default_cpu_node_id = vmapple_get_default_cpu_node_id; |
857 | + mc->default_ram_id = "mach-vmapple.ram"; | 857 | + mc->default_ram_id = "mach-vmapple.ram"; |
858 | + | 858 | + |
859 | + object_register_sugar_prop(TYPE_VIRTIO_PCI, "disable-legacy", | 859 | + object_register_sugar_prop(TYPE_VIRTIO_PCI, "disable-legacy", |
860 | + "on", true); | 860 | + "on", true); |
861 | +} | 861 | +} |
862 | + | 862 | + |
863 | +static void vmapple_instance_init(Object *obj) | 863 | +static void vmapple_instance_init(Object *obj) |
864 | +{ | 864 | +{ |
865 | + VMAppleMachineState *vms = VMAPPLE_MACHINE(obj); | 865 | + VMAppleMachineState *vms = VMAPPLE_MACHINE(obj); |
866 | + | 866 | + |
867 | + vms->irqmap = irqmap; | 867 | + vms->irqmap = irqmap; |
868 | + | 868 | + |
869 | + object_property_add_uint64_ptr(obj, "uuid", &vms->uuid, | 869 | + object_property_add_uint64_ptr(obj, "uuid", &vms->uuid, |
870 | + OBJ_PROP_FLAG_READWRITE); | 870 | + OBJ_PROP_FLAG_READWRITE); |
871 | + object_property_set_description(obj, "uuid", "Machine UUID (SDOM)"); | 871 | + object_property_set_description(obj, "uuid", "Machine UUID (SDOM)"); |
872 | +} | 872 | +} |
873 | + | 873 | + |
874 | +static const TypeInfo vmapple_machine_info = { | 874 | +static const TypeInfo vmapple_machine_info = { |
875 | + .name = TYPE_VMAPPLE_MACHINE, | 875 | + .name = TYPE_VMAPPLE_MACHINE, |
876 | + .parent = TYPE_MACHINE, | 876 | + .parent = TYPE_MACHINE, |
877 | + .abstract = true, | 877 | + .abstract = true, |
878 | + .instance_size = sizeof(VMAppleMachineState), | 878 | + .instance_size = sizeof(VMAppleMachineState), |
879 | + .class_size = sizeof(VMAppleMachineClass), | 879 | + .class_size = sizeof(VMAppleMachineClass), |
880 | + .class_init = vmapple_machine_class_init, | 880 | + .class_init = vmapple_machine_class_init, |
881 | + .instance_init = vmapple_instance_init, | 881 | + .instance_init = vmapple_instance_init, |
882 | +}; | 882 | +}; |
883 | + | 883 | + |
884 | +static void machvmapple_machine_init(void) | 884 | +static void machvmapple_machine_init(void) |
885 | +{ | 885 | +{ |
886 | + type_register_static(&vmapple_machine_info); | 886 | + type_register_static(&vmapple_machine_info); |
887 | +} | 887 | +} |
888 | +type_init(machvmapple_machine_init); | 888 | +type_init(machvmapple_machine_init); |
889 | + | 889 | + |
890 | +static void vmapple_machine_10_0_options(MachineClass *mc) | 890 | +static void vmapple_machine_10_0_options(MachineClass *mc) |
891 | +{ | 891 | +{ |
892 | +} | 892 | +} |
893 | +DEFINE_VMAPPLE_MACHINE_AS_LATEST(10, 0) | 893 | +DEFINE_VMAPPLE_MACHINE_AS_LATEST(10, 0) |
894 | + | 894 | + |
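For clarity, here is roughly what the DEFINE_VMAPPLE_MACHINE_AS_LATEST(10, 0) invocation above produces once the preprocessor applies the DEFINE_VMAPPLE_MACHINE_LATEST macro defined earlier in this file; this expansion is only an illustration, not additional code in the patch:

    /* Approximate expansion of DEFINE_VMAPPLE_MACHINE_AS_LATEST(10, 0) */
    static void vmapple10_0_class_init(ObjectClass *oc, void *data)
    {
        MachineClass *mc = MACHINE_CLASS(oc);
        vmapple_machine_10_0_options(mc);
        mc->desc = "QEMU 10.0 Apple Virtual Machine";
        if (true) {                  /* the "latest" variant ... */
            mc->alias = "vmapple";   /* ... so plain -M vmapple resolves here */
        }
    }
    static const TypeInfo machvmapple10_0_info = {
        .name = MACHINE_TYPE_NAME("vmapple-10.0"),
        .parent = TYPE_VMAPPLE_MACHINE,
        .class_init = vmapple10_0_class_init,
    };
    static void machvmapple_machine_10_0_init(void)
    {
        type_register_static(&machvmapple10_0_info);
    }
    type_init(machvmapple_machine_10_0_init);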
895 | -- | 895 | -- |
896 | 2.39.5 (Apple Git-154) | 896 | 2.39.5 (Apple Git-154) |
897 | 897 | ||
898 | 898 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | 1 | From: Alexander Graf <graf@amazon.com> | |
2 | |||
3 | Apple defines a new "vmapple" machine type as part of its proprietary | ||
4 | macOS Virtualization.Framework vmm. This machine type is similar to the | ||
5 | virt one, but with subtle differences in base devices, a few special | ||
6 | vmapple device additions and a vastly different boot chain. | ||
7 | |||
8 | This patch reimplements this machine type in QEMU. To use it, you | ||
9 | must have an already installed copy of macOS for VMApple, | ||
10 | run on macOS with -accel hvf, pass the Virtualization.Framework | ||
11 | boot ROM (AVPBooter) via -bios, and pass the aux and root volumes | ||
12 | both as pflash and as virtio drives. In addition, you also | ||
13 | need to find the machine UUID and pass it as the -M vmapple,uuid= parameter: | ||
14 | |||
15 | $ qemu-system-aarch64 -accel hvf -M vmapple,uuid=0x1234 -m 4G \ | ||
16 |     -bios /System/Library/Frameworks/Virtualization.framework/Versions/A/Resources/AVPBooter.vmapple2.bin \ | ||
17 | -drive file=aux,if=pflash,format=raw \ | ||
18 | -drive file=root,if=pflash,format=raw \ | ||
19 | -drive file=aux,if=none,id=aux,format=raw \ | ||
20 | -device vmapple-virtio-blk-pci,variant=aux,drive=aux \ | ||
21 | -drive file=root,if=none,id=root,format=raw \ | ||
22 | -device vmapple-virtio-blk-pci,variant=root,drive=root | ||
23 | |||
24 | With all these in place, you should be able to see macOS booting | ||
25 | successfully. | ||
26 | |||
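(The uuid=0x1234 in the example above is only a placeholder; the real value is the VM's ECID, which the contrib/vmapple/uuid.sh helper added by this patch extracts from a macosvm.json bundle, as described in the documentation further down.)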
27 | Known issues: | ||
28 | - Keyboard and mouse/tablet input is laggy. The reason for this is | ||
29 | that macOS's XHCI driver seems to expect interrupter mapping to | ||
30 | be disabled when MSI/MSI-X is unavailable. I have found a | ||
31 |   workaround, but discovered several other XHCI spec compliance issues | ||
32 | in the process, so I'm fixing all of those in a separate patch | ||
33 | set. | ||
34 | - Currently only macOS 12 guests are supported. The boot process for | ||
35 | 13+ will need further investigation and adjustment. | ||
36 | |||
37 | Signed-off-by: Alexander Graf <graf@amazon.com> | ||
38 | Co-authored-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
39 | Signed-off-by: Phil Dennis-Jordan <phil@philjordan.eu> | ||
40 | Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
41 | Tested-by: Akihiko Odaki <akihiko.odaki@daynix.com> | ||
42 | --- | ||
43 | |||
44 | v3: | ||
45 | * Rebased on latest upstream, updated affinity and NIC creation | ||
46 | API usage | ||
47 | * Included Apple-variant virtio-blk in build dependency | ||
48 | * Updated API usage for setting 'redist-region-count' array-typed property on GIC. | ||
49 | * Switched from virtio HID devices (for which macOS 12 does not contain | ||
50 | drivers) to an XHCI USB controller and USB HID devices. | ||
51 | |||
52 | v4: | ||
53 | * Fixups for v4 changes to the other patches in the set. | ||
54 | * Corrected the assert macro to use | ||
55 | * Removed superfluous endian conversions corresponding to cfg's. | ||
56 | * Init error handling improvement. | ||
57 | * No need to select CPU type on TCG, as only HVF is supported. | ||
58 | * Machine type version bumped to 9.2 | ||
59 | * #include order improved | ||
60 | |||
61 | v5: | ||
62 | * Fixed memory reservation for ecam alias region. | ||
63 | * Better error handling setting properties on devices. | ||
64 | * Simplified the machine ECID/UUID extraction script and actually created a | ||
65 | file for it rather than quoting its code in documentation. | ||
66 | |||
67 | v7: | ||
68 | * Tiny error handling fix, un-inlined function. | ||
69 | |||
70 | v8: | ||
71 | * Use object_property_add_uint64_ptr rather than defining custom UUID | ||
72 | property get/set functions. | ||
73 | |||
74 | v9: | ||
75 | * Documentation improvements | ||
76 | * Fixed variable name and struct field used during pvpanic device creation. | ||
77 | |||
78 | v10: | ||
79 | * Documentation fixup for changed virtio-blk device type. | ||
80 | * Small improvements to shell commands in documentation. | ||
81 | * Improved propagation of errors during cfg device instantiation. | ||
82 | |||
83 | v11: | ||
84 | * Quoted more strings in the documentation's shell script code. | ||
85 | |||
86 | v13: | ||
87 | * Bumped the machine type version from 9.2 to 10.0. | ||
88 | |||
89 | MAINTAINERS | 1 + | ||
90 | contrib/vmapple/uuid.sh | 9 + | ||
91 | docs/system/arm/vmapple.rst | 63 ++++ | ||
92 | docs/system/target-arm.rst | 1 + | ||
93 | hw/vmapple/Kconfig | 20 ++ | ||
94 | hw/vmapple/meson.build | 1 + | ||
95 | hw/vmapple/vmapple.c | 648 ++++++++++++++++++++++++++++++++++++ | ||
96 | 7 files changed, 743 insertions(+) | ||
97 | create mode 100755 contrib/vmapple/uuid.sh | ||
98 | create mode 100644 docs/system/arm/vmapple.rst | ||
99 | create mode 100644 hw/vmapple/vmapple.c | ||
100 | |||
101 | diff --git a/MAINTAINERS b/MAINTAINERS | ||
102 | index XXXXXXX..XXXXXXX 100644 | ||
103 | --- a/MAINTAINERS | ||
104 | +++ b/MAINTAINERS | ||
105 | @@ -XXX,XX +XXX,XX @@ M: Phil Dennis-Jordan <phil@philjordan.eu> | ||
106 | S: Maintained | ||
107 | F: hw/vmapple/* | ||
108 | F: include/hw/vmapple/* | ||
109 | +F: docs/system/arm/vmapple.rst | ||
110 | |||
111 | Subsystems | ||
112 | ---------- | ||
113 | diff --git a/contrib/vmapple/uuid.sh b/contrib/vmapple/uuid.sh | ||
114 | new file mode 100755 | ||
115 | index XXXXXXX..XXXXXXX | ||
116 | --- /dev/null | ||
117 | +++ b/contrib/vmapple/uuid.sh | ||
118 | @@ -XXX,XX +XXX,XX @@ | ||
119 | +#!/bin/sh | ||
120 | +# Used for converting a guest provisioned using Virtualization.framework | ||
121 | +# for use with the QEMU 'vmapple' aarch64 machine type. | ||
122 | +# | ||
123 | +# Extracts the Machine UUID from a Virtualization.framework VM JSON file. | ||
124 | +# (as produced by 'macosvm', passed as command line argument) | ||
125 | + | ||
126 | +plutil -extract machineId raw "$1" | base64 -d | plutil -extract ECID raw - | ||
127 | + | ||
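Reading the pipeline above: the first plutil call pulls the machineId field out of the macosvm JSON file, base64 -d decodes it (machineId appears to be stored as a base64-encoded property list), and the second plutil call extracts the ECID value from the decoded plist, which is the number passed to QEMU as the vmapple machine's uuid property. The exact encoding of machineId is inferred from the script itself rather than stated anywhere in this series.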
128 | diff --git a/docs/system/arm/vmapple.rst b/docs/system/arm/vmapple.rst | ||
129 | new file mode 100644 | ||
130 | index XXXXXXX..XXXXXXX | ||
131 | --- /dev/null | ||
132 | +++ b/docs/system/arm/vmapple.rst | ||
133 | @@ -XXX,XX +XXX,XX @@ | ||
134 | +VMApple machine emulation | ||
135 | +======================================================================================== | ||
136 | + | ||
137 | +VMApple is the device model that the macOS built-in hypervisor called "Virtualization.framework" | ||
138 | +exposes to Apple Silicon macOS guests. The "vmapple" machine model in QEMU implements the same | ||
139 | +device model, but does not use any code from Virtualization.Framework. | ||
140 | + | ||
141 | +Prerequisites | ||
142 | +------------- | ||
143 | + | ||
144 | +To run the vmapple machine model, you need to | ||
145 | + | ||
146 | + * Run on Apple Silicon | ||
147 | + * Run on macOS 12.0 or above | ||
148 | + * Have an already installed copy of a Virtualization.Framework macOS 12 virtual | ||
149 | +   machine. Note that versions newer than 12.x are currently NOT supported on | ||
150 | + the guest side. I will assume that you installed it using the | ||
151 | +   `macosvm <https://github.com/s-u/macosvm>`_ CLI. | ||
152 | + | ||
153 | +First, we need to extract the UUID from the virtual machine that you installed. You can do this | ||
154 | +by running the shell script in contrib/vmapple/uuid.sh on the macosvm.json file. | ||
155 | + | ||
156 | +.. code-block:: bash | ||
157 | + :caption: uuid.sh script to extract the UUID from a macosvm.json file | ||
158 | + | ||
159 | + $ contrib/vmapple/uuid.sh "path/to/macosvm.json" | ||
160 | + | ||
161 | +Now we also need to trim the aux partition. It contains metadata that we can just discard: | ||
162 | + | ||
163 | +.. code-block:: bash | ||
164 | + :caption: Command to trim the aux file | ||
165 | + | ||
166 | + $ dd if="aux.img" of="aux.img.trimmed" bs=$(( 0x4000 )) skip=1 | ||
167 | + | ||
168 | +How to run | ||
169 | +---------- | ||
170 | + | ||
171 | +Then, we can launch QEMU with the Virtualization.Framework pre-boot environment and the readily | ||
172 | +installed target disk images. I recommend to port forward the VM's ssh and vnc ports to the host | ||
173 | +to get better interactive access into the target system: | ||
174 | + | ||
175 | +.. code-block:: bash | ||
176 | + :caption: Example execution command line | ||
177 | + | ||
178 | + $ UUID="$(contrib/vmapple/uuid.sh 'macosvm.json')" | ||
179 | + $ AVPBOOTER="/System/Library/Frameworks/Virtualization.framework/Resources/AVPBooter.vmapple2.bin" | ||
180 | + $ AUX="aux.img.trimmed" | ||
181 | + $ DISK="disk.img" | ||
182 | + $ qemu-system-aarch64 \ | ||
183 | + -serial mon:stdio \ | ||
184 | + -m 4G \ | ||
185 | + -accel hvf \ | ||
186 | + -M vmapple,uuid="$UUID" \ | ||
187 | + -bios "$AVPBOOTER" \ | ||
188 | + -drive file="$AUX",if=pflash,format=raw \ | ||
189 | + -drive file="$DISK",if=pflash,format=raw \ | ||
190 | + -drive file="$AUX",if=none,id=aux,format=raw \ | ||
191 | + -drive file="$DISK",if=none,id=root,format=raw \ | ||
192 | + -device vmapple-virtio-blk-pci,variant=aux,drive=aux \ | ||
193 | + -device vmapple-virtio-blk-pci,variant=root,drive=root \ | ||
194 | + -netdev user,id=net0,ipv6=off,hostfwd=tcp::2222-:22,hostfwd=tcp::5901-:5900 \ | ||
195 | + -device virtio-net-pci,netdev=net0 | ||
196 | + | ||
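With the user-mode netdev configured as above, the guest's SSH service becomes reachable on host port 2222 and its VNC server on host port 5901 (forwarded from guest ports 22 and 5900, respectively).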
197 | diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst | ||
198 | index XXXXXXX..XXXXXXX 100644 | ||
199 | --- a/docs/system/target-arm.rst | ||
200 | +++ b/docs/system/target-arm.rst | ||
201 | @@ -XXX,XX +XXX,XX @@ Board-specific documentation | ||
202 | arm/stellaris | ||
203 | arm/stm32 | ||
204 | arm/virt | ||
205 | + arm/vmapple | ||
206 | arm/xenpvh | ||
207 | arm/xlnx-versal-virt | ||
208 | arm/xlnx-zynq | ||
209 | diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig | ||
210 | index XXXXXXX..XXXXXXX 100644 | ||
211 | --- a/hw/vmapple/Kconfig | ||
212 | +++ b/hw/vmapple/Kconfig | ||
213 | @@ -XXX,XX +XXX,XX @@ config VMAPPLE_CFG | ||
214 | config VMAPPLE_VIRTIO_BLK | ||
215 | bool | ||
216 | |||
217 | +config VMAPPLE | ||
218 | + bool | ||
219 | + depends on ARM | ||
220 | + depends on HVF | ||
221 | + default y if ARM | ||
222 | + imply PCI_DEVICES | ||
223 | + select ARM_GIC | ||
224 | + select PLATFORM_BUS | ||
225 | + select PCI_EXPRESS | ||
226 | + select PCI_EXPRESS_GENERIC_BRIDGE | ||
227 | + select PL011 # UART | ||
228 | + select PL031 # RTC | ||
229 | + select PL061 # GPIO | ||
230 | + select GPIO_PWR | ||
231 | + select PVPANIC_MMIO | ||
232 | + select VMAPPLE_AES | ||
233 | + select VMAPPLE_BDIF | ||
234 | + select VMAPPLE_CFG | ||
235 | + select MAC_PVG_MMIO | ||
236 | + select VMAPPLE_VIRTIO_BLK | ||
237 | diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build | ||
238 | index XXXXXXX..XXXXXXX 100644 | ||
239 | --- a/hw/vmapple/meson.build | ||
240 | +++ b/hw/vmapple/meson.build | ||
241 | @@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) | ||
242 | system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c')) | ||
243 | system_ss.add(when: 'CONFIG_VMAPPLE_CFG', if_true: files('cfg.c')) | ||
244 | system_ss.add(when: 'CONFIG_VMAPPLE_VIRTIO_BLK', if_true: files('virtio-blk.c')) | ||
245 | +specific_ss.add(when: 'CONFIG_VMAPPLE', if_true: files('vmapple.c')) | ||
246 | diff --git a/hw/vmapple/vmapple.c b/hw/vmapple/vmapple.c | ||
247 | new file mode 100644 | ||
248 | index XXXXXXX..XXXXXXX | ||
249 | --- /dev/null | ||
250 | +++ b/hw/vmapple/vmapple.c | ||
251 | @@ -XXX,XX +XXX,XX @@ | ||
252 | +/* | ||
253 | + * VMApple machine emulation | ||
254 | + * | ||
255 | + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. | ||
256 | + * | ||
257 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
258 | + * See the COPYING file in the top-level directory. | ||
259 | + * | ||
260 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
261 | + * | ||
262 | + * VMApple is the device model that the macOS built-in hypervisor called | ||
263 | + * "Virtualization.framework" exposes to Apple Silicon macOS guests. The | ||
264 | + * machine model in this file implements the same device model in QEMU, but | ||
265 | + * does not use any code from Virtualization.Framework. | ||
266 | + */ | ||
267 | + | ||
268 | +#include "qemu/osdep.h" | ||
269 | +#include "qemu/bitops.h" | ||
270 | +#include "qemu/datadir.h" | ||
271 | +#include "qemu/error-report.h" | ||
272 | +#include "qemu/guest-random.h" | ||
273 | +#include "qemu/help-texts.h" | ||
274 | +#include "qemu/log.h" | ||
275 | +#include "qemu/module.h" | ||
276 | +#include "qemu/option.h" | ||
277 | +#include "qemu/units.h" | ||
278 | +#include "monitor/qdev.h" | ||
279 | +#include "hw/boards.h" | ||
280 | +#include "hw/irq.h" | ||
281 | +#include "hw/loader.h" | ||
282 | +#include "hw/qdev-properties.h" | ||
283 | +#include "hw/sysbus.h" | ||
284 | +#include "hw/usb.h" | ||
285 | +#include "hw/arm/boot.h" | ||
286 | +#include "hw/arm/primecell.h" | ||
287 | +#include "hw/char/pl011.h" | ||
288 | +#include "hw/intc/arm_gic.h" | ||
289 | +#include "hw/intc/arm_gicv3_common.h" | ||
290 | +#include "hw/misc/pvpanic.h" | ||
291 | +#include "hw/pci-host/gpex.h" | ||
292 | +#include "hw/usb/xhci.h" | ||
293 | +#include "hw/virtio/virtio-pci.h" | ||
294 | +#include "hw/vmapple/vmapple.h" | ||
295 | +#include "net/net.h" | ||
296 | +#include "qapi/error.h" | ||
297 | +#include "qapi/qmp/qlist.h" | ||
298 | +#include "qapi/visitor.h" | ||
299 | +#include "qapi/qapi-visit-common.h" | ||
300 | +#include "standard-headers/linux/input.h" | ||
301 | +#include "sysemu/hvf.h" | ||
302 | +#include "sysemu/kvm.h" | ||
303 | +#include "sysemu/reset.h" | ||
304 | +#include "sysemu/runstate.h" | ||
305 | +#include "sysemu/sysemu.h" | ||
306 | +#include "target/arm/internals.h" | ||
307 | +#include "target/arm/kvm_arm.h" | ||
308 | + | ||
309 | +struct VMAppleMachineClass { | ||
310 | + MachineClass parent; | ||
311 | +}; | ||
312 | + | ||
313 | +struct VMAppleMachineState { | ||
314 | + MachineState parent; | ||
315 | + | ||
316 | + Notifier machine_done; | ||
317 | + struct arm_boot_info bootinfo; | ||
318 | + MemMapEntry *memmap; | ||
319 | + const int *irqmap; | ||
320 | + DeviceState *gic; | ||
321 | + DeviceState *cfg; | ||
322 | + DeviceState *pvpanic; | ||
323 | + Notifier powerdown_notifier; | ||
324 | + PCIBus *bus; | ||
325 | + MemoryRegion fw_mr; | ||
326 | + MemoryRegion ecam_alias; | ||
327 | + uint64_t uuid; | ||
328 | +}; | ||
329 | + | ||
330 | +#define DEFINE_VMAPPLE_MACHINE_LATEST(major, minor, latest) \ | ||
331 | + static void vmapple##major##_##minor##_class_init(ObjectClass *oc, \ | ||
332 | + void *data) \ | ||
333 | + { \ | ||
334 | + MachineClass *mc = MACHINE_CLASS(oc); \ | ||
335 | + vmapple_machine_##major##_##minor##_options(mc); \ | ||
336 | + mc->desc = "QEMU " # major "." # minor " Apple Virtual Machine"; \ | ||
337 | + if (latest) { \ | ||
338 | + mc->alias = "vmapple"; \ | ||
339 | + } \ | ||
340 | + } \ | ||
341 | + static const TypeInfo machvmapple##major##_##minor##_info = { \ | ||
342 | + .name = MACHINE_TYPE_NAME("vmapple-" # major "." # minor), \ | ||
343 | + .parent = TYPE_VMAPPLE_MACHINE, \ | ||
344 | + .class_init = vmapple##major##_##minor##_class_init, \ | ||
345 | + }; \ | ||
346 | + static void machvmapple_machine_##major##_##minor##_init(void) \ | ||
347 | + { \ | ||
348 | + type_register_static(&machvmapple##major##_##minor##_info); \ | ||
349 | + } \ | ||
350 | + type_init(machvmapple_machine_##major##_##minor##_init); | ||
351 | + | ||
352 | +#define DEFINE_VMAPPLE_MACHINE_AS_LATEST(major, minor) \ | ||
353 | + DEFINE_VMAPPLE_MACHINE_LATEST(major, minor, true) | ||
354 | +#define DEFINE_VMAPPLE_MACHINE(major, minor) \ | ||
355 | + DEFINE_VMAPPLE_MACHINE_LATEST(major, minor, false) | ||
356 | + | ||
357 | +#define TYPE_VMAPPLE_MACHINE MACHINE_TYPE_NAME("vmapple") | ||
358 | +OBJECT_DECLARE_TYPE(VMAppleMachineState, VMAppleMachineClass, VMAPPLE_MACHINE) | ||
359 | + | ||
360 | +/* Number of external interrupt lines to configure the GIC with */ | ||
361 | +#define NUM_IRQS 256 | ||
362 | + | ||
363 | +enum { | ||
364 | + VMAPPLE_FIRMWARE, | ||
365 | + VMAPPLE_CONFIG, | ||
366 | + VMAPPLE_MEM, | ||
367 | + VMAPPLE_GIC_DIST, | ||
368 | + VMAPPLE_GIC_REDIST, | ||
369 | + VMAPPLE_UART, | ||
370 | + VMAPPLE_RTC, | ||
371 | + VMAPPLE_PCIE, | ||
372 | + VMAPPLE_PCIE_MMIO, | ||
373 | + VMAPPLE_PCIE_ECAM, | ||
374 | + VMAPPLE_GPIO, | ||
375 | + VMAPPLE_PVPANIC, | ||
376 | + VMAPPLE_APV_GFX, | ||
377 | + VMAPPLE_APV_IOSFC, | ||
378 | + VMAPPLE_AES_1, | ||
379 | + VMAPPLE_AES_2, | ||
380 | + VMAPPLE_BDOOR, | ||
381 | + VMAPPLE_MEMMAP_LAST, | ||
382 | +}; | ||
383 | + | ||
384 | +static MemMapEntry memmap[] = { | ||
385 | + [VMAPPLE_FIRMWARE] = { 0x00100000, 0x00100000 }, | ||
386 | + [VMAPPLE_CONFIG] = { 0x00400000, 0x00010000 }, | ||
387 | + | ||
388 | + [VMAPPLE_GIC_DIST] = { 0x10000000, 0x00010000 }, | ||
389 | + [VMAPPLE_GIC_REDIST] = { 0x10010000, 0x00400000 }, | ||
390 | + | ||
391 | + [VMAPPLE_UART] = { 0x20010000, 0x00010000 }, | ||
392 | + [VMAPPLE_RTC] = { 0x20050000, 0x00001000 }, | ||
393 | + [VMAPPLE_GPIO] = { 0x20060000, 0x00001000 }, | ||
394 | + [VMAPPLE_PVPANIC] = { 0x20070000, 0x00000002 }, | ||
395 | + [VMAPPLE_BDOOR] = { 0x30000000, 0x00200000 }, | ||
396 | + [VMAPPLE_APV_GFX] = { 0x30200000, 0x00010000 }, | ||
397 | + [VMAPPLE_APV_IOSFC] = { 0x30210000, 0x00010000 }, | ||
398 | + [VMAPPLE_AES_1] = { 0x30220000, 0x00004000 }, | ||
399 | + [VMAPPLE_AES_2] = { 0x30230000, 0x00004000 }, | ||
400 | + [VMAPPLE_PCIE_ECAM] = { 0x40000000, 0x10000000 }, | ||
401 | + [VMAPPLE_PCIE_MMIO] = { 0x50000000, 0x1fff0000 }, | ||
402 | + | ||
403 | + /* Actual RAM size depends on configuration */ | ||
404 | + [VMAPPLE_MEM] = { 0x70000000ULL, GiB }, | ||
405 | +}; | ||
406 | + | ||
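| +/* | ||
| + * External interrupt (SPI) line assignments for each device; the PCIe host | ||
| + * claims a contiguous block of GPEX_NUM_IRQS lines starting at 0x20, see | ||
| + * create_pcie(). | ||
| + */ | ||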
407 | +static const int irqmap[] = { | ||
408 | + [VMAPPLE_UART] = 1, | ||
409 | + [VMAPPLE_RTC] = 2, | ||
410 | + [VMAPPLE_GPIO] = 0x5, | ||
411 | + [VMAPPLE_APV_IOSFC] = 0x10, | ||
412 | + [VMAPPLE_APV_GFX] = 0x11, | ||
413 | + [VMAPPLE_AES_1] = 0x12, | ||
414 | + [VMAPPLE_PCIE] = 0x20, | ||
415 | +}; | ||
416 | + | ||
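| +/* | ||
| + * Number of legacy PCIe interrupt lines; used both for the GPEX host's | ||
| + * "num-irqs" property and for wiring its INTx outputs to the GIC in | ||
| + * create_pcie(). | ||
| + */ | ||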
417 | +#define GPEX_NUM_IRQS 16 | ||
418 | + | ||
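| +/* | ||
| + * The PV backdoor device hands the aux and root volumes to the guest: aux is | ||
| + * taken from the first pflash drive, root from the second pflash drive or, | ||
| + * failing that, from the first virtio drive. | ||
| + */ | ||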
419 | +static void create_bdif(VMAppleMachineState *vms, MemoryRegion *mem) | ||
420 | +{ | ||
421 | + DeviceState *bdif; | ||
422 | + SysBusDevice *bdif_sb; | ||
423 | + DriveInfo *di_aux = drive_get(IF_PFLASH, 0, 0); | ||
424 | + DriveInfo *di_root = drive_get(IF_PFLASH, 0, 1); | ||
425 | + | ||
426 | + if (!di_aux) { | ||
427 | + error_report("No AUX device. Please specify one as pflash drive."); | ||
428 | + exit(1); | ||
429 | + } | ||
430 | + | ||
431 | + if (!di_root) { | ||
432 | + /* Fall back to the first IF_VIRTIO device as root device */ | ||
433 | + di_root = drive_get(IF_VIRTIO, 0, 0); | ||
434 | + } | ||
435 | + | ||
436 | + if (!di_root) { | ||
437 | + error_report("No root device. Please specify one as virtio drive."); | ||
438 | + exit(1); | ||
439 | + } | ||
440 | + | ||
441 | + /* PV backdoor device */ | ||
442 | + bdif = qdev_new(TYPE_VMAPPLE_BDIF); | ||
443 | + bdif_sb = SYS_BUS_DEVICE(bdif); | ||
444 | + sysbus_mmio_map(bdif_sb, 0, vms->memmap[VMAPPLE_BDOOR].base); | ||
445 | + | ||
446 | + qdev_prop_set_drive(DEVICE(bdif), "aux", blk_by_legacy_dinfo(di_aux)); | ||
447 | + qdev_prop_set_drive(DEVICE(bdif), "root", blk_by_legacy_dinfo(di_root)); | ||
448 | + | ||
449 | + sysbus_realize_and_unref(bdif_sb, &error_fatal); | ||
450 | +} | ||
451 | + | ||
452 | +static void create_pvpanic(VMAppleMachineState *vms, MemoryRegion *mem) | ||
453 | +{ | ||
454 | + SysBusDevice *pvpanic; | ||
455 | + | ||
456 | + vms->pvpanic = qdev_new(TYPE_PVPANIC_MMIO_DEVICE); | ||
457 | + pvpanic = SYS_BUS_DEVICE(vms->pvpanic); | ||
458 | + sysbus_mmio_map(pvpanic, 0, vms->memmap[VMAPPLE_PVPANIC].base); | ||
459 | + | ||
460 | + sysbus_realize_and_unref(pvpanic, &error_fatal); | ||
461 | +} | ||
462 | + | ||
463 | +static bool create_cfg(VMAppleMachineState *vms, MemoryRegion *mem, | ||
464 | + Error **errp) | ||
465 | +{ | ||
466 | + ERRP_GUARD(); | ||
467 | + SysBusDevice *cfg; | ||
468 | + MachineState *machine = MACHINE(vms); | ||
469 | + uint32_t rnd = 1; | ||
470 | + | ||
471 | + vms->cfg = qdev_new(TYPE_VMAPPLE_CFG); | ||
472 | + cfg = SYS_BUS_DEVICE(vms->cfg); | ||
473 | + sysbus_mmio_map(cfg, 0, vms->memmap[VMAPPLE_CONFIG].base); | ||
474 | + | ||
475 | + qemu_guest_getrandom_nofail(&rnd, sizeof(rnd)); | ||
476 | + | ||
477 | + qdev_prop_set_uint32(vms->cfg, "nr-cpus", machine->smp.cpus); | ||
478 | + qdev_prop_set_uint64(vms->cfg, "ecid", vms->uuid); | ||
479 | + qdev_prop_set_uint64(vms->cfg, "ram-size", machine->ram_size); | ||
480 | + qdev_prop_set_uint32(vms->cfg, "rnd", rnd); | ||
481 | + | ||
482 | + if (!sysbus_realize_and_unref(cfg, errp)) { | ||
483 | + error_prepend(errp, "Error creating vmapple cfg device: "); | ||
484 | + return false; | ||
485 | + } | ||
486 | + | ||
487 | + return true; | ||
488 | +} | ||
489 | + | ||
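| +/* | ||
| + * The MMIO variant of the PV graphics device exposes two register regions | ||
| + * and two interrupt lines (gfx and iosfc), wired up here according to the | ||
| + * memory and IRQ maps above. | ||
| + */ | ||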
490 | +static void create_gfx(VMAppleMachineState *vms, MemoryRegion *mem) | ||
491 | +{ | ||
492 | + int irq_gfx = vms->irqmap[VMAPPLE_APV_GFX]; | ||
493 | + int irq_iosfc = vms->irqmap[VMAPPLE_APV_IOSFC]; | ||
494 | + SysBusDevice *gfx; | ||
495 | + | ||
496 | + gfx = SYS_BUS_DEVICE(qdev_new("apple-gfx-mmio")); | ||
497 | + sysbus_mmio_map(gfx, 0, vms->memmap[VMAPPLE_APV_GFX].base); | ||
498 | + sysbus_mmio_map(gfx, 1, vms->memmap[VMAPPLE_APV_IOSFC].base); | ||
499 | + sysbus_connect_irq(gfx, 0, qdev_get_gpio_in(vms->gic, irq_gfx)); | ||
500 | + sysbus_connect_irq(gfx, 1, qdev_get_gpio_in(vms->gic, irq_iosfc)); | ||
501 | + sysbus_realize_and_unref(gfx, &error_fatal); | ||
502 | +} | ||
503 | + | ||
504 | +static void create_aes(VMAppleMachineState *vms, MemoryRegion *mem) | ||
505 | +{ | ||
506 | + int irq = vms->irqmap[VMAPPLE_AES_1]; | ||
507 | + SysBusDevice *aes; | ||
508 | + | ||
509 | + aes = SYS_BUS_DEVICE(qdev_new(TYPE_APPLE_AES)); | ||
510 | + sysbus_mmio_map(aes, 0, vms->memmap[VMAPPLE_AES_1].base); | ||
511 | + sysbus_mmio_map(aes, 1, vms->memmap[VMAPPLE_AES_2].base); | ||
512 | + sysbus_connect_irq(aes, 0, qdev_get_gpio_in(vms->gic, irq)); | ||
513 | + sysbus_realize_and_unref(aes, &error_fatal); | ||
514 | +} | ||
515 | + | ||
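| +/* | ||
| + * The GIC device numbers its GPIO inputs with the NUM_IRQS external | ||
| + * interrupts first, followed by a bank of GIC_INTERNAL private interrupts | ||
| + * per CPU; this helper returns the input index for a given CPU's PPI. | ||
| + */ | ||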
516 | +static int arm_gic_ppi_index(int cpu_nr, int ppi_index) | ||
517 | +{ | ||
518 | + return NUM_IRQS + cpu_nr * GIC_INTERNAL + ppi_index; | ||
519 | +} | ||
520 | + | ||
521 | +static void create_gic(VMAppleMachineState *vms, MemoryRegion *mem) | ||
522 | +{ | ||
523 | + MachineState *ms = MACHINE(vms); | ||
524 | + /* We create a standalone GIC */ | ||
525 | + SysBusDevice *gicbusdev; | ||
526 | + QList *redist_region_count; | ||
527 | + int i; | ||
528 | + unsigned int smp_cpus = ms->smp.cpus; | ||
529 | + | ||
530 | + vms->gic = qdev_new(gicv3_class_name()); | ||
531 | + qdev_prop_set_uint32(vms->gic, "revision", 3); | ||
532 | + qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus); | ||
533 | + /* | ||
534 | + * Note that the num-irq property counts both internal and external | ||
535 | + * interrupts; there are always 32 of the former (mandated by GIC spec). | ||
536 | + */ | ||
537 | + qdev_prop_set_uint32(vms->gic, "num-irq", NUM_IRQS + 32); | ||
538 | + | ||
539 | + uint32_t redist0_capacity = | ||
540 | + vms->memmap[VMAPPLE_GIC_REDIST].size / GICV3_REDIST_SIZE; | ||
541 | + uint32_t redist0_count = MIN(smp_cpus, redist0_capacity); | ||
542 | + | ||
543 | + redist_region_count = qlist_new(); | ||
544 | + qlist_append_int(redist_region_count, redist0_count); | ||
545 | + qdev_prop_set_array(vms->gic, "redist-region-count", redist_region_count); | ||
546 | + | ||
547 | + gicbusdev = SYS_BUS_DEVICE(vms->gic); | ||
548 | + sysbus_realize_and_unref(gicbusdev, &error_fatal); | ||
549 | + sysbus_mmio_map(gicbusdev, 0, vms->memmap[VMAPPLE_GIC_DIST].base); | ||
550 | + sysbus_mmio_map(gicbusdev, 1, vms->memmap[VMAPPLE_GIC_REDIST].base); | ||
551 | + | ||
552 | + /* | ||
553 | + * Wire each CPU's virtual timer output to the corresponding GIC PPI | ||
554 | + * input, and the GIC's IRQ and FIQ interrupt outputs to the CPU's | ||
555 | + * interrupt inputs. | ||
556 | + */ | ||
557 | + for (i = 0; i < smp_cpus; i++) { | ||
558 | + DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); | ||
559 | + | ||
560 | + /* Map the virt timer to PPI 27 */ | ||
561 | + qdev_connect_gpio_out(cpudev, GTIMER_VIRT, | ||
562 | + qdev_get_gpio_in(vms->gic, | ||
563 | + arm_gic_ppi_index(i, 27))); | ||
564 | + | ||
565 | + /* Map the GIC IRQ and FIQ lines to CPU */ | ||
566 | + sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); | ||
567 | + sysbus_connect_irq(gicbusdev, i + smp_cpus, | ||
568 | + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); | ||
569 | + } | ||
570 | +} | ||
571 | + | ||
572 | +static void create_uart(const VMAppleMachineState *vms, int uart, | ||
573 | + MemoryRegion *mem, Chardev *chr) | ||
574 | +{ | ||
575 | + hwaddr base = vms->memmap[uart].base; | ||
576 | + int irq = vms->irqmap[uart]; | ||
577 | + DeviceState *dev = qdev_new(TYPE_PL011); | ||
578 | + SysBusDevice *s = SYS_BUS_DEVICE(dev); | ||
579 | + | ||
580 | + qdev_prop_set_chr(dev, "chardev", chr); | ||
581 | + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); | ||
582 | + memory_region_add_subregion(mem, base, | ||
583 | + sysbus_mmio_get_region(s, 0)); | ||
584 | + sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq)); | ||
585 | +} | ||
586 | + | ||
587 | +static void create_rtc(const VMAppleMachineState *vms) | ||
588 | +{ | ||
589 | + hwaddr base = vms->memmap[VMAPPLE_RTC].base; | ||
590 | + int irq = vms->irqmap[VMAPPLE_RTC]; | ||
591 | + | ||
592 | + sysbus_create_simple("pl031", base, qdev_get_gpio_in(vms->gic, irq)); | ||
593 | +} | ||
594 | + | ||
595 | +static DeviceState *gpio_key_dev; | ||
596 | +static void vmapple_powerdown_req(Notifier *n, void *opaque) | ||
597 | +{ | ||
598 | + /* Use GPIO pin 3 for the power button event */ | ||
599 | + qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1); | ||
600 | +} | ||
601 | + | ||
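| +/* | ||
| + * A system_powerdown request raises the gpio-key input, which in turn drives | ||
| + * PL061 GPIO pin 3; the guest sees the resulting PL061 interrupt as a power | ||
| + * button press. | ||
| + */ | ||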
602 | +static void create_gpio_devices(const VMAppleMachineState *vms, int gpio, | ||
603 | + MemoryRegion *mem) | ||
604 | +{ | ||
605 | + DeviceState *pl061_dev; | ||
606 | + hwaddr base = vms->memmap[gpio].base; | ||
607 | + int irq = vms->irqmap[gpio]; | ||
608 | + SysBusDevice *s; | ||
609 | + | ||
610 | + pl061_dev = qdev_new("pl061"); | ||
611 | + /* Pull lines down to 0 if not driven by the PL061 */ | ||
612 | + qdev_prop_set_uint32(pl061_dev, "pullups", 0); | ||
613 | + qdev_prop_set_uint32(pl061_dev, "pulldowns", 0xff); | ||
614 | + s = SYS_BUS_DEVICE(pl061_dev); | ||
615 | + sysbus_realize_and_unref(s, &error_fatal); | ||
616 | + memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0)); | ||
617 | + sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq)); | ||
618 | + gpio_key_dev = sysbus_create_simple("gpio-key", -1, | ||
619 | + qdev_get_gpio_in(pl061_dev, 3)); | ||
620 | +} | ||
621 | + | ||
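| +/* | ||
| + * Load the firmware image specified with -bios into a RAM region at the | ||
| + * VMAPPLE_FIRMWARE base address; vmapple_reset() starts the first CPU there. | ||
| + */ | ||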
622 | +static void vmapple_firmware_init(VMAppleMachineState *vms, | ||
623 | + MemoryRegion *sysmem) | ||
624 | +{ | ||
625 | + hwaddr size = vms->memmap[VMAPPLE_FIRMWARE].size; | ||
626 | + hwaddr base = vms->memmap[VMAPPLE_FIRMWARE].base; | ||
627 | + const char *bios_name; | ||
628 | + int image_size; | ||
629 | + char *fname; | ||
630 | + | ||
631 | + bios_name = MACHINE(vms)->firmware; | ||
632 | + if (!bios_name) { | ||
633 | + error_report("No firmware specified"); | ||
634 | + exit(1); | ||
635 | + } | ||
636 | + | ||
637 | + fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); | ||
638 | + if (!fname) { | ||
639 | + error_report("Could not find ROM image '%s'", bios_name); | ||
640 | + exit(1); | ||
641 | + } | ||
642 | + | ||
643 | + memory_region_init_ram(&vms->fw_mr, NULL, "firmware", size, &error_fatal); | ||
644 | + image_size = load_image_mr(fname, &vms->fw_mr); | ||
645 | + | ||
646 | + g_free(fname); | ||
647 | + if (image_size < 0) { | ||
648 | + error_report("Could not load ROM image '%s'", bios_name); | ||
649 | + exit(1); | ||
650 | + } | ||
651 | + | ||
652 | + memory_region_add_subregion(get_system_memory(), base, &vms->fw_mr); | ||
653 | +} | ||
654 | + | ||
655 | +static void create_pcie(VMAppleMachineState *vms) | ||
656 | +{ | ||
657 | + hwaddr base_mmio = vms->memmap[VMAPPLE_PCIE_MMIO].base; | ||
658 | + hwaddr size_mmio = vms->memmap[VMAPPLE_PCIE_MMIO].size; | ||
659 | + hwaddr base_ecam = vms->memmap[VMAPPLE_PCIE_ECAM].base; | ||
660 | + hwaddr size_ecam = vms->memmap[VMAPPLE_PCIE_ECAM].size; | ||
661 | + int irq = vms->irqmap[VMAPPLE_PCIE]; | ||
662 | + MemoryRegion *mmio_alias; | ||
663 | + MemoryRegion *mmio_reg; | ||
664 | + MemoryRegion *ecam_reg; | ||
665 | + DeviceState *dev; | ||
666 | + int i; | ||
667 | + PCIHostState *pci; | ||
668 | + DeviceState *usb_controller; | ||
669 | + USBBus *usb_bus; | ||
670 | + | ||
671 | + dev = qdev_new(TYPE_GPEX_HOST); | ||
672 | + qdev_prop_set_uint32(dev, "num-irqs", GPEX_NUM_IRQS); | ||
673 | + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); | ||
674 | + | ||
675 | + /* Map only the first size_ecam bytes of ECAM space */ | ||
676 | + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); | ||
677 | + memory_region_init_alias(&vms->ecam_alias, OBJECT(dev), "pcie-ecam", | ||
678 | + ecam_reg, 0, size_ecam); | ||
679 | + memory_region_add_subregion(get_system_memory(), base_ecam, | ||
680 | + &vms->ecam_alias); | ||
681 | + | ||
682 | + /* | ||
683 | + * Map the MMIO window from [0x50000000-0x7fff0000] in PCI space into | ||
684 | + * system address space at [0x50000000-0x7fff0000]. | ||
685 | + */ | ||
686 | + mmio_alias = g_new0(MemoryRegion, 1); | ||
687 | + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); | ||
688 | + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio", | ||
689 | + mmio_reg, base_mmio, size_mmio); | ||
690 | + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias); | ||
691 | + | ||
692 | + for (i = 0; i < GPEX_NUM_IRQS; i++) { | ||
693 | + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, | ||
694 | + qdev_get_gpio_in(vms->gic, irq + i)); | ||
695 | + gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); | ||
696 | + } | ||
697 | + | ||
698 | + pci = PCI_HOST_BRIDGE(dev); | ||
699 | + vms->bus = pci->bus; | ||
700 | + g_assert(vms->bus); | ||
701 | + | ||
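| + /* Create a virtio-net-pci device for each configured or default NIC */ | ||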
702 | + while ((dev = qemu_create_nic_device("virtio-net-pci", true, NULL))) { | ||
703 | + qdev_realize_and_unref(dev, BUS(vms->bus), &error_fatal); | ||
704 | + } | ||
705 | + | ||
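| + /* | ||
| + * Only create the default XHCI controller and USB HID devices when | ||
| + * machine defaults are enabled, i.e. -nodefaults was not given. | ||
| + */ | ||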
706 | + if (defaults_enabled()) { | ||
707 | + usb_controller = qdev_new(TYPE_QEMU_XHCI); | ||
708 | + qdev_realize_and_unref(usb_controller, BUS(pci->bus), &error_fatal); | ||
709 | + | ||
710 | + usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS, | ||
711 | + &error_fatal)); | ||
712 | + usb_create_simple(usb_bus, "usb-kbd"); | ||
713 | + usb_create_simple(usb_bus, "usb-tablet"); | ||
714 | + } | ||
715 | +} | ||
716 | + | ||
717 | +static void vmapple_reset(void *opaque) | ||
718 | +{ | ||
719 | + VMAppleMachineState *vms = opaque; | ||
720 | + hwaddr base = vms->memmap[VMAPPLE_FIRMWARE].base; | ||
721 | + | ||
722 | + cpu_set_pc(first_cpu, base); | ||
723 | +} | ||
724 | + | ||
725 | +static void mach_vmapple_init(MachineState *machine) | ||
726 | +{ | ||
727 | + VMAppleMachineState *vms = VMAPPLE_MACHINE(machine); | ||
728 | + MachineClass *mc = MACHINE_GET_CLASS(machine); | ||
729 | + const CPUArchIdList *possible_cpus; | ||
730 | + MemoryRegion *sysmem = get_system_memory(); | ||
731 | + int n; | ||
732 | + unsigned int smp_cpus = machine->smp.cpus; | ||
733 | + unsigned int max_cpus = machine->smp.max_cpus; | ||
734 | + | ||
735 | + vms->memmap = memmap; | ||
736 | + machine->usb = true; | ||
737 | + | ||
738 | + possible_cpus = mc->possible_cpu_arch_ids(machine); | ||
739 | + assert(possible_cpus->len == max_cpus); | ||
740 | + for (n = 0; n < possible_cpus->len; n++) { | ||
741 | + Object *cpu; | ||
742 | + CPUState *cs; | ||
743 | + | ||
744 | + if (n >= smp_cpus) { | ||
745 | + break; | ||
746 | + } | ||
747 | + | ||
748 | + cpu = object_new(possible_cpus->cpus[n].type); | ||
749 | + object_property_set_int(cpu, "mp-affinity", | ||
750 | + possible_cpus->cpus[n].arch_id, &error_fatal); | ||
751 | + | ||
752 | + cs = CPU(cpu); | ||
753 | + cs->cpu_index = n; | ||
754 | + | ||
755 | + numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpu), | ||
756 | + &error_fatal); | ||
757 | + | ||
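| + /* | ||
| + * Run the guest at EL1 only: strip EL3 and EL2 support where the CPU | ||
| + * type offers it, and provide PSCI services via the HVC conduit. | ||
| + */ | ||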
758 | + if (object_property_find(cpu, "has_el3")) { | ||
759 | + object_property_set_bool(cpu, "has_el3", false, &error_fatal); | ||
760 | + } | ||
761 | + if (object_property_find(cpu, "has_el2")) { | ||
762 | + object_property_set_bool(cpu, "has_el2", false, &error_fatal); | ||
763 | + } | ||
764 | + object_property_set_int(cpu, "psci-conduit", QEMU_PSCI_CONDUIT_HVC, | ||
765 | + &error_fatal); | ||
766 | + | ||
767 | + /* Secondary CPUs start in PSCI powered-down state */ | ||
768 | + if (n > 0) { | ||
769 | + object_property_set_bool(cpu, "start-powered-off", true, | ||
770 | + &error_fatal); | ||
771 | + } | ||
772 | + | ||
773 | + object_property_set_link(cpu, "memory", OBJECT(sysmem), &error_abort); | ||
774 | + qdev_realize(DEVICE(cpu), NULL, &error_fatal); | ||
775 | + object_unref(cpu); | ||
776 | + } | ||
777 | + | ||
778 | + memory_region_add_subregion(sysmem, vms->memmap[VMAPPLE_MEM].base, | ||
779 | + machine->ram); | ||
780 | + | ||
781 | + create_gic(vms, sysmem); | ||
782 | + create_bdif(vms, sysmem); | ||
783 | + create_pvpanic(vms, sysmem); | ||
784 | + create_aes(vms, sysmem); | ||
785 | + create_gfx(vms, sysmem); | ||
786 | + create_uart(vms, VMAPPLE_UART, sysmem, serial_hd(0)); | ||
787 | + create_rtc(vms); | ||
788 | + create_pcie(vms); | ||
789 | + | ||
790 | + create_gpio_devices(vms, VMAPPLE_GPIO, sysmem); | ||
791 | + | ||
792 | + vmapple_firmware_init(vms, sysmem); | ||
793 | + create_cfg(vms, sysmem, &error_fatal); | ||
794 | + | ||
795 | + /* connect powerdown request */ | ||
796 | + vms->powerdown_notifier.notify = vmapple_powerdown_req; | ||
797 | + qemu_register_powerdown_notifier(&vms->powerdown_notifier); | ||
798 | + | ||
799 | + vms->bootinfo.ram_size = machine->ram_size; | ||
800 | + vms->bootinfo.board_id = -1; | ||
801 | + vms->bootinfo.loader_start = vms->memmap[VMAPPLE_MEM].base; | ||
802 | + vms->bootinfo.skip_dtb_autoload = true; | ||
803 | + vms->bootinfo.firmware_loaded = true; | ||
804 | + arm_load_kernel(ARM_CPU(first_cpu), machine, &vms->bootinfo); | ||
805 | + | ||
806 | + qemu_register_reset(vmapple_reset, vms); | ||
807 | +} | ||
808 | + | ||
809 | +static CpuInstanceProperties | ||
810 | +vmapple_cpu_index_to_props(MachineState *ms, unsigned cpu_index) | ||
811 | +{ | ||
812 | + MachineClass *mc = MACHINE_GET_CLASS(ms); | ||
813 | + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); | ||
814 | + | ||
815 | + assert(cpu_index < possible_cpus->len); | ||
816 | + return possible_cpus->cpus[cpu_index].props; | ||
817 | +} | ||
818 | + | ||
819 | + | ||
820 | +static int64_t vmapple_get_default_cpu_node_id(const MachineState *ms, int idx) | ||
821 | +{ | ||
822 | + return idx % ms->numa_state->num_nodes; | ||
823 | +} | ||
824 | + | ||
825 | +static const CPUArchIdList *vmapple_possible_cpu_arch_ids(MachineState *ms) | ||
826 | +{ | ||
827 | + int n; | ||
828 | + unsigned int max_cpus = ms->smp.max_cpus; | ||
829 | + | ||
830 | + if (ms->possible_cpus) { | ||
831 | + assert(ms->possible_cpus->len == max_cpus); | ||
832 | + return ms->possible_cpus; | ||
833 | + } | ||
834 | + | ||
835 | + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + | ||
836 | + sizeof(CPUArchId) * max_cpus); | ||
837 | + ms->possible_cpus->len = max_cpus; | ||
838 | + for (n = 0; n < ms->possible_cpus->len; n++) { | ||
839 | + ms->possible_cpus->cpus[n].type = ms->cpu_type; | ||
840 | + ms->possible_cpus->cpus[n].arch_id = | ||
841 | + arm_build_mp_affinity(n, GICV3_TARGETLIST_BITS); | ||
842 | + ms->possible_cpus->cpus[n].props.has_thread_id = true; | ||
843 | + ms->possible_cpus->cpus[n].props.thread_id = n; | ||
844 | + } | ||
845 | + return ms->possible_cpus; | ||
846 | +} | ||
847 | + | ||
848 | +static void vmapple_machine_class_init(ObjectClass *oc, void *data) | ||
849 | +{ | ||
850 | + MachineClass *mc = MACHINE_CLASS(oc); | ||
851 | + | ||
852 | + mc->init = mach_vmapple_init; | ||
853 | + mc->max_cpus = 32; | ||
854 | + mc->block_default_type = IF_VIRTIO; | ||
855 | + mc->no_cdrom = 1; | ||
856 | + mc->pci_allow_0_address = true; | ||
857 | + mc->minimum_page_bits = 12; | ||
858 | + mc->possible_cpu_arch_ids = vmapple_possible_cpu_arch_ids; | ||
859 | + mc->cpu_index_to_instance_props = vmapple_cpu_index_to_props; | ||
860 | + mc->default_cpu_type = ARM_CPU_TYPE_NAME("host"); | ||
861 | + mc->get_default_cpu_node_id = vmapple_get_default_cpu_node_id; | ||
862 | + mc->default_ram_id = "mach-vmapple.ram"; | ||
863 | + | ||
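| + /* Default virtio-pci devices on this machine to modern (non-transitional) mode */ | ||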
864 | + object_register_sugar_prop(TYPE_VIRTIO_PCI, "disable-legacy", | ||
865 | + "on", true); | ||
866 | +} | ||
867 | + | ||
868 | +static void vmapple_instance_init(Object *obj) | ||
869 | +{ | ||
870 | + VMAppleMachineState *vms = VMAPPLE_MACHINE(obj); | ||
871 | + | ||
872 | + vms->irqmap = irqmap; | ||
873 | + | ||
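| + /* | ||
| + * The machine's "uuid" property is forwarded to the vmapple cfg device as | ||
| + * its "ecid" value, see create_cfg(). | ||
| + */ | ||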
874 | + object_property_add_uint64_ptr(obj, "uuid", &vms->uuid, | ||
875 | + OBJ_PROP_FLAG_READWRITE); | ||
876 | + object_property_set_description(obj, "uuid", "Machine UUID (SDOM)"); | ||
877 | +} | ||
878 | + | ||
879 | +static const TypeInfo vmapple_machine_info = { | ||
880 | + .name = TYPE_VMAPPLE_MACHINE, | ||
881 | + .parent = TYPE_MACHINE, | ||
882 | + .abstract = true, | ||
883 | + .instance_size = sizeof(VMAppleMachineState), | ||
884 | + .class_size = sizeof(VMAppleMachineClass), | ||
885 | + .class_init = vmapple_machine_class_init, | ||
886 | + .instance_init = vmapple_instance_init, | ||
887 | +}; | ||
888 | + | ||
889 | +static void machvmapple_machine_init(void) | ||
890 | +{ | ||
891 | + type_register_static(&vmapple_machine_info); | ||
892 | +} | ||
893 | +type_init(machvmapple_machine_init); | ||
894 | + | ||
895 | +static void vmapple_machine_10_0_options(MachineClass *mc) | ||
896 | +{ | ||
897 | +} | ||
898 | +DEFINE_VMAPPLE_MACHINE_AS_LATEST(10, 0) | ||
899 | + | ||
900 | -- | ||
901 | 2.39.5 (Apple Git-154) | ||
902 | |||
903 | diff view generated by jsdifflib |