Instead of having a fixed limit of 32 slots, we allow the number of slots
to expand. Currently there does not seem to be a need to add a limit, but
if the allocation fails, we will abort. The KVM backend was used for
inspiration here.
Signed-off-by: Joelle van Dyne <j@getutm.app>
---
include/system/hvf_int.h | 12 ++++++-
accel/hvf/hvf-all.c | 78 ++++++++++++++++++++++++++++++----------
2 files changed, 71 insertions(+), 19 deletions(-)
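For context, here is a minimal standalone sketch of the grow-on-demand pattern
the patch follows (double the array, zero-fill the newly added tail, assign the
new slot ids). The demo_slot type and slots_grow() helper are made up for
illustration only and use plain realloc() instead of the GLib helpers in the
actual change:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for hvf_slot; not the QEMU definition. */
typedef struct {
    uint64_t start;
    uint64_t size;
    int slot_id;
} demo_slot;

static demo_slot *slots;
static unsigned int num_slots;

/* Grow the slot array to num_new entries, zero-filling the new tail. */
static int slots_grow(unsigned int num_new)
{
    demo_slot *p;
    unsigned int i;

    if (num_new <= num_slots) {
        return 0;
    }
    p = realloc(slots, num_new * sizeof(*p));
    if (!p) {
        return -1;  /* the patch aborts here; a caller could also retry */
    }
    /* realloc() leaves the extension uninitialized, so clear it. */
    memset(&p[num_slots], 0, (num_new - num_slots) * sizeof(*p));
    for (i = num_slots; i < num_new; i++) {
        p[i].slot_id = i;
    }
    slots = p;
    num_slots = num_new;
    return 0;
}

int main(void)
{
    slots_grow(32);              /* initial allocation at VM start */
    slots_grow(num_slots * 2);   /* double once no free slot is left */
    printf("%u slots allocated\n", num_slots);
    free(slots);
    return 0;
}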
diff --git a/include/system/hvf_int.h b/include/system/hvf_int.h
index 3d2be4092e..1d2616595c 100644
--- a/include/system/hvf_int.h
+++ b/include/system/hvf_int.h
@@ -27,6 +27,7 @@ typedef hv_vcpuid_t hvf_vcpuid;
/* hvf_slot flags */
#define HVF_SLOT_LOG (1 << 0)
+/* Represent memory logically mapped by QEMU */
typedef struct hvf_slot {
uint64_t start;
uint64_t size;
@@ -36,6 +37,14 @@ typedef struct hvf_slot {
MemoryRegion *region;
} hvf_slot;
+/* Represent memory currently mapped in HVF */
+typedef struct hvf_mac_slot {
+ int present;
+ uint64_t size;
+ uint64_t gpa_start;
+ uint64_t gva;
+} hvf_mac_slot;
+
typedef struct hvf_vcpu_caps {
uint64_t vmx_cap_pinbased;
uint64_t vmx_cap_procbased;
@@ -48,7 +57,8 @@ typedef struct hvf_vcpu_caps {
struct HVFState {
AccelState parent_obj;
- hvf_slot slots[32];
+ hvf_slot *slots;
+ hvf_mac_slot *mac_slots;
int num_slots;
hvf_vcpu_caps *hvf_caps;
diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c
index 0a4b498e83..367ff4fd40 100644
--- a/accel/hvf/hvf-all.c
+++ b/accel/hvf/hvf-all.c
@@ -21,14 +21,8 @@
bool hvf_allowed;
-struct mac_slot {
- int present;
- uint64_t size;
- uint64_t gpa_start;
- uint64_t gva;
-};
-
-struct mac_slot mac_slots[32];
+/* Default num of memslots to be allocated when VM starts */
+#define HVF_MEMSLOTS_NUM_ALLOC_DEFAULT 32
const char *hvf_return_string(hv_return_t ret)
{
@@ -58,12 +52,60 @@ void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
abort();
}
+static bool hvf_slots_grow(HVFState *state, unsigned int num_slots_new)
+{
+ unsigned int i, cur = state->num_slots;
+ hvf_slot *slots;
+ hvf_mac_slot *mac_slots;
+
+ assert(num_slots_new > cur);
+ if (cur == 0) {
+ slots = g_new0(hvf_slot, num_slots_new);
+ if (!slots) {
+ return false;
+ }
+ mac_slots = g_new0(hvf_mac_slot, num_slots_new);
+ if (!mac_slots) {
+ g_free(slots);
+ return false;
+ }
+ } else {
+ slots = g_renew(hvf_slot, state->slots, num_slots_new);
+ if (!slots) {
+ return false;
+ }
+ mac_slots = g_renew(hvf_mac_slot, state->mac_slots, num_slots_new);
+ if (!mac_slots) {
+ /* save allocated slots but not new size */
+ state->slots = slots;
+ return false;
+ }
+ /*
+ * g_renew() doesn't initialize extended buffers, however hvf
+ * memslots require fields to be zero-initialized. E.g. pointers,
+ * memory_size field, etc.
+ */
+ memset(&slots[cur], 0x0, sizeof(slots[0]) * (num_slots_new - cur));
+ memset(&mac_slots[cur], 0x0, sizeof(mac_slots[0]) * (num_slots_new - cur));
+ }
+
+ for (i = cur; i < num_slots_new; i++) {
+ slots[i].slot_id = i;
+ }
+
+ state->slots = slots;
+ state->mac_slots = mac_slots;
+ state->num_slots = num_slots_new;
+
+ return true;
+}
+
static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
- struct mac_slot *macslot;
+ hvf_mac_slot *macslot;
hv_return_t ret;
- macslot = &mac_slots[slot->slot_id];
+ macslot = &hvf_state->mac_slots[slot->slot_id];
if (macslot->present) {
if (macslot->size != slot->size) {
@@ -160,8 +202,11 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
}
if (x == hvf_state->num_slots) {
- error_report("No free slots");
- abort();
+ if (!hvf_slots_grow(hvf_state, hvf_state->num_slots * 2)) {
+ error_report("Cannot allocate any more slots");
+ abort();
+ }
+ mem = &hvf_state->slots[x];
}
mem->size = int128_get64(section->size);
@@ -250,11 +295,11 @@ static MemoryListener hvf_memory_listener = {
static int hvf_accel_init(AccelState *as, MachineState *ms)
{
- int x;
hv_return_t ret;
HVFState *s = HVF_STATE(as);
int pa_range = 36;
MachineClass *mc = MACHINE_GET_CLASS(ms);
+ bool success;
if (mc->hvf_get_physical_address_range) {
pa_range = mc->hvf_get_physical_address_range(ms);
@@ -271,11 +316,8 @@ static int hvf_accel_init(AccelState *as, MachineState *ms)
}
assert_hvf_ok(ret);
- s->num_slots = ARRAY_SIZE(s->slots);
- for (x = 0; x < s->num_slots; ++x) {
- s->slots[x].size = 0;
- s->slots[x].slot_id = x;
- }
+ success = hvf_slots_grow(s, HVF_MEMSLOTS_NUM_ALLOC_DEFAULT);
+ assert(success);
QTAILQ_INIT(&s->hvf_sw_breakpoints);
--
2.41.0
Hi Joelle,

On 3/12/25 04:58, Joelle van Dyne wrote:
> Instead of having a fixed limit of 32 slots, we allow the number of slots
> to expand. Currently there does not seem to be a need to add a limit, but
> if the allocation fails, we will abort. The KVM backend was used for
> inspiration here.
>
> Signed-off-by: Joelle van Dyne <j@getutm.app>
> ---
>  include/system/hvf_int.h | 12 ++++++-
>  accel/hvf/hvf-all.c      | 78 ++++++++++++++++++++++++++++++----------
>  2 files changed, 71 insertions(+), 19 deletions(-)

See first 12 patches of:
https://lore.kernel.org/qemu-devel/20251114200422.4280-1-philmd@linaro.org/
Instead of silently failing, we should print a warning so it is clear why
a future guest access to that location might cause an exception.
Signed-off-by: Joelle van Dyne <j@getutm.app>
---
accel/hvf/hvf-all.c | 8 ++++++++
1 file changed, 8 insertions(+)
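For reference, a small standalone sketch of the condition being reported on.
The region_is_mappable() helper and the IS_ALIGNED macro are invented for this
example; the real code tests QEMU_IS_ALIGNED() on the section size and on its
offset within the address space before deciding whether to map the region as
RAM:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same idea as QEMU's QEMU_IS_ALIGNED() check. */
#define IS_ALIGNED(n, m) (((n) % (m)) == 0)

/* Warn about and reject a [start, start + size) region that is not page aligned. */
static bool region_is_mappable(uint64_t start, uint64_t size, uint64_t page_size)
{
    if (!IS_ALIGNED(start, page_size) || !IS_ALIGNED(size, page_size)) {
        fprintf(stderr,
                "warning: cannot add 0x%016" PRIx64 ":0x%016" PRIx64
                " because it is not aligned to page size (0x%" PRIx64 ")\n",
                start, start + size, page_size);
        return false;
    }
    return true;
}

int main(void)
{
    region_is_mappable(0x10000, 0x8000, 0x4000);  /* aligned: mapped */
    region_is_mappable(0x10800, 0x8000, 0x4000);  /* unaligned start: warns */
    return 0;
}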
diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c
index 0a4b498e83..7823d8416c 100644
--- a/accel/hvf/hvf-all.c
+++ b/accel/hvf/hvf-all.c
@@ -112,6 +112,14 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) ||
!QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) {
+ if (add) {
+ warn_report("Cannot add 0x%016llX:0x%016llX because it is not "
+ "aligned to page size (0x%X)",
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(section->size),
+ (uint32_t)page_size);
+ }
/* Not page aligned, so we can not map as RAM */
add = false;
}
--
2.41.0