[Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code

David Hildenbrand posted 3 patches 7 years, 9 months ago
There is a newer version of this series
[Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by David Hildenbrand 7 years, 9 months ago
To be able to reuse MemoryDevice logic from other devices besides
pc-dimm, factor the relevant stuff out into the MemoryDevice code.

As we don't care about slots for memory devices that are not pc-dimm,
don't factor that part out.

Most of this patch just moves checks and logic around. While at it, make
the code detect certain error conditions (e.g. fragmented memory) more
reliably.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 hw/i386/pc.c                   |  12 +--
 hw/mem/memory-device.c         | 162 ++++++++++++++++++++++++++++++++++++
 hw/mem/pc-dimm.c               | 185 +++--------------------------------------
 hw/ppc/spapr.c                 |   9 +-
 include/hw/mem/memory-device.h |   4 +
 include/hw/mem/pc-dimm.h       |  14 +---
 6 files changed, 185 insertions(+), 201 deletions(-)

diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index fa8862af33..1c25546a0c 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1711,7 +1711,7 @@ static void pc_dimm_plug(HotplugHandler *hotplug_dev,
         goto out;
     }
 
-    pc_dimm_memory_plug(dev, &pcms->hotplug_memory, mr, align, &local_err);
+    pc_dimm_memory_plug(dev, align, &local_err);
     if (local_err) {
         goto out;
     }
@@ -1761,17 +1761,9 @@ static void pc_dimm_unplug(HotplugHandler *hotplug_dev,
                            DeviceState *dev, Error **errp)
 {
     PCMachineState *pcms = PC_MACHINE(hotplug_dev);
-    PCDIMMDevice *dimm = PC_DIMM(dev);
-    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
-    MemoryRegion *mr;
     HotplugHandlerClass *hhc;
     Error *local_err = NULL;
 
-    mr = ddc->get_memory_region(dimm, &local_err);
-    if (local_err) {
-        goto out;
-    }
-
     hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
     hhc->unplug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
 
@@ -1779,7 +1771,7 @@ static void pc_dimm_unplug(HotplugHandler *hotplug_dev,
         goto out;
     }
 
-    pc_dimm_memory_unplug(dev, &pcms->hotplug_memory, mr);
+    pc_dimm_memory_unplug(dev);
     object_unparent(OBJECT(dev));
 
  out:
diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
index b860c9c582..b96efa3bf4 100644
--- a/hw/mem/memory-device.c
+++ b/hw/mem/memory-device.c
@@ -15,6 +15,8 @@
 #include "qapi/error.h"
 #include "hw/boards.h"
 #include "qemu/range.h"
+#include "hw/virtio/vhost.h"
+#include "sysemu/kvm.h"
 
 static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
 {
@@ -106,6 +108,166 @@ uint64_t get_plugged_memory_size(void)
     return size;
 }
 
+static int memory_device_used_region_size_internal(Object *obj, void *opaque)
+{
+    uint64_t *size = opaque;
+
+    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
+        DeviceState *dev = DEVICE(obj);
+        MemoryDeviceState *md = MEMORY_DEVICE(obj);
+        MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);
+
+        if (dev->realized) {
+            *size += mdc->get_region_size(md, &error_abort);
+        }
+    }
+
+    object_child_foreach(obj, memory_device_used_region_size_internal, opaque);
+    return 0;
+}
+
+static uint64_t memory_device_used_region_size(void)
+{
+    uint64_t size = 0;
+
+    memory_device_used_region_size_internal(qdev_get_machine(), &size);
+
+    return size;
+}
+
+uint64_t memory_device_get_free_addr(uint64_t *hint, uint64_t align,
+                                     uint64_t size, Error **errp)
+{
+    const uint64_t used_region_size = memory_device_used_region_size();
+    uint64_t address_space_start, address_space_end;
+    MachineState *machine = MACHINE(qdev_get_machine());
+    MachineClass *mc = MACHINE_GET_CLASS(machine);
+    MemoryHotplugState *hpms;
+    GSList *list = NULL, *item;
+    uint64_t new_addr = 0;
+
+    if (!mc->get_memory_hotplug_state) {
+        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
+                         "supported by the machine");
+        return 0;
+    }
+
+    hpms = mc->get_memory_hotplug_state(machine);
+    if (!hpms || !memory_region_size(&hpms->mr)) {
+        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
+                         "enabled, please specify the maxmem option");
+        return 0;
+    }
+    address_space_start = hpms->base;
+    address_space_end = address_space_start + memory_region_size(&hpms->mr);
+    g_assert(address_space_end >= address_space_start);
+
+    if (used_region_size + size > machine->maxram_size - machine->ram_size) {
+        error_setg(errp, "not enough space, currently 0x%" PRIx64
+                   " in use of total hot pluggable 0x" RAM_ADDR_FMT,
+                   used_region_size, machine->maxram_size - machine->ram_size);
+        return 0;
+    }
+
+    if (hint && QEMU_ALIGN_UP(*hint, align) != *hint) {
+        error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
+                   align);
+        return 0;
+    }
+
+    if (QEMU_ALIGN_UP(size, align) != size) {
+        error_setg(errp, "backend memory size must be multiple of 0x%"
+                   PRIx64, align);
+        return 0;
+    }
+
+    if (hint) {
+        new_addr = *hint;
+        if (new_addr < address_space_start) {
+            error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
+                       "] at 0x%" PRIx64, new_addr, size, address_space_start);
+            return 0;
+        } else if ((new_addr + size) > address_space_end) {
+            error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
+                       "] beyond 0x%" PRIx64, new_addr, size,
+                       address_space_end);
+            return 0;
+        }
+    } else {
+        new_addr = address_space_start;
+    }
+
+    /* find address range that will fit new memory device */
+    object_child_foreach(qdev_get_machine(), memory_device_built_list, &list);
+    for (item = list; item; item = g_slist_next(item)) {
+        MemoryDeviceState *md = item->data;
+        MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(OBJECT(md));
+        uint64_t md_size, md_addr;
+
+        md_addr = mdc->get_addr(md);
+        md_size = mdc->get_region_size(md, errp);
+        if (*errp) {
+            goto out;
+        }
+
+        if (ranges_overlap(md_addr, md_size, new_addr, size)) {
+            if (hint) {
+                DeviceState *d = DEVICE(md);
+                error_setg(errp, "address range conflicts with '%s'", d->id);
+                goto out;
+            }
+            new_addr = QEMU_ALIGN_UP(md_addr + md_size, align);
+        }
+    }
+
+    if (new_addr + size > address_space_end) {
+        error_setg(errp, "could not find position in guest address space for "
+                   "memory device - memory fragmented due to alignments");
+        goto out;
+    }
+out:
+    g_slist_free(list);
+    return new_addr;
+}
+
+void memory_device_plug_region(MemoryRegion *mr, uint64_t addr, Error **errp)
+{
+    MachineState *machine = MACHINE(qdev_get_machine());
+    MachineClass *mc = MACHINE_GET_CLASS(machine);
+    MemoryHotplugState *hpms;
+
+    /* we expect a previous call to memory_device_get_free_addr() */
+    g_assert(mc->get_memory_hotplug_state);
+    hpms = mc->get_memory_hotplug_state(machine);
+    g_assert(hpms);
+
+    /* we will need a new memory slot for kvm and vhost */
+    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
+        error_setg(errp, "hypervisor has no free memory slots left");
+        return;
+    }
+    if (!vhost_has_free_slot()) {
+        error_setg(errp, "a used vhost backend has no free memory slots left");
+        return;
+    }
+
+    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
+}
+
+void memory_device_unplug_region(MemoryRegion *mr)
+{
+    MachineState *machine = MACHINE(qdev_get_machine());
+    MachineClass *mc = MACHINE_GET_CLASS(machine);
+    MemoryHotplugState *hpms;
+
+    /* we expect a previous call to memory_device_get_free_addr() */
+    g_assert(mc->get_memory_hotplug_state);
+    hpms = mc->get_memory_hotplug_state(machine);
+    g_assert(hpms);
+
+    memory_region_del_subregion(&hpms->mr, mr);
+}
+
 static const TypeInfo memory_device_info = {
     .name          = TYPE_MEMORY_DEVICE,
     .parent        = TYPE_INTERFACE,
diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index 1dbf699e02..cf23ab5d76 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -25,19 +25,10 @@
 #include "qapi/error.h"
 #include "qemu/config-file.h"
 #include "qapi/visitor.h"
-#include "qemu/range.h"
 #include "sysemu/numa.h"
-#include "sysemu/kvm.h"
 #include "trace.h"
-#include "hw/virtio/vhost.h"
 
-typedef struct pc_dimms_capacity {
-     uint64_t size;
-     Error    **errp;
-} pc_dimms_capacity;
-
-void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
-                         MemoryRegion *mr, uint64_t align, Error **errp)
+void pc_dimm_memory_plug(DeviceState *dev, uint64_t align, Error **errp)
 {
     int slot;
     MachineState *machine = MACHINE(qdev_get_machine());
@@ -45,37 +36,26 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
     PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
     MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm);
     Error *local_err = NULL;
-    uint64_t existing_dimms_capacity = 0;
+    MemoryRegion *mr;
     uint64_t addr;
 
-    addr = object_property_get_uint(OBJECT(dimm),
-                                    PC_DIMM_ADDR_PROP, &local_err);
+    mr = ddc->get_memory_region(dimm, &local_err);
     if (local_err) {
         goto out;
     }
 
-    addr = pc_dimm_get_free_addr(hpms->base,
-                                 memory_region_size(&hpms->mr),
-                                 !addr ? NULL : &addr, align,
-                                 memory_region_size(mr), &local_err);
+    addr = object_property_get_uint(OBJECT(dimm),
+                                    PC_DIMM_ADDR_PROP, &local_err);
     if (local_err) {
         goto out;
     }
 
-    existing_dimms_capacity = pc_existing_dimms_capacity(&local_err);
+    addr = memory_device_get_free_addr(!addr ? NULL : &addr, align,
+                                       memory_region_size(mr), &local_err);
     if (local_err) {
         goto out;
     }
 
-    if (existing_dimms_capacity + memory_region_size(mr) >
-        machine->maxram_size - machine->ram_size) {
-        error_setg(&local_err, "not enough space, currently 0x%" PRIx64
-                   " in use of total hot pluggable 0x" RAM_ADDR_FMT,
-                   existing_dimms_capacity,
-                   machine->maxram_size - machine->ram_size);
-        goto out;
-    }
-
     object_property_set_uint(OBJECT(dev), addr, PC_DIMM_ADDR_PROP, &local_err);
     if (local_err) {
         goto out;
@@ -98,67 +78,27 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
     }
     trace_mhp_pc_dimm_assigned_slot(slot);
 
-    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
-        error_setg(&local_err, "hypervisor has no free memory slots left");
-        goto out;
-    }
-
-    if (!vhost_has_free_slot()) {
-        error_setg(&local_err, "a used vhost backend has no free"
-                               " memory slots left");
+    memory_device_plug_region(mr, addr, &local_err);
+    if (local_err) {
         goto out;
     }
-
-    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
     vmstate_register_ram(vmstate_mr, dev);
 
 out:
     error_propagate(errp, local_err);
 }
 
-void pc_dimm_memory_unplug(DeviceState *dev, MemoryHotplugState *hpms,
-                           MemoryRegion *mr)
+void pc_dimm_memory_unplug(DeviceState *dev)
 {
     PCDIMMDevice *dimm = PC_DIMM(dev);
     PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
     MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm);
+    MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
 
-    memory_region_del_subregion(&hpms->mr, mr);
+    memory_device_unplug_region(mr);
     vmstate_unregister_ram(vmstate_mr, dev);
 }
 
-static int pc_existing_dimms_capacity_internal(Object *obj, void *opaque)
-{
-    pc_dimms_capacity *cap = opaque;
-    uint64_t *size = &cap->size;
-
-    if (object_dynamic_cast(obj, TYPE_PC_DIMM)) {
-        DeviceState *dev = DEVICE(obj);
-
-        if (dev->realized) {
-            (*size) += object_property_get_uint(obj, PC_DIMM_SIZE_PROP,
-                cap->errp);
-        }
-
-        if (cap->errp && *cap->errp) {
-            return 1;
-        }
-    }
-    object_child_foreach(obj, pc_existing_dimms_capacity_internal, opaque);
-    return 0;
-}
-
-uint64_t pc_existing_dimms_capacity(Error **errp)
-{
-    pc_dimms_capacity cap;
-
-    cap.size = 0;
-    cap.errp = errp;
-
-    pc_existing_dimms_capacity_internal(qdev_get_machine(), &cap);
-    return cap.size;
-}
-
 static int pc_dimm_slot2bitmap(Object *obj, void *opaque)
 {
     unsigned long *bitmap = opaque;
@@ -205,107 +145,6 @@ out:
     return slot;
 }
 
-static gint pc_dimm_addr_sort(gconstpointer a, gconstpointer b)
-{
-    PCDIMMDevice *x = PC_DIMM(a);
-    PCDIMMDevice *y = PC_DIMM(b);
-    Int128 diff = int128_sub(int128_make64(x->addr), int128_make64(y->addr));
-
-    if (int128_lt(diff, int128_zero())) {
-        return -1;
-    } else if (int128_gt(diff, int128_zero())) {
-        return 1;
-    }
-    return 0;
-}
-
-static int pc_dimm_built_list(Object *obj, void *opaque)
-{
-    GSList **list = opaque;
-
-    if (object_dynamic_cast(obj, TYPE_PC_DIMM)) {
-        DeviceState *dev = DEVICE(obj);
-        if (dev->realized) { /* only realized DIMMs matter */
-            *list = g_slist_insert_sorted(*list, dev, pc_dimm_addr_sort);
-        }
-    }
-
-    object_child_foreach(obj, pc_dimm_built_list, opaque);
-    return 0;
-}
-
-uint64_t pc_dimm_get_free_addr(uint64_t address_space_start,
-                               uint64_t address_space_size,
-                               uint64_t *hint, uint64_t align, uint64_t size,
-                               Error **errp)
-{
-    GSList *list = NULL, *item;
-    uint64_t new_addr, ret = 0;
-    uint64_t address_space_end = address_space_start + address_space_size;
-
-    g_assert(QEMU_ALIGN_UP(address_space_start, align) == address_space_start);
-
-    if (!address_space_size) {
-        error_setg(errp, "memory hotplug is not enabled, "
-                         "please add maxmem option");
-        goto out;
-    }
-
-    if (hint && QEMU_ALIGN_UP(*hint, align) != *hint) {
-        error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
-                   align);
-        goto out;
-    }
-
-    if (QEMU_ALIGN_UP(size, align) != size) {
-        error_setg(errp, "backend memory size must be multiple of 0x%"
-                   PRIx64, align);
-        goto out;
-    }
-
-    assert(address_space_end > address_space_start);
-    object_child_foreach(qdev_get_machine(), pc_dimm_built_list, &list);
-
-    if (hint) {
-        new_addr = *hint;
-    } else {
-        new_addr = address_space_start;
-    }
-
-    /* find address range that will fit new DIMM */
-    for (item = list; item; item = g_slist_next(item)) {
-        PCDIMMDevice *dimm = item->data;
-        uint64_t dimm_size = object_property_get_uint(OBJECT(dimm),
-                                                      PC_DIMM_SIZE_PROP,
-                                                      errp);
-        if (errp && *errp) {
-            goto out;
-        }
-
-        if (ranges_overlap(dimm->addr, dimm_size, new_addr, size)) {
-            if (hint) {
-                DeviceState *d = DEVICE(dimm);
-                error_setg(errp, "address range conflicts with '%s'", d->id);
-                goto out;
-            }
-            new_addr = QEMU_ALIGN_UP(dimm->addr + dimm_size, align);
-        }
-    }
-    ret = new_addr;
-
-    if (new_addr < address_space_start) {
-        error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
-                   "] at 0x%" PRIx64, new_addr, size, address_space_start);
-    } else if ((new_addr + size) > address_space_end) {
-        error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
-                   "] beyond 0x%" PRIx64, new_addr, size, address_space_end);
-    }
-
-out:
-    g_slist_free(list);
-    return ret;
-}
-
 static Property pc_dimm_properties[] = {
     DEFINE_PROP_UINT64(PC_DIMM_ADDR_PROP, PCDIMMDevice, addr, 0),
     DEFINE_PROP_UINT32(PC_DIMM_NODE_PROP, PCDIMMDevice, node, 0),
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 7ccdb705b3..7757a49335 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -3041,7 +3041,7 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
     align = memory_region_get_alignment(mr);
     size = memory_region_size(mr);
 
-    pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
+    pc_dimm_memory_plug(dev, align, &local_err);
     if (local_err) {
         goto out;
     }
@@ -3062,7 +3062,7 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
     return;
 
 out_unplug:
-    pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
+    pc_dimm_memory_unplug(dev);
 out:
     error_propagate(errp, local_err);
 }
@@ -3180,9 +3180,6 @@ static sPAPRDIMMState *spapr_recover_pending_dimm_state(sPAPRMachineState *ms,
 void spapr_lmb_release(DeviceState *dev)
 {
     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_hotplug_handler(dev));
-    PCDIMMDevice *dimm = PC_DIMM(dev);
-    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
-    MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
     sPAPRDIMMState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
 
     /* This information will get lost if a migration occurs
@@ -3202,7 +3199,7 @@ void spapr_lmb_release(DeviceState *dev)
      * Now that all the LMBs have been removed by the guest, call the
      * pc-dimm unplug handler to cleanup up the pc-dimm device.
      */
-    pc_dimm_memory_unplug(dev, &spapr->hotplug_memory, mr);
+    pc_dimm_memory_unplug(dev);
     object_unparent(OBJECT(dev));
     spapr_pending_dimm_unplugs_remove(spapr, ds);
 }
diff --git a/include/hw/mem/memory-device.h b/include/hw/mem/memory-device.h
index 3e498b2e61..722620da24 100644
--- a/include/hw/mem/memory-device.h
+++ b/include/hw/mem/memory-device.h
@@ -40,5 +40,9 @@ typedef struct MemoryDeviceClass {
 
 MemoryDeviceInfoList *qmp_memory_device_list(void);
 uint64_t get_plugged_memory_size(void);
+uint64_t memory_device_get_free_addr(uint64_t *hint, uint64_t align,
+                                     uint64_t size, Error **errp);
+void memory_device_plug_region(MemoryRegion *mr, uint64_t addr, Error **errp);
+void memory_device_unplug_region(MemoryRegion *mr);
 
 #endif
diff --git a/include/hw/mem/pc-dimm.h b/include/hw/mem/pc-dimm.h
index 8bda37adab..2e7c2abe35 100644
--- a/include/hw/mem/pc-dimm.h
+++ b/include/hw/mem/pc-dimm.h
@@ -19,7 +19,6 @@
 #include "exec/memory.h"
 #include "sysemu/hostmem.h"
 #include "hw/qdev.h"
-#include "hw/boards.h"
 
 #define TYPE_PC_DIMM "pc-dimm"
 #define PC_DIMM(obj) \
@@ -76,16 +75,7 @@ typedef struct PCDIMMDeviceClass {
     MemoryRegion *(*get_vmstate_memory_region)(PCDIMMDevice *dimm);
 } PCDIMMDeviceClass;
 
-uint64_t pc_dimm_get_free_addr(uint64_t address_space_start,
-                               uint64_t address_space_size,
-                               uint64_t *hint, uint64_t align, uint64_t size,
-                               Error **errp);
-
 int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp);
-
-uint64_t pc_existing_dimms_capacity(Error **errp);
-void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
-                         MemoryRegion *mr, uint64_t align, Error **errp);
-void pc_dimm_memory_unplug(DeviceState *dev, MemoryHotplugState *hpms,
-                           MemoryRegion *mr);
+void pc_dimm_memory_plug(DeviceState *dev, uint64_t align, Error **errp);
+void pc_dimm_memory_unplug(DeviceState *dev);
 #endif
-- 
2.14.3


Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by Igor Mammedov 7 years, 9 months ago
On Fri, 20 Apr 2018 14:34:56 +0200
David Hildenbrand <david@redhat.com> wrote:

> To be able to reuse MemoryDevice logic from other devices besides
> pc-dimm, factor the relevant stuff out into the MemoryDevice code.
> 
> As we don't care about slots for memory devices that are not pc-dimm,
> don't factor that part out.
that's not really true, you still consume KVM and vhost slots (whatever the
device is) whenever you map it into the address space as a RAM memory region.

Also, ram_slots is currently (ab)used as a flag indicating that the user enabled
memory hotplug via the CLI.
 
> Most of this patch just moves checks and logic around. While at it, make
> the code properly detect certain error conditions better (e.g. fragmented
> memory).
I'd suggest splitting the patch into several smaller ones if possible,
especially the parts that do anything more than just moving code around.


> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  hw/i386/pc.c                   |  12 +--
>  hw/mem/memory-device.c         | 162 ++++++++++++++++++++++++++++++++++++
>  hw/mem/pc-dimm.c               | 185 +++--------------------------------------
>  hw/ppc/spapr.c                 |   9 +-
>  include/hw/mem/memory-device.h |   4 +
>  include/hw/mem/pc-dimm.h       |  14 +---
>  6 files changed, 185 insertions(+), 201 deletions(-)
> 
> diff --git a/hw/i386/pc.c b/hw/i386/pc.c
> index fa8862af33..1c25546a0c 100644
> --- a/hw/i386/pc.c
> +++ b/hw/i386/pc.c
> @@ -1711,7 +1711,7 @@ static void pc_dimm_plug(HotplugHandler *hotplug_dev,
>          goto out;
>      }
>  
> -    pc_dimm_memory_plug(dev, &pcms->hotplug_memory, mr, align, &local_err);
> +    pc_dimm_memory_plug(dev, align, &local_err);
Is there a reason why you are dropping the pcms->hotplug_memory argument
and falling back to qdev_get_machine()?

I'd rather see it going in the other direction,
i.e. move hotplug_memory from the PC
machine to MachineState and then pass it down as an argument whenever it's needed.

>      if (local_err) {
>          goto out;
>      }
> @@ -1761,17 +1761,9 @@ static void pc_dimm_unplug(HotplugHandler *hotplug_dev,
>                             DeviceState *dev, Error **errp)
>  {
>      PCMachineState *pcms = PC_MACHINE(hotplug_dev);
> -    PCDIMMDevice *dimm = PC_DIMM(dev);
> -    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
> -    MemoryRegion *mr;
>      HotplugHandlerClass *hhc;
>      Error *local_err = NULL;
>  
> -    mr = ddc->get_memory_region(dimm, &local_err);
> -    if (local_err) {
> -        goto out;
> -    }
> -
>      hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
>      hhc->unplug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
>  
> @@ -1779,7 +1771,7 @@ static void pc_dimm_unplug(HotplugHandler *hotplug_dev,
>          goto out;
>      }
>  
> -    pc_dimm_memory_unplug(dev, &pcms->hotplug_memory, mr);
> +    pc_dimm_memory_unplug(dev);
ditto

>      object_unparent(OBJECT(dev));
>  
>   out:
> diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
> index b860c9c582..b96efa3bf4 100644
> --- a/hw/mem/memory-device.c
> +++ b/hw/mem/memory-device.c
> @@ -15,6 +15,8 @@
>  #include "qapi/error.h"
>  #include "hw/boards.h"
>  #include "qemu/range.h"
> +#include "hw/virtio/vhost.h"
> +#include "sysemu/kvm.h"
>  
>  static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
>  {
> @@ -106,6 +108,166 @@ uint64_t get_plugged_memory_size(void)
>      return size;
>  }
>  
> +static int memory_device_used_region_size_internal(Object *obj, void *opaque)
> +{
> +    uint64_t *size = opaque;
> +
> +    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
> +        DeviceState *dev = DEVICE(obj);
> +        MemoryDeviceState *md = MEMORY_DEVICE(obj);
> +        MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);
> +
> +        if (dev->realized) {
> +            *size += mdc->get_region_size(md, &error_abort);
> +        }
> +    }
> +
> +    object_child_foreach(obj, memory_device_used_region_size_internal, opaque);
> +    return 0;
> +}
> +
> +static uint64_t memory_device_used_region_size(void)
> +{
> +    uint64_t size = 0;
> +
> +    memory_device_used_region_size_internal(qdev_get_machine(), &size);
> +
> +    return size;
> +}
> +
> +uint64_t memory_device_get_free_addr(uint64_t *hint, uint64_t align,
> +                                     uint64_t size, Error **errp)
I'd suggest reworking pc_dimm_memory_plug/pc_dimm_get_free_addr first,
namely most of the stuff they do, like checks and assigning default
values, should go to a pre_plug (pre-realize) handler, and then only the
actual mapping is left for the plug (after realize) handler to deal with:

    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);               
    vmstate_register_ram(vmstate_mr, dev); 
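
For illustration, the pre_plug side could then look roughly like this (the
pc_dimm_pre_plug() name and the exact split are only assumptions for the
discussion, not something this patch introduces):

    /* hypothetical pre_plug step: validate and assign the address before
     * the device is realized */
    static void pc_dimm_pre_plug(DeviceState *dev, uint64_t align, Error **errp)
    {
        PCDIMMDevice *dimm = PC_DIMM(dev);
        PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
        Error *local_err = NULL;
        MemoryRegion *mr;
        uint64_t addr;

        mr = ddc->get_memory_region(dimm, &local_err);
        if (local_err) {
            goto out;
        }
        addr = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                        &local_err);
        if (local_err) {
            goto out;
        }
        addr = memory_device_get_free_addr(!addr ? NULL : &addr, align,
                                           memory_region_size(mr), &local_err);
        if (local_err) {
            goto out;
        }
        /* the assigned address is all the plug handler needs later on */
        object_property_set_uint(OBJECT(dev), addr, PC_DIMM_ADDR_PROP,
                                 &local_err);
    out:
        error_propagate(errp, local_err);
    }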

> +{
> +    const uint64_t used_region_size = memory_device_used_region_size();
> +    uint64_t address_space_start, address_space_end;
> +    MachineState *machine = MACHINE(qdev_get_machine());
> +    MachineClass *mc = MACHINE_GET_CLASS(machine);
> +    MemoryHotplugState *hpms;
> +    GSList *list = NULL, *item;
> +    uint64_t new_addr = 0;
> +
> +    if (!mc->get_memory_hotplug_state) {
> +        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
> +                         "supported by the machine");
> +        return 0;
> +    }
> +
> +    hpms = mc->get_memory_hotplug_state(machine);
> +    if (!hpms || !memory_region_size(&hpms->mr)) {
> +        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
> +                         "enabled, please specify the maxmem option");
> +        return 0;
> +    }
The above 2 checks are repeated multiple times; a separate helper to do them
would be better.

PS:
there is no need to check for this every time a device is plugged;
doing this check once (at machine_init time) is sufficient.
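
For instance, a small helper along these lines (the name is only illustrative,
it just wraps the two checks from this patch) could be shared by the callers:

    static MemoryHotplugState *memory_device_get_hotplug_state(MachineState *machine,
                                                                Error **errp)
    {
        MachineClass *mc = MACHINE_GET_CLASS(machine);
        MemoryHotplugState *hpms;

        if (!mc->get_memory_hotplug_state) {
            error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
                             "supported by the machine");
            return NULL;
        }

        hpms = mc->get_memory_hotplug_state(machine);
        if (!hpms || !memory_region_size(&hpms->mr)) {
            error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
                             "enabled, please specify the maxmem option");
            return NULL;
        }
        return hpms;
    }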

> +    address_space_start = hpms->base;
> +    address_space_end = address_space_start + memory_region_size(&hpms->mr);
> +    g_assert(address_space_end >= address_space_start);
> +
> +    if (used_region_size + size > machine->maxram_size - machine->ram_size) {
> +        error_setg(errp, "not enough space, currently 0x%" PRIx64
> +                   " in use of total hot pluggable 0x" RAM_ADDR_FMT,
> +                   used_region_size, machine->maxram_size - machine->ram_size);
> +        return 0;
> +    }
> +
> +    if (hint && QEMU_ALIGN_UP(*hint, align) != *hint) {
> +        error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
> +                   align);
> +        return 0;
> +    }
> +
> +    if (QEMU_ALIGN_UP(size, align) != size) {
> +        error_setg(errp, "backend memory size must be multiple of 0x%"
> +                   PRIx64, align);
> +        return 0;
> +    }
> +
> +    if (hint) {
> +        new_addr = *hint;
> +        if (new_addr < address_space_start) {
> +            error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
> +                       "] at 0x%" PRIx64, new_addr, size, address_space_start);
> +            return 0;
> +        } else if ((new_addr + size) > address_space_end) {
> +            error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
> +                       "] beyond 0x%" PRIx64, new_addr, size,
> +                       address_space_end);
> +            return 0;
> +        }
> +    } else {
> +        new_addr = address_space_start;
> +    }
> +
> +    /* find address range that will fit new memory device */
> +    object_child_foreach(qdev_get_machine(), memory_device_built_list, &list);
> +    for (item = list; item; item = g_slist_next(item)) {
> +        MemoryDeviceState *md = item->data;
> +        MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(OBJECT(md));
> +        uint64_t md_size, md_addr;
> +
> +        md_addr = mdc->get_addr(md);
> +        md_size = mdc->get_region_size(md, errp);
> +        if (*errp) {
> +            goto out;
> +        }
> +
> +        if (ranges_overlap(md_addr, md_size, new_addr, size)) {
> +            if (hint) {
> +                DeviceState *d = DEVICE(md);
> +                error_setg(errp, "address range conflicts with '%s'", d->id);
> +                goto out;
> +            }
> +            new_addr = QEMU_ALIGN_UP(md_addr + md_size, align);
> +        }
> +    }
> +
> +    if (new_addr + size > address_space_end) {
> +        error_setg(errp, "could not find position in guest address space for "
> +                   "memory device - memory fragmented due to alignments");
> +        goto out;
> +    }
> +out:
> +    g_slist_free(list);
> +    return new_addr;
> +}
> +
> +void memory_device_plug_region(MemoryRegion *mr, uint64_t addr, Error **errp)
> +{
[...]

> +    /* we will need a new memory slot for kvm and vhost */
> +    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
> +        error_setg(errp, "hypervisor has no free memory slots left");
> +        return;
> +    }
> +    if (!vhost_has_free_slot()) {
> +        error_setg(errp, "a used vhost backend has no free memory slots left");
> +        return;
> +    }
move these checks to pre_plug time

> +
> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
missing vmstate registration?

> +}
> +
> +void memory_device_unplug_region(MemoryRegion *mr)
> +{
> +    MachineState *machine = MACHINE(qdev_get_machine());
> +    MachineClass *mc = MACHINE_GET_CLASS(machine);
> +    MemoryHotplugState *hpms;
> +
> +    /* we expect a previous call to memory_device_get_free_addr() */
> +    g_assert(mc->get_memory_hotplug_state);
> +    hpms = mc->get_memory_hotplug_state(machine);
> +    g_assert(hpms);
> +
> +    memory_region_del_subregion(&hpms->mr, mr);
> +}
> +
>  static const TypeInfo memory_device_info = {
>      .name          = TYPE_MEMORY_DEVICE,
>      .parent        = TYPE_INTERFACE,
> diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
> index 1dbf699e02..cf23ab5d76 100644
> --- a/hw/mem/pc-dimm.c
> +++ b/hw/mem/pc-dimm.c
> @@ -25,19 +25,10 @@
>  #include "qapi/error.h"
>  #include "qemu/config-file.h"
>  #include "qapi/visitor.h"
> -#include "qemu/range.h"
>  #include "sysemu/numa.h"
> -#include "sysemu/kvm.h"
>  #include "trace.h"
> -#include "hw/virtio/vhost.h"
>  
> -typedef struct pc_dimms_capacity {
> -     uint64_t size;
> -     Error    **errp;
> -} pc_dimms_capacity;
> -
> -void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
> -                         MemoryRegion *mr, uint64_t align, Error **errp)
> +void pc_dimm_memory_plug(DeviceState *dev, uint64_t align, Error **errp)
>  {
>      int slot;
>      MachineState *machine = MACHINE(qdev_get_machine());
> @@ -45,37 +36,26 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
>      PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
>      MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm);
>      Error *local_err = NULL;
> -    uint64_t existing_dimms_capacity = 0;
> +    MemoryRegion *mr;
>      uint64_t addr;
>  
> -    addr = object_property_get_uint(OBJECT(dimm),
> -                                    PC_DIMM_ADDR_PROP, &local_err);
> +    mr = ddc->get_memory_region(dimm, &local_err);
>      if (local_err) {
>          goto out;
>      }
>  
> -    addr = pc_dimm_get_free_addr(hpms->base,
> -                                 memory_region_size(&hpms->mr),
> -                                 !addr ? NULL : &addr, align,
> -                                 memory_region_size(mr), &local_err);
> +    addr = object_property_get_uint(OBJECT(dimm),
> +                                    PC_DIMM_ADDR_PROP, &local_err);
>      if (local_err) {
>          goto out;
>      }
>  
> -    existing_dimms_capacity = pc_existing_dimms_capacity(&local_err);
> +    addr = memory_device_get_free_addr(!addr ? NULL : &addr, align,
> +                                       memory_region_size(mr), &local_err);
>      if (local_err) {
>          goto out;
>      }
>  
> -    if (existing_dimms_capacity + memory_region_size(mr) >
> -        machine->maxram_size - machine->ram_size) {
> -        error_setg(&local_err, "not enough space, currently 0x%" PRIx64
> -                   " in use of total hot pluggable 0x" RAM_ADDR_FMT,
> -                   existing_dimms_capacity,
> -                   machine->maxram_size - machine->ram_size);
> -        goto out;
> -    }
> -
>      object_property_set_uint(OBJECT(dev), addr, PC_DIMM_ADDR_PROP, &local_err);
>      if (local_err) {
>          goto out;
> @@ -98,67 +78,27 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
>      }
>      trace_mhp_pc_dimm_assigned_slot(slot);
>  
> -    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
> -        error_setg(&local_err, "hypervisor has no free memory slots left");
> -        goto out;
> -    }
> -
> -    if (!vhost_has_free_slot()) {
> -        error_setg(&local_err, "a used vhost backend has no free"
> -                               " memory slots left");
> +    memory_device_plug_region(mr, addr, &local_err);
> +    if (local_err) {
>          goto out;
>      }
> -
> -    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
>      vmstate_register_ram(vmstate_mr, dev);
>  
>  out:
>      error_propagate(errp, local_err);
>  }
>  
> -void pc_dimm_memory_unplug(DeviceState *dev, MemoryHotplugState *hpms,
> -                           MemoryRegion *mr)
> +void pc_dimm_memory_unplug(DeviceState *dev)
>  {
>      PCDIMMDevice *dimm = PC_DIMM(dev);
>      PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
>      MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm);
> +    MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
>  
> -    memory_region_del_subregion(&hpms->mr, mr);
> +    memory_device_unplug_region(mr);
>      vmstate_unregister_ram(vmstate_mr, dev);
>  }
>  
> -static int pc_existing_dimms_capacity_internal(Object *obj, void *opaque)
> -{
> -    pc_dimms_capacity *cap = opaque;
> -    uint64_t *size = &cap->size;
> -
> -    if (object_dynamic_cast(obj, TYPE_PC_DIMM)) {
> -        DeviceState *dev = DEVICE(obj);
> -
> -        if (dev->realized) {
> -            (*size) += object_property_get_uint(obj, PC_DIMM_SIZE_PROP,
> -                cap->errp);
> -        }
> -
> -        if (cap->errp && *cap->errp) {
> -            return 1;
> -        }
> -    }
> -    object_child_foreach(obj, pc_existing_dimms_capacity_internal, opaque);
> -    return 0;
> -}
> -
> -uint64_t pc_existing_dimms_capacity(Error **errp)
> -{
> -    pc_dimms_capacity cap;
> -
> -    cap.size = 0;
> -    cap.errp = errp;
> -
> -    pc_existing_dimms_capacity_internal(qdev_get_machine(), &cap);
> -    return cap.size;
> -}
> -
>  static int pc_dimm_slot2bitmap(Object *obj, void *opaque)
>  {
>      unsigned long *bitmap = opaque;
> @@ -205,107 +145,6 @@ out:
>      return slot;
>  }
>  
> -static gint pc_dimm_addr_sort(gconstpointer a, gconstpointer b)
> -{
> -    PCDIMMDevice *x = PC_DIMM(a);
> -    PCDIMMDevice *y = PC_DIMM(b);
> -    Int128 diff = int128_sub(int128_make64(x->addr), int128_make64(y->addr));
> -
> -    if (int128_lt(diff, int128_zero())) {
> -        return -1;
> -    } else if (int128_gt(diff, int128_zero())) {
> -        return 1;
> -    }
> -    return 0;
> -}
> -
> -static int pc_dimm_built_list(Object *obj, void *opaque)
> -{
> -    GSList **list = opaque;
> -
> -    if (object_dynamic_cast(obj, TYPE_PC_DIMM)) {
> -        DeviceState *dev = DEVICE(obj);
> -        if (dev->realized) { /* only realized DIMMs matter */
> -            *list = g_slist_insert_sorted(*list, dev, pc_dimm_addr_sort);
> -        }
> -    }
> -
> -    object_child_foreach(obj, pc_dimm_built_list, opaque);
> -    return 0;
> -}
> -
> -uint64_t pc_dimm_get_free_addr(uint64_t address_space_start,
> -                               uint64_t address_space_size,
> -                               uint64_t *hint, uint64_t align, uint64_t size,
> -                               Error **errp)
> -{
> -    GSList *list = NULL, *item;
> -    uint64_t new_addr, ret = 0;
> -    uint64_t address_space_end = address_space_start + address_space_size;
> -
> -    g_assert(QEMU_ALIGN_UP(address_space_start, align) == address_space_start);
> -
> -    if (!address_space_size) {
> -        error_setg(errp, "memory hotplug is not enabled, "
> -                         "please add maxmem option");
> -        goto out;
> -    }
> -
> -    if (hint && QEMU_ALIGN_UP(*hint, align) != *hint) {
> -        error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
> -                   align);
> -        goto out;
> -    }
> -
> -    if (QEMU_ALIGN_UP(size, align) != size) {
> -        error_setg(errp, "backend memory size must be multiple of 0x%"
> -                   PRIx64, align);
> -        goto out;
> -    }
> -
> -    assert(address_space_end > address_space_start);
> -    object_child_foreach(qdev_get_machine(), pc_dimm_built_list, &list);
> -
> -    if (hint) {
> -        new_addr = *hint;
> -    } else {
> -        new_addr = address_space_start;
> -    }
> -
> -    /* find address range that will fit new DIMM */
> -    for (item = list; item; item = g_slist_next(item)) {
> -        PCDIMMDevice *dimm = item->data;
> -        uint64_t dimm_size = object_property_get_uint(OBJECT(dimm),
> -                                                      PC_DIMM_SIZE_PROP,
> -                                                      errp);
> -        if (errp && *errp) {
> -            goto out;
> -        }
> -
> -        if (ranges_overlap(dimm->addr, dimm_size, new_addr, size)) {
> -            if (hint) {
> -                DeviceState *d = DEVICE(dimm);
> -                error_setg(errp, "address range conflicts with '%s'", d->id);
> -                goto out;
> -            }
> -            new_addr = QEMU_ALIGN_UP(dimm->addr + dimm_size, align);
> -        }
> -    }
> -    ret = new_addr;
> -
> -    if (new_addr < address_space_start) {
> -        error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
> -                   "] at 0x%" PRIx64, new_addr, size, address_space_start);
> -    } else if ((new_addr + size) > address_space_end) {
> -        error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
> -                   "] beyond 0x%" PRIx64, new_addr, size, address_space_end);
> -    }
> -
> -out:
> -    g_slist_free(list);
> -    return ret;
> -}
> -
>  static Property pc_dimm_properties[] = {
>      DEFINE_PROP_UINT64(PC_DIMM_ADDR_PROP, PCDIMMDevice, addr, 0),
>      DEFINE_PROP_UINT32(PC_DIMM_NODE_PROP, PCDIMMDevice, node, 0),
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index 7ccdb705b3..7757a49335 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -3041,7 +3041,7 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
>      align = memory_region_get_alignment(mr);
>      size = memory_region_size(mr);
>  
> -    pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
> +    pc_dimm_memory_plug(dev, align, &local_err);
>      if (local_err) {
>          goto out;
>      }
> @@ -3062,7 +3062,7 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
>      return;
>  
>  out_unplug:
> -    pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
> +    pc_dimm_memory_unplug(dev);
>  out:
>      error_propagate(errp, local_err);
>  }
> @@ -3180,9 +3180,6 @@ static sPAPRDIMMState *spapr_recover_pending_dimm_state(sPAPRMachineState *ms,
>  void spapr_lmb_release(DeviceState *dev)
>  {
>      sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_hotplug_handler(dev));
> -    PCDIMMDevice *dimm = PC_DIMM(dev);
> -    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
> -    MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
>      sPAPRDIMMState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
>  
>      /* This information will get lost if a migration occurs
> @@ -3202,7 +3199,7 @@ void spapr_lmb_release(DeviceState *dev)
>       * Now that all the LMBs have been removed by the guest, call the
>       * pc-dimm unplug handler to cleanup up the pc-dimm device.
>       */
> -    pc_dimm_memory_unplug(dev, &spapr->hotplug_memory, mr);
> +    pc_dimm_memory_unplug(dev);
>      object_unparent(OBJECT(dev));
>      spapr_pending_dimm_unplugs_remove(spapr, ds);
>  }
> diff --git a/include/hw/mem/memory-device.h b/include/hw/mem/memory-device.h
> index 3e498b2e61..722620da24 100644
> --- a/include/hw/mem/memory-device.h
> +++ b/include/hw/mem/memory-device.h
> @@ -40,5 +40,9 @@ typedef struct MemoryDeviceClass {
>  
>  MemoryDeviceInfoList *qmp_memory_device_list(void);
>  uint64_t get_plugged_memory_size(void);
> +uint64_t memory_device_get_free_addr(uint64_t *hint, uint64_t align,
> +                                     uint64_t size, Error **errp);
> +void memory_device_plug_region(MemoryRegion *mr, uint64_t addr, Error **errp);
> +void memory_device_unplug_region(MemoryRegion *mr);
>  
>  #endif
> diff --git a/include/hw/mem/pc-dimm.h b/include/hw/mem/pc-dimm.h
> index 8bda37adab..2e7c2abe35 100644
> --- a/include/hw/mem/pc-dimm.h
> +++ b/include/hw/mem/pc-dimm.h
> @@ -19,7 +19,6 @@
>  #include "exec/memory.h"
>  #include "sysemu/hostmem.h"
>  #include "hw/qdev.h"
> -#include "hw/boards.h"
>  
>  #define TYPE_PC_DIMM "pc-dimm"
>  #define PC_DIMM(obj) \
> @@ -76,16 +75,7 @@ typedef struct PCDIMMDeviceClass {
>      MemoryRegion *(*get_vmstate_memory_region)(PCDIMMDevice *dimm);
>  } PCDIMMDeviceClass;
>  
> -uint64_t pc_dimm_get_free_addr(uint64_t address_space_start,
> -                               uint64_t address_space_size,
> -                               uint64_t *hint, uint64_t align, uint64_t size,
> -                               Error **errp);
> -
>  int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp);
> -
> -uint64_t pc_existing_dimms_capacity(Error **errp);
> -void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
> -                         MemoryRegion *mr, uint64_t align, Error **errp);
> -void pc_dimm_memory_unplug(DeviceState *dev, MemoryHotplugState *hpms,
> -                           MemoryRegion *mr);
> +void pc_dimm_memory_plug(DeviceState *dev, uint64_t align, Error **errp);
> +void pc_dimm_memory_unplug(DeviceState *dev);
>  #endif


Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by David Hildenbrand 7 years, 9 months ago
On 23.04.2018 14:19, Igor Mammedov wrote:
> On Fri, 20 Apr 2018 14:34:56 +0200
> David Hildenbrand <david@redhat.com> wrote:
> 
>> To be able to reuse MemoryDevice logic from other devices besides
>> pc-dimm, factor the relevant stuff out into the MemoryDevice code.
>>
>> As we don't care about slots for memory devices that are not pc-dimm,
>> don't factor that part out.
> that's not really true, you still consume kvm and vhost slots (whatever it is)
> whenever you map it into address space as ram memory region.

Let me rephrase: ACPI slots are not of interest. That user-visible part
is not needed for other memory devices.

KVM and vhost slots are different (those are just memory regions; we don't care
about which specific slot is taken).

> 
> Also ram_slots currently are (ab)used as flag that user enabled memory
> hotplug via CLI.

Yes, I have a patch for this :)

>  
>> Most of this patch just moves checks and logic around. While at it, make
>> the code properly detect certain error conditions better (e.g. fragmented
>> memory).
> I'd suggest splitting patch in several smaller ones if possible,
> especially parts that do anything more than just moving code around.

I tried to do it in smaller steps but most of it turned out to just
introduce and delete temporary code. I will have a look at whether this can
be done once we have a common understanding of what the end result should
look like.

> 
> 
>> Signed-off-by: David Hildenbrand <david@redhat.com>
>> ---
>>  hw/i386/pc.c                   |  12 +--
>>  hw/mem/memory-device.c         | 162 ++++++++++++++++++++++++++++++++++++
>>  hw/mem/pc-dimm.c               | 185 +++--------------------------------------
>>  hw/ppc/spapr.c                 |   9 +-
>>  include/hw/mem/memory-device.h |   4 +
>>  include/hw/mem/pc-dimm.h       |  14 +---
>>  6 files changed, 185 insertions(+), 201 deletions(-)
>>
>> diff --git a/hw/i386/pc.c b/hw/i386/pc.c
>> index fa8862af33..1c25546a0c 100644
>> --- a/hw/i386/pc.c
>> +++ b/hw/i386/pc.c
>> @@ -1711,7 +1711,7 @@ static void pc_dimm_plug(HotplugHandler *hotplug_dev,
>>          goto out;
>>      }
>>  
>> -    pc_dimm_memory_plug(dev, &pcms->hotplug_memory, mr, align, &local_err);
>> +    pc_dimm_memory_plug(dev, align, &local_err);
> Is there a reason why you are dropping pcms->hotplug_memory argument
> and fall back to qdev_get_machine()?

Yes, because we
a) access the machine internally either way (a couple of times even).
b) this only works if we have a hotplug handler on the machine (esp. not
for virtio devices). Otherwise we have to get the machine from the
virtio realize function - also ugly.

> 
> I'd rather see it going other direction,
> i.e. move hotplug_memory from PC
> machine to MachineState and then pass it down as argument whenever it's needed.

As said, it's ugly for virtio devices. And I don't see a benefit, since we
already access the machine internally either way.

> 
>>      if (local_err) {
>>          goto out;
>>      }
>> @@ -1761,17 +1761,9 @@ static void pc_dimm_unplug(HotplugHandler *hotplug_dev,
>>                             DeviceState *dev, Error **errp)
>>  {
>>      PCMachineState *pcms = PC_MACHINE(hotplug_dev);
>> -    PCDIMMDevice *dimm = PC_DIMM(dev);
>> -    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
>> -    MemoryRegion *mr;
>>      HotplugHandlerClass *hhc;
>>      Error *local_err = NULL;
>>  
>> -    mr = ddc->get_memory_region(dimm, &local_err);
>> -    if (local_err) {
>> -        goto out;
>> -    }
>> -
>>      hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
>>      hhc->unplug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
>>  
>> @@ -1779,7 +1771,7 @@ static void pc_dimm_unplug(HotplugHandler *hotplug_dev,
>>          goto out;
>>      }
>>  
>> -    pc_dimm_memory_unplug(dev, &pcms->hotplug_memory, mr);
>> +    pc_dimm_memory_unplug(dev);
> ditto

(and I still think it looks cleaner, but we can discuss)

> 
>>      object_unparent(OBJECT(dev));
>>  
>>   out:
>> diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
>> index b860c9c582..b96efa3bf4 100644
>> --- a/hw/mem/memory-device.c
>> +++ b/hw/mem/memory-device.c
>> @@ -15,6 +15,8 @@
>>  #include "qapi/error.h"
>>  #include "hw/boards.h"
>>  #include "qemu/range.h"
>> +#include "hw/virtio/vhost.h"
>> +#include "sysemu/kvm.h"
>>  
>>  static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
>>  {
>> @@ -106,6 +108,166 @@ uint64_t get_plugged_memory_size(void)
>>      return size;
>>  }
>>  
>> +static int memory_device_used_region_size_internal(Object *obj, void *opaque)
>> +{
>> +    uint64_t *size = opaque;
>> +
>> +    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
>> +        DeviceState *dev = DEVICE(obj);
>> +        MemoryDeviceState *md = MEMORY_DEVICE(obj);
>> +        MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);
>> +
>> +        if (dev->realized) {
>> +            *size += mdc->get_region_size(md, &error_abort);
>> +        }
>> +    }
>> +
>> +    object_child_foreach(obj, memory_device_used_region_size_internal, opaque);
>> +    return 0;
>> +}
>> +
>> +static uint64_t memory_device_used_region_size(void)
>> +{
>> +    uint64_t size = 0;
>> +
>> +    memory_device_used_region_size_internal(qdev_get_machine(), &size);
>> +
>> +    return size;
>> +}
>> +
>> +uint64_t memory_device_get_free_addr(uint64_t *hint, uint64_t align,
>> +                                     uint64_t size, Error **errp)
> I'd suggest to pc_dimm_memory_plug/pc_dimm_get_free_addr first,
> namely most of the stuff it does like checks and assigning default
> values should go to pre_plug (pre realize) handler and then only
> actual mapping is left for plug (after realize) handler to deal with:
> 

Can you elaborate on what you mean by pre-plug? If this is about the pre_plug
handler of the (machine) hotplug handler, it might be problematic for
virtio devices.

>     memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);               
>     vmstate_register_ram(vmstate_mr, dev); 
> 
>> +{
>> +    const uint64_t used_region_size = memory_device_used_region_size();
>> +    uint64_t address_space_start, address_space_end;
>> +    MachineState *machine = MACHINE(qdev_get_machine());
>> +    MachineClass *mc = MACHINE_GET_CLASS(machine);
>> +    MemoryHotplugState *hpms;
>> +    GSList *list = NULL, *item;
>> +    uint64_t new_addr = 0;
>> +
>> +    if (!mc->get_memory_hotplug_state) {
>> +        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
>> +                         "supported by the machine");
>> +        return 0;
>> +    }
>> +
>> +    hpms = mc->get_memory_hotplug_state(machine);
>> +    if (!hpms || !memory_region_size(&hpms->mr)) {
>> +        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
>> +                         "enabled, please specify the maxmem option");
>> +        return 0;
>> +    }
> above 2 checks are repeated multiple times separate helper to do it
> would be better.

In the current code there is only one place left (namely here).

> 
> PS:
> there is no need to check for it every time device is plugged,
> doing this check once (at machine_init time) is sufficient.

Can you elaborate? There has to be one central place where we check if
we can hotplug a memory device. E.g. virtio devices don't go via the
machine hotplug handler.

> 
>> +    address_space_start = hpms->base;
>> +    address_space_end = address_space_start + memory_region_size(&hpms->mr);
>> +    g_assert(address_space_end >= address_space_start);
>> +
>> +    if (used_region_size + size > machine->maxram_size - machine->ram_size) {
>> +        error_setg(errp, "not enough space, currently 0x%" PRIx64
>> +                   " in use of total hot pluggable 0x" RAM_ADDR_FMT,
>> +                   used_region_size, machine->maxram_size - machine->ram_size);
>> +        return 0;
>> +    }
>> +
>> +    if (hint && QEMU_ALIGN_UP(*hint, align) != *hint) {
>> +        error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
>> +                   align);
>> +        return 0;
>> +    }
>> +
>> +    if (QEMU_ALIGN_UP(size, align) != size) {
>> +        error_setg(errp, "backend memory size must be multiple of 0x%"
>> +                   PRIx64, align);
>> +        return 0;
>> +    }
>> +
>> +    if (hint) {
>> +        new_addr = *hint;
>> +        if (new_addr < address_space_start) {
>> +            error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
>> +                       "] at 0x%" PRIx64, new_addr, size, address_space_start);
>> +            return 0;
>> +        } else if ((new_addr + size) > address_space_end) {
>> +            error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
>> +                       "] beyond 0x%" PRIx64, new_addr, size,
>> +                       address_space_end);
>> +            return 0;
>> +        }
>> +    } else {
>> +        new_addr = address_space_start;
>> +    }
>> +
>> +    /* find address range that will fit new memory device */
>> +    object_child_foreach(qdev_get_machine(), memory_device_built_list, &list);
>> +    for (item = list; item; item = g_slist_next(item)) {
>> +        MemoryDeviceState *md = item->data;
>> +        MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(OBJECT(md));
>> +        uint64_t md_size, md_addr;
>> +
>> +        md_addr = mdc->get_addr(md);
>> +        md_size = mdc->get_region_size(md, errp);
>> +        if (*errp) {
>> +            goto out;
>> +        }
>> +
>> +        if (ranges_overlap(md_addr, md_size, new_addr, size)) {
>> +            if (hint) {
>> +                DeviceState *d = DEVICE(md);
>> +                error_setg(errp, "address range conflicts with '%s'", d->id);
>> +                goto out;
>> +            }
>> +            new_addr = QEMU_ALIGN_UP(md_addr + md_size, align);
>> +        }
>> +    }
>> +
>> +    if (new_addr + size > address_space_end) {
>> +        error_setg(errp, "could not find position in guest address space for "
>> +                   "memory device - memory fragmented due to alignments");
>> +        goto out;
>> +    }
>> +out:
>> +    g_slist_free(list);
>> +    return new_addr;
>> +}
>> +
>> +void memory_device_plug_region(MemoryRegion *mr, uint64_t addr, Error **errp)
>> +{
> [...]
> 
>> +    /* we will need a new memory slot for kvm and vhost */
>> +    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
>> +        error_setg(errp, "hypervisor has no free memory slots left");
>> +        return;
>> +    }
>> +    if (!vhost_has_free_slot()) {
>> +        error_setg(errp, "a used vhost backend has no free memory slots left");
>> +        return;
>> +    }
> move these checks to pre_plug time

That would then be a different flow than we have right now. But it sounds
sane.


-- 

Thanks,

David / dhildenb

Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by Igor Mammedov 7 years, 9 months ago
On Mon, 23 Apr 2018 14:44:34 +0200
David Hildenbrand <david@redhat.com> wrote:

> On 23.04.2018 14:19, Igor Mammedov wrote:
> > On Fri, 20 Apr 2018 14:34:56 +0200
> > David Hildenbrand <david@redhat.com> wrote:
Considering v4 is queued, I'm dropping the points that are no longer relevant
at this point. Wrt virtio, I'll elaborate more in my reply to Pankaj.

[...]

> >> diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
> >> index b860c9c582..b96efa3bf4 100644
> >> --- a/hw/mem/memory-device.c
> >> +++ b/hw/mem/memory-device.c
> >> @@ -15,6 +15,8 @@
> >>  #include "qapi/error.h"
> >>  #include "hw/boards.h"
> >>  #include "qemu/range.h"
> >> +#include "hw/virtio/vhost.h"
> >> +#include "sysemu/kvm.h"
> >>  
> >>  static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
> >>  {
> >> @@ -106,6 +108,166 @@ uint64_t get_plugged_memory_size(void)
> >>      return size;
> >>  }
> >>  
> >> +static int memory_device_used_region_size_internal(Object *obj, void *opaque)
> >> +{
> >> +    uint64_t *size = opaque;
> >> +
> >> +    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
> >> +        DeviceState *dev = DEVICE(obj);
> >> +        MemoryDeviceState *md = MEMORY_DEVICE(obj);
> >> +        MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);
> >> +
> >> +        if (dev->realized) {
> >> +            *size += mdc->get_region_size(md, &error_abort);
> >> +        }
> >> +    }
> >> +
> >> +    object_child_foreach(obj, memory_device_used_region_size_internal, opaque);
> >> +    return 0;
> >> +}
> >> +
> >> +static uint64_t memory_device_used_region_size(void)
> >> +{
> >> +    uint64_t size = 0;
> >> +
> >> +    memory_device_used_region_size_internal(qdev_get_machine(), &size);
> >> +
> >> +    return size;
> >> +}
> >> +
> >> +uint64_t memory_device_get_free_addr(uint64_t *hint, uint64_t align,
> >> +                                     uint64_t size, Error **errp)  
> > I'd suggest to pc_dimm_memory_plug/pc_dimm_get_free_addr first,
> > namely most of the stuff it does like checks and assigning default
> > values should go to pre_plug (pre realize) handler and then only
> > actual mapping is left for plug (after realize) handler to deal with:
> >   
> 
> Can you elaborate what you mean by pre-plug? If this is about pre plug
> handler of the (machine) hotplug handler, it might be problematic for
> virtio devices.
yes, something along these lines: c871bc70b



Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by David Hildenbrand 7 years, 9 months ago
On 24.04.2018 15:28, Igor Mammedov wrote:
> On Mon, 23 Apr 2018 14:44:34 +0200
> David Hildenbrand <david@redhat.com> wrote:
> 
>> On 23.04.2018 14:19, Igor Mammedov wrote:
>>> On Fri, 20 Apr 2018 14:34:56 +0200
>>> David Hildenbrand <david@redhat.com> wrote:
> considering v4 queued, I'm dropping mostly nor relevant points at this point.
> wrt, virtio I'll elaborate more in reply to Pankaj
> 
> [...]
> 
>>>> diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
>>>> index b860c9c582..b96efa3bf4 100644
>>>> --- a/hw/mem/memory-device.c
>>>> +++ b/hw/mem/memory-device.c
>>>> @@ -15,6 +15,8 @@
>>>>  #include "qapi/error.h"
>>>>  #include "hw/boards.h"
>>>>  #include "qemu/range.h"
>>>> +#include "hw/virtio/vhost.h"
>>>> +#include "sysemu/kvm.h"
>>>>  
>>>>  static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
>>>>  {
>>>> @@ -106,6 +108,166 @@ uint64_t get_plugged_memory_size(void)
>>>>      return size;
>>>>  }
>>>>  
>>>> +static int memory_device_used_region_size_internal(Object *obj, void *opaque)
>>>> +{
>>>> +    uint64_t *size = opaque;
>>>> +
>>>> +    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
>>>> +        DeviceState *dev = DEVICE(obj);
>>>> +        MemoryDeviceState *md = MEMORY_DEVICE(obj);
>>>> +        MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);
>>>> +
>>>> +        if (dev->realized) {
>>>> +            *size += mdc->get_region_size(md, &error_abort);
>>>> +        }
>>>> +    }
>>>> +
>>>> +    object_child_foreach(obj, memory_device_used_region_size_internal, opaque);
>>>> +    return 0;
>>>> +}
>>>> +
>>>> +static uint64_t memory_device_used_region_size(void)
>>>> +{
>>>> +    uint64_t size = 0;
>>>> +
>>>> +    memory_device_used_region_size_internal(qdev_get_machine(), &size);
>>>> +
>>>> +    return size;
>>>> +}
>>>> +
>>>> +uint64_t memory_device_get_free_addr(uint64_t *hint, uint64_t align,
>>>> +                                     uint64_t size, Error **errp)  
>>> I'd suggest to pc_dimm_memory_plug/pc_dimm_get_free_addr first,
>>> namely most of the stuff it does like checks and assigning default
>>> values should go to pre_plug (pre realize) handler and then only
>>> actual mapping is left for plug (after realize) handler to deal with:
>>>   
>>
>> Can you elaborate what you mean by pre-plug? If this is about pre plug
>> handler of the (machine) hotplug handler, it might be problematic for
>> virtio devices.
> yes, something along these lines: c871bc70b
> 
> 

Yes, we can factor that out (at least) for pc-dimm later on easily.
Seems to be just about moving a couple of calls.

-- 

Thanks,

David / dhildenb

Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by Igor Mammedov 7 years, 9 months ago
On Tue, 24 Apr 2018 15:39:30 +0200
David Hildenbrand <david@redhat.com> wrote:

> On 24.04.2018 15:28, Igor Mammedov wrote:
> > On Mon, 23 Apr 2018 14:44:34 +0200
> > David Hildenbrand <david@redhat.com> wrote:
> >   
> >> On 23.04.2018 14:19, Igor Mammedov wrote:  
> >>> On Fri, 20 Apr 2018 14:34:56 +0200
> >>> David Hildenbrand <david@redhat.com> wrote:  
> > considering v4 queued, I'm dropping mostly nor relevant points at this point.
> > wrt, virtio I'll elaborate more in reply to Pankaj
> > 
> > [...]
> >   
> >>>> diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
> >>>> index b860c9c582..b96efa3bf4 100644
> >>>> --- a/hw/mem/memory-device.c
> >>>> +++ b/hw/mem/memory-device.c
> >>>> @@ -15,6 +15,8 @@
> >>>>  #include "qapi/error.h"
> >>>>  #include "hw/boards.h"
> >>>>  #include "qemu/range.h"
> >>>> +#include "hw/virtio/vhost.h"
> >>>> +#include "sysemu/kvm.h"
> >>>>  
> >>>>  static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
> >>>>  {
> >>>> @@ -106,6 +108,166 @@ uint64_t get_plugged_memory_size(void)
> >>>>      return size;
> >>>>  }
> >>>>  
> >>>> +static int memory_device_used_region_size_internal(Object *obj, void *opaque)
> >>>> +{
> >>>> +    uint64_t *size = opaque;
> >>>> +
> >>>> +    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
> >>>> +        DeviceState *dev = DEVICE(obj);
> >>>> +        MemoryDeviceState *md = MEMORY_DEVICE(obj);
> >>>> +        MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);
> >>>> +
> >>>> +        if (dev->realized) {
> >>>> +            *size += mdc->get_region_size(md, &error_abort);
> >>>> +        }
> >>>> +    }
> >>>> +
> >>>> +    object_child_foreach(obj, memory_device_used_region_size_internal, opaque);
> >>>> +    return 0;
> >>>> +}
> >>>> +
> >>>> +static uint64_t memory_device_used_region_size(void)
> >>>> +{
> >>>> +    uint64_t size = 0;
> >>>> +
> >>>> +    memory_device_used_region_size_internal(qdev_get_machine(), &size);
> >>>> +
> >>>> +    return size;
> >>>> +}
> >>>> +
> >>>> +uint64_t memory_device_get_free_addr(uint64_t *hint, uint64_t align,
> >>>> +                                     uint64_t size, Error **errp)    
> >>> I'd suggest to pc_dimm_memory_plug/pc_dimm_get_free_addr first,
> >>> namely most of the stuff it does like checks and assigning default
> >>> values should go to pre_plug (pre realize) handler and then only
> >>> actual mapping is left for plug (after realize) handler to deal with:
> >>>     
> >>
> >> Can you elaborate what you mean by pre-plug? If this is about pre plug
> >> handler of the (machine) hotplug handler, it might be problematic for
> >> virtio devices.  
> > yes, something along these lines: c871bc70b
> > 
> >   
> 
> Yes, we can factor that out (at least) for pc-dimm later on easily.
> Seems to be just about moving a couple of calls.
yep, but there is a nice side effect:
there is no need to call devicefoo::unrealize() on failure since
devicefoo::realize() hasn't been called yet.



Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by David Hildenbrand 7 years, 9 months ago
> 
>> +    /* we will need a new memory slot for kvm and vhost */
>> +    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
>> +        error_setg(errp, "hypervisor has no free memory slots left");
>> +        return;
>> +    }
>> +    if (!vhost_has_free_slot()) {
>> +        error_setg(errp, "a used vhost backend has no free memory slots left");
>> +        return;
>> +    }
> move these checks to pre_plug time
> 
>> +
>> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
> missing vmstate registration?

Missed this one: To be called by the caller. Important because e.g. for
virtio-pmem we don't want this (I assume :) ).
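I.e. roughly something like this on the pc-dimm side (only a sketch, the
generic helper name is invented here):

  /* after the generic MemoryDevice code has mapped the region ... */
  memory_device_plug_region(ms, mr, addr, &local_err);   /* hypothetical */
  if (!local_err) {
      /* ... only callers that want migration register the RAM; */
      /* virtio-pmem could simply skip this step */
      vmstate_register_ram(mr, DEVICE(dimm));
  }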

Thanks!


-- 

Thanks,

David / dhildenb

Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by Igor Mammedov 7 years, 9 months ago
On Mon, 23 Apr 2018 14:52:37 +0200
David Hildenbrand <david@redhat.com> wrote:

> >   
> >> +    /* we will need a new memory slot for kvm and vhost */
> >> +    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
> >> +        error_setg(errp, "hypervisor has no free memory slots left");
> >> +        return;
> >> +    }
> >> +    if (!vhost_has_free_slot()) {
> >> +        error_setg(errp, "a used vhost backend has no free memory slots left");
> >> +        return;
> >> +    }  
> > move these checks to pre_plug time
> >   
> >> +
> >> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);  
> > missing vmstate registration?  
> 
> Missed this one: To be called by the caller. Important because e.g. for
> virtio-pmem we don't want this (I assume :) ).
if pmem isn't on shared storage, then we'd probably want to migrate
it as well, otherwise the target would experience data loss.
Anyways, I'd just treat it as normal RAM in the migration case

> 
> Thanks!
> 
> 


Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by David Hildenbrand 7 years, 9 months ago
On 24.04.2018 15:31, Igor Mammedov wrote:
> On Mon, 23 Apr 2018 14:52:37 +0200
> David Hildenbrand <david@redhat.com> wrote:
> 
>>>   
>>>> +    /* we will need a new memory slot for kvm and vhost */
>>>> +    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
>>>> +        error_setg(errp, "hypervisor has no free memory slots left");
>>>> +        return;
>>>> +    }
>>>> +    if (!vhost_has_free_slot()) {
>>>> +        error_setg(errp, "a used vhost backend has no free memory slots left");
>>>> +        return;
>>>> +    }  
>>> move these checks to pre_plug time
>>>   
>>>> +
>>>> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);  
>>> missing vmstate registration?  
>>
>> Missed this one: To be called by the caller. Important because e.g. for
>> virtio-pmem we don't want this (I assume :) ).
> if pmem isn't on shared storage, then We'd probably want to migrate
> it as well, otherwise target would experience data loss.
> Anyways, I'd just reat it as normal RAM in migration case

Yes, if we realize that all MemoryDevices need this call, we can move it
to that place, too.

Wonder if we might want to make this configurable for virtio-pmem later
on (via a flag or sth like that).

-- 

Thanks,

David / dhildenb

Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by Igor Mammedov 7 years, 9 months ago
On Tue, 24 Apr 2018 15:41:23 +0200
David Hildenbrand <david@redhat.com> wrote:

> On 24.04.2018 15:31, Igor Mammedov wrote:
> > On Mon, 23 Apr 2018 14:52:37 +0200
> > David Hildenbrand <david@redhat.com> wrote:
> >   
> >>>     
> >>>> +    /* we will need a new memory slot for kvm and vhost */
> >>>> +    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
> >>>> +        error_setg(errp, "hypervisor has no free memory slots left");
> >>>> +        return;
> >>>> +    }
> >>>> +    if (!vhost_has_free_slot()) {
> >>>> +        error_setg(errp, "a used vhost backend has no free memory slots left");
> >>>> +        return;
> >>>> +    }    
> >>> move these checks to pre_plug time
> >>>     
> >>>> +
> >>>> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);    
> >>> missing vmstate registration?    
> >>
> >> Missed this one: To be called by the caller. Important because e.g. for
> >> virtio-pmem we don't want this (I assume :) ).  
> > if pmem isn't on shared storage, then We'd probably want to migrate
> > it as well, otherwise target would experience data loss.
> > Anyways, I'd just reat it as normal RAM in migration case  
> 
> Yes, if we realize that all MemoryDevices need this call, we can move it
> to that place, too.
> 
> Wonder if we might want to make this configurable for virtio-pmem later
> on (via a flag or sth like that).
I don't see any reason why we wouldn't like it to be migrated,
it's the same as nvdimm but with another qemu:guest ABI
and async flush instead of the sync one we have with nvdimm.


Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by David Hildenbrand 7 years, 9 months ago
On 24.04.2018 16:44, Igor Mammedov wrote:
> On Tue, 24 Apr 2018 15:41:23 +0200
> David Hildenbrand <david@redhat.com> wrote:
> 
>> On 24.04.2018 15:31, Igor Mammedov wrote:
>>> On Mon, 23 Apr 2018 14:52:37 +0200
>>> David Hildenbrand <david@redhat.com> wrote:
>>>   
>>>>>     
>>>>>> +    /* we will need a new memory slot for kvm and vhost */
>>>>>> +    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
>>>>>> +        error_setg(errp, "hypervisor has no free memory slots left");
>>>>>> +        return;
>>>>>> +    }
>>>>>> +    if (!vhost_has_free_slot()) {
>>>>>> +        error_setg(errp, "a used vhost backend has no free memory slots left");
>>>>>> +        return;
>>>>>> +    }    
>>>>> move these checks to pre_plug time
>>>>>     
>>>>>> +
>>>>>> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);    
>>>>> missing vmstate registration?    
>>>>
>>>> Missed this one: To be called by the caller. Important because e.g. for
>>>> virtio-pmem we don't want this (I assume :) ).  
>>> if pmem isn't on shared storage, then We'd probably want to migrate
>>> it as well, otherwise target would experience data loss.
>>> Anyways, I'd just reat it as normal RAM in migration case  
>>
>> Yes, if we realize that all MemoryDevices need this call, we can move it
>> to that place, too.
>>
>> Wonder if we might want to make this configurable for virtio-pmem later
>> on (via a flag or sth like that).
> I don't see any reason why we wouldn't like it to be migrated,
> it's the same as nvdimm but with another qemu:guest ABI
> and async flush instead of sync one we have with nvdimm.
> 

Didn't you just mention "shared storage" ? :)

Anyhow, I leave such stuff to Pankaj to figure out. I remember him
working on some page cache details. Once clarified, this is easily
refactored later on.

-- 

Thanks,

David / dhildenb

Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by Pankaj Gupta 7 years, 9 months ago
> 
> > >   
> > >> +    /* we will need a new memory slot for kvm and vhost */
> > >> +    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
> > >> +        error_setg(errp, "hypervisor has no free memory slots left");
> > >> +        return;
> > >> +    }
> > >> +    if (!vhost_has_free_slot()) {
> > >> +        error_setg(errp, "a used vhost backend has no free memory slots
> > >> left");
> > >> +        return;
> > >> +    }
> > > move these checks to pre_plug time
> > >   
> > >> +
> > >> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
> > > missing vmstate registration?
> > 
> > Missed this one: To be called by the caller. Important because e.g. for
> > virtio-pmem we don't want this (I assume :) ).
> if pmem isn't on shared storage, then We'd probably want to migrate
> it as well, otherwise target would experience data loss.
> Anyways, I'd just reat it as normal RAM in migration case

The main difference between RAM and pmem is that it acts like a combination of RAM and disk.
That said, in the normal use-case the size would be in the 100 GB to a few TB range.
I am not sure we really want to migrate it for the non-shared storage use-case.

One reason why nvdimm added vmstate info could be: there would still be transient
writes in memory with fake DAX and there is no way (until now) to flush the guest
writes. But with virtio-pmem we can flush such writes before migration, and at the
destination host with a shared disk we will automatically have the updated data.


Thanks,
Pankaj  



Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by Igor Mammedov 7 years, 9 months ago
On Wed, 25 Apr 2018 01:45:12 -0400 (EDT)
Pankaj Gupta <pagupta@redhat.com> wrote:

> >   
> > > >     
> > > >> +    /* we will need a new memory slot for kvm and vhost */
> > > >> +    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
> > > >> +        error_setg(errp, "hypervisor has no free memory slots left");
> > > >> +        return;
> > > >> +    }
> > > >> +    if (!vhost_has_free_slot()) {
> > > >> +        error_setg(errp, "a used vhost backend has no free memory slots
> > > >> left");
> > > >> +        return;
> > > >> +    }  
> > > > move these checks to pre_plug time
> > > >     
> > > >> +
> > > >> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);  
> > > > missing vmstate registration?  
> > > 
> > > Missed this one: To be called by the caller. Important because e.g. for
> > > virtio-pmem we don't want this (I assume :) ).  
> > if pmem isn't on shared storage, then We'd probably want to migrate
> > it as well, otherwise target would experience data loss.
> > Anyways, I'd just reat it as normal RAM in migration case  
> 
> Main difference between RAM and pmem it acts like combination of RAM and disk.
> Saying this, in normal use-case size would be 100 GB's - few TB's range. 
> I am not sure we really want to migrate it for non-shared storage use-case.
with non-shared storage you'd have to migrate it to the target host, but
with shared storage it might be possible to flush it and use it directly
from the target host. That probably won't work right out of the box and would
need some sort of synchronization between src/dst hosts.

The same applies to nv/pc-dimm as well, as the backend file could easily be
on pmem storage as well.

Maybe for now we should migrate everything so it would work in the case of
a non-shared NVDIMM on the host, and then later add a migration-less capability
to all of them.

> One reason why nvdimm added vmstate info could be: still there would be transient
> writes in memory with fake DAX and there is no way(till now) to flush the guest 
> writes. But with virtio-pmem we can flush such writes before migration and automatically
> at destination host with shared disk we will have updated data.
nvdimm has the concept of a flush address hint (maybe not implemented in qemu yet)
but it can flush. The only reason I'm buying into the virtio-mem idea
is that it would allow async flush queues, which would reduce the number
of vmexits.

> 
> 
> Thanks,
> Pankaj  
> 
> 


Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by Pankaj Gupta 7 years, 9 months ago
> > > > >     
> > > > >> +
> > > > >> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
> > > > > missing vmstate registration?
> > > > 
> > > > Missed this one: To be called by the caller. Important because e.g. for
> > > > virtio-pmem we don't want this (I assume :) ).
> > > if pmem isn't on shared storage, then We'd probably want to migrate
> > > it as well, otherwise target would experience data loss.
> > > Anyways, I'd just reat it as normal RAM in migration case
> > 
> > Main difference between RAM and pmem it acts like combination of RAM and
> > disk.
> > Saying this, in normal use-case size would be 100 GB's - few TB's range.
> > I am not sure we really want to migrate it for non-shared storage use-case.
> with non shared storage you'd have to migrate it target host but
> with shared storage it might be possible to flush it and use directly
> from target host. That probably won't work right out of box and would
> need some sort of synchronization between src/dst hosts.

Shared storage should work out of the box. The only thing is that data in the destination
host will be cache cold, and existing pages in the cache should be invalidated first.
But if we migrate the entire fake DAX RAM state, it will populate the destination host page
cache, including pages which were idle in the source host. This would unnecessarily
create entropy in the destination host.

To me this feature doesn't make much sense. The problem which we are solving is:
efficiently use guest RAM.

> 
> The same applies to nv/pc-dimm as well, as backend file easily could be
> on pmem storage as well.

Are you saying the backing file is on actual nvdimm hardware? Then we don't need
emulation at all.

> 
> Maybe for now we should migrate everything so it would work in case of
> non shared NVDIMM on host. And then later add migration-less capability
> to all of them.

not sure I agree.

> 
> > One reason why nvdimm added vmstate info could be: still there would be
> > transient
> > writes in memory with fake DAX and there is no way(till now) to flush the
> > guest
> > writes. But with virtio-pmem we can flush such writes before migration and
> > automatically
> > at destination host with shared disk we will have updated data.
> nvdimm has concept of flush address hint (may be not implemented in qemu yet)
> but it can flush. The only reason I'm buying into virtio-mem idea
> is that would allow async flush queues which would reduce number
> of vmexits.

Thats correct.

Thanks,
Pankaj

 

Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by Igor Mammedov 7 years, 9 months ago
On Wed, 25 Apr 2018 09:56:49 -0400 (EDT)
Pankaj Gupta <pagupta@redhat.com> wrote:

> > > > > >       
> > > > > >> +
> > > > > >> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);  
> > > > > > missing vmstate registration?  
> > > > > 
> > > > > Missed this one: To be called by the caller. Important because e.g. for
> > > > > virtio-pmem we don't want this (I assume :) ).  
> > > > if pmem isn't on shared storage, then We'd probably want to migrate
> > > > it as well, otherwise target would experience data loss.
> > > > Anyways, I'd just reat it as normal RAM in migration case  
> > > 
> > > Main difference between RAM and pmem it acts like combination of RAM and
> > > disk.
> > > Saying this, in normal use-case size would be 100 GB's - few TB's range.
> > > I am not sure we really want to migrate it for non-shared storage use-case.  
> > with non shared storage you'd have to migrate it target host but
> > with shared storage it might be possible to flush it and use directly
> > from target host. That probably won't work right out of box and would
> > need some sort of synchronization between src/dst hosts.  
> 
> Shared storage should work out of the box.
> Only thing is data in destination
> host will be cache cold and existing pages in cache should be invalidated first. 
> But if we migrate entire fake DAX RAMstate it will populate destination host page 
> cache including pages while were idle in source host. This would unnecessarily 
> create entropy in destination host. 
> 
> To me this feature don't make much sense. Problem which we are solving is:
> Efficiently use guest RAM.
What would the live migration handover flow look like in the case of a
guest constantly dirtying memory provided by virtio-pmem
and sometimes issuing async flush requests along with it?


> > The same applies to nv/pc-dimm as well, as backend file easily could be
> > on pmem storage as well.  
> 
> Are you saying backing file is in actual actual nvdimm hardware? we don't need 
> emulation at all.
depends on whether the file is on a DAX filesystem, but your argument about
migrating a huge 100 GB to a few TB range applies in this case as well.

> 
> > 
> > Maybe for now we should migrate everything so it would work in case of
> > non shared NVDIMM on host. And then later add migration-less capability
> > to all of them.  
> 
> not sure I agree.
So would you inhibit migration in the case of non-shared backend storage,
to avoid losing data since it isn't migrated?


> > > One reason why nvdimm added vmstate info could be: still there would be
> > > transient
> > > writes in memory with fake DAX and there is no way(till now) to flush the
> > > guest
> > > writes. But with virtio-pmem we can flush such writes before migration and
> > > automatically
> > > at destination host with shared disk we will have updated data.  
> > nvdimm has concept of flush address hint (may be not implemented in qemu yet)
> > but it can flush. The only reason I'm buying into virtio-mem idea
> > is that would allow async flush queues which would reduce number
> > of vmexits.  
> 
> Thats correct.
> 
> Thanks,
> Pankaj
> 
>  


Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by Pankaj Gupta 7 years, 9 months ago
> > > > > > >> +
> > > > > > >> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base,
> > > > > > >> mr);
> > > > > > > missing vmstate registration?
> > > > > > 
> > > > > > Missed this one: To be called by the caller. Important because e.g.
> > > > > > for
> > > > > > virtio-pmem we don't want this (I assume :) ).
> > > > > if pmem isn't on shared storage, then We'd probably want to migrate
> > > > > it as well, otherwise target would experience data loss.
> > > > > Anyways, I'd just reat it as normal RAM in migration case
> > > > 
> > > > Main difference between RAM and pmem it acts like combination of RAM
> > > > and
> > > > disk.
> > > > Saying this, in normal use-case size would be 100 GB's - few TB's
> > > > range.
> > > > I am not sure we really want to migrate it for non-shared storage
> > > > use-case.
> > > with non shared storage you'd have to migrate it target host but
> > > with shared storage it might be possible to flush it and use directly
> > > from target host. That probably won't work right out of box and would
> > > need some sort of synchronization between src/dst hosts.
> > 
> > Shared storage should work out of the box.
> > Only thing is data in destination
> > host will be cache cold and existing pages in cache should be invalidated
> > first.
> > But if we migrate entire fake DAX RAMstate it will populate destination
> > host page
> > cache including pages while were idle in source host. This would
> > unnecessarily
> > create entropy in destination host.
> > 
> > To me this feature don't make much sense. Problem which we are solving is:
> > Efficiently use guest RAM.
> What would live migration handover flow look like in case of
> guest constantly dirting memory provided by virtio-pmem and
> and sometimes issuing async flush req along with it?

Dirtying the entire pmem (disk) at once is not a usual scenario. Some part of the disk/pmem
would get dirty and we need to handle that. I just want to say that moving the entire
pmem (disk) is not an efficient solution, because we are using this solution to
manage guest memory efficiently. Otherwise it will be like any block device copy
with non-shared storage.
 
> 
> 
> > > The same applies to nv/pc-dimm as well, as backend file easily could be
> > > on pmem storage as well.
> > 
> > Are you saying backing file is in actual actual nvdimm hardware? we don't
> > need
> > emulation at all.
> depends on if file is on DAX filesystem, but your argument about
> migrating huge 100Gb- TB's range applies in this case as well.
> 
> > 
> > > 
> > > Maybe for now we should migrate everything so it would work in case of
> > > non shared NVDIMM on host. And then later add migration-less capability
> > > to all of them.
> > 
> > not sure I agree.
> So would you inhibit migration in case of non shared backend storage,
> to avoid loosing data since they aren't migrated?

I am just thinking about what features we want to support with pmem, and live migration
with shared storage is the one that comes to my mind.

If live migration with non-shared storage is what we want to support (I don't know
yet), we can add this? Even with shared storage, would it copy the entire pmem state?

Thanks,
Pankaj
 
> 
> 
> > > > One reason why nvdimm added vmstate info could be: still there would be
> > > > transient
> > > > writes in memory with fake DAX and there is no way(till now) to flush
> > > > the
> > > > guest
> > > > writes. But with virtio-pmem we can flush such writes before migration
> > > > and
> > > > automatically
> > > > at destination host with shared disk we will have updated data.
> > > nvdimm has concept of flush address hint (may be not implemented in qemu
> > > yet)
> > > but it can flush. The only reason I'm buying into virtio-mem idea
> > > is that would allow async flush queues which would reduce number
> > > of vmexits.
> > 
> > Thats correct.
> > 
> > Thanks,
> > Pankaj
> > 
> >  
> 
> 
> 

Re: [Qemu-devel] [PATCH v3 3/3] virtio-pmem: should we make it migratable???
Posted by Igor Mammedov 7 years, 9 months ago
On Thu, 26 Apr 2018 03:37:51 -0400 (EDT)
Pankaj Gupta <pagupta@redhat.com> wrote:

trimming CC list to keep people that might be interested in the topic
and renaming thread to reflect it.

> > > > > > > >> +
> > > > > > > >> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base,
> > > > > > > >> mr);  
> > > > > > > > missing vmstate registration?  
> > > > > > > 
> > > > > > > Missed this one: To be called by the caller. Important because e.g.
> > > > > > > for
> > > > > > > virtio-pmem we don't want this (I assume :) ).  
> > > > > > if pmem isn't on shared storage, then We'd probably want to migrate
> > > > > > it as well, otherwise target would experience data loss.
> > > > > > Anyways, I'd just reat it as normal RAM in migration case  
> > > > > 
> > > > > Main difference between RAM and pmem it acts like combination of RAM
> > > > > and
> > > > > disk.
> > > > > Saying this, in normal use-case size would be 100 GB's - few TB's
> > > > > range.
> > > > > I am not sure we really want to migrate it for non-shared storage
> > > > > use-case.  
> > > > with non shared storage you'd have to migrate it target host but
> > > > with shared storage it might be possible to flush it and use directly
> > > > from target host. That probably won't work right out of box and would
> > > > need some sort of synchronization between src/dst hosts.  
> > > 
> > > Shared storage should work out of the box.
> > > Only thing is data in destination
> > > host will be cache cold and existing pages in cache should be invalidated
> > > first.
> > > But if we migrate entire fake DAX RAMstate it will populate destination
> > > host page
> > > cache including pages while were idle in source host. This would
> > > unnecessarily
> > > create entropy in destination host.
> > > 
> > > To me this feature don't make much sense. Problem which we are solving is:
> > > Efficiently use guest RAM.  
> > What would live migration handover flow look like in case of
> > guest constantly dirting memory provided by virtio-pmem and
> > and sometimes issuing async flush req along with it?  
> 
> Dirty entire pmem (disk) at once not a usual scenario. Some part of disk/pmem
> would get dirty and we need to handle that. I just want to say moving entire
> pmem (disk) is not efficient solution because we are using this solution to
> manage guest memory efficiently. Otherwise it will be like any block device copy
> with non-shared storage.   
not sure if we can use the block layer analogy here.

> > > > The same applies to nv/pc-dimm as well, as backend file easily could be
> > > > on pmem storage as well.  
> > > 
> > > Are you saying backing file is in actual actual nvdimm hardware? we don't
> > > need
> > > emulation at all.  
> > depends on if file is on DAX filesystem, but your argument about
> > migrating huge 100Gb- TB's range applies in this case as well.
> >   
> > >   
> > > > 
> > > > Maybe for now we should migrate everything so it would work in case of
> > > > non shared NVDIMM on host. And then later add migration-less capability
> > > > to all of them.  
> > > 
> > > not sure I agree.  
> > So would you inhibit migration in case of non shared backend storage,
> > to avoid loosing data since they aren't migrated?  
> 
> I am just thinking what features we want to support with pmem. And live migration
> with shared storage is the one which comes to my mind.
> 
> If live migration with non-shared storage is what we want to support (I don't know
> yet) we can add this? Even with shared storage it would copy entire pmem state?
Perhaps we should register vmstate like for normal RAM and use something similar to
  http://lists.gnu.org/archive/html/qemu-devel/2018-04/msg00003.html
to skip shared memory on migration.
In this case we could use this for pc-dimms as well.
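
Roughly, that series boils down to skipping shared blocks in the RAM
migration loop, something like (sketch only, the capability check name is
made up here):

  /* migration/ram.c, when walking the RAM blocks */
  RAMBLOCK_FOREACH(block) {
      if (migrate_skip_shared_ram() && qemu_ram_is_shared(block)) {
          /* backing is shared with the destination, don't copy the pages */
          continue;
      }
      /* ... normal dirty tracking and page sending ... */
  }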

David,
 what's your take on it?

> Thanks,
> Pankaj
>  
> > 
> >   
> > > > > One reason why nvdimm added vmstate info could be: still there would be
> > > > > transient
> > > > > writes in memory with fake DAX and there is no way(till now) to flush
> > > > > the
> > > > > guest
> > > > > writes. But with virtio-pmem we can flush such writes before migration
> > > > > and
> > > > > automatically
> > > > > at destination host with shared disk we will have updated data.  
> > > > nvdimm has concept of flush address hint (may be not implemented in qemu
> > > > yet)
> > > > but it can flush. The only reason I'm buying into virtio-mem idea
> > > > is that would allow async flush queues which would reduce number
> > > > of vmexits.  
> > > 
> > > Thats correct.
> > > 
> > > Thanks,
> > > Pankaj
> > > 
> > >    
> > 
> > 
> >   
> 


Re: [Qemu-devel] [PATCH v3 3/3] virtio-pmem: should we make it migratable???
Posted by David Hildenbrand 7 years, 9 months ago
On 04.05.2018 11:13, Igor Mammedov wrote:
> On Thu, 26 Apr 2018 03:37:51 -0400 (EDT)
> Pankaj Gupta <pagupta@redhat.com> wrote:
> 
> trimming CC list to keep people that might be interested in the topic
> and renaming thread to reflect it.
> 
>>>>>>>>>> +
>>>>>>>>>> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base,
>>>>>>>>>> mr);  
>>>>>>>>> missing vmstate registration?  
>>>>>>>>
>>>>>>>> Missed this one: To be called by the caller. Important because e.g.
>>>>>>>> for
>>>>>>>> virtio-pmem we don't want this (I assume :) ).  
>>>>>>> if pmem isn't on shared storage, then We'd probably want to migrate
>>>>>>> it as well, otherwise target would experience data loss.
>>>>>>> Anyways, I'd just reat it as normal RAM in migration case  
>>>>>>
>>>>>> Main difference between RAM and pmem it acts like combination of RAM
>>>>>> and
>>>>>> disk.
>>>>>> Saying this, in normal use-case size would be 100 GB's - few TB's
>>>>>> range.
>>>>>> I am not sure we really want to migrate it for non-shared storage
>>>>>> use-case.  
>>>>> with non shared storage you'd have to migrate it target host but
>>>>> with shared storage it might be possible to flush it and use directly
>>>>> from target host. That probably won't work right out of box and would
>>>>> need some sort of synchronization between src/dst hosts.  
>>>>
>>>> Shared storage should work out of the box.
>>>> Only thing is data in destination
>>>> host will be cache cold and existing pages in cache should be invalidated
>>>> first.
>>>> But if we migrate entire fake DAX RAMstate it will populate destination
>>>> host page
>>>> cache including pages while were idle in source host. This would
>>>> unnecessarily
>>>> create entropy in destination host.
>>>>
>>>> To me this feature don't make much sense. Problem which we are solving is:
>>>> Efficiently use guest RAM.  
>>> What would live migration handover flow look like in case of
>>> guest constantly dirting memory provided by virtio-pmem and
>>> and sometimes issuing async flush req along with it?  
>>
>> Dirty entire pmem (disk) at once not a usual scenario. Some part of disk/pmem
>> would get dirty and we need to handle that. I just want to say moving entire
>> pmem (disk) is not efficient solution because we are using this solution to
>> manage guest memory efficiently. Otherwise it will be like any block device copy
>> with non-shared storage.   
> not sure if we can use block layer analogy here.
> 
>>>>> The same applies to nv/pc-dimm as well, as backend file easily could be
>>>>> on pmem storage as well.  
>>>>
>>>> Are you saying backing file is in actual actual nvdimm hardware? we don't
>>>> need
>>>> emulation at all.  
>>> depends on if file is on DAX filesystem, but your argument about
>>> migrating huge 100Gb- TB's range applies in this case as well.
>>>   
>>>>   
>>>>>
>>>>> Maybe for now we should migrate everything so it would work in case of
>>>>> non shared NVDIMM on host. And then later add migration-less capability
>>>>> to all of them.  
>>>>
>>>> not sure I agree.  
>>> So would you inhibit migration in case of non shared backend storage,
>>> to avoid loosing data since they aren't migrated?  
>>
>> I am just thinking what features we want to support with pmem. And live migration
>> with shared storage is the one which comes to my mind.
>>
>> If live migration with non-shared storage is what we want to support (I don't know
>> yet) we can add this? Even with shared storage it would copy entire pmem state?
> Perhaps we should register vmstate like for normal ram and use something similar to
>   http://lists.gnu.org/archive/html/qemu-devel/2018-04/msg00003.html this
> to skip shared memory on migration.
> In this case we could use this for pc-dimms as well.
> 
> David,
>  what's your take on it?

(I assume you were talking to David Gilbert, but still my take on it :))

"shared RAM" use in that context is rather "shared between processes",
no? What would be the benefit of enabling migration for a ramblock but
then again blocking it off?

Anyhow, I think this detailed discussion is way too early right now. Pankaj
has some other problems to solve before moving on to migration (yes,
migration is important to keep in mind, but not the top priority right
now). And I would consider migration in this context as the next step
once we have the basics sorted out (flushing, threads ...)

> 
>> Thanks,
>> Pankaj
>>  


-- 

Thanks,

David / dhildenb

Re: [Qemu-devel] [PATCH v3 3/3] virtio-pmem: should we make it migratable???
Posted by Pankaj Gupta 7 years, 9 months ago

> > 
> >>>>>>>>>> +
> >>>>>>>>>> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base,
> >>>>>>>>>> mr);
> >>>>>>>>> missing vmstate registration?
> >>>>>>>>
> >>>>>>>> Missed this one: To be called by the caller. Important because e.g.
> >>>>>>>> for
> >>>>>>>> virtio-pmem we don't want this (I assume :) ).
> >>>>>>> if pmem isn't on shared storage, then We'd probably want to migrate
> >>>>>>> it as well, otherwise target would experience data loss.
> >>>>>>> Anyways, I'd just reat it as normal RAM in migration case
> >>>>>>
> >>>>>> Main difference between RAM and pmem it acts like combination of RAM
> >>>>>> and
> >>>>>> disk.
> >>>>>> Saying this, in normal use-case size would be 100 GB's - few TB's
> >>>>>> range.
> >>>>>> I am not sure we really want to migrate it for non-shared storage
> >>>>>> use-case.
> >>>>> with non shared storage you'd have to migrate it target host but
> >>>>> with shared storage it might be possible to flush it and use directly
> >>>>> from target host. That probably won't work right out of box and would
> >>>>> need some sort of synchronization between src/dst hosts.
> >>>>
> >>>> Shared storage should work out of the box.
> >>>> Only thing is data in destination
> >>>> host will be cache cold and existing pages in cache should be
> >>>> invalidated
> >>>> first.
> >>>> But if we migrate entire fake DAX RAMstate it will populate destination
> >>>> host page
> >>>> cache including pages while were idle in source host. This would
> >>>> unnecessarily
> >>>> create entropy in destination host.
> >>>>
> >>>> To me this feature don't make much sense. Problem which we are solving
> >>>> is:
> >>>> Efficiently use guest RAM.
> >>> What would live migration handover flow look like in case of
> >>> guest constantly dirting memory provided by virtio-pmem and
> >>> and sometimes issuing async flush req along with it?
> >>
> >> Dirty entire pmem (disk) at once not a usual scenario. Some part of
> >> disk/pmem
> >> would get dirty and we need to handle that. I just want to say moving
> >> entire
> >> pmem (disk) is not efficient solution because we are using this solution
> >> to
> >> manage guest memory efficiently. Otherwise it will be like any block
> >> device copy
> >> with non-shared storage.
> > not sure if we can use block layer analogy here.
> > 
> >>>>> The same applies to nv/pc-dimm as well, as backend file easily could be
> >>>>> on pmem storage as well.
> >>>>
> >>>> Are you saying backing file is in actual actual nvdimm hardware? we
> >>>> don't
> >>>> need
> >>>> emulation at all.
> >>> depends on if file is on DAX filesystem, but your argument about
> >>> migrating huge 100Gb- TB's range applies in this case as well.
> >>>   
> >>>>   
> >>>>>
> >>>>> Maybe for now we should migrate everything so it would work in case of
> >>>>> non shared NVDIMM on host. And then later add migration-less capability
> >>>>> to all of them.
> >>>>
> >>>> not sure I agree.
> >>> So would you inhibit migration in case of non shared backend storage,
> >>> to avoid loosing data since they aren't migrated?
> >>
> >> I am just thinking what features we want to support with pmem. And live
> >> migration
> >> with shared storage is the one which comes to my mind.
> >>
> >> If live migration with non-shared storage is what we want to support (I
> >> don't know
> >> yet) we can add this? Even with shared storage it would copy entire pmem
> >> state?
> > Perhaps we should register vmstate like for normal ram and use something
> > similar to
> >   http://lists.gnu.org/archive/html/qemu-devel/2018-04/msg00003.html this
> > to skip shared memory on migration.
> > In this case we could use this for pc-dimms as well.
> > 
> > David,
> >  what's your take on it?
> 
> (I assume you were talking to David Gilbert, but still my take on it :))
> 
> "shared RAM" use in that context is rather "shared between processes",
> no? What would be the benefit of enabling migration for a ramblock but
> then again blocking it off?
> 
> Anyhow, I think this detail discussion is way to early right now. Pankaj
> has some other problems to solve before moving on to migration (yes,
> migration is important to keep in mind but not the top priority right
> now). And I would consider migration in this context as the next step
> once we have the basics sorted out (flushing, threads ...)

Right. Still, it would be very helpful to have Dave Gilbert's input on
this ongoing discussion. That would clear up some doubts regarding the expectations
for this type of memory from a live migration point of view.

Then we can discuss in more detail after we have the first version accepted
and start optimizing for live migration.

Thanks,
Pankaj

Re: [Qemu-devel] [PATCH v3 3/3] virtio-pmem: should we make it migratable???
Posted by Dr. David Alan Gilbert 7 years, 9 months ago
* Igor Mammedov (imammedo@redhat.com) wrote:
> On Thu, 26 Apr 2018 03:37:51 -0400 (EDT)
> Pankaj Gupta <pagupta@redhat.com> wrote:
> 
> trimming CC list to keep people that might be interested in the topic
> and renaming thread to reflect it.
> 
> > > > > > > > >> +
> > > > > > > > >> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base,
> > > > > > > > >> mr);  
> > > > > > > > > missing vmstate registration?  
> > > > > > > > 
> > > > > > > > Missed this one: To be called by the caller. Important because e.g.
> > > > > > > > for
> > > > > > > > virtio-pmem we don't want this (I assume :) ).  
> > > > > > > if pmem isn't on shared storage, then We'd probably want to migrate
> > > > > > > it as well, otherwise target would experience data loss.
> > > > > > > Anyways, I'd just reat it as normal RAM in migration case  
> > > > > > 
> > > > > > Main difference between RAM and pmem it acts like combination of RAM
> > > > > > and
> > > > > > disk.
> > > > > > Saying this, in normal use-case size would be 100 GB's - few TB's
> > > > > > range.
> > > > > > I am not sure we really want to migrate it for non-shared storage
> > > > > > use-case.  
> > > > > with non shared storage you'd have to migrate it target host but
> > > > > with shared storage it might be possible to flush it and use directly
> > > > > from target host. That probably won't work right out of box and would
> > > > > need some sort of synchronization between src/dst hosts.  
> > > > 
> > > > Shared storage should work out of the box.
> > > > Only thing is data in destination
> > > > host will be cache cold and existing pages in cache should be invalidated
> > > > first.
> > > > But if we migrate entire fake DAX RAMstate it will populate destination
> > > > host page
> > > > cache including pages while were idle in source host. This would
> > > > unnecessarily
> > > > create entropy in destination host.
> > > > 
> > > > To me this feature don't make much sense. Problem which we are solving is:
> > > > Efficiently use guest RAM.  
> > > What would live migration handover flow look like in case of
> > > guest constantly dirting memory provided by virtio-pmem and
> > > and sometimes issuing async flush req along with it?  
> > 
> > Dirty entire pmem (disk) at once not a usual scenario. Some part of disk/pmem
> > would get dirty and we need to handle that. I just want to say moving entire
> > pmem (disk) is not efficient solution because we are using this solution to
> > manage guest memory efficiently. Otherwise it will be like any block device copy
> > with non-shared storage.   
> not sure if we can use block layer analogy here.
> 
> > > > > The same applies to nv/pc-dimm as well, as backend file easily could be
> > > > > on pmem storage as well.  
> > > > 
> > > > Are you saying backing file is in actual actual nvdimm hardware? we don't
> > > > need
> > > > emulation at all.  
> > > depends on if file is on DAX filesystem, but your argument about
> > > migrating huge 100Gb- TB's range applies in this case as well.
> > >   
> > > >   
> > > > > 
> > > > > Maybe for now we should migrate everything so it would work in case of
> > > > > non shared NVDIMM on host. And then later add migration-less capability
> > > > > to all of them.  
> > > > 
> > > > not sure I agree.  
> > > So would you inhibit migration in case of non shared backend storage,
> > > to avoid loosing data since they aren't migrated?  
> > 
> > I am just thinking what features we want to support with pmem. And live migration
> > with shared storage is the one which comes to my mind.
> > 
> > If live migration with non-shared storage is what we want to support (I don't know
> > yet) we can add this? Even with shared storage it would copy entire pmem state?
> Perhaps we should register vmstate like for normal ram and use something similar to
>   http://lists.gnu.org/archive/html/qemu-devel/2018-04/msg00003.html this
> to skip shared memory on migration.
> In this case we could use this for pc-dimms as well.
> 
> David,
>  what's your take on it?

My feel is that something is going to have to migrate it, I'm just not
sure how.
So let me just check I understand:
  a) It's potentially huge
  b) It's a RAMBlock
  c) It's backed by ????
     c1) Something machine local - i.e. a physical lump of flash in a
         socket rather than something sharable by machines?
  d) It can potentially be rapidly changing as the guest writes to it?

Dave

> > Thanks,
> > Pankaj
> >  
> > > 
> > >   
> > > > > > One reason why nvdimm added vmstate info could be: still there would be
> > > > > > transient
> > > > > > writes in memory with fake DAX and there is no way(till now) to flush
> > > > > > the
> > > > > > guest
> > > > > > writes. But with virtio-pmem we can flush such writes before migration
> > > > > > and
> > > > > > automatically
> > > > > > at destination host with shared disk we will have updated data.  
> > > > > nvdimm has concept of flush address hint (may be not implemented in qemu
> > > > > yet)
> > > > > but it can flush. The only reason I'm buying into virtio-mem idea
> > > > > is that would allow async flush queues which would reduce number
> > > > > of vmexits.  
> > > > 
> > > > Thats correct.
> > > > 
> > > > Thanks,
> > > > Pankaj
> > > > 
> > > >    
> > > 
> > > 
> > >   
> > 
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

Re: [Qemu-devel] [PATCH v3 3/3] virtio-pmem: should we make it migratable???
Posted by Igor Mammedov 7 years, 9 months ago
On Fri, 4 May 2018 13:26:51 +0100
"Dr. David Alan Gilbert" <dgilbert@redhat.com> wrote:

> * Igor Mammedov (imammedo@redhat.com) wrote:
> > On Thu, 26 Apr 2018 03:37:51 -0400 (EDT)
> > Pankaj Gupta <pagupta@redhat.com> wrote:
> > 
> > trimming CC list to keep people that might be interested in the topic
> > and renaming thread to reflect it.
> >   
> > > > > > > > > >> +
> > > > > > > > > >> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base,
> > > > > > > > > >> mr);    
> > > > > > > > > > missing vmstate registration?    
> > > > > > > > > 
> > > > > > > > > Missed this one: To be called by the caller. Important because e.g.
> > > > > > > > > for
> > > > > > > > > virtio-pmem we don't want this (I assume :) ).    
> > > > > > > > if pmem isn't on shared storage, then We'd probably want to migrate
> > > > > > > > it as well, otherwise target would experience data loss.
> > > > > > > > Anyways, I'd just reat it as normal RAM in migration case    
> > > > > > > 
> > > > > > > Main difference between RAM and pmem it acts like combination of RAM
> > > > > > > and
> > > > > > > disk.
> > > > > > > Saying this, in normal use-case size would be 100 GB's - few TB's
> > > > > > > range.
> > > > > > > I am not sure we really want to migrate it for non-shared storage
> > > > > > > use-case.    
> > > > > > with non shared storage you'd have to migrate it target host but
> > > > > > with shared storage it might be possible to flush it and use directly
> > > > > > from target host. That probably won't work right out of box and would
> > > > > > need some sort of synchronization between src/dst hosts.    
> > > > > 
> > > > > Shared storage should work out of the box.
> > > > > Only thing is data in destination
> > > > > host will be cache cold and existing pages in cache should be invalidated
> > > > > first.
> > > > > But if we migrate entire fake DAX RAMstate it will populate destination
> > > > > host page
> > > > > cache including pages while were idle in source host. This would
> > > > > unnecessarily
> > > > > create entropy in destination host.
> > > > > 
> > > > > To me this feature don't make much sense. Problem which we are solving is:
> > > > > Efficiently use guest RAM.    
> > > > What would live migration handover flow look like in case of
> > > > guest constantly dirting memory provided by virtio-pmem and
> > > > and sometimes issuing async flush req along with it?    
> > > 
> > > Dirty entire pmem (disk) at once not a usual scenario. Some part of disk/pmem
> > > would get dirty and we need to handle that. I just want to say moving entire
> > > pmem (disk) is not efficient solution because we are using this solution to
> > > manage guest memory efficiently. Otherwise it will be like any block device copy
> > > with non-shared storage.     
> > not sure if we can use block layer analogy here.
> >   
> > > > > > The same applies to nv/pc-dimm as well, as backend file easily could be
> > > > > > on pmem storage as well.    
> > > > > 
> > > > > Are you saying backing file is in actual actual nvdimm hardware? we don't
> > > > > need
> > > > > emulation at all.    
> > > > depends on if file is on DAX filesystem, but your argument about
> > > > migrating huge 100Gb- TB's range applies in this case as well.
> > > >     
> > > > >     
> > > > > > 
> > > > > > Maybe for now we should migrate everything so it would work in case of
> > > > > > non shared NVDIMM on host. And then later add migration-less capability
> > > > > > to all of them.    
> > > > > 
> > > > > not sure I agree.    
> > > > So would you inhibit migration in case of non shared backend storage,
> > > > to avoid loosing data since they aren't migrated?    
> > > 
> > > I am just thinking what features we want to support with pmem. And live migration
> > > with shared storage is the one which comes to my mind.
> > > 
> > > If live migration with non-shared storage is what we want to support (I don't know
> > > yet) we can add this? Even with shared storage it would copy entire pmem state?  
> > Perhaps we should register vmstate like for normal ram and use something similar to
> >   http://lists.gnu.org/archive/html/qemu-devel/2018-04/msg00003.html this
> > to skip shared memory on migration.
> > In this case we could use this for pc-dimms as well.
> > 
> > David,
> >  what's your take on it?  
> 
> My feel is that something is going to have to migrate it, I'm just not
> sure how.
> So let me just check I understand:
>   a) It's potentially huge
yep, assume it could be in storage-like quantities (100s of GB)

>   b) It's a RAMBlock
it is

>   c) It's backed by ????
>      c1) Something machine local - i.e. a physical lump of flash in a
>          socket rather than something sharable by machines?
it's backed by memory-backend-foo, so it could be really anything (RAM,
file on local or shared storage, file descriptor)

>   d) It can potentially be rapidly changing as the guest writes to it?
it's sort of like NVDIMM but without the NVDIMM interface; it uses virtio
to force flushing instead. Otherwise it's directly mapped into the guest
address space, so the guest can do anything with it, including fast dirtying.


> Dave
> 
> > > Thanks,
> > > Pankaj
> > >    
> > > > 
> > > >     
> > > > > > > One reason why nvdimm added vmstate info could be: still there would be
> > > > > > > transient
> > > > > > > writes in memory with fake DAX and there is no way(till now) to flush
> > > > > > > the
> > > > > > > guest
> > > > > > > writes. But with virtio-pmem we can flush such writes before migration
> > > > > > > and
> > > > > > > automatically
> > > > > > > at destination host with shared disk we will have updated data.    
> > > > > > nvdimm has concept of flush address hint (may be not implemented in qemu
> > > > > > yet)
> > > > > > but it can flush. The only reason I'm buying into virtio-mem idea
> > > > > > is that would allow async flush queues which would reduce number
> > > > > > of vmexits.    
> > > > > 
> > > > > Thats correct.
> > > > > 
> > > > > Thanks,
> > > > > Pankaj
> > > > > 
> > > > >      
> > > > 
> > > > 
> > > >     
> > >   
> >   
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK


Re: [Qemu-devel] [PATCH v3 3/3] virtio-pmem: should we make it migratable???
Posted by Pankaj Gupta 7 years, 9 months ago
> 
> On Fri, 4 May 2018 13:26:51 +0100
> "Dr. David Alan Gilbert" <dgilbert@redhat.com> wrote:
> 
> > * Igor Mammedov (imammedo@redhat.com) wrote:
> > > On Thu, 26 Apr 2018 03:37:51 -0400 (EDT)
> > > Pankaj Gupta <pagupta@redhat.com> wrote:
> > > 
> > > trimming CC list to keep people that might be interested in the topic
> > > and renaming thread to reflect it.
> > >   
> > > > > > > > > > >> +
> > > > > > > > > > >> +    memory_region_add_subregion(&hpms->mr, addr -
> > > > > > > > > > >> hpms->base,
> > > > > > > > > > >> mr);
> > > > > > > > > > > missing vmstate registration?
> > > > > > > > > > 
> > > > > > > > > > Missed this one: To be called by the caller. Important
> > > > > > > > > > because e.g.
> > > > > > > > > > for
> > > > > > > > > > virtio-pmem we don't want this (I assume :) ).
> > > > > > > > > if pmem isn't on shared storage, then We'd probably want to
> > > > > > > > > migrate
> > > > > > > > > it as well, otherwise target would experience data loss.
> > > > > > > > > Anyways, I'd just reat it as normal RAM in migration case
> > > > > > > > 
> > > > > > > > Main difference between RAM and pmem it acts like combination
> > > > > > > > of RAM
> > > > > > > > and
> > > > > > > > disk.
> > > > > > > > Saying this, in normal use-case size would be 100 GB's - few
> > > > > > > > TB's
> > > > > > > > range.
> > > > > > > > I am not sure we really want to migrate it for non-shared
> > > > > > > > storage
> > > > > > > > use-case.
> > > > > > > with non shared storage you'd have to migrate it target host but
> > > > > > > with shared storage it might be possible to flush it and use
> > > > > > > directly
> > > > > > > from target host. That probably won't work right out of box and
> > > > > > > would
> > > > > > > need some sort of synchronization between src/dst hosts.
> > > > > > 
> > > > > > Shared storage should work out of the box.
> > > > > > Only thing is data in destination
> > > > > > host will be cache cold and existing pages in cache should be
> > > > > > invalidated
> > > > > > first.
> > > > > > But if we migrate entire fake DAX RAMstate it will populate
> > > > > > destination
> > > > > > host page
> > > > > > cache including pages while were idle in source host. This would
> > > > > > unnecessarily
> > > > > > create entropy in destination host.
> > > > > > 
> > > > > > To me this feature don't make much sense. Problem which we are
> > > > > > solving is:
> > > > > > Efficiently use guest RAM.
> > > > > What would live migration handover flow look like in case of
> > > > > guest constantly dirting memory provided by virtio-pmem and
> > > > > and sometimes issuing async flush req along with it?
> > > > 
> > > > Dirty entire pmem (disk) at once not a usual scenario. Some part of
> > > > disk/pmem
> > > > would get dirty and we need to handle that. I just want to say moving
> > > > entire
> > > > pmem (disk) is not efficient solution because we are using this
> > > > solution to
> > > > manage guest memory efficiently. Otherwise it will be like any block
> > > > device copy
> > > > with non-shared storage.
> > > not sure if we can use block layer analogy here.
> > >   
> > > > > > > The same applies to nv/pc-dimm as well, as backend file easily
> > > > > > > could be
> > > > > > > on pmem storage as well.
> > > > > > 
> > > > > > Are you saying backing file is in actual actual nvdimm hardware? we
> > > > > > don't
> > > > > > need
> > > > > > emulation at all.
> > > > > depends on if file is on DAX filesystem, but your argument about
> > > > > migrating huge 100Gb- TB's range applies in this case as well.
> > > > >     
> > > > > >     
> > > > > > > 
> > > > > > > Maybe for now we should migrate everything so it would work in
> > > > > > > case of
> > > > > > > non shared NVDIMM on host. And then later add migration-less
> > > > > > > capability
> > > > > > > to all of them.
> > > > > > 
> > > > > > not sure I agree.
> > > > > So would you inhibit migration in case of non shared backend storage,
> > > > > to avoid loosing data since they aren't migrated?
> > > > 
> > > > I am just thinking what features we want to support with pmem. And live
> > > > migration
> > > > with shared storage is the one which comes to my mind.
> > > > 
> > > > If live migration with non-shared storage is what we want to support (I
> > > > don't know
> > > > yet) we can add this? Even with shared storage it would copy entire
> > > > pmem state?
> > > Perhaps we should register vmstate like for normal ram and use something
> > > similar to
> > >   http://lists.gnu.org/archive/html/qemu-devel/2018-04/msg00003.html this
> > > to skip shared memory on migration.
> > > In this case we could use this for pc-dimms as well.
> > > 
> > > David,
> > >  what's your take on it?
> > 
> > My feel is that something is going to have to migrate it, I'm just not
> > sure how.
> > So let me just check I understand:
> >   a) It's potentially huge
> yep, assume it could be of storage-sized quantities (100s of GB)
> 
> >   b) It's a RAMBlock
> it is
> 
> >   c) It's backed by ????
> >      c1) Something machine local - i.e. a physical lump of flash in a
> >          socket rather than something sharable by machines?
> it's backed by memory-backend-foo, so it could be really anything (RAM,
> file on local or shared storage, file descriptor)

Just a point I want to add.

Currently, we are proposing file-backed memory which is mmap'ed into the QEMU 
address space. This is to achieve a 'persistence' property similar to real 
NVDIMM storage. The latest guest writes should be synced to the backing file 
after the guest performs an 'fsync' operation on a DAX-capable file-system.
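
To make the model concrete, here is a minimal host-side sketch of what we
mean (illustration only, with made-up names and no error handling; the real
code is the memory backend plus the virtio-pmem flush path):

    /* Sketch: map the backing file into the QEMU address space; the
     * region is then exposed to the guest as (fake) DAX memory. */
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *map_backing_file(const char *path, size_t size, int *fd_out)
    {
        int fd = open(path, O_RDWR);

        *fd_out = fd;
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }

    /* Called when the guest sends a flush request over virtio: persist
     * whatever the guest has written so far to the backing file. */
    static int flush_backing_file(int fd)
    {
        return fsync(fd);
    }

So before a migration handover (or clean shutdown), one fsync on the backing
file is enough to make the latest guest writes durable.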
 
> 
> >   d) It can potentially be rapidly changing as the guest writes to it?
> it's sort of like NVDIMM but without the NVDIMM interface, it uses virtio
> to force flushing instead. Otherwise it's directly mapped into guest
> address space, so guest can do anything with it including fast dirtying.
> 
> 
> > Dave
> > 
> > > > Thanks,
> > > > Pankaj
> > > >    
> > > > > 
> > > > >     
> > > > > > > > One reason why nvdimm added vmstate info could be: there would
> > > > > > > > still be transient writes in memory with fake DAX and there is no
> > > > > > > > way (till now) to flush the guest writes. But with virtio-pmem we
> > > > > > > > can flush such writes before migration, and on the destination
> > > > > > > > host with a shared disk we will automatically have updated data.
> > > > > > > nvdimm has the concept of a flush hint address (maybe not
> > > > > > > implemented in qemu yet), but it can flush. The only reason I'm
> > > > > > > buying into the virtio-mem idea is that it would allow async flush
> > > > > > > queues, which would reduce the number of vmexits.
> > > > > > 
> > > > > > Thats correct.
> > > > > > 
> > > > > > Thanks,
> > > > > > Pankaj
> > > > > > 
> > > > > >      
> > > > > 
> > > > > 
> > > > >     
> > > >   
> > >   
> > --
> > Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
> 
> 
> 

Re: [Qemu-devel] [PATCH v3 3/3] virtio-pmem: should we make it migratable???
Posted by Dr. David Alan Gilbert 7 years, 9 months ago
* Igor Mammedov (imammedo@redhat.com) wrote:
> On Fri, 4 May 2018 13:26:51 +0100
> "Dr. David Alan Gilbert" <dgilbert@redhat.com> wrote:
> 
> > * Igor Mammedov (imammedo@redhat.com) wrote:
> > > On Thu, 26 Apr 2018 03:37:51 -0400 (EDT)
> > > Pankaj Gupta <pagupta@redhat.com> wrote:
> > > 
> > > trimming CC list to keep people that might be interested in the topic
> > > and renaming thread to reflect it.
> > >   
> > > > > > > > > > >> +
> > > > > > > > > > >> +    memory_region_add_subregion(&hpms->mr, addr - hpms->base,
> > > > > > > > > > >> mr);    
> > > > > > > > > > > missing vmstate registration?    
> > > > > > > > > > 
> > > > > > > > > > Missed this one: To be called by the caller. Important because e.g.
> > > > > > > > > > for
> > > > > > > > > > virtio-pmem we don't want this (I assume :) ).    
> > > > > > > > > if pmem isn't on shared storage, then we'd probably want to migrate
> > > > > > > > > it as well, otherwise the target would experience data loss.
> > > > > > > > > Anyway, I'd just treat it as normal RAM in the migration case
> > > > > > > > 
> > > > > > > > The main difference between RAM and pmem is that it acts like a
> > > > > > > > combination of RAM and disk.
> > > > > > > > That said, in the normal use-case the size would be in the
> > > > > > > > 100 GB to few TB range.
> > > > > > > > I am not sure we really want to migrate it for the non-shared
> > > > > > > > storage use-case.
> > > > > > > with non-shared storage you'd have to migrate it to the target
> > > > > > > host, but with shared storage it might be possible to flush it and
> > > > > > > use it directly from the target host. That probably won't work
> > > > > > > right out of the box and would need some sort of synchronization
> > > > > > > between src/dst hosts.
> > > > > > 
> > > > > > Shared storage should work out of the box.
> > > > > > The only thing is that data on the destination host will be cache
> > > > > > cold, and existing pages in the cache should be invalidated first.
> > > > > > But if we migrate the entire fake DAX RAM state it will populate the
> > > > > > destination host page cache, including pages which were idle on the
> > > > > > source host. This would unnecessarily create entropy on the
> > > > > > destination host.
> > > > > > 
> > > > > > To me this feature doesn't make much sense. The problem which we are
> > > > > > solving is: efficiently use guest RAM.
> > > > > What would the live migration handover flow look like in case of a
> > > > > guest constantly dirtying memory provided by virtio-pmem and
> > > > > sometimes issuing async flush requests along with it?
> > > > 
> > > > Dirtying the entire pmem (disk) at once is not a usual scenario. Some
> > > > part of the disk/pmem would get dirty and we need to handle that. I
> > > > just want to say that moving the entire pmem (disk) is not an efficient
> > > > solution, because we are using this solution to manage guest memory
> > > > efficiently. Otherwise it will be like any block device copy with
> > > > non-shared storage.
> > > not sure if we can use block layer analogy here.
> > >   
> > > > > > > The same applies to nv/pc-dimm as well, as backend file easily could be
> > > > > > > on pmem storage as well.    
> > > > > > 
> > > > > > Are you saying the backing file is on actual nvdimm hardware? Then
> > > > > > we don't need emulation at all.
> > > > > depends on whether the file is on a DAX filesystem, but your argument
> > > > > about migrating a huge 100 GB to TB range applies in this case as well.
> > > > >     
> > > > > >     
> > > > > > > 
> > > > > > > Maybe for now we should migrate everything so it would work in the
> > > > > > > case of a non-shared NVDIMM on the host. And then later add a
> > > > > > > migration-less capability to all of them.
> > > > > > 
> > > > > > not sure I agree.    
> > > > > So would you inhibit migration in case of non-shared backend storage,
> > > > > to avoid losing data since it isn't migrated?
> > > > 
> > > > I am just thinking what features we want to support with pmem. And live migration
> > > > with shared storage is the one which comes to my mind.
> > > > 
> > > > If live migration with non-shared storage is what we want to support (I don't know
> > > > yet) we can add this? Even with shared storage it would copy entire pmem state?  
> > > Perhaps we should register vmstate like for normal ram and use something similar to
> > >   http://lists.gnu.org/archive/html/qemu-devel/2018-04/msg00003.html this
> > > to skip shared memory on migration.
> > > In this case we could use this for pc-dimms as well.
> > > 
> > > David,
> > >  what's your take on it?  
> > 
> > My feel is that something is going to have to migrate it, I'm just not
> > sure how.
> > So let me just check I understand:
> >   a) It's potentially huge
> yep, assume it could be of storage-sized quantities (100s of GB)
> 
> >   b) It's a RAMBlock
> it is

Well, the good news is migration is going to try and migrate it.
The bad news is migration is going to try and migrate it.

> >   c) It's backed by ????
> >      c1) Something machine local - i.e. a physical lump of flash in a
> >          socket rather than something sharable by machines?
> it's backed by memory-backend-foo, so it could be really anything (RAM,
> file on local or shared storage, file descriptor)

OK, something is going to have to know whether it's on shared storage or
not and do something different in the two cases.   If it's shared
storage then we need to find a way to stop migration trying to migrate
it, because migrating data to the other host when both hosts are really
backed by the same thing ends up with a corrupt mess; we've had block
storage do that when they don't realise they're on an NFS share.
There are a few patches on the list to exclude migration from some
RAMBlock's, so we can build on that once we figure out how we know
if it's shared or not.
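
Very roughly, the shape I'd expect on top of those patches is something
like this (the helper names here are hypothetical, just to show where the
decision would sit in the RAM save loop):

    /* Hypothetical sketch: skip RAMBlocks whose backing is shared with
     * the destination; neither helper exists under these names today. */
    static void save_ram_blocks(void)
    {
        RAMBlock *block;

        RAMBLOCK_FOREACH(block) {
            if (ramblock_backed_by_shared_storage(block)) {
                /* both hosts see the same backing file - don't copy it */
                continue;
            }
            save_dirty_pages_of(block);
        }
    }

The hard part is the predicate: something has to tell us reliably that the
backing really is visible from the destination host.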

If it is shared, then we've also got to worry about consistency to
ensure that the last few writes on the source make it to the destination
before the destination starts, that the destination hasn't cached
any old stuff, and that a failing migration lands back on the source
without the destination having changed anything.

> >   d) It can potentially be rapidly changing as the guest writes to it?
> it's sort of like NVDIMM but without the NVDIMM interface, it uses virtio
> to force flushing instead. Otherwise it's directly mapped into guest
> address space, so guest can do anything with it including fast dirtying.

OK.

Getting Postcopy to work with it might be a solution; but depending on what
the underlying fd looks like, it will probably need some kernel changes to
get userfaultfd to work on it.
Postcopy on huge memories should work, but watch out for downtime
due to sending the discard bitmaps.
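
For reference, the kind of probe I mean is just the standard userfaultfd
registration sequence; whether UFFDIO_REGISTER succeeds on a given
file-backed (e.g. DAX) mapping is exactly the kernel-support question
(sketch only; addr and len need to be page-aligned):

    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Returns >= 0 if userfaultfd can track missing-page faults on the
     * mapping at [addr, addr + len), < 0 otherwise. */
    static int probe_uffd(void *addr, size_t len)
    {
        int ret = -1;
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        struct uffdio_api api = { .api = UFFD_API };
        struct uffdio_register reg = {
            .range = { .start = (unsigned long)addr, .len = len },
            .mode = UFFDIO_REGISTER_MODE_MISSING,
        };

        if (uffd >= 0 && ioctl(uffd, UFFDIO_API, &api) == 0) {
            ret = ioctl(uffd, UFFDIO_REGISTER, &reg);
        }
        if (uffd >= 0) {
            close(uffd);
        }
        return ret;
    }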

(cc'd in Eric and Stefan)

Dave

> 
> > Dave
> > 
> > > > Thanks,
> > > > Pankaj
> > > >    
> > > > > 
> > > > >     
> > > > > > > > One reason why nvdimm added vmstate info could be: there would
> > > > > > > > still be transient writes in memory with fake DAX and there is no
> > > > > > > > way (till now) to flush the guest writes. But with virtio-pmem we
> > > > > > > > can flush such writes before migration, and on the destination
> > > > > > > > host with a shared disk we will automatically have updated data.
> > > > > > > nvdimm has the concept of a flush hint address (maybe not
> > > > > > > implemented in qemu yet), but it can flush. The only reason I'm
> > > > > > > buying into the virtio-mem idea is that it would allow async flush
> > > > > > > queues, which would reduce the number of vmexits.
> > > > > > 
> > > > > > Thats correct.
> > > > > > 
> > > > > > Thanks,
> > > > > > Pankaj
> > > > > > 
> > > > > >      
> > > > > 
> > > > > 
> > > > >     
> > > >   
> > >   
> > --
> > Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

Re: [Qemu-devel] [PATCH v3 3/3] pc-dimm: factor out address space logic into MemoryDevice code
Posted by David Hildenbrand 7 years, 9 months ago
On 23.04.2018 14:19, Igor Mammedov wrote:
> On Fri, 20 Apr 2018 14:34:56 +0200
> David Hildenbrand <david@redhat.com> wrote:
> 
>> To be able to reuse MemoryDevice logic from other devices besides
>> pc-dimm, factor the relevant stuff out into the MemoryDevice code.
>>
>> As we don't care about slots for memory devices that are not pc-dimm,
>> don't factor that part out.
> that's not really true, you still consume kvm and vhost slots (whatever it is)
> whenever you map it into address space as ram memory region.
> 
> Also ram_slots currently are (ab)used as flag that user enabled memory
> hotplug via CLI.
>  
>> Most of this patch just moves checks and logic around. While at it, make
>> the code properly detect certain error conditions better (e.g. fragmented
>> memory).
> I'd suggest splitting patch in several smaller ones if possible,
> especially parts that do anything more than just moving code around.
> 
> 
>> Signed-off-by: David Hildenbrand <david@redhat.com>
>> ---
>>  hw/i386/pc.c                   |  12 +--
>>  hw/mem/memory-device.c         | 162 ++++++++++++++++++++++++++++++++++++
>>  hw/mem/pc-dimm.c               | 185 +++--------------------------------------
>>  hw/ppc/spapr.c                 |   9 +-
>>  include/hw/mem/memory-device.h |   4 +
>>  include/hw/mem/pc-dimm.h       |  14 +---
>>  6 files changed, 185 insertions(+), 201 deletions(-)
>>
>> diff --git a/hw/i386/pc.c b/hw/i386/pc.c
>> index fa8862af33..1c25546a0c 100644
>> --- a/hw/i386/pc.c
>> +++ b/hw/i386/pc.c
>> @@ -1711,7 +1711,7 @@ static void pc_dimm_plug(HotplugHandler *hotplug_dev,
>>          goto out;
>>      }
>>  
>> -    pc_dimm_memory_plug(dev, &pcms->hotplug_memory, mr, align, &local_err);
>> +    pc_dimm_memory_plug(dev, align, &local_err);
> Is there a reason why you are dropping the pcms->hotplug_memory argument
> and falling back to qdev_get_machine()?
> 
> I'd rather see it going the other direction,
> i.e. move hotplug_memory from the PC
> machine to MachineState and then pass it down as an argument whenever it's needed.

FWIW, I think I found a way to split this into smaller patches.

The current prototypes will look like this for pc_dimm

void pc_dimm_memory_plug(DeviceState *dev, MachineState *machine,
                         uint64_t align, Error **errp);
void pc_dimm_memory_unplug(DeviceState *dev, MachineState *machine);
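
Just to illustrate the direction, the callers on x86 would then look
roughly like this (sketch, assuming the hotplug handler is the machine;
error handling trimmed):

    /* plug path (sketch): */
    pc_dimm_memory_plug(dev, MACHINE(hotplug_dev), align, &local_err);
    if (local_err) {
        goto out;
    }

    /* unplug path (sketch): */
    pc_dimm_memory_unplug(dev, MACHINE(hotplug_dev));
    object_unparent(OBJECT(dev));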

I am not sure yet if I'll work on the pre-plug stuff for pc-dimm (I want
to get memory devices running, not rewrite all of the pc-dimm memory
hotplug code :) ), but that can be reworked later on easily.

-- 

Thanks,

David / dhildenb