[RFC PATCH v2 09/10] vdpa: add vhost_vdpa_net_load_setup NetClient callback

So the vDPA backend knows when an incoming migration starts.  The
NICState argument is needed so we can get the DMA address space.
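
A rough sketch of how the device side might end up calling these
callbacks from its incoming-migration path (the helper below and the
exact hook point are assumptions for illustration, not part of this
patch):

    /*
     * Hypothetical caller: let every backend peer of the NIC prepare
     * its migration state before the device rings are started.
     */
    static int virtio_net_peers_load_setup(VirtIONet *n)
    {
        NICState *nic = n->nic;

        for (int i = 0; i < n->max_queue_pairs; i++) {
            NetClientState *nc = qemu_get_subqueue(nic, i)->peer;

            if (nc && nc->info->load_setup) {
                int r = nc->info->load_setup(nc, nic);
                if (unlikely(r < 0)) {
                    return r;
                }
            }
        }

        return 0;
    }

load_cleanup would be invoked symmetrically once the incoming migration
finishes or fails, passing the same NICState.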

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
RFC v2:
* Solve git conflict with .set_steering_ebpf
* Fix x-svq=on use case, which did not allocate the iova_tree.
---
 include/net/net.h |  6 ++++++
 net/vhost-vdpa.c  | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)

diff --git a/include/net/net.h b/include/net/net.h
index ffbd2c8d56..68282dde31 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -42,6 +42,7 @@ typedef struct NICConf {
 
 /* Net clients */
 
+struct NICState;
 typedef void (NetPoll)(NetClientState *, bool enable);
 typedef bool (NetCanReceive)(NetClientState *);
 typedef int (NetStart)(NetClientState *);
@@ -69,6 +70,9 @@ typedef void (SocketReadStateFinalize)(SocketReadState *rs);
 typedef void (NetAnnounce)(NetClientState *);
 typedef bool (SetSteeringEBPF)(NetClientState *, int);
 typedef bool (NetCheckPeerType)(NetClientState *, ObjectClass *, Error **);
+/* This can be called before start & pair, so the peer NIC is passed too */
+typedef int (NetMigrationLoadSetup)(NetClientState *, struct NICState *);
+typedef int (NetMigrationLoadCleanup)(NetClientState *, struct NICState *);
 
 typedef struct NetClientInfo {
     NetClientDriver type;
@@ -98,6 +102,8 @@ typedef struct NetClientInfo {
     NetAnnounce *announce;
     SetSteeringEBPF *set_steering_ebpf;
     NetCheckPeerType *check_peer_type;
+    NetMigrationLoadSetup *load_setup;
+    NetMigrationLoadCleanup *load_cleanup;
 } NetClientInfo;
 
 struct NetClientState {
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index a37de7860e..90f41280d2 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -407,6 +407,37 @@ static void vhost_vdpa_net_client_stop(NetClientState *nc)
     }
 }
 
+static int vhost_vdpa_net_load_setup(NetClientState *nc, NICState *nic)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    VirtIONet *n = qemu_get_nic_opaque(&nic->ncs[0]);
+    VhostVDPAShared *shared = s->vhost_vdpa.shared;
+    int r;
+
+    if (s->always_svq) {
+        /* iova tree is needed because of SVQ */
+        shared->iova_tree = vhost_iova_tree_new(shared->iova_range.first,
+                                                shared->iova_range.last);
+    }
+
+    r = vhost_vdpa_load_setup(shared, n->parent_obj.dma_as);
+    if (unlikely(r < 0)) {
+        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
+                        vhost_iova_tree_delete);
+    }
+
+    return r;
+}
+
+static int vhost_vdpa_net_load_cleanup(NetClientState *nc, NICState *nic)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    VirtIONet *n = qemu_get_nic_opaque(&nic->ncs[0]);
+
+    return vhost_vdpa_load_cleanup(s->vhost_vdpa.shared,
+                             n->parent_obj.status & VIRTIO_CONFIG_S_DRIVER_OK);
+}
+
 static NetClientInfo net_vhost_vdpa_info = {
         .type = NET_CLIENT_DRIVER_VHOST_VDPA,
         .size = sizeof(VhostVDPAState),
@@ -419,6 +450,8 @@ static NetClientInfo net_vhost_vdpa_info = {
         .has_ufo = vhost_vdpa_has_ufo,
         .check_peer_type = vhost_vdpa_check_peer_type,
         .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
+        .load_setup = vhost_vdpa_net_load_setup,
+        .load_cleanup = vhost_vdpa_net_load_cleanup,
 };
 
 static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
-- 
2.39.3