[PATCH 22/40] vdpa: factor out vhost_vdpa_map_batch_begin

Si-Wei Liu posted 40 patches 11 months, 3 weeks ago
[PATCH 22/40] vdpa: factor out vhost_vdpa_map_batch_begin
Posted by Si-Wei Liu 11 months, 3 weeks ago
Factor out the code that sends the VHOST_IOTLB_BATCH_BEGIN message from
vhost_vdpa_iotlb_batch_begin_once() into a new helper,
vhost_vdpa_map_batch_begin(), which returns whether the message was
actually written to the device fd (false when it was only queued to the
map thread). Refactoring only. No functional change.

Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 hw/virtio/trace-events |  2 +-
 hw/virtio/vhost-vdpa.c | 25 ++++++++++++++++---------
 2 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 9725d44..b0239b8 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -32,7 +32,7 @@ vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
 # vhost-vdpa.c
 vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
 vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
-vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
+vhost_vdpa_map_batch_begin(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
 vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
 vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
 vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 013bfa2..7a1b7f4 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -161,7 +161,7 @@ int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
     return ret;
 }
 
-static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
+static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s)
 {
     int fd = s->device_fd;
     struct vhost_msg_v2 msg = {
@@ -169,26 +169,33 @@ static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
     };
 
-    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH)) ||
-        s->iotlb_batch_begin_sent) {
-        return;
-    }
-
     if (s->map_thread_enabled && !qemu_thread_is_self(&s->map_thread)) {
         struct vhost_msg_v2 *new_msg = g_new(struct vhost_msg_v2, 1);
 
         *new_msg = msg;
         g_async_queue_push(s->map_queue, new_msg);
 
-        return;
+        return false;
     }
 
-    trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
+    trace_vhost_vdpa_map_batch_begin(s, fd, msg.type, msg.iotlb.type);
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
                      fd, errno, strerror(errno));
     }
-    s->iotlb_batch_begin_sent = true;
+    return true;
+}
+
+static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
+{
+    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH)) ||
+        s->iotlb_batch_begin_sent) {
+        return;
+    }
+
+    if (vhost_vdpa_map_batch_begin(s)) {
+        s->iotlb_batch_begin_sent = true;
+    }
 }
 
 static void vhost_vdpa_dma_batch_end_once(VhostVDPAShared *s)
-- 
1.8.3.1
Re: [PATCH 22/40] vdpa: factor out vhost_vdpa_map_batch_begin
Posted by Jason Wang 10 months, 2 weeks ago
On Fri, Dec 8, 2023 at 2:51 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> Refactoring only. No functional change.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>

Acked-by: Jason Wang <jasowang@redhat.com>

Thanks

> ---
>  hw/virtio/trace-events |  2 +-
>  hw/virtio/vhost-vdpa.c | 25 ++++++++++++++++---------
>  2 files changed, 17 insertions(+), 10 deletions(-)
>
> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> index 9725d44..b0239b8 100644
> --- a/hw/virtio/trace-events
> +++ b/hw/virtio/trace-events
> @@ -32,7 +32,7 @@ vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
>  # vhost-vdpa.c
>  vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
>  vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
> -vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
> +vhost_vdpa_map_batch_begin(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
>  vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
>  vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
>  vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 013bfa2..7a1b7f4 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -161,7 +161,7 @@ int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
>      return ret;
>  }
>
> -static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
> +static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s)
>  {
>      int fd = s->device_fd;
>      struct vhost_msg_v2 msg = {
> @@ -169,26 +169,33 @@ static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
>          .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
>      };
>
> -    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH)) ||
> -        s->iotlb_batch_begin_sent) {
> -        return;
> -    }
> -
>      if (s->map_thread_enabled && !qemu_thread_is_self(&s->map_thread)) {
>          struct vhost_msg_v2 *new_msg = g_new(struct vhost_msg_v2, 1);
>
>          *new_msg = msg;
>          g_async_queue_push(s->map_queue, new_msg);
>
> -        return;
> +        return false;
>      }
>
> -    trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
> +    trace_vhost_vdpa_map_batch_begin(s, fd, msg.type, msg.iotlb.type);
>      if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
>          error_report("failed to write, fd=%d, errno=%d (%s)",
>                       fd, errno, strerror(errno));
>      }
> -    s->iotlb_batch_begin_sent = true;
> +    return true;
> +}
> +
> +static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
> +{
> +    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH)) ||
> +        s->iotlb_batch_begin_sent) {
> +        return;
> +    }
> +
> +    if (vhost_vdpa_map_batch_begin(s)) {
> +        s->iotlb_batch_begin_sent = true;
> +    }
>  }
>
>  static void vhost_vdpa_dma_batch_end_once(VhostVDPAShared *s)
> --
> 1.8.3.1
>