[RFC-PATCH v2] vhost-user: add a request-reply lock
Posted by Prasad Pandit 3 months ago
From: Prasad Pandit <pjp@fedoraproject.org>

QEMU threads use vhost_user_write/read calls to send
requests to and receive replies from a vhost-user
device. When multiple threads communicate with the
same vhost-user device, they can receive each other's
messages, resulting in an erroneous state.

When the fault_thread exits upon completion of postcopy
migration, it sends a 'postcopy_end' message to the
vhost-user device. But sometimes the 'postcopy_end'
message is sent while the vhost device is being set up
via vhost_dev_start().

     Thread-1                           Thread-2

 vhost_dev_start                    postcopy_ram_incoming_cleanup
 vhost_device_iotlb_miss            postcopy_notify
 vhost_backend_update_device_iotlb  vhost_user_postcopy_notifier
 vhost_user_send_device_iotlb_msg   vhost_user_postcopy_end
 process_message_reply              process_message_reply
 vhost_user_read                    vhost_user_read
 vhost_user_read_header             vhost_user_read_header
 "Fail to update device iotlb"      "Failed to receive reply to postcopy_end"

This creates confusion when the vhost-user device
receives the 'postcopy_end' message while it is trying
to update IOTLB entries:

 vhost_user_read_header:
  700871,700871: Failed to read msg header. Flags 0x0 instead of 0x5.
 vhost_device_iotlb_miss:
  700871,700871: Fail to update device iotlb
 vhost_user_postcopy_end:
  700871,700900: Failed to receive reply to postcopy_end
 vhost_user_read_header:
  700871,700871: Failed to read msg header. Flags 0x0 instead of 0x5.

Here the fault thread seems to end postcopy migration
while another thread is starting the vhost-user device.

Add a mutex lock that is held for one request-reply
cycle, to avoid such a race condition.
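
(Illustration only: a standalone pthreads sketch of the failure mode,
not QEMU code. Without the lock, one thread can consume the reply
meant for the other; holding the lock across the whole cycle keeps
each request-reply pair atomic.)

===
#include <pthread.h>
#include <stdio.h>

/* Toy request-reply channel: the "reply" echoes the request id. */
static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static int chan_msg;

static void chan_send(int req) { chan_msg = req; }
static int  chan_recv(void)    { return chan_msg; }

static void *worker(void *arg)
{
    int id = *(int *)arg;

    for (int i = 0; i < 100000; i++) {
        /* Analogous to QEMU_LOCK_GUARD() in the patch: remove this
         * locking and the two workers read each other's replies. */
        pthread_mutex_lock(&chan_lock);
        chan_send(id);
        if (chan_recv() != id) {
            printf("thread %d got a stray reply\n", id);
        }
        pthread_mutex_unlock(&chan_lock);
    }
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    int a = 1, b = 2;

    pthread_create(&t1, NULL, worker, &a);
    pthread_create(&t2, NULL, worker, &b);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}
===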

Fixes: 46343570c06e ("vhost+postcopy: Wire up POSTCOPY_END notify")
Suggested-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Prasad Pandit <pjp@fedoraproject.org>
---
 hw/virtio/vhost-user.c         | 74 ++++++++++++++++++++++++++++++++++
 include/hw/virtio/vhost-user.h |  3 ++
 2 files changed, 77 insertions(+)

v2:
 - Place QEMU_LOCK_GUARD near the vhost_user_write() calls; holding
   the lock for longer makes some tests fail during rpmbuild(8).
   (See the placement sketch below.)
 - rpmbuild(8) fails for some SRPMs, not all: the RHEL-9 SRPM builds
   with this patch, whereas the Fedora SRPM does not.
 - The host OS also seems to affect rpmbuild(8); some SRPMs build fine
   on RHEL-9, but not on a Fedora-40 machine.

v1: https://lore.kernel.org/qemu-devel/20240808095147.291626-3-ppandit@redhat.com/#R
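
(For context on the v2 placement note: QEMU_LOCK_GUARD acquires the
mutex and releases it automatically when the enclosing scope ends, so
a guard placed just before the vhost_user_write() call still covers
the reply read further down in the same function. Below is a minimal
sketch of the pattern, reusing names from this patch; example_request()
itself is hypothetical, not an actual hunk.)

===
static int example_request(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    struct VhostUserState *us = u->user;
    int ret;

    /* ... validation and message setup run unlocked ... */

    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);

    ret = vhost_user_write(dev, msg, NULL, 0);
    if (ret < 0) {
        return ret;    /* guard released on this early return, too */
    }
    return process_message_reply(dev, msg);  /* reply read under lock */
}
===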

diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 00561daa06..7b030ae2cd 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -24,6 +24,7 @@
 #include "qemu/main-loop.h"
 #include "qemu/uuid.h"
 #include "qemu/sockets.h"
+#include "qemu/lockable.h"
 #include "sysemu/runstate.h"
 #include "sysemu/cryptodev.h"
 #include "migration/postcopy-ram.h"
@@ -446,6 +447,10 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
         .hdr.size = sizeof(msg.payload.log),
     };
 
+    struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     /* Send only once with first queue pair */
     if (dev->vq_index != 0) {
         return 0;
@@ -664,6 +669,7 @@ static int send_remove_regions(struct vhost_dev *dev,
                                bool reply_supported)
 {
     struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
     struct vhost_memory_region *shadow_reg;
     int i, fd, shadow_reg_idx, ret;
     ram_addr_t offset;
@@ -685,6 +691,8 @@ static int send_remove_regions(struct vhost_dev *dev,
             vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
             msg->payload.mem_reg.region = region_buffer;
 
+            QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
             ret = vhost_user_write(dev, msg, NULL, 0);
             if (ret < 0) {
                 return ret;
@@ -718,6 +726,7 @@ static int send_add_regions(struct vhost_dev *dev,
                             bool reply_supported, bool track_ramblocks)
 {
     struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
     int i, fd, ret, reg_idx, reg_fd_idx;
     struct vhost_memory_region *reg;
     MemoryRegion *mr;
@@ -746,6 +755,8 @@ static int send_add_regions(struct vhost_dev *dev,
             vhost_user_fill_msg_region(&region_buffer, reg, offset);
             msg->payload.mem_reg.region = region_buffer;
 
+            QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
             ret = vhost_user_write(dev, msg, &fd, 1);
             if (ret < 0) {
                 return ret;
@@ -893,6 +904,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                              bool config_mem_slots)
 {
     struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
     int fds[VHOST_MEMORY_BASELINE_NREGIONS];
     size_t fd_num = 0;
     VhostUserMsg msg_reply;
@@ -926,6 +938,8 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
             return ret;
         }
 
+        QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
         ret = vhost_user_write(dev, &msg, fds, fd_num);
         if (ret < 0) {
             return ret;
@@ -1005,6 +1019,7 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                     struct vhost_memory *mem)
 {
     struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
     int fds[VHOST_MEMORY_BASELINE_NREGIONS];
     size_t fd_num = 0;
     bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
@@ -1044,6 +1059,8 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
             return ret;
         }
 
+        QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
         ret = vhost_user_write(dev, &msg, fds, fd_num);
         if (ret < 0) {
             return ret;
@@ -1089,6 +1106,10 @@ static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
         return 0;
     }
 
+    struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
         return ret;
@@ -1138,6 +1159,10 @@ static int vhost_user_write_sync(struct vhost_dev *dev, VhostUserMsg *msg,
         }
     }
 
+/*  struct vhost_user *u = dev->opaque;
+ *  struct VhostUserState *us = u->user;
+ *  QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+ */
     ret = vhost_user_write(dev, msg, NULL, 0);
     if (ret < 0) {
         return ret;
@@ -1277,6 +1302,8 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev,
         .hdr.size = sizeof(msg.payload.state),
     };
     struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
 
     VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
     if (n) {
@@ -1669,6 +1696,9 @@ int vhost_user_get_shared_object(struct vhost_dev *dev, unsigned char *uuid,
     };
     memcpy(msg.payload.object.uuid, uuid, sizeof(msg.payload.object.uuid));
 
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
         return ret;
@@ -1889,6 +1919,9 @@ static int vhost_setup_backend_channel(struct vhost_dev *dev)
         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
     }
 
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, &sv[1], 1);
     if (ret) {
         goto out;
@@ -1993,6 +2026,9 @@ static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
         .hdr.flags = VHOST_USER_VERSION,
     };
 
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
         error_setg(errp, "Failed to send postcopy_advise to vhost");
@@ -2051,6 +2087,9 @@ static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
 
     trace_vhost_user_postcopy_listen();
 
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
         error_setg(errp, "Failed to send postcopy_listen to vhost");
@@ -2080,6 +2119,9 @@ static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
 
     trace_vhost_user_postcopy_end_entry();
 
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
         error_setg(errp, "Failed to send postcopy_end to vhost");
@@ -2372,6 +2414,10 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
     }
 
+    struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
         return ret;
@@ -2396,6 +2442,10 @@ static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
         .payload.iotlb = *imsg,
     };
 
+    struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
         return ret;
@@ -2428,6 +2478,10 @@ static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
 
     assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);
 
+    struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     msg.payload.config.offset = 0;
     msg.payload.config.size = config_len;
     ret = vhost_user_write(dev, &msg, NULL, 0);
@@ -2492,6 +2546,10 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
     p = msg.payload.config.region;
     memcpy(p, data, size);
 
+    struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
         return ret;
@@ -2570,6 +2628,10 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev,
         }
     }
 
+    struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     msg.payload.session.op_code = backend_info->op_code;
     msg.payload.session.session_id = backend_info->session_id;
     ret = vhost_user_write(dev, &msg, NULL, 0);
@@ -2662,6 +2724,9 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
         return 0;
     }
 
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
         return ret;
@@ -2757,6 +2822,7 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
     user->memory_slots = 0;
     user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
                                            &vhost_user_state_destroy);
+    qemu_mutex_init(&user->vhost_user_request_reply_lock);
     return true;
 }
 
@@ -2769,6 +2835,7 @@ void vhost_user_cleanup(VhostUserState *user)
     user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
     memory_region_transaction_commit();
     user->chr = NULL;
+    qemu_mutex_destroy(&user->vhost_user_request_reply_lock);
 }
 
 
@@ -2902,6 +2969,9 @@ static int vhost_user_set_device_state_fd(struct vhost_dev *dev,
         return -ENOTSUP;
     }
 
+    struct VhostUserState *us = vu->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, &fd, 1);
     close(fd);
     if (ret < 0) {
@@ -2965,6 +3035,10 @@ static int vhost_user_check_device_state(struct vhost_dev *dev, Error **errp)
         return -ENOTSUP;
     }
 
+    struct vhost_user *u = dev->opaque;
+    struct VhostUserState *us = u->user;
+    QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
         error_setg_errno(errp, -ret,
diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index 324cd8663a..e96f12d449 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -67,6 +67,9 @@ typedef struct VhostUserState {
     GPtrArray *notifiers;
     int memory_slots;
     bool supports_config;
+
+    /* Hold lock for a request-reply cycle */
+    QemuMutex vhost_user_request_reply_lock;
 } VhostUserState;
 
 /**
-- 
2.46.0
Re: [RFC-PATCH v2] vhost-user: add a request-reply lock
Posted by Michael S. Tsirkin 3 months ago
On Mon, Aug 19, 2024 at 05:32:48PM +0530, Prasad Pandit wrote:
> [...]

makes sense.
Acked-by: Michael S. Tsirkin <mst@redhat.com>
But do not post v2 as reply to v1 pls.

Re: [RFC-PATCH v2] vhost-user: add a request-reply lock
Posted by Michael S. Tsirkin 3 months ago
On Mon, Aug 19, 2024 at 11:42:02AM -0400, Michael S. Tsirkin wrote:
> On Mon, Aug 19, 2024 at 05:32:48PM +0530, Prasad Pandit wrote:
> > [...]
> 
> makes sense.
> Acked-by: Michael S. Tsirkin <mst@redhat.com>
> But do not post v2 as reply to v1 pls.


Also, looks like this will replace Message-Id: <20240801124540.38774-1-xiangwencheng@dayudpu.com>
correct?

Re: [RFC-PATCH v2] vhost-user: add a request-reply lock
Posted by Prasad Pandit 2 months, 4 weeks ago
Hello Michael, all

Sorry about the late reply; I am catching up with emails after a long PTO.

On Mon, 19 Aug 2024 at 21:20, Michael S. Tsirkin <mst@redhat.com> wrote:
>> makes sense.
>> Acked-by: Michael S. Tsirkin <mst@redhat.com>
>> But do not post v2 as reply to v1 pls.

* Yes, okay. Thank you for the review. I sent v2 in reply to v1
because this is an RFC thread and there had been no review of this
series yet.

* If this v2 patch looks reasonable/acceptable, I'll send this series
for review as v2 (without the RFC tag), if that's okay.

>> Also, looks like this will replace Message-Id: <20240801124540.38774-1-xiangwencheng@dayudpu.com> correct?

* Going through the git logs: vhost_user_set_log_base() sends the
set-log message only when vq_index == 0, so the above patch is not
required, IIUC.
    -> https://github.com/qemu/qemu/commit/c98ac64cfb53ccb862a80e818c3a19bdd386e61e
===
    +    /* Send only once with first queue pair */
    +    if (dev->vq_index != 0) {
    +        return 0;
    +    }
===

Thank you.
---
  - Prasad