[PATCH v3] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF

Manish Mishra posted 1 patch 2 weeks, 2 days ago
include/io/channel-socket.h |  5 +++
io/channel-socket.c         | 74 ++++++++++++++++++++++++++++++-------
2 files changed, 65 insertions(+), 14 deletions(-)
[PATCH v3] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
Posted by Manish Mishra 2 weeks, 2 days ago
We allocate extra metadata SKBs in case of a zerocopy send. This metadata
memory is accounted for in the OPTMEM limit. If there is any error while
sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
queued in the socket error queue. This error queue is freed when userspace
reads it.

Usually, if there are continuous failures, we merge the metadata into a single
SKB and free another one. As a result, it never exceeds the OPTMEM limit.
However, if there is any out-of-order processing or intermittent zerocopy
failures, this error chain can grow significantly, exhausting the OPTMEM limit.
As a result, all new sendmsg requests fail to allocate any new SKB, leading to
an ENOBUFS error. Depending on the amount of data queued before the flush
(e.g., large live migration iterations), even large OPTMEM limits are prone to
failure.

To work around this, if we encounter an ENOBUFS error with a zerocopy sendmsg,
we flush the error queue and retry once more.

V2:
  1. Removed the dirty_sync_missed_zero_copy migration stat.
  2. Made the call to qio_channel_socket_flush_internal() from
     qio_channel_socket_writev() non-blocking.

V3:
  1. Add the dirty_sync_missed_zero_copy migration stat again.

Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
---
 include/io/channel-socket.h |  5 +++
 io/channel-socket.c         | 74 ++++++++++++++++++++++++++++++-------
 2 files changed, 65 insertions(+), 14 deletions(-)

diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
index ab15577d38..2c48b972e8 100644
--- a/include/io/channel-socket.h
+++ b/include/io/channel-socket.h
@@ -49,6 +49,11 @@ struct QIOChannelSocket {
     socklen_t remoteAddrLen;
     ssize_t zero_copy_queued;
     ssize_t zero_copy_sent;
+    /**
+     * This flag indicates whether any new data was successfully sent with
+     * zerocopy since the last qio_channel_socket_flush() call.
+     */
+    bool new_zero_copy_sent_success;
 };
 
 
diff --git a/io/channel-socket.c b/io/channel-socket.c
index 608bcf066e..604ca9890d 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -37,6 +37,12 @@
 
 #define SOCKET_MAX_FDS 16
 
+#ifdef QEMU_MSG_ZEROCOPY
+static int qio_channel_socket_flush_internal(QIOChannel *ioc,
+                                             bool block,
+                                             Error **errp);
+#endif
+
 SocketAddress *
 qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
                                      Error **errp)
@@ -65,6 +71,7 @@ qio_channel_socket_new(void)
     sioc->fd = -1;
     sioc->zero_copy_queued = 0;
     sioc->zero_copy_sent = 0;
+    sioc->new_zero_copy_sent_success = FALSE;
 
     ioc = QIO_CHANNEL(sioc);
     qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
@@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
     size_t fdsize = sizeof(int) * nfds;
     struct cmsghdr *cmsg;
     int sflags = 0;
+    bool zero_copy_flush_pending = TRUE;
 
     memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
 
@@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
             goto retry;
         case ENOBUFS:
             if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
-                error_setg_errno(errp, errno,
-                                 "Process can't lock enough memory for using MSG_ZEROCOPY");
-                return -1;
+                /**
+                 * Socket error queueing may exhaust the OPTMEM limit. Try
+                 * flushing the error queue once.
+                 */
+                if (zero_copy_flush_pending) {
+                    ret = qio_channel_socket_flush_internal(ioc, false, errp);
+                    if (ret < 0) {
+                        error_setg_errno(errp, errno,
+                                         "Zerocopy flush failed");
+                        return -1;
+                    }
+                    zero_copy_flush_pending = FALSE;
+                    goto retry;
+                } else {
+                    error_setg_errno(errp, errno,
+                                     "Process can't lock enough memory for "
+                                     "using MSG_ZEROCOPY");
+                    return -1;
+                }
             }
             break;
         }
@@ -725,8 +749,9 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
 
 
 #ifdef QEMU_MSG_ZEROCOPY
-static int qio_channel_socket_flush(QIOChannel *ioc,
-                                    Error **errp)
+static int qio_channel_socket_flush_internal(QIOChannel *ioc,
+                                             bool block,
+                                             Error **errp)
 {
     QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
     struct msghdr msg = {};
@@ -734,7 +759,6 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
     struct cmsghdr *cm;
     char control[CMSG_SPACE(sizeof(*serr))];
     int received;
-    int ret;
 
     if (sioc->zero_copy_queued == sioc->zero_copy_sent) {
         return 0;
@@ -744,16 +768,19 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
     msg.msg_controllen = sizeof(control);
     memset(control, 0, sizeof(control));
 
-    ret = 1;
-
     while (sioc->zero_copy_sent < sioc->zero_copy_queued) {
         received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE);
         if (received < 0) {
             switch (errno) {
             case EAGAIN:
-                /* Nothing on errqueue, wait until something is available */
-                qio_channel_wait(ioc, G_IO_ERR);
-                continue;
+                if (block) {
+                    /* Nothing on errqueue, wait until something is
+                     * available.
+                     */
+                    qio_channel_wait(ioc, G_IO_ERR);
+                    continue;
+                }
+                return 0;
             case EINTR:
                 continue;
             default:
@@ -791,13 +818,32 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
         /* No errors, count successfully finished sendmsg()*/
         sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1;
 
-        /* If any sendmsg() succeeded using zero copy, return 0 at the end */
+        /* If any sendmsg() succeeded using zero copy, mark zerocopy success */
         if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) {
-            ret = 0;
+            sioc->new_zero_copy_sent_success = TRUE;
         }
     }
 
-    return ret;
+    return 0;
+}
+
+static int qio_channel_socket_flush(QIOChannel *ioc,
+                                    Error **errp)
+{
+    QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
+    int ret;
+
+    ret = qio_channel_socket_flush_internal(ioc, true, errp);
+    if (ret < 0) {
+        return ret;
+    }
+
+    if (sioc->new_zero_copy_sent_success) {
+        sioc->new_zero_copy_sent_success = FALSE;
+        return 0;
+    }
+
+    return 1;
 }
 
 #endif /* QEMU_MSG_ZEROCOPY */
-- 
2.43.0
Re: [PATCH v3] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
Posted by Daniel P. Berrangé 1 week, 4 days ago
On Sun, Mar 16, 2025 at 09:52:31PM -0400, Manish Mishra wrote:
> We allocate extra metadata SKBs in case of a zerocopy send. This metadata
> memory is accounted for in the OPTMEM limit. If there is any error while
> sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
> queued in the socket error queue. This error queue is freed when userspace
> reads it.
> 
> Usually, if there are continuous failures, we merge the metadata into a single
> SKB and free another one. As a result, it never exceeds the OPTMEM limit.
> However, if there is any out-of-order processing or intermittent zerocopy
> failures, this error chain can grow significantly, exhausting the OPTMEM limit.
> As a result, all new sendmsg requests fail to allocate any new SKB, leading to
> an ENOBUF error. Depending on the amount of data queued before the flush
> (i.e., large live migration iterations), even large OPTMEM limits are prone to
> failure.
> 
> To work around this, if we encounter an ENOBUF error with a zerocopy sendmsg,
> we flush the error queue and retry once more.
> 
> V2:
>   1. Removed the dirty_sync_missed_zero_copy migration stat.
>   2. Made the call to qio_channel_socket_flush_internal() from
>      qio_channel_socket_writev() non-blocking.
> 
> V3:
>   1. Add the dirty_sync_missed_zero_copy migration stat again.

These notes about changes each version should always be below
the '---' line, because they shouldn't remain in the commit message
when merged.

> 
> Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
> ---
>  include/io/channel-socket.h |  5 +++
>  io/channel-socket.c         | 74 ++++++++++++++++++++++++++++++-------
>  2 files changed, 65 insertions(+), 14 deletions(-)
> 

> diff --git a/io/channel-socket.c b/io/channel-socket.c
> index 608bcf066e..604ca9890d 100644
> --- a/io/channel-socket.c
> +++ b/io/channel-socket.c
> @@ -37,6 +37,12 @@
>  
>  #define SOCKET_MAX_FDS 16
>  
> +#ifdef QEMU_MSG_ZEROCOPY
> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
> +                                             bool block,
> +                                             Error **errp);
> +#endif
> +
>  SocketAddress *
>  qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
>                                       Error **errp)
> @@ -65,6 +71,7 @@ qio_channel_socket_new(void)
>      sioc->fd = -1;
>      sioc->zero_copy_queued = 0;
>      sioc->zero_copy_sent = 0;
> +    sioc->new_zero_copy_sent_success = FALSE;
>  
>      ioc = QIO_CHANNEL(sioc);
>      qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
> @@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>      size_t fdsize = sizeof(int) * nfds;
>      struct cmsghdr *cmsg;
>      int sflags = 0;
> +    bool zero_copy_flush_pending = TRUE;
>  
>      memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
>  
> @@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>              goto retry;
>          case ENOBUFS:
>              if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
> -                error_setg_errno(errp, errno,
> -                                 "Process can't lock enough memory for using MSG_ZEROCOPY");
> -                return -1;
> +                /**
> +                 * Socket error queueing may exhaust the OPTMEM limit. Try
> +                 * flushing the error queue once.
> +                 */
> +                if (zero_copy_flush_pending) {
> +                    ret = qio_channel_socket_flush_internal(ioc, false, errp);

Hardcoding block==false isn't right.  The socket may be in either
blocking or non-blocking mode, and that needs to be taken into
account, otherwise we'll get spurious failures in blocking mode
if "flush" would otherwise want to block.

> +                    if (ret < 0) {
> +                        error_setg_errno(errp, errno,
> +                                         "Zerocopy flush failed");
> +                        return -1;
> +                    }
> +                    zero_copy_flush_pending = FALSE;
> +                    goto retry;
> +                } else {
> +                    error_setg_errno(errp, errno,
> +                                     "Process can't lock enough memory for "
> +                                     "using MSG_ZEROCOPY");
> +                    return -1;
> +                }
>              }
>              break;
>          }
> @@ -725,8 +749,9 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>  
>  
>  #ifdef QEMU_MSG_ZEROCOPY
> -static int qio_channel_socket_flush(QIOChannel *ioc,
> -                                    Error **errp)
> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
> +                                             bool block,
> +                                             Error **errp)
>  {
>      QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
>      struct msghdr msg = {};
> @@ -734,7 +759,6 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>      struct cmsghdr *cm;
>      char control[CMSG_SPACE(sizeof(*serr))];
>      int received;
> -    int ret;
>  
>      if (sioc->zero_copy_queued == sioc->zero_copy_sent) {
>          return 0;
> @@ -744,16 +768,19 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>      msg.msg_controllen = sizeof(control);
>      memset(control, 0, sizeof(control));
>  
> -    ret = 1;
> -
>      while (sioc->zero_copy_sent < sioc->zero_copy_queued) {
>          received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE);
>          if (received < 0) {
>              switch (errno) {
>              case EAGAIN:
> -                /* Nothing on errqueue, wait until something is available */
> -                qio_channel_wait(ioc, G_IO_ERR);
> -                continue;
> +                if (block) {
> +                    /* Nothing on errqueue, wait until something is
> +                     * available.
> +                     */
> +                    qio_channel_wait(ioc, G_IO_ERR);
> +                    continue;
> +                }
> +                return 0;
>              case EINTR:
>                  continue;
>              default:
> @@ -791,13 +818,32 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>          /* No errors, count successfully finished sendmsg()*/
>          sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1;
>  
> -        /* If any sendmsg() succeeded using zero copy, return 0 at the end */
> +        /* If any sendmsg() succeeded using zero copy, mark zerocopy success */
>          if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) {
> -            ret = 0;
> +            sioc->new_zero_copy_sent_success = TRUE;
>          }
>      }
>  
> -    return ret;
> +    return 0;
> +}



> +
> +static int qio_channel_socket_flush(QIOChannel *ioc,
> +                                    Error **errp)
> +{
> +    QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
> +    int ret;
> +
> +    ret = qio_channel_socket_flush_internal(ioc, true, errp);
> +    if (ret < 0) {
> +        return ret;
> +    }
> +
> +    if (sioc->new_zero_copy_sent_success) {
> +        sioc->new_zero_copy_sent_success = FALSE;
> +        return 0;
> +    }
> +
> +    return 1;
>  }
>  
>  #endif /* QEMU_MSG_ZEROCOPY */
> -- 
> 2.43.0
> 

With regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|
Re: [PATCH v3] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
Posted by Manish 5 hours ago
On 21/03/25 9:15 pm, Daniel P. Berrangé wrote:
> !-------------------------------------------------------------------|
>    CAUTION: External Email
>
> |-------------------------------------------------------------------!
>
> On Sun, Mar 16, 2025 at 09:52:31PM -0400, Manish Mishra wrote:
>> We allocate extra metadata SKBs in case of a zerocopy send. This metadata
>> memory is accounted for in the OPTMEM limit. If there is any error while
>> sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
>> queued in the socket error queue. This error queue is freed when userspace
>> reads it.
>>
>> Usually, if there are continuous failures, we merge the metadata into a single
>> SKB and free another one. As a result, it never exceeds the OPTMEM limit.
>> However, if there is any out-of-order processing or intermittent zerocopy
>> failures, this error chain can grow significantly, exhausting the OPTMEM limit.
>> As a result, all new sendmsg requests fail to allocate any new SKB, leading to
>> an ENOBUF error. Depending on the amount of data queued before the flush
>> (i.e., large live migration iterations), even large OPTMEM limits are prone to
>> failure.
>>
>> To work around this, if we encounter an ENOBUF error with a zerocopy sendmsg,
>> we flush the error queue and retry once more.
>>
>> V2:
>>    1. Removed the dirty_sync_missed_zero_copy migration stat.
>>    2. Made the call to qio_channel_socket_flush_internal() from
>>       qio_channel_socket_writev() non-blocking.
>>
>> V3:
>>    1. Add the dirty_sync_missed_zero_copy migration stat again.
> These notes about changes each version should always be below
> the '---' line, because they shouldn't remain in the commit message
> when merged.
>
>> Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
>> ---
>>   include/io/channel-socket.h |  5 +++
>>   io/channel-socket.c         | 74 ++++++++++++++++++++++++++++++-------
>>   2 files changed, 65 insertions(+), 14 deletions(-)
>>
>> diff --git a/io/channel-socket.c b/io/channel-socket.c
>> index 608bcf066e..604ca9890d 100644
>> --- a/io/channel-socket.c
>> +++ b/io/channel-socket.c
>> @@ -37,6 +37,12 @@
>>   
>>   #define SOCKET_MAX_FDS 16
>>   
>> +#ifdef QEMU_MSG_ZEROCOPY
>> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
>> +                                             bool block,
>> +                                             Error **errp);
>> +#endif
>> +
>>   SocketAddress *
>>   qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
>>                                        Error **errp)
>> @@ -65,6 +71,7 @@ qio_channel_socket_new(void)
>>       sioc->fd = -1;
>>       sioc->zero_copy_queued = 0;
>>       sioc->zero_copy_sent = 0;
>> +    sioc->new_zero_copy_sent_success = FALSE;
>>   
>>       ioc = QIO_CHANNEL(sioc);
>>       qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
>> @@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>>       size_t fdsize = sizeof(int) * nfds;
>>       struct cmsghdr *cmsg;
>>       int sflags = 0;
>> +    bool zero_copy_flush_pending = TRUE;
>>   
>>       memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
>>   
>> @@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>>               goto retry;
>>           case ENOBUFS:
>>               if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
>> -                error_setg_errno(errp, errno,
>> -                                 "Process can't lock enough memory for using MSG_ZEROCOPY");
>> -                return -1;
>> +                /**
>> +                 * Socket error queueing may exhaust the OPTMEM limit. Try
>> +                 * flushing the error queue once.
>> +                 */
>> +                if (zero_copy_flush_pending) {
>> +                    ret = qio_channel_socket_flush_internal(ioc, false, errp);
> Hardcoding block==false isn't right.  The socket may be in either
> blocking or non-blocking mode, and that needs to be taken into
> account, otherwise we'll get spurious failures in blocking mode
> if "flush" would otherwise want to block.

Daniel, I did not understand what kind of failures are possible. In this
case our purpose is to be able to free some space from the error queue so
that we can proceed with the new request. It need not be completely flushed
of all data in the queue. Also, currently we do not store any
information about the blocking/non-blocking mode of the socket; if you want
this to be conditional, would storing that information make sense?

>
>> +                    if (ret < 0) {
>> +                        error_setg_errno(errp, errno,
>> +                                         "Zerocopy flush failed");
>> +                        return -1;
>> +                    }
>> +                    zero_copy_flush_pending = FALSE;
>> +                    goto retry;
>> +                } else {
>> +                    error_setg_errno(errp, errno,
>> +                                     "Process can't lock enough memory for "
>> +                                     "using MSG_ZEROCOPY");
>> +                    return -1;
>> +                }
>>               }
>>               break;
>>           }
>> @@ -725,8 +749,9 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>>   
>>   
>>   #ifdef QEMU_MSG_ZEROCOPY
>> -static int qio_channel_socket_flush(QIOChannel *ioc,
>> -                                    Error **errp)
>> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
>> +                                             bool block,
>> +                                             Error **errp)
>>   {
>>       QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
>>       struct msghdr msg = {};
>> @@ -734,7 +759,6 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>>       struct cmsghdr *cm;
>>       char control[CMSG_SPACE(sizeof(*serr))];
>>       int received;
>> -    int ret;
>>   
>>       if (sioc->zero_copy_queued == sioc->zero_copy_sent) {
>>           return 0;
>> @@ -744,16 +768,19 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>>       msg.msg_controllen = sizeof(control);
>>       memset(control, 0, sizeof(control));
>>   
>> -    ret = 1;
>> -
>>       while (sioc->zero_copy_sent < sioc->zero_copy_queued) {
>>           received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE);
>>           if (received < 0) {
>>               switch (errno) {
>>               case EAGAIN:
>> -                /* Nothing on errqueue, wait until something is available */
>> -                qio_channel_wait(ioc, G_IO_ERR);
>> -                continue;
>> +                if (block) {
>> +                    /* Nothing on errqueue, wait until something is
>> +                     * available.
>> +                     */
>> +                    qio_channel_wait(ioc, G_IO_ERR);
>> +                    continue;
>> +                }
>> +                return 0;
>>               case EINTR:
>>                   continue;
>>               default:
>> @@ -791,13 +818,32 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>>           /* No errors, count successfully finished sendmsg()*/
>>           sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1;
>>   
>> -        /* If any sendmsg() succeeded using zero copy, return 0 at the end */
>> +        /* If any sendmsg() succeeded using zero copy, mark zerocopy success */
>>           if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) {
>> -            ret = 0;
>> +            sioc->new_zero_copy_sent_success = TRUE;
>>           }
>>       }
>>   
>> -    return ret;
>> +    return 0;
>> +}
>
>
>> +
>> +static int qio_channel_socket_flush(QIOChannel *ioc,
>> +                                    Error **errp)
>> +{
>> +    QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
>> +    int ret;
>> +
>> +    ret = qio_channel_socket_flush_internal(ioc, true, errp);
>> +    if (ret < 0) {
>> +        return ret;
>> +    }
>> +
>> +    if (sioc->new_zero_copy_sent_success) {
>> +        sioc->new_zero_copy_sent_success = FALSE;
>> +        return 0;
>> +    }
>> +
>> +    return 1;
>>   }
>>   
>>   #endif /* QEMU_MSG_ZEROCOPY */
>> -- 
>> 2.43.0
>>
> With regards,
> Daniel


Thanks

Manish Mishra


Re: [PATCH v3] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
Posted by Manish 1 week, 5 days ago
Hi Daniel, Peter,

Please let me know whether this latest patch looks good.


On 17/03/25 7:22 am, Manish Mishra wrote:
> We allocate extra metadata SKBs in case of a zerocopy send. This metadata
> memory is accounted for in the OPTMEM limit. If there is any error while
> sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
> queued in the socket error queue. This error queue is freed when userspace
> reads it.
>
> Usually, if there are continuous failures, we merge the metadata into a single
> SKB and free another one. As a result, it never exceeds the OPTMEM limit.
> However, if there is any out-of-order processing or intermittent zerocopy
> failures, this error chain can grow significantly, exhausting the OPTMEM limit.
> As a result, all new sendmsg requests fail to allocate any new SKB, leading to
> an ENOBUF error. Depending on the amount of data queued before the flush
> (i.e., large live migration iterations), even large OPTMEM limits are prone to
> failure.
>
> To work around this, if we encounter an ENOBUF error with a zerocopy sendmsg,
> we flush the error queue and retry once more.
>
> V2:
>    1. Removed the dirty_sync_missed_zero_copy migration stat.
>    2. Made the call to qio_channel_socket_flush_internal() from
>       qio_channel_socket_writev() non-blocking.
>
> V3:
>    1. Add the dirty_sync_missed_zero_copy migration stat again.
>
> Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>
> ---
>   include/io/channel-socket.h |  5 +++
>   io/channel-socket.c         | 74 ++++++++++++++++++++++++++++++-------
>   2 files changed, 65 insertions(+), 14 deletions(-)
>
> diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h
> index ab15577d38..2c48b972e8 100644
> --- a/include/io/channel-socket.h
> +++ b/include/io/channel-socket.h
> @@ -49,6 +49,11 @@ struct QIOChannelSocket {
>       socklen_t remoteAddrLen;
>       ssize_t zero_copy_queued;
>       ssize_t zero_copy_sent;
> +    /**
> +     * This flag indicates whether any new data was successfully sent with
> +     * zerocopy since the last qio_channel_socket_flush() call.
> +     */
> +    bool new_zero_copy_sent_success;
>   };
>   
>   
> diff --git a/io/channel-socket.c b/io/channel-socket.c
> index 608bcf066e..604ca9890d 100644
> --- a/io/channel-socket.c
> +++ b/io/channel-socket.c
> @@ -37,6 +37,12 @@
>   
>   #define SOCKET_MAX_FDS 16
>   
> +#ifdef QEMU_MSG_ZEROCOPY
> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
> +                                             bool block,
> +                                             Error **errp);
> +#endif
> +
>   SocketAddress *
>   qio_channel_socket_get_local_address(QIOChannelSocket *ioc,
>                                        Error **errp)
> @@ -65,6 +71,7 @@ qio_channel_socket_new(void)
>       sioc->fd = -1;
>       sioc->zero_copy_queued = 0;
>       sioc->zero_copy_sent = 0;
> +    sioc->new_zero_copy_sent_success = FALSE;
>   
>       ioc = QIO_CHANNEL(sioc);
>       qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN);
> @@ -566,6 +573,7 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>       size_t fdsize = sizeof(int) * nfds;
>       struct cmsghdr *cmsg;
>       int sflags = 0;
> +    bool zero_copy_flush_pending = TRUE;
>   
>       memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
>   
> @@ -612,9 +620,25 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>               goto retry;
>           case ENOBUFS:
>               if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
> -                error_setg_errno(errp, errno,
> -                                 "Process can't lock enough memory for using MSG_ZEROCOPY");
> -                return -1;
> +                /**
> +                 * Socket error queueing may exhaust the OPTMEM limit. Try
> +                 * flushing the error queue once.
> +                 */
> +                if (zero_copy_flush_pending) {
> +                    ret = qio_channel_socket_flush_internal(ioc, false, errp);
> +                    if (ret < 0) {
> +                        error_setg_errno(errp, errno,
> +                                         "Zerocopy flush failed");
> +                        return -1;
> +                    }
> +                    zero_copy_flush_pending = FALSE;
> +                    goto retry;
> +                } else {
> +                    error_setg_errno(errp, errno,
> +                                     "Process can't lock enough memory for "
> +                                     "using MSG_ZEROCOPY");
> +                    return -1;
> +                }
>               }
>               break;
>           }
> @@ -725,8 +749,9 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
>   
>   
>   #ifdef QEMU_MSG_ZEROCOPY
> -static int qio_channel_socket_flush(QIOChannel *ioc,
> -                                    Error **errp)
> +static int qio_channel_socket_flush_internal(QIOChannel *ioc,
> +                                             bool block,
> +                                             Error **errp)
>   {
>       QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
>       struct msghdr msg = {};
> @@ -734,7 +759,6 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>       struct cmsghdr *cm;
>       char control[CMSG_SPACE(sizeof(*serr))];
>       int received;
> -    int ret;
>   
>       if (sioc->zero_copy_queued == sioc->zero_copy_sent) {
>           return 0;
> @@ -744,16 +768,19 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>       msg.msg_controllen = sizeof(control);
>       memset(control, 0, sizeof(control));
>   
> -    ret = 1;
> -
>       while (sioc->zero_copy_sent < sioc->zero_copy_queued) {
>           received = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE);
>           if (received < 0) {
>               switch (errno) {
>               case EAGAIN:
> -                /* Nothing on errqueue, wait until something is available */
> -                qio_channel_wait(ioc, G_IO_ERR);
> -                continue;
> +                if (block) {
> +                    /* Nothing on errqueue, wait until something is
> +                     * available.
> +                     */
> +                    qio_channel_wait(ioc, G_IO_ERR);
> +                    continue;
> +                }
> +                return 0;
>               case EINTR:
>                   continue;
>               default:
> @@ -791,13 +818,32 @@ static int qio_channel_socket_flush(QIOChannel *ioc,
>           /* No errors, count successfully finished sendmsg()*/
>           sioc->zero_copy_sent += serr->ee_data - serr->ee_info + 1;
>   
> -        /* If any sendmsg() succeeded using zero copy, return 0 at the end */
> +        /* If any sendmsg() succeeded using zero copy, mark zerocopy success */
>           if (serr->ee_code != SO_EE_CODE_ZEROCOPY_COPIED) {
> -            ret = 0;
> +            sioc->new_zero_copy_sent_success = TRUE;
>           }
>       }
>   
> -    return ret;
> +    return 0;
> +}
> +
> +static int qio_channel_socket_flush(QIOChannel *ioc,
> +                                    Error **errp)
> +{
> +    QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
> +    int ret;
> +
> +    ret = qio_channel_socket_flush_internal(ioc, true, errp);
> +    if (ret < 0) {
> +        return ret;
> +    }
> +
> +    if (sioc->new_zero_copy_sent_success) {
> +        sioc->new_zero_copy_sent_success = FALSE;
> +        return 0;
> +    }
> +
> +    return 1;
>   }
>   
>   #endif /* QEMU_MSG_ZEROCOPY */
Re: [PATCH v3] QIOChannelSocket: Flush zerocopy socket error queue on sendmsg failure due to ENOBUF
Posted by Peter Xu 1 week, 4 days ago
On Fri, Mar 21, 2025 at 11:33:31AM +0530, Manish wrote:
> Hi Daniel, Peter,
> 
> Please let me know if this latest patch looks good?
> 
> 
> On 17/03/25 7:22 am, Manish Mishra wrote:
> > We allocate extra metadata SKBs in case of a zerocopy send. This metadata
> > memory is accounted for in the OPTMEM limit. If there is any error while
> > sending zerocopy packets or if zerocopy is skipped, these metadata SKBs are
> > queued in the socket error queue. This error queue is freed when userspace
> > reads it.
> > 
> > Usually, if there are continuous failures, we merge the metadata into a single
> > SKB and free another one. As a result, it never exceeds the OPTMEM limit.
> > However, if there is any out-of-order processing or intermittent zerocopy
> > failures, this error chain can grow significantly, exhausting the OPTMEM limit.
> > As a result, all new sendmsg requests fail to allocate any new SKB, leading to
> > an ENOBUF error. Depending on the amount of data queued before the flush
> > (i.e., large live migration iterations), even large OPTMEM limits are prone to
> > failure.
> > 
> > To work around this, if we encounter an ENOBUF error with a zerocopy sendmsg,
> > we flush the error queue and retry once more.
> > 
> > V2:
> >    1. Removed the dirty_sync_missed_zero_copy migration stat.
> >    2. Made the call to qio_channel_socket_flush_internal() from
> >       qio_channel_socket_writev() non-blocking.
> > 
> > V3:
> >    1. Add the dirty_sync_missed_zero_copy migration stat again.
> > 
> > Signed-off-by: Manish Mishra <manish.mishra@nutanix.com>

I have an old comment which could still apply here:

https://lore.kernel.org/all/Z885hS6QmGOZYj7N@x1.local/

That's on s/zero_copy_flush_pending/zerocopy_flush_once/.

But no need to repost only for that.. that's more or less a nitpick.  It's
unfortunate we need to keep the ABI and the complexity even if the counter
almost means nothing solid..

The change overall looks good here.

Reviewed-by: Peter Xu <peterx@redhat.com>

Thanks,

-- 
Peter Xu