[PATCH v2 2/3] io/tls: Make qio_channel_tls_bye() always synchronous

Peter Xu posted 3 patches 2 weeks, 2 days ago
Maintainers: "Daniel P. Berrangé" <berrange@redhat.com>, Peter Xu <peterx@redhat.com>, Fabiano Rosas <farosas@suse.de>
There is a newer version of this series
[PATCH v2 2/3] io/tls: Make qio_channel_tls_bye() always synchronous
Posted by Peter Xu 2 weeks, 2 days ago
This is not an issue I hit; the change comes purely from code observation
while looking at a TLS premature termination issue.

The qio_channel_tls_bye() API needs to be synchronous.  When it is not, the
previous implementation would attach an asynchronous task that only retries
once the channel receives the relevant GIO event.  That can be problematic,
because the caller of qio_channel_tls_bye() may invoke channel close()
before the event fires, leading to premature termination of the TLS session.

Remove the asynchronous handling and instead retry immediately.  Currently,
the only two cases that can lead to an async task are INTERRUPT and EAGAIN.
Spinning on retry should suffice for now, until there is solid proof that a
more sophisticated retry logic is needed.

With that, we can remove the whole async model for the bye task.

While at it, make the function return bool, which is the common pattern in
QEMU when errp is used.

Side note on the tracepoints: previously the bye_complete() tracepoint was
unused; start using it in this patch.  bye_pending() and bye_cancel() can be
dropped now; add bye_retry() instead.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 include/io/channel-tls.h |  5 ++-
 io/channel-tls.c         | 86 +++++-----------------------------------
 io/trace-events          |  3 +-
 3 files changed, 15 insertions(+), 79 deletions(-)

diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
index 7e9023570d..bcd14ffbd6 100644
--- a/include/io/channel-tls.h
+++ b/include/io/channel-tls.h
@@ -49,7 +49,6 @@ struct QIOChannelTLS {
     QCryptoTLSSession *session;
     QIOChannelShutdown shutdown;
     guint hs_ioc_tag;
-    guint bye_ioc_tag;
 };
 
 /**
@@ -60,8 +59,10 @@ struct QIOChannelTLS {
  * Perform the TLS session termination. This method will return
  * immediately and the termination will continue in the background,
  * provided the main loop is running.
+ *
+ * Returns: true on success, false on error (with errp set)
  */
-void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp);
+bool qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp);
 
 /**
  * qio_channel_tls_new_server:
diff --git a/io/channel-tls.c b/io/channel-tls.c
index 5a2c8188ce..8510a187a8 100644
--- a/io/channel-tls.c
+++ b/io/channel-tls.c
@@ -253,84 +253,25 @@ void qio_channel_tls_handshake(QIOChannelTLS *ioc,
     qio_channel_tls_handshake_task(ioc, task, context);
 }
 
-static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition,
-                                       gpointer user_data);
-
-static void qio_channel_tls_bye_task(QIOChannelTLS *ioc, QIOTask *task,
-                                     GMainContext *context)
+bool qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp)
 {
-    GIOCondition condition;
-    QIOChannelTLSData *data;
     int status;
-    Error *err = NULL;
 
-    status = qcrypto_tls_session_bye(ioc->session, &err);
+    trace_qio_channel_tls_bye_start(ioc);
+retry:
+    status = qcrypto_tls_session_bye(ioc->session, errp);
 
     if (status < 0) {
         trace_qio_channel_tls_bye_fail(ioc);
-        qio_task_set_error(task, err);
-        qio_task_complete(task);
-        return;
-    }
-
-    if (status == QCRYPTO_TLS_BYE_COMPLETE) {
-        qio_task_complete(task);
-        return;
-    }
-
-    data = g_new0(typeof(*data), 1);
-    data->task = task;
-    data->context = context;
-
-    if (context) {
-        g_main_context_ref(context);
-    }
-
-    if (status == QCRYPTO_TLS_BYE_SENDING) {
-        condition = G_IO_OUT;
-    } else {
-        condition = G_IO_IN;
-    }
-
-    trace_qio_channel_tls_bye_pending(ioc, status);
-    ioc->bye_ioc_tag = qio_channel_add_watch_full(ioc->master, condition,
-                                                  qio_channel_tls_bye_io,
-                                                  data, NULL, context);
-}
-
-
-static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition,
-                                       gpointer user_data)
-{
-    QIOChannelTLSData *data = user_data;
-    QIOTask *task = data->task;
-    GMainContext *context = data->context;
-    QIOChannelTLS *tioc = QIO_CHANNEL_TLS(qio_task_get_source(task));
-
-    tioc->bye_ioc_tag = 0;
-    g_free(data);
-    qio_channel_tls_bye_task(tioc, task, context);
-
-    if (context) {
-        g_main_context_unref(context);
+        return false;
+    } else if (status != QCRYPTO_TLS_BYE_COMPLETE) {
+        /* BYE event must be synchronous, retry immediately */
+        trace_qio_channel_tls_bye_retry(ioc, status);
+        goto retry;
     }
 
-    return FALSE;
-}
-
-static void propagate_error(QIOTask *task, gpointer opaque)
-{
-    qio_task_propagate_error(task, opaque);
-}
-
-void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp)
-{
-    QIOTask *task;
-
-    task = qio_task_new(OBJECT(ioc), propagate_error, errp, NULL);
-
-    trace_qio_channel_tls_bye_start(ioc);
-    qio_channel_tls_bye_task(ioc, task, NULL);
+    trace_qio_channel_tls_bye_complete(ioc);
+    return true;
 }
 
 static void qio_channel_tls_init(Object *obj G_GNUC_UNUSED)
@@ -482,11 +423,6 @@ static int qio_channel_tls_close(QIOChannel *ioc,
         g_clear_handle_id(&tioc->hs_ioc_tag, g_source_remove);
     }
 
-    if (tioc->bye_ioc_tag) {
-        trace_qio_channel_tls_bye_cancel(ioc);
-        g_clear_handle_id(&tioc->bye_ioc_tag, g_source_remove);
-    }
-
     return qio_channel_close(tioc->master, errp);
 }
 
diff --git a/io/trace-events b/io/trace-events
index dc3a63ba1f..67b3814192 100644
--- a/io/trace-events
+++ b/io/trace-events
@@ -45,10 +45,9 @@ qio_channel_tls_handshake_fail(void *ioc) "TLS handshake fail ioc=%p"
 qio_channel_tls_handshake_complete(void *ioc) "TLS handshake complete ioc=%p"
 qio_channel_tls_handshake_cancel(void *ioc) "TLS handshake cancel ioc=%p"
 qio_channel_tls_bye_start(void *ioc) "TLS termination start ioc=%p"
-qio_channel_tls_bye_pending(void *ioc, int status) "TLS termination pending ioc=%p status=%d"
+qio_channel_tls_bye_retry(void *ioc, int status) "TLS termination pending ioc=%p status=%d"
 qio_channel_tls_bye_fail(void *ioc) "TLS termination fail ioc=%p"
 qio_channel_tls_bye_complete(void *ioc) "TLS termination complete ioc=%p"
-qio_channel_tls_bye_cancel(void *ioc) "TLS termination cancel ioc=%p"
 qio_channel_tls_credentials_allow(void *ioc) "TLS credentials allow ioc=%p"
 qio_channel_tls_credentials_deny(void *ioc) "TLS credentials deny ioc=%p"
 
-- 
2.50.1
Re: [PATCH v2 2/3] io/tls: Make qio_channel_tls_bye() always synchronous
Posted by Fabiano Rosas 1 week, 3 days ago
Peter Xu <peterx@redhat.com> writes:

> No issue I hit, the change is only from code observation when I am looking
> at a TLS premature termination issue.
>
> qio_channel_tls_bye() API needs to be synchronous.  When it's not, the
> previous impl will attach an asynchronous task retrying but only until when
> the channel gets the relevant GIO event. It may be problematic, because the
> caller of qio_channel_tls_bye() may have invoked channel close() before
> that, leading to premature termination of the TLS session.
>

I'm not super versed on socket APIs, so bear with me: Wouldn't the
subsequent shutdown() before close() ensure that the io watch gets
triggered? Assuming we're atomically installing the watch before the
shutdown() (at the moment, we're not).

> Remove the asynchronous handling, instead retry it immediately.  Currently,
> the only two possible cases that may lead to async task is either INTERRUPT
> or EAGAIN.  It should be suffice to spin retry as of now, until a solid
> proof showing that a more complicated retry logic is needed.
>
> With that, we can remove the whole async model for the bye task.
>

With the bye() being synchronous, do we still have the issue when
migration fails? I guess it depends on what the answer to my question
above is...

> When at it, making the function return bool, which looks like a common
> pattern in QEMU when errp is used.
>
> Side note on the tracepoints: previously the tracepoint bye_complete()
> isn't used.  Start to use it in this patch.  bye_pending() and bye_cancel()
> can be dropped now.  Adding bye_retry() instead.
>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  include/io/channel-tls.h |  5 ++-
>  io/channel-tls.c         | 86 +++++-----------------------------------
>  io/trace-events          |  3 +-
>  3 files changed, 15 insertions(+), 79 deletions(-)
>
> diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
> index 7e9023570d..bcd14ffbd6 100644
> --- a/include/io/channel-tls.h
> +++ b/include/io/channel-tls.h
> @@ -49,7 +49,6 @@ struct QIOChannelTLS {
>      QCryptoTLSSession *session;
>      QIOChannelShutdown shutdown;
>      guint hs_ioc_tag;
> -    guint bye_ioc_tag;
>  };
>  
>  /**
> @@ -60,8 +59,10 @@ struct QIOChannelTLS {
>   * Perform the TLS session termination. This method will return
>   * immediately and the termination will continue in the background,
>   * provided the main loop is running.
> + *
> + * Returns: true on success, false on error (with errp set)
>   */
> -void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp);
> +bool qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp);
>  
>  /**
>   * qio_channel_tls_new_server:
> diff --git a/io/channel-tls.c b/io/channel-tls.c
> index 5a2c8188ce..8510a187a8 100644
> --- a/io/channel-tls.c
> +++ b/io/channel-tls.c
> @@ -253,84 +253,25 @@ void qio_channel_tls_handshake(QIOChannelTLS *ioc,
>      qio_channel_tls_handshake_task(ioc, task, context);
>  }
>  
> -static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition,
> -                                       gpointer user_data);
> -
> -static void qio_channel_tls_bye_task(QIOChannelTLS *ioc, QIOTask *task,
> -                                     GMainContext *context)
> +bool qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp)
>  {
> -    GIOCondition condition;
> -    QIOChannelTLSData *data;
>      int status;
> -    Error *err = NULL;
>  
> -    status = qcrypto_tls_session_bye(ioc->session, &err);
> +    trace_qio_channel_tls_bye_start(ioc);
> +retry:
> +    status = qcrypto_tls_session_bye(ioc->session, errp);
>  
>      if (status < 0) {
>          trace_qio_channel_tls_bye_fail(ioc);
> -        qio_task_set_error(task, err);
> -        qio_task_complete(task);
> -        return;
> -    }
> -
> -    if (status == QCRYPTO_TLS_BYE_COMPLETE) {
> -        qio_task_complete(task);
> -        return;
> -    }
> -
> -    data = g_new0(typeof(*data), 1);
> -    data->task = task;
> -    data->context = context;
> -
> -    if (context) {
> -        g_main_context_ref(context);
> -    }
> -
> -    if (status == QCRYPTO_TLS_BYE_SENDING) {
> -        condition = G_IO_OUT;
> -    } else {
> -        condition = G_IO_IN;
> -    }
> -
> -    trace_qio_channel_tls_bye_pending(ioc, status);
> -    ioc->bye_ioc_tag = qio_channel_add_watch_full(ioc->master, condition,
> -                                                  qio_channel_tls_bye_io,
> -                                                  data, NULL, context);
> -}
> -
> -
> -static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition,
> -                                       gpointer user_data)
> -{
> -    QIOChannelTLSData *data = user_data;
> -    QIOTask *task = data->task;
> -    GMainContext *context = data->context;
> -    QIOChannelTLS *tioc = QIO_CHANNEL_TLS(qio_task_get_source(task));
> -
> -    tioc->bye_ioc_tag = 0;
> -    g_free(data);
> -    qio_channel_tls_bye_task(tioc, task, context);
> -
> -    if (context) {
> -        g_main_context_unref(context);
> +        return false;
> +    } else if (status != QCRYPTO_TLS_BYE_COMPLETE) {
> +        /* BYE event must be synchronous, retry immediately */
> +        trace_qio_channel_tls_bye_retry(ioc, status);
> +        goto retry;
>      }
>  
> -    return FALSE;
> -}
> -
> -static void propagate_error(QIOTask *task, gpointer opaque)
> -{
> -    qio_task_propagate_error(task, opaque);
> -}
> -
> -void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp)
> -{
> -    QIOTask *task;
> -
> -    task = qio_task_new(OBJECT(ioc), propagate_error, errp, NULL);
> -
> -    trace_qio_channel_tls_bye_start(ioc);
> -    qio_channel_tls_bye_task(ioc, task, NULL);
> +    trace_qio_channel_tls_bye_complete(ioc);
> +    return true;
>  }
>  
>  static void qio_channel_tls_init(Object *obj G_GNUC_UNUSED)
> @@ -482,11 +423,6 @@ static int qio_channel_tls_close(QIOChannel *ioc,
>          g_clear_handle_id(&tioc->hs_ioc_tag, g_source_remove);
>      }
>  
> -    if (tioc->bye_ioc_tag) {
> -        trace_qio_channel_tls_bye_cancel(ioc);
> -        g_clear_handle_id(&tioc->bye_ioc_tag, g_source_remove);
> -    }
> -
>      return qio_channel_close(tioc->master, errp);
>  }
>  
> diff --git a/io/trace-events b/io/trace-events
> index dc3a63ba1f..67b3814192 100644
> --- a/io/trace-events
> +++ b/io/trace-events
> @@ -45,10 +45,9 @@ qio_channel_tls_handshake_fail(void *ioc) "TLS handshake fail ioc=%p"
>  qio_channel_tls_handshake_complete(void *ioc) "TLS handshake complete ioc=%p"
>  qio_channel_tls_handshake_cancel(void *ioc) "TLS handshake cancel ioc=%p"
>  qio_channel_tls_bye_start(void *ioc) "TLS termination start ioc=%p"
> -qio_channel_tls_bye_pending(void *ioc, int status) "TLS termination pending ioc=%p status=%d"
> +qio_channel_tls_bye_retry(void *ioc, int status) "TLS termination pending ioc=%p status=%d"
>  qio_channel_tls_bye_fail(void *ioc) "TLS termination fail ioc=%p"
>  qio_channel_tls_bye_complete(void *ioc) "TLS termination complete ioc=%p"
> -qio_channel_tls_bye_cancel(void *ioc) "TLS termination cancel ioc=%p"
>  qio_channel_tls_credentials_allow(void *ioc) "TLS credentials allow ioc=%p"
>  qio_channel_tls_credentials_deny(void *ioc) "TLS credentials deny ioc=%p"
Re: [PATCH v2 2/3] io/tls: Make qio_channel_tls_bye() always synchronous
Posted by Peter Xu 1 week, 2 days ago
On Thu, Sep 18, 2025 at 11:47:00AM -0300, Fabiano Rosas wrote:
> Peter Xu <peterx@redhat.com> writes:
> 
> > No issue I hit, the change is only from code observation when I am looking
> > at a TLS premature termination issue.
> >
> > qio_channel_tls_bye() API needs to be synchronous.  When it's not, the
> > previous impl will attach an asynchronous task retrying but only until when
> > the channel gets the relevant GIO event. It may be problematic, because the
> > caller of qio_channel_tls_bye() may have invoked channel close() before
> > that, leading to premature termination of the TLS session.
> >
> 
> I'm not super versed on socket APIs, so bear with me: Wouldn't the
> subsequent shutdown() before close() ensure that the io watch gets
> triggered? Assuming we're atomically installing the watch before the
> shutdown() (at the moment, we're not).

I think it won't.

First of all, AFAIU migration_cleanup() must run in the main thread,
because it can register async tasks like the bye() task, and it registers
against context==NULL (in qio_channel_tls_bye_task(), for example), which I
believe means the task is registered against the QEMU main loop.

Then, if we run a sequence like this:

  qio_channel_tls_bye()
  shutdown()
  close()

And if we do not yield anywhere within that sequence, IIUC it will run
_without_ processing any events on the main loop, even if some events have
fired.

So I think close() will see the pending async task and remove it, so the
task never gets a chance to run.
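
For illustration, a hypothetical condensation of that sequence (this is not
the actual migration_cleanup() code, just a sketch assuming the
io/channel-tls.h and io/channel.h headers are available); the point is that
nothing returns to the main loop between the three calls:

    static void cleanup_tls_channel(QIOChannelTLS *tioc)
    {
        /* May register a bye watch on the main context and return early */
        qio_channel_tls_bye(tioc, NULL);
        /* Runs right away, no main loop iteration happens in between */
        qio_channel_shutdown(QIO_CHANNEL(tioc), QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        /* close() clears the still-pending bye watch, so BYE never goes out */
        qio_channel_close(QIO_CHANNEL(tioc), NULL);
    }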

> 
> > Remove the asynchronous handling, instead retry it immediately.  Currently,
> > the only two possible cases that may lead to async task is either INTERRUPT
> > or EAGAIN.  It should be suffice to spin retry as of now, until a solid
> > proof showing that a more complicated retry logic is needed.
> >
> > With that, we can remove the whole async model for the bye task.
> >
> 
> With the bye() being synchronous, do we still have the issue when
> migration fails? I guess it depends on what the answer to my question
> above is...

When migration fails, IMHO it's fine to prematurely terminate the channels,
as I replied in one of the emails you commented on in v1.  But we can
discuss; I am not sure if I missed something along those lines.

Note, Dan suggested changing the channel blocking mode as a smaller and
quicker fix, instead of throwing the async model away, which seems to be the
preferred model to keep for all iochannel APIs.  So feel free to ignore this
patch for now.  I still need to investigate a bit whether a concurrent
update of the fd's nonblocking flag would affect other threads, though.  In
any case, the results will be reflected in v3, but this patch will likely be
either dropped or replaced.

I know I made you read things we had already planned to throw away, my
apologies.  But it's partly your "fault" (for taking holidays!).  No, I'm
joking. :)  It's still good to discuss these.

-- 
Peter Xu
Re: [PATCH v2 2/3] io/tls: Make qio_channel_tls_bye() always synchronous
Posted by Daniel P. Berrangé 2 weeks, 2 days ago
On Thu, Sep 11, 2025 at 05:23:54PM -0400, Peter Xu wrote:
> No issue I hit, the change is only from code observation when I am looking
> at a TLS premature termination issue.
> 
> qio_channel_tls_bye() API needs to be synchronous.  When it's not, the
> previous impl will attach an asynchronous task retrying but only until when
> the channel gets the relevant GIO event. It may be problematic, because the
> caller of qio_channel_tls_bye() may have invoked channel close() before
> that, leading to premature termination of the TLS session.
> 
> Remove the asynchronous handling, instead retry it immediately.  Currently,
> the only two possible cases that may lead to async task is either INTERRUPT
> or EAGAIN.  It should be suffice to spin retry as of now, until a solid
> proof showing that a more complicated retry logic is needed.
> 
> With that, we can remove the whole async model for the bye task.
> 
> When at it, making the function return bool, which looks like a common
> pattern in QEMU when errp is used.
> 
> Side note on the tracepoints: previously the tracepoint bye_complete()
> isn't used.  Start to use it in this patch.  bye_pending() and bye_cancel()
> can be dropped now.  Adding bye_retry() instead.
> 
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  include/io/channel-tls.h |  5 ++-
>  io/channel-tls.c         | 86 +++++-----------------------------------
>  io/trace-events          |  3 +-
>  3 files changed, 15 insertions(+), 79 deletions(-)
> 
> diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
> index 7e9023570d..bcd14ffbd6 100644
> --- a/include/io/channel-tls.h
> +++ b/include/io/channel-tls.h
> @@ -49,7 +49,6 @@ struct QIOChannelTLS {
>      QCryptoTLSSession *session;
>      QIOChannelShutdown shutdown;
>      guint hs_ioc_tag;
> -    guint bye_ioc_tag;
>  };
>  
>  /**
> @@ -60,8 +59,10 @@ struct QIOChannelTLS {
>   * Perform the TLS session termination. This method will return
>   * immediately and the termination will continue in the background,
>   * provided the main loop is running.
> + *
> + * Returns: true on success, false on error (with errp set)
>   */
> -void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp);
> +bool qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp);
>  
>  /**
>   * qio_channel_tls_new_server:
> diff --git a/io/channel-tls.c b/io/channel-tls.c
> index 5a2c8188ce..8510a187a8 100644
> --- a/io/channel-tls.c
> +++ b/io/channel-tls.c
> @@ -253,84 +253,25 @@ void qio_channel_tls_handshake(QIOChannelTLS *ioc,
>      qio_channel_tls_handshake_task(ioc, task, context);
>  }
>  
> -static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition,
> -                                       gpointer user_data);
> -
> -static void qio_channel_tls_bye_task(QIOChannelTLS *ioc, QIOTask *task,
> -                                     GMainContext *context)
> +bool qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp)
>  {
> -    GIOCondition condition;
> -    QIOChannelTLSData *data;
>      int status;
> -    Error *err = NULL;
>  
> -    status = qcrypto_tls_session_bye(ioc->session, &err);
> +    trace_qio_channel_tls_bye_start(ioc);
> +retry:
> +    status = qcrypto_tls_session_bye(ioc->session, errp);
>  
>      if (status < 0) {
>          trace_qio_channel_tls_bye_fail(ioc);

snip

> +        return false;
> +    } else if (status != QCRYPTO_TLS_BYE_COMPLETE) {
> +        /* BYE event must be synchronous, retry immediately */
> +        trace_qio_channel_tls_bye_retry(ioc, status);
> +        goto retry;
>      }

We cannot do this. The gnutls_bye() API may need to perform
socket I/O, so when we're running over a non-blocking socket
we must expect EAGAIN. With this handling, QEMU will busy
loop, burning 100% CPU, while the socket is not ready.
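
Purely for illustration, a minimal sketch of one way a synchronous variant
could avoid the busy loop, by blocking on the underlying channel between
retries (hypothetical, not what the patch does; it assumes ioc->master is
the plain channel underneath the TLS layer):

    for (;;) {
        status = qcrypto_tls_session_bye(ioc->session, errp);
        if (status < 0) {
            return false;
        }
        if (status == QCRYPTO_TLS_BYE_COMPLETE) {
            return true;
        }
        /* Block until the socket is ready in the direction BYE needs */
        qio_channel_wait(ioc->master,
                         status == QCRYPTO_TLS_BYE_SENDING ? G_IO_OUT
                                                           : G_IO_IN);
    }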

A second point is that from a QIOChannel POV, we need to
ensure that all APIs can be used in a non-blocking scenario.
This is why the QIOChannelSocket impl provides both _sync and
_async variants of the connect/listen APIs, and why in the
QIOChannelTLS impl the handshake API is always async, with a
callback to be invoked on completion.

The QIOChannel 'bye' method is flawed in that it is
asynchronous, but has no callback for completion.

If migration is /always/ using a blocking socket for the
TLS channels this isn't a problem as gnutls will complete
immediately, but if any async sockets are used we have no
way to wait for completion. This requires improving the
API design in some manner.
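
As a purely hypothetical sketch of one direction (this prototype does not
exist today), a completion-aware bye could mirror the existing handshake
API:

    /* Existing, for reference: */
    void qio_channel_tls_handshake(QIOChannelTLS *ioc,
                                   QIOTaskFunc func,
                                   gpointer opaque,
                                   GDestroyNotify destroy,
                                   GMainContext *context);

    /* Hypothetical: asynchronous bye with a completion callback */
    void qio_channel_tls_bye_async(QIOChannelTLS *ioc,
                                   QIOTaskFunc func,
                                   gpointer opaque,
                                   GDestroyNotify destroy,
                                   GMainContext *context);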


>  
> -    return FALSE;
> -}
> -
> -static void propagate_error(QIOTask *task, gpointer opaque)
> -{
> -    qio_task_propagate_error(task, opaque);
> -}
> -
> -void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp)
> -{
> -    QIOTask *task;
> -
> -    task = qio_task_new(OBJECT(ioc), propagate_error, errp, NULL);
> -
> -    trace_qio_channel_tls_bye_start(ioc);
> -    qio_channel_tls_bye_task(ioc, task, NULL);
> +    trace_qio_channel_tls_bye_complete(ioc);
> +    return true;
>  }
>  
>  static void qio_channel_tls_init(Object *obj G_GNUC_UNUSED)
> @@ -482,11 +423,6 @@ static int qio_channel_tls_close(QIOChannel *ioc,
>          g_clear_handle_id(&tioc->hs_ioc_tag, g_source_remove);
>      }
>  
> -    if (tioc->bye_ioc_tag) {
> -        trace_qio_channel_tls_bye_cancel(ioc);
> -        g_clear_handle_id(&tioc->bye_ioc_tag, g_source_remove);
> -    }
> -
>      return qio_channel_close(tioc->master, errp);
>  }
>  
> diff --git a/io/trace-events b/io/trace-events
> index dc3a63ba1f..67b3814192 100644
> --- a/io/trace-events
> +++ b/io/trace-events
> @@ -45,10 +45,9 @@ qio_channel_tls_handshake_fail(void *ioc) "TLS handshake fail ioc=%p"
>  qio_channel_tls_handshake_complete(void *ioc) "TLS handshake complete ioc=%p"
>  qio_channel_tls_handshake_cancel(void *ioc) "TLS handshake cancel ioc=%p"
>  qio_channel_tls_bye_start(void *ioc) "TLS termination start ioc=%p"
> -qio_channel_tls_bye_pending(void *ioc, int status) "TLS termination pending ioc=%p status=%d"
> +qio_channel_tls_bye_retry(void *ioc, int status) "TLS termination pending ioc=%p status=%d"
>  qio_channel_tls_bye_fail(void *ioc) "TLS termination fail ioc=%p"
>  qio_channel_tls_bye_complete(void *ioc) "TLS termination complete ioc=%p"
> -qio_channel_tls_bye_cancel(void *ioc) "TLS termination cancel ioc=%p"
>  qio_channel_tls_credentials_allow(void *ioc) "TLS credentials allow ioc=%p"
>  qio_channel_tls_credentials_deny(void *ioc) "TLS credentials deny ioc=%p"
>  
> -- 
> 2.50.1
> 

With regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|
Re: [PATCH v2 2/3] io/tls: Make qio_channel_tls_bye() always synchronous
Posted by Peter Xu 2 weeks, 2 days ago
On Fri, Sep 12, 2025 at 12:27:52PM +0100, Daniel P. Berrangé wrote:
> On Thu, Sep 11, 2025 at 05:23:54PM -0400, Peter Xu wrote:
> > No issue I hit, the change is only from code observation when I am looking
> > at a TLS premature termination issue.
> > 
> > qio_channel_tls_bye() API needs to be synchronous.  When it's not, the
> > previous impl will attach an asynchronous task retrying but only until when
> > the channel gets the relevant GIO event. It may be problematic, because the
> > caller of qio_channel_tls_bye() may have invoked channel close() before
> > that, leading to premature termination of the TLS session.
> > 
> > Remove the asynchronous handling, instead retry it immediately.  Currently,
> > the only two possible cases that may lead to async task is either INTERRUPT
> > or EAGAIN.  It should be suffice to spin retry as of now, until a solid
> > proof showing that a more complicated retry logic is needed.
> > 
> > With that, we can remove the whole async model for the bye task.
> > 
> > When at it, making the function return bool, which looks like a common
> > pattern in QEMU when errp is used.
> > 
> > Side note on the tracepoints: previously the tracepoint bye_complete()
> > isn't used.  Start to use it in this patch.  bye_pending() and bye_cancel()
> > can be dropped now.  Adding bye_retry() instead.
> > 
> > Signed-off-by: Peter Xu <peterx@redhat.com>
> > ---
> >  include/io/channel-tls.h |  5 ++-
> >  io/channel-tls.c         | 86 +++++-----------------------------------
> >  io/trace-events          |  3 +-
> >  3 files changed, 15 insertions(+), 79 deletions(-)
> > 
> > diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
> > index 7e9023570d..bcd14ffbd6 100644
> > --- a/include/io/channel-tls.h
> > +++ b/include/io/channel-tls.h
> > @@ -49,7 +49,6 @@ struct QIOChannelTLS {
> >      QCryptoTLSSession *session;
> >      QIOChannelShutdown shutdown;
> >      guint hs_ioc_tag;
> > -    guint bye_ioc_tag;
> >  };
> >  
> >  /**
> > @@ -60,8 +59,10 @@ struct QIOChannelTLS {
> >   * Perform the TLS session termination. This method will return
> >   * immediately and the termination will continue in the background,
> >   * provided the main loop is running.
> > + *
> > + * Returns: true on success, false on error (with errp set)
> >   */
> > -void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp);
> > +bool qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp);
> >  
> >  /**
> >   * qio_channel_tls_new_server:
> > diff --git a/io/channel-tls.c b/io/channel-tls.c
> > index 5a2c8188ce..8510a187a8 100644
> > --- a/io/channel-tls.c
> > +++ b/io/channel-tls.c
> > @@ -253,84 +253,25 @@ void qio_channel_tls_handshake(QIOChannelTLS *ioc,
> >      qio_channel_tls_handshake_task(ioc, task, context);
> >  }
> >  
> > -static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition,
> > -                                       gpointer user_data);
> > -
> > -static void qio_channel_tls_bye_task(QIOChannelTLS *ioc, QIOTask *task,
> > -                                     GMainContext *context)
> > +bool qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp)
> >  {
> > -    GIOCondition condition;
> > -    QIOChannelTLSData *data;
> >      int status;
> > -    Error *err = NULL;
> >  
> > -    status = qcrypto_tls_session_bye(ioc->session, &err);
> > +    trace_qio_channel_tls_bye_start(ioc);
> > +retry:
> > +    status = qcrypto_tls_session_bye(ioc->session, errp);
> >  
> >      if (status < 0) {
> >          trace_qio_channel_tls_bye_fail(ioc);
> 
> snip
> 
> > +        return false;
> > +    } else if (status != QCRYPTO_TLS_BYE_COMPLETE) {
> > +        /* BYE event must be synchronous, retry immediately */
> > +        trace_qio_channel_tls_bye_retry(ioc, status);
> > +        goto retry;
> >      }
> 
> We cannot do this. If the gnutls_bye() API needs to perform
> socket I/O, and so when we're running over a non-blocking
> socket we must expect EAGAIN. With this handling, QEMU will
> busy loop burning 100% CPU when the socket is not ready.

Right.  That was the assumption when drafting this: the hope is that
spinning will almost never happen, and even if it does it'll finish soon
(migration is completing, so the network can't be that bad), unless the
network gets stuck exactly when we send the bye().

> 
> A second point is that from a QIOChannel POV, we need to
> ensure that all APIs can be used in a non-blocking scenario.
> This is why in the QIOChannelSocket impl connect/listen APIs
> we provide both _sync and _async variants of the APIs, or
> in the QIOChannelTLS impl, the handshake API is always
> async with a callback to be invokved on completion.

I agree.  The issue is that, if so, migration code always needs to be
prepared for a possible async op even though in 99.9999% of cases it won't
happen...  We would need to complicate the multifd logic a lot for this,
but the gain is small.

This series still uses patch 1 to fix the problem (rather than doing a real
BYE on the preempt channels, for example) only because it's the easiest
route; after all, the TLS channel impl still has a contract allowing
premature termination for explicit shutdown()s on the host.

If we want 100% graceful shutdowns, we'll need to apply this to all
channels, and the possibly-async model can definitely add even more
complexity than the multifd case.  I hope it won't be necessary... but just
to mention it.

> 
> The QIOChanel 'bye' method is flawed in that it is
> asynchronous, but has no callback for completion.
> 
> If migration is /always/ using a blocking socket for the
> TLS channels this isn't a problem as gnutls will complete
> immediately, but if any async sockets are used we have no
> way to wait for completion. This requires improving the
> API design in some manner.

I recall one of your future series on TLS would start enabling async for
all channels?  In any case, we definitely don't want this call to depend on
the blocking mode of the channels.

Would it make sense to introduce a _sync() version of it, but keep the
original bye(), leaving the rest until a real async user appears?

I can also simply drop this patch for now, because we can still hope it
will almost always be synchronous.  However, we risk forgetting about that
entirely and hitting the problem a few years later...

Thanks,

-- 
Peter Xu


Re: [PATCH v2 2/3] io/tls: Make qio_channel_tls_bye() always synchronous
Posted by Daniel P. Berrangé 1 week, 5 days ago
On Fri, Sep 12, 2025 at 11:36:51AM -0400, Peter Xu wrote:
> On Fri, Sep 12, 2025 at 12:27:52PM +0100, Daniel P. Berrangé wrote:
> > On Thu, Sep 11, 2025 at 05:23:54PM -0400, Peter Xu wrote:
> > > No issue I hit, the change is only from code observation when I am looking
> > > at a TLS premature termination issue.
> > > 
> > > qio_channel_tls_bye() API needs to be synchronous.  When it's not, the
> > > previous impl will attach an asynchronous task retrying but only until when
> > > the channel gets the relevant GIO event. It may be problematic, because the
> > > caller of qio_channel_tls_bye() may have invoked channel close() before
> > > that, leading to premature termination of the TLS session.
> > > 
> > > Remove the asynchronous handling, instead retry it immediately.  Currently,
> > > the only two possible cases that may lead to async task is either INTERRUPT
> > > or EAGAIN.  It should be suffice to spin retry as of now, until a solid
> > > proof showing that a more complicated retry logic is needed.
> > > 
> > > With that, we can remove the whole async model for the bye task.
> > > 
> > > When at it, making the function return bool, which looks like a common
> > > pattern in QEMU when errp is used.
> > > 
> > > Side note on the tracepoints: previously the tracepoint bye_complete()
> > > isn't used.  Start to use it in this patch.  bye_pending() and bye_cancel()
> > > can be dropped now.  Adding bye_retry() instead.
> > > 
> > > Signed-off-by: Peter Xu <peterx@redhat.com>
> > > ---
> > >  include/io/channel-tls.h |  5 ++-
> > >  io/channel-tls.c         | 86 +++++-----------------------------------
> > >  io/trace-events          |  3 +-
> > >  3 files changed, 15 insertions(+), 79 deletions(-)
> > > 

> > > diff --git a/io/channel-tls.c b/io/channel-tls.c
> > > index 5a2c8188ce..8510a187a8 100644
> > > --- a/io/channel-tls.c
> > > +++ b/io/channel-tls.c
> > > @@ -253,84 +253,25 @@ void qio_channel_tls_handshake(QIOChannelTLS *ioc,
> > >      qio_channel_tls_handshake_task(ioc, task, context);
> > >  }
> > >  
> > > -static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition,
> > > -                                       gpointer user_data);
> > > -
> > > -static void qio_channel_tls_bye_task(QIOChannelTLS *ioc, QIOTask *task,
> > > -                                     GMainContext *context)
> > > +bool qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp)
> > >  {
> > > -    GIOCondition condition;
> > > -    QIOChannelTLSData *data;
> > >      int status;
> > > -    Error *err = NULL;
> > >  
> > > -    status = qcrypto_tls_session_bye(ioc->session, &err);
> > > +    trace_qio_channel_tls_bye_start(ioc);
> > > +retry:
> > > +    status = qcrypto_tls_session_bye(ioc->session, errp);
> > >  
> > >      if (status < 0) {
> > >          trace_qio_channel_tls_bye_fail(ioc);
> > 
> > snip
> > 
> > > +        return false;
> > > +    } else if (status != QCRYPTO_TLS_BYE_COMPLETE) {
> > > +        /* BYE event must be synchronous, retry immediately */
> > > +        trace_qio_channel_tls_bye_retry(ioc, status);
> > > +        goto retry;
> > >      }
> > 
> > We cannot do this. If the gnutls_bye() API needs to perform
> > socket I/O, and so when we're running over a non-blocking
> > socket we must expect EAGAIN. With this handling, QEMU will
> > busy loop burning 100% CPU when the socket is not ready.
> 
> Right.  That was the plan when drafting this, the hope is spinning will
> almost not happen, and even if it happens it'll finish soon (migration is
> completing, it means network mustn't be so bad), unless the network is
> stuck exactly at when we send the bye().

Don't forget that the machine can be running 5 migrations
of different VMs concurrently, and so may not be as quick
to finish sending traffic as we expect. Since QEMU's migration
protocol is essentially unidirectional, I wonder if the
send buffer could still be full of VMstate data waiting
to be sent? Perhaps it's fine, but I don't like relying
on luck or hard-to-prove scenarios.

> > A second point is that from a QIOChannel POV, we need to
> > ensure that all APIs can be used in a non-blocking scenario.
> > This is why in the QIOChannelSocket impl connect/listen APIs
> > we provide both _sync and _async variants of the APIs, or
> > in the QIOChannelTLS impl, the handshake API is always
> > async with a callback to be invokved on completion.
> 
> I agree.  The issue is if so, migration code needs to be always be prepared
> with a possible async op even if in 99.9999% cases it won't happen... we
> need to complicate the multifd logic a lot for this, but the gain is
> little..
> 
> This series still used patch 1 to fix the problem (rather than do real BYE
> on preempt channels, for example) only because it's the easiest, after all
> it's still a contract in tls channel impl to allow premature termination
> for explicit shutdown()s on the host.
> 
> If we want to do 100% graceful shutdowns, we'll need to apply this to all
> channels, and the async-possible model can definitely add more complexity
> more than multifd.  I hope it won't be necessary.. but just to mention it.

Even if the migration code is relying on non-blocking sockets
for most of its work, at the time we're ready to invoke "bye",
perhaps the migration code could simply call

 qio_channel_set_blocking(ioc, true)

to switch the socket over to blocking mode.
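
As a minimal sketch of what that could look like on the migration side
(hypothetical helper, not actual migration code; note the real
qio_channel_set_blocking() also takes an Error ** argument):

    static void tls_channel_terminate(QIOChannelTLS *tioc, Error **errp)
    {
        /* Blocking mode means gnutls_bye() cannot hit EAGAIN... */
        if (qio_channel_set_blocking(QIO_CHANNEL(tioc), true, errp) < 0) {
            return;
        }
        /* ...so the existing bye() completes without needing a watch */
        qio_channel_tls_bye(tioc, errp);
    }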


> > The QIOChanel 'bye' method is flawed in that it is
> > asynchronous, but has no callback for completion.
> > 
> > If migration is /always/ using a blocking socket for the
> > TLS channels this isn't a problem as gnutls will complete
> > immediately, but if any async sockets are used we have no
> > way to wait for completion. This requires improving the
> > API design in some manner.
> 
> I recall one of your future series on TLS would start to enable async for
> all channels?  In all cases, we definitely don't want to have this call to
> be relevant to the blocking mode of the channels.
> 
> Would it make sense to introduce a _sync() version of it, but keep the
> original bye(), leaving the rest until a real async user appears?
> 
> I can also at least drop this patch as of now, because we can still wish it
> almost always be synchronous.  However we have risk forgetting that forever
> and hit it a few years later..

If we leave the current code as-is, and rely on migration switching
to blocking mode before calling bye, we'll be OK.

With regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|


Re: [PATCH v2 2/3] io/tls: Make qio_channel_tls_bye() always synchronous
Posted by Peter Xu 1 week, 5 days ago
On Mon, Sep 15, 2025 at 07:40:33PM +0100, Daniel P. Berrangé wrote:
> On Fri, Sep 12, 2025 at 11:36:51AM -0400, Peter Xu wrote:
> > On Fri, Sep 12, 2025 at 12:27:52PM +0100, Daniel P. Berrangé wrote:
> > > On Thu, Sep 11, 2025 at 05:23:54PM -0400, Peter Xu wrote:
> > > > No issue I hit, the change is only from code observation when I am looking
> > > > at a TLS premature termination issue.
> > > > 
> > > > qio_channel_tls_bye() API needs to be synchronous.  When it's not, the
> > > > previous impl will attach an asynchronous task retrying but only until when
> > > > the channel gets the relevant GIO event. It may be problematic, because the
> > > > caller of qio_channel_tls_bye() may have invoked channel close() before
> > > > that, leading to premature termination of the TLS session.
> > > > 
> > > > Remove the asynchronous handling, instead retry it immediately.  Currently,
> > > > the only two possible cases that may lead to async task is either INTERRUPT
> > > > or EAGAIN.  It should be suffice to spin retry as of now, until a solid
> > > > proof showing that a more complicated retry logic is needed.
> > > > 
> > > > With that, we can remove the whole async model for the bye task.
> > > > 
> > > > When at it, making the function return bool, which looks like a common
> > > > pattern in QEMU when errp is used.
> > > > 
> > > > Side note on the tracepoints: previously the tracepoint bye_complete()
> > > > isn't used.  Start to use it in this patch.  bye_pending() and bye_cancel()
> > > > can be dropped now.  Adding bye_retry() instead.
> > > > 
> > > > Signed-off-by: Peter Xu <peterx@redhat.com>
> > > > ---
> > > >  include/io/channel-tls.h |  5 ++-
> > > >  io/channel-tls.c         | 86 +++++-----------------------------------
> > > >  io/trace-events          |  3 +-
> > > >  3 files changed, 15 insertions(+), 79 deletions(-)
> > > > 
> 
> > > > diff --git a/io/channel-tls.c b/io/channel-tls.c
> > > > index 5a2c8188ce..8510a187a8 100644
> > > > --- a/io/channel-tls.c
> > > > +++ b/io/channel-tls.c
> > > > @@ -253,84 +253,25 @@ void qio_channel_tls_handshake(QIOChannelTLS *ioc,
> > > >      qio_channel_tls_handshake_task(ioc, task, context);
> > > >  }
> > > >  
> > > > -static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition,
> > > > -                                       gpointer user_data);
> > > > -
> > > > -static void qio_channel_tls_bye_task(QIOChannelTLS *ioc, QIOTask *task,
> > > > -                                     GMainContext *context)
> > > > +bool qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp)
> > > >  {
> > > > -    GIOCondition condition;
> > > > -    QIOChannelTLSData *data;
> > > >      int status;
> > > > -    Error *err = NULL;
> > > >  
> > > > -    status = qcrypto_tls_session_bye(ioc->session, &err);
> > > > +    trace_qio_channel_tls_bye_start(ioc);
> > > > +retry:
> > > > +    status = qcrypto_tls_session_bye(ioc->session, errp);
> > > >  
> > > >      if (status < 0) {
> > > >          trace_qio_channel_tls_bye_fail(ioc);
> > > 
> > > snip
> > > 
> > > > +        return false;
> > > > +    } else if (status != QCRYPTO_TLS_BYE_COMPLETE) {
> > > > +        /* BYE event must be synchronous, retry immediately */
> > > > +        trace_qio_channel_tls_bye_retry(ioc, status);
> > > > +        goto retry;
> > > >      }
> > > 
> > > We cannot do this. If the gnutls_bye() API needs to perform
> > > socket I/O, and so when we're running over a non-blocking
> > > socket we must expect EAGAIN. With this handling, QEMU will
> > > busy loop burning 100% CPU when the socket is not ready.
> > 
> > Right.  That was the plan when drafting this, the hope is spinning will
> > almost not happen, and even if it happens it'll finish soon (migration is
> > completing, it means network mustn't be so bad), unless the network is
> > stuck exactly at when we send the bye().
> 
> Don't forget that the machine can be running 5 migrations
> of different VMs concurrently, and so may not be as quick
> to finish sending traffic as we expect. Since QEMU's mig
> protocol is essentially undirectional, I wonder if the
> send buffer could still be full of VMstate data waiting
> to be sent ? Perhaps its fine, but I don't like relying
> on luck, or hard-to-prove scenarios.

Very rare conditional spinning is, IMHO, totally OK.  I wish all our
problems were as simple as CPU spinning... it would be super straightforward
to debug when hit, and also benign.  We can add whatever smarter mechanism
after that.

I would guess that even if this patch goes in, we will never observe the
spinning, considering the write buffer shouldn't hold more than tens of
MBs...

Said that..

> 
> > > A second point is that from a QIOChannel POV, we need to
> > > ensure that all APIs can be used in a non-blocking scenario.
> > > This is why in the QIOChannelSocket impl connect/listen APIs
> > > we provide both _sync and _async variants of the APIs, or
> > > in the QIOChannelTLS impl, the handshake API is always
> > > async with a callback to be invokved on completion.
> > 
> > I agree.  The issue is if so, migration code needs to be always be prepared
> > with a possible async op even if in 99.9999% cases it won't happen... we
> > need to complicate the multifd logic a lot for this, but the gain is
> > little..
> > 
> > This series still used patch 1 to fix the problem (rather than do real BYE
> > on preempt channels, for example) only because it's the easiest, after all
> > it's still a contract in tls channel impl to allow premature termination
> > for explicit shutdown()s on the host.
> > 
> > If we want to do 100% graceful shutdowns, we'll need to apply this to all
> > channels, and the async-possible model can definitely add more complexity
> > more than multifd.  I hope it won't be necessary.. but just to mention it.
> 
> Even if the migration code is relying on non-blocking sockets
> for most of its work, at the time we're ready to invoke "bye",
> perhaps the migration code could simply call
> 
>  qio_channel_set_blocking(ioc, true)
> 
> to switch the socket over to blocking mode.

... I think this is a good idea and should indeed solve the problem.  I
hope there's no loophole where it could still trigger the async path.

I'll respin with that, thanks!

-- 
Peter Xu