To implement reconnect we need several states for the client:
CONNECTED, QUIT and two CONNECTING states. The CONNECTING states will
be implemented in the following patches. This patch implements CONNECTED
and QUIT.

QUIT means that we should close the connection and fail all current
and further requests (like the old quit = true).

CONNECTED means that the connection is OK and we can send requests
(like the old quit = false).

The receiving loop compares the current state with QUIT, because
reconnect will happen in the same loop, so it should keep looping
until the very end.

Conversely, the request path compares the current state with CONNECTED,
as we don't want to send requests in the CONNECTING states (which are
unreachable now, but will become reachable after the following commits).

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
block/nbd-client.h | 9 ++++++++-
block/nbd-client.c | 55 ++++++++++++++++++++++++++++++++----------------------
2 files changed, 41 insertions(+), 23 deletions(-)
diff --git a/block/nbd-client.h b/block/nbd-client.h
index 2f047ba614..5367425774 100644
--- a/block/nbd-client.h
+++ b/block/nbd-client.h
@@ -23,6 +23,13 @@ typedef struct {
bool receiving; /* waiting for read_reply_co? */
} NBDClientRequest;
+typedef enum NBDClientState {
+ NBD_CLIENT_CONNECTING_WAIT,
+ NBD_CLIENT_CONNECTING_NOWAIT,
+ NBD_CLIENT_CONNECTED,
+ NBD_CLIENT_QUIT
+} NBDClientState;
+
typedef struct NBDClientSession {
QIOChannelSocket *sioc; /* The master data channel */
QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
@@ -32,10 +39,10 @@ typedef struct NBDClientSession {
CoQueue free_sema;
Coroutine *read_reply_co;
int in_flight;
+ NBDClientState state;
NBDClientRequest requests[MAX_NBD_REQUESTS];
NBDReply reply;
- bool quit;
} NBDClientSession;
NBDClientSession *nbd_get_client_session(BlockDriverState *bs);
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 7eaf0149f0..a91fd3ea3e 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -34,6 +34,12 @@
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index) ((index) ^ (uint64_t)(intptr_t)(bs))
+/* @ret would be used for reconnect in future */
+static void nbd_channel_error(NBDClientSession *s, int ret)
+{
+ s->state = NBD_CLIENT_QUIT;
+}
+
static void nbd_recv_coroutines_wake_all(NBDClientSession *s)
{
int i;
@@ -73,14 +79,15 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
int ret = 0;
Error *local_err = NULL;
- while (!s->quit) {
+ while (s->state != NBD_CLIENT_QUIT) {
assert(s->reply.handle == 0);
ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
if (local_err) {
error_report_err(local_err);
}
if (ret <= 0) {
- break;
+ nbd_channel_error(s, ret ? ret : -EIO);
+ continue;
}
/* There's no need for a mutex on the receive side, because the
@@ -93,7 +100,8 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
!s->requests[i].receiving ||
(nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
{
- break;
+ nbd_channel_error(s, -EINVAL);
+ continue;
}
/* We're woken up again by the request itself. Note that there
@@ -111,7 +119,6 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
qemu_coroutine_yield();
}
- s->quit = true;
nbd_recv_coroutines_wake_all(s);
s->read_reply_co = NULL;
}
@@ -121,12 +128,18 @@ static int nbd_co_send_request(BlockDriverState *bs,
QEMUIOVector *qiov)
{
NBDClientSession *s = nbd_get_client_session(bs);
- int rc, i;
+ int rc, i = -1;
qemu_co_mutex_lock(&s->send_mutex);
while (s->in_flight == MAX_NBD_REQUESTS) {
qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
}
+
+ if (s->state != NBD_CLIENT_CONNECTED) {
+ rc = -EIO;
+ goto err;
+ }
+
s->in_flight++;
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
@@ -144,16 +157,12 @@ static int nbd_co_send_request(BlockDriverState *bs,
request->handle = INDEX_TO_HANDLE(s, i);
- if (s->quit) {
- rc = -EIO;
- goto err;
- }
assert(s->ioc);
if (qiov) {
qio_channel_set_cork(s->ioc, true);
rc = nbd_send_request(s->ioc, request);
- if (rc >= 0 && !s->quit) {
+ if (rc >= 0 && s->state == NBD_CLIENT_CONNECTED) {
if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
NULL) < 0) {
rc = -EIO;
@@ -168,9 +177,11 @@ static int nbd_co_send_request(BlockDriverState *bs,
err:
if (rc < 0) {
- s->quit = true;
- s->requests[i].coroutine = NULL;
- s->in_flight--;
+ nbd_channel_error(s, rc);
+ if (i != -1) {
+ s->requests[i].coroutine = NULL;
+ s->in_flight--;
+ }
qemu_co_queue_next(&s->free_sema);
}
qemu_co_mutex_unlock(&s->send_mutex);
@@ -421,7 +432,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
s->requests[i].receiving = true;
qemu_coroutine_yield();
s->requests[i].receiving = false;
- if (s->quit) {
+ if (s->state != NBD_CLIENT_CONNECTED) {
error_setg(errp, "Connection closed");
return -EIO;
}
@@ -504,7 +515,7 @@ static coroutine_fn int nbd_co_receive_one_chunk(
request_ret, qiov, payload, errp);
if (ret < 0) {
- s->quit = true;
+ nbd_channel_error(s, ret);
} else {
/* For assert at loop start in nbd_read_reply_entry */
if (reply) {
@@ -570,7 +581,7 @@ static bool nbd_reply_chunk_iter_receive(NBDClientSession *s,
NBDReply local_reply;
NBDStructuredReplyChunk *chunk;
Error *local_err = NULL;
- if (s->quit) {
+ if (s->state != NBD_CLIENT_CONNECTED) {
error_setg(&local_err, "Connection closed");
nbd_iter_channel_error(iter, -EIO, &local_err);
goto break_loop;
@@ -595,7 +606,7 @@ static bool nbd_reply_chunk_iter_receive(NBDClientSession *s,
}
/* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
- if (nbd_reply_is_simple(reply) || s->quit) {
+ if (nbd_reply_is_simple(reply) || s->state != NBD_CLIENT_CONNECTED) {
goto break_loop;
}
@@ -667,14 +678,14 @@ static int nbd_co_receive_cmdread_reply(NBDClientSession *s, uint64_t handle,
ret = nbd_parse_offset_hole_payload(&reply.structured, payload,
offset, qiov, &local_err);
if (ret < 0) {
- s->quit = true;
+ nbd_channel_error(s, ret);
nbd_iter_channel_error(&iter, ret, &local_err);
}
break;
default:
if (!nbd_reply_type_is_error(chunk->type)) {
/* not allowed reply type */
- s->quit = true;
+ nbd_channel_error(s, -EINVAL);
error_setg(&local_err,
"Unexpected reply type: %d (%s) for CMD_READ",
chunk->type, nbd_reply_type_lookup(chunk->type));
@@ -714,7 +725,7 @@ static int nbd_co_receive_blockstatus_reply(NBDClientSession *s,
switch (chunk->type) {
case NBD_REPLY_TYPE_BLOCK_STATUS:
if (received) {
- s->quit = true;
+ nbd_channel_error(s, -EINVAL);
error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
nbd_iter_channel_error(&iter, -EINVAL, &local_err);
}
@@ -724,13 +735,13 @@ static int nbd_co_receive_blockstatus_reply(NBDClientSession *s,
payload, length, extent,
&local_err);
if (ret < 0) {
- s->quit = true;
+ nbd_channel_error(s, ret);
nbd_iter_channel_error(&iter, ret, &local_err);
}
break;
default:
if (!nbd_reply_type_is_error(chunk->type)) {
- s->quit = true;
+ nbd_channel_error(s, -EINVAL);
error_setg(&local_err,
"Unexpected reply type: %d (%s) "
"for CMD_BLOCK_STATUS",
--
2.11.1
[adding Dan]
On 7/31/18 12:30 PM, Vladimir Sementsov-Ogievskiy wrote:
> To implement reconnect we need several states for the client:
> CONNECTED, QUIT and two CONNECTING states. CONNECTING states will
> be realized in the following patches. This patch implements CONNECTED
> and QUIT.
>
> QUIT means, that we should close the connection and fail all current
> and further requests (like old quit = true).
>
> CONNECTED means that connection is ok, we can send requests (like old
> quit = false).
>
> For receiving loop we use a comparison of the current state with QUIT,
> because reconnect will be in the same loop, so it should be looping
> until the end.
>
> Opposite, for requests we use a comparison of the current state with
> CONNECTED, as we don't want to send requests in CONNECTING states (
> which are unreachable now, but will be reachable after the following
> commits)
>
> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> ---
> block/nbd-client.h | 9 ++++++++-
> block/nbd-client.c | 55 ++++++++++++++++++++++++++++++++----------------------
> 2 files changed, 41 insertions(+), 23 deletions(-)
Dan just recently proposed patches to SocketChardev in general to use a
state machine that distinguishes between connecting and connected:
https://lists.gnu.org/archive/html/qemu-devel/2019-01/msg03339.html
I'm wondering how much of his work is related or can be reused to get
restartable connections on NBD sockets?
Remember, right now, the NBD code always starts in blocking mode, and
does single-threaded handshaking until it is ready for transmission,
then switches to non-blocking mode for all subsequent transmissions (so,
for example, servicing a read request can assume that the socket is
valid without further waiting). But once we start allowing reconnects,
a read request will need to detect when one socket has gone down, and
wait for its replacement socket to come back up, in order to retry the
request; this retry happens in a non-blocking context, but it must
establish a new socket, and possibly convert that socket into TLS mode,
all before being ready to retry the read request.
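To make the shape of that problem concrete, here is a purely hypothetical
sketch, not code from this series, of how a request coroutine might retry
once the CONNECTING states are wired up; nbd_co_wait_for_reconnect() is an
invented placeholder, and the WAIT/NOWAIT semantics are only assumed from
their names:

    /* Hypothetical sketch only (block/nbd-client.c context assumed).
     * Assumes CONNECTING_WAIT means "requests wait for the reconnect"
     * and CONNECTING_NOWAIT means "requests fail immediately". */
    static coroutine_fn int nbd_co_request_retry(BlockDriverState *bs,
                                                 NBDRequest *request,
                                                 QEMUIOVector *qiov)
    {
        NBDClientSession *s = nbd_get_client_session(bs);
        int ret;

        for (;;) {
            if (s->state == NBD_CLIENT_CONNECTED) {
                ret = nbd_co_send_request(bs, request, qiov);
                if (ret != -EIO || s->state != NBD_CLIENT_CONNECTING_WAIT) {
                    return ret; /* success, or a failure we do not retry */
                }
            } else if (s->state == NBD_CLIENT_QUIT ||
                       s->state == NBD_CLIENT_CONNECTING_NOWAIT) {
                return -EIO; /* no reconnect is coming, or none is wanted */
            }
            /* NBD_CLIENT_CONNECTING_WAIT: yield until a reconnect attempt
             * restores the channel (invented wait primitive). */
            nbd_co_wait_for_reconnect(s);
        }
    }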
>
> diff --git a/block/nbd-client.h b/block/nbd-client.h
> index 2f047ba614..5367425774 100644
> --- a/block/nbd-client.h
> +++ b/block/nbd-client.h
> @@ -23,6 +23,13 @@ typedef struct {
> bool receiving; /* waiting for read_reply_co? */
> } NBDClientRequest;
>
> +typedef enum NBDClientState {
> + NBD_CLIENT_CONNECTING_WAIT,
> + NBD_CLIENT_CONNECTING_NOWAIT,
Would we be better off adding these enum values in the later patch that
uses them?
> + NBD_CLIENT_CONNECTED,
> + NBD_CLIENT_QUIT
> +} NBDClientState;
> +
> typedef struct NBDClientSession {
> QIOChannelSocket *sioc; /* The master data channel */
> QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
> @@ -32,10 +39,10 @@ typedef struct NBDClientSession {
> CoQueue free_sema;
> Coroutine *read_reply_co;
> int in_flight;
> + NBDClientState state;
>
> NBDClientRequest requests[MAX_NBD_REQUESTS];
> NBDReply reply;
> - bool quit;
> } NBDClientSession;
>
> NBDClientSession *nbd_get_client_session(BlockDriverState *bs);
> diff --git a/block/nbd-client.c b/block/nbd-client.c
> index 7eaf0149f0..a91fd3ea3e 100644
> --- a/block/nbd-client.c
> +++ b/block/nbd-client.c
> @@ -34,6 +34,12 @@
> #define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
> #define INDEX_TO_HANDLE(bs, index) ((index) ^ (uint64_t)(intptr_t)(bs))
>
> +/* @ret would be used for reconnect in future */
s/would/will/
> +static void nbd_channel_error(NBDClientSession *s, int ret)
> +{
> + s->state = NBD_CLIENT_QUIT;
> +}
> +
> static void nbd_recv_coroutines_wake_all(NBDClientSession *s)
> {
> int i;
> @@ -73,14 +79,15 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
> int ret = 0;
> Error *local_err = NULL;
>
> - while (!s->quit) {
> + while (s->state != NBD_CLIENT_QUIT) {
> assert(s->reply.handle == 0);
> ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
> if (local_err) {
> error_report_err(local_err);
> }
> if (ret <= 0) {
> - break;
> + nbd_channel_error(s, ret ? ret : -EIO);
> + continue;
I guess the continue instead of the break is pre-supposing that
nbd_channel_error() might be able to recover in later patches? But for
this patch, there is no change in control flow, because the loop condition
no longer holds and there are no further iterations, the same as a break
would have done.
The rest of the patch looks sane, but fails to apply easily for me (I'm
getting enough rebase churn that it's getting hard to say whether it is
accurate against the latest git master).
--
Eric Blake, Principal Software Engineer
Red Hat, Inc. +1-919-301-3226
Virtualization: qemu.org | libvirt.org
On Wed, Jan 16, 2019 at 10:25:03AM -0600, Eric Blake wrote:
> [adding Dan]
>
> On 7/31/18 12:30 PM, Vladimir Sementsov-Ogievskiy wrote:
> > To implement reconnect we need several states for the client:
> > CONNECTED, QUIT and two CONNECTING states. CONNECTING states will
> > be realized in the following patches. This patch implements CONNECTED
> > and QUIT.
> >
> > QUIT means, that we should close the connection and fail all current
> > and further requests (like old quit = true).
> >
> > CONNECTED means that connection is ok, we can send requests (like old
> > quit = false).
> >
> > For receiving loop we use a comparison of the current state with QUIT,
> > because reconnect will be in the same loop, so it should be looping
> > until the end.
> >
> > Opposite, for requests we use a comparison of the current state with
> > CONNECTED, as we don't want to send requests in CONNECTING states (
> > which are unreachable now, but will be reachable after the following
> > commits)
> >
> > Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> > ---
> >  block/nbd-client.h |  9 ++++++++-
> >  block/nbd-client.c | 55 ++++++++++++++++++++++++++++++++----------------------
> >  2 files changed, 41 insertions(+), 23 deletions(-)
>
> Dan just recently proposed patches to SocketChardev in general to use a
> state machine that distinguishes between connecting and connected:
>
> https://lists.gnu.org/archive/html/qemu-devel/2019-01/msg03339.html
>
> I'm wondering how much of his work is related or can be reused to get
> restartable connections on NBD sockets?

There's nothing really special about what I did. Vladimir looks to
have basically done the same kind of approach, but I don't think
there's real scope for sharing with chardevs, as each care about
their own set of states.

> Remember, right now, the NBD code always starts in blocking mode, and
> does single-threaded handshaking until it is ready for transmission,
> then switches to non-blocking mode for all subsequent transmissions (so,
> for example, servicing a read request can assume that the socket is
> valid without further waiting). But once we start allowing reconnects,
> a read request will need to detect when one socket has gone down, and
> wait for its replacement socket to come back up, in order to retry the
> request; this retry is in a context where we are in non-blocking
> context, but the retry must establish a new socket, and possibly convert
> the socket into TLS mode, all before being ready to retry the read request.

That makes it sound like the NBD handshake needs to be converted to
use entirely non-blocking I/O.

The TLS handshake already uses an asynchronous callback pattern and
to deal with that NBD had to create & run a private main loop to
complete the TLS handshake in its blocking code pattern.

You could potentially push this concept up to the top level. ie
implement the entire NBD handshake with async callbacks / non-blocking
I/O. Then simply use a private main loop to run that in a blocking
fashion for the initial connection. When you need to do re-connect
you now just run the async code without the extra main loop around
it.

Regards,
Daniel
--
|: https://berrange.com -o- https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o- https://fstop138.berrange.com :|
|: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|
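Daniel's idea can be illustrated with a short, purely hypothetical skeleton;
nbd_start_negotiate_async() and nbd_negotiate_done() are invented names used
only to show how the private main loop would wrap the asynchronous handshake
for the initial connection, while a reconnect would run the same async code
without the extra loop:

    /* Hypothetical skeleton, not existing qemu code. */
    typedef struct NBDHandshakeData {
        GMainLoop *loop; /* only set for the initial, blocking connection */
        bool complete;
        int ret;
    } NBDHandshakeData;

    /* Invented completion callback for the invented async handshake. */
    static void nbd_negotiate_done(int ret, void *opaque)
    {
        NBDHandshakeData *data = opaque;

        data->ret = ret;
        data->complete = true;
        if (data->loop) {
            g_main_loop_quit(data->loop);
        }
    }

    /* Initial connection: run the async handshake under a private loop. */
    static int nbd_negotiate_blocking(QIOChannel *ioc)
    {
        NBDHandshakeData data = {
            .loop = g_main_loop_new(g_main_context_default(), FALSE),
        };

        nbd_start_negotiate_async(ioc, nbd_negotiate_done, &data); /* invented */
        if (!data.complete) {
            g_main_loop_run(data.loop);
        }
        g_main_loop_unref(data.loop);
        return data.ret;
    }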
16.01.2019 19:58, Daniel P. Berrangé wrote:
> On Wed, Jan 16, 2019 at 10:25:03AM -0600, Eric Blake wrote:
>> [adding Dan]
>>
>> On 7/31/18 12:30 PM, Vladimir Sementsov-Ogievskiy wrote:
>>> To implement reconnect we need several states for the client:
>>> CONNECTED, QUIT and two CONNECTING states. CONNECTING states will
>>> be realized in the following patches. This patch implements CONNECTED
>>> and QUIT.
>>>
>>> QUIT means, that we should close the connection and fail all current
>>> and further requests (like old quit = true).
>>>
>>> CONNECTED means that connection is ok, we can send requests (like old
>>> quit = false).
>>>
>>> For receiving loop we use a comparison of the current state with QUIT,
>>> because reconnect will be in the same loop, so it should be looping
>>> until the end.
>>>
>>> Opposite, for requests we use a comparison of the current state with
>>> CONNECTED, as we don't want to send requests in CONNECTING states (
>>> which are unreachable now, but will be reachable after the following
>>> commits)
>>>
>>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>>> ---
>>> block/nbd-client.h | 9 ++++++++-
>>> block/nbd-client.c | 55 ++++++++++++++++++++++++++++++++----------------------
>>> 2 files changed, 41 insertions(+), 23 deletions(-)
>>
>> Dan just recently proposed patches to SocketChardev in general to use a
>> state machine that distinguishes between connecting and connected:
>>
>> https://lists.gnu.org/archive/html/qemu-devel/2019-01/msg03339.html
>>
>> I'm wondering how much of his work is related or can be reused to get
>> restartable connections on NBD sockets?
>
> There's nothing really special about what I did. Vladimir looks to
> have basically done the same kind of approach, but I don't think
> there's real scope for sharing with chardevs, as each care about
> their own set of states.
>
>> Remember, right now, the NBD code always starts in blocking mode, and
>> does single-threaded handshaking until it is ready for transmission,
>> then switches to non-blocking mode for all subsequent transmissions (so,
>> for example, servicing a read request can assume that the socket is
>> valid without further waiting). But once we start allowing reconnects,
>> a read request will need to detect when one socket has gone down, and
>> wait for its replacement socket to come back up, in order to retry the
>> request; this retry is in a context where we are in non-blocking
>> context, but the retry must establish a new socket, and possibly convert
>> the socket into TLS mode, all before being ready to retry the read request.
>
> That makes it sound like the NBD handshake needs to be converted to
> use entirely non-blocking I/O.
>
> The TLS handshake already uses an asynchronous callback pattern and
> to deal with that NBD had to create & run a private main loop to
> complete the TLS handshake in its blocking code pattern.
>
> You could potentially push this concept up to the top level. ie
> implement the entire NBD handshake with async callbacks / non-blocking
> I/O. Then simply use a private main loop to run that in a blocking
> fashion for the initial connection. When you need to do re-connect
> you now just run the async code without the extra main loop around
> it.
>
Hmm, you mean this code:
    data.loop = g_main_loop_new(g_main_context_default(), FALSE);
    trace_nbd_receive_starttls_tls_handshake();
    qio_channel_tls_handshake(tioc,
                              nbd_tls_handshake,
                              &data,
                              NULL,
                              NULL);

    if (!data.complete) {
        g_main_loop_run(data.loop);
    }
    g_main_loop_unref(data.loop);
What does this do in the context of QEMU? Isn't it more correct to do
coroutine-based async stuff, like in qcow2_open():
    if (qemu_in_coroutine()) {
        /* From bdrv_co_create. */
        qcow2_open_entry(&qoc);
    } else {
        assert(qemu_get_current_aio_context() == qemu_get_aio_context());

        qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc));
        BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    }

    return qoc.ret;
And then yield after handshake() and enter back from nbd_tls_handshake callback?
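A rough sketch of that coroutine pattern (illustrative only, not the current
nbd code; the struct and the nbd_co_do_tls_handshake()/nbd_tls_handshake_done()
names are made up): start the handshake, yield, and wake the coroutine from
the completion callback instead of spinning a private GMainLoop:

    typedef struct NBDTLSHandshakeData {
        Coroutine *co;
        bool complete;
        Error *err;
    } NBDTLSHandshakeData;

    static void nbd_tls_handshake_done(QIOTask *task, void *opaque)
    {
        NBDTLSHandshakeData *data = opaque;

        qio_task_propagate_error(task, &data->err);
        data->complete = true;
        /* Guard against the handshake completing synchronously, before the
         * caller has had a chance to yield. */
        if (!qemu_coroutine_entered(data->co)) {
            aio_co_wake(data->co);
        }
    }

    static coroutine_fn int nbd_co_do_tls_handshake(QIOChannelTLS *tioc,
                                                    Error **errp)
    {
        NBDTLSHandshakeData data = { .co = qemu_coroutine_self() };

        qio_channel_tls_handshake(tioc, nbd_tls_handshake_done, &data,
                                  NULL, NULL);
        while (!data.complete) {
            /* Woken again by nbd_tls_handshake_done() via aio_co_wake(). */
            qemu_coroutine_yield();
        }
        if (data.err) {
            error_propagate(errp, data.err);
            return -EIO;
        }
        return 0;
    }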
Hmm, also, I checked: nobody calls g_main_context_default() in qemu, except
util/main-loop.c, nbd and tests. So, I'm not sure that this is a valid thing
to do in nbd.
--
Best regards,
Vladimir
05.02.2019 19:35, Vladimir Sementsov-Ogievskiy wrote:
> 16.01.2019 19:58, Daniel P. Berrangé wrote:
>> On Wed, Jan 16, 2019 at 10:25:03AM -0600, Eric Blake wrote:
>>> [adding Dan]
>>>
>>> On 7/31/18 12:30 PM, Vladimir Sementsov-Ogievskiy wrote:
>>>> To implement reconnect we need several states for the client:
>>>> CONNECTED, QUIT and two CONNECTING states. CONNECTING states will
>>>> be realized in the following patches. This patch implements CONNECTED
>>>> and QUIT.
>>>>
>>>> QUIT means, that we should close the connection and fail all current
>>>> and further requests (like old quit = true).
>>>>
>>>> CONNECTED means that connection is ok, we can send requests (like old
>>>> quit = false).
>>>>
>>>> For receiving loop we use a comparison of the current state with QUIT,
>>>> because reconnect will be in the same loop, so it should be looping
>>>> until the end.
>>>>
>>>> Opposite, for requests we use a comparison of the current state with
>>>> CONNECTED, as we don't want to send requests in CONNECTING states (
>>>> which are unreachable now, but will be reachable after the following
>>>> commits)
>>>>
>>>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
>>>> ---
>>>> block/nbd-client.h | 9 ++++++++-
>>>> block/nbd-client.c | 55 ++++++++++++++++++++++++++++++++----------------------
>>>> 2 files changed, 41 insertions(+), 23 deletions(-)
>>>
>>> Dan just recently proposed patches to SocketChardev in general to use a
>>> state machine that distinguishes between connecting and connected:
>>>
>>> https://lists.gnu.org/archive/html/qemu-devel/2019-01/msg03339.html
>>>
>>> I'm wondering how much of his work is related or can be reused to get
>>> restartable connections on NBD sockets?
>>
>> There's nothing really special about what I did. Vladimir looks to
>> have basically done the same kind of approach, but I don't think
>> there's real scope for sharing with chardevs, as each care about
>> their own set of states.
>>
>>> Remember, right now, the NBD code always starts in blocking mode, and
>>> does single-threaded handshaking until it is ready for transmission,
>>> then switches to non-blocking mode for all subsequent transmissions (so,
>>> for example, servicing a read request can assume that the socket is
>>> valid without further waiting). But once we start allowing reconnects,
>>> a read request will need to detect when one socket has gone down, and
>>> wait for its replacement socket to come back up, in order to retry the
>>> request; this retry is in a context where we are in non-blocking
>>> context, but the retry must establish a new socket, and possibly convert
>>> the socket into TLS mode, all before being ready to retry the read request.
>>
>> That makes it sound like the NBD handshake needs to be converted to
>> use entirely non-blocking I/O.
>>
>> The TLS handshake already uses an asynchronous callback pattern and
>> to deal with that NBD had to create & run a private main loop to
>> complete the TLS handshake in its blocking code pattern.
>>
>> You could potentially push this concept up to the top level. ie
>> implement the entire NBD handshake with async callbacks / non-blocking
>> I/O. Then simply use a private main loop to run that in a blocking
>> fashion for the initial connection. When you need to do re-connect
>> you now just run the async code without the extra main loop around
>> it.
>>
>
> Hmm, you mean this code:
>
> data.loop = g_main_loop_new(g_main_context_default(), FALSE);
> trace_nbd_receive_starttls_tls_handshake();
> qio_channel_tls_handshake(tioc,
> nbd_tls_handshake,
> &data,
> NULL,
> NULL);
>
> if (!data.complete) {
> g_main_loop_run(data.loop);
> }
> g_main_loop_unref(data.loop);
>
>
> What this does in context of Qemu? Isn't it more correct to do
> coroutine based async staff, like in qcow2_open():
>
> if (qemu_in_coroutine()) {
> /* From bdrv_co_create. */
> qcow2_open_entry(&qoc);
> } else {
> assert(qemu_get_current_aio_context() == qemu_get_aio_context());
> qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc));
> BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
> }
> return qoc.ret;
>
> And then yield after handshake() and enter back from nbd_tls_handshake callback?
>
> Hmm, also, checked, nobody calls g_main_context_default() in qemu, except
> util/main-loop.c, nbd and tests. So, I'm not sure that this is a valid thing
> to do in nbd..
>
>
Aha, we just don't have any bs in the nbd/ code. But anyway, moving to AioContext and making
negotiation non-blocking would be a good idea. I'll try to do something around it.
--
Best regards,
Vladimir