curl_multi_do_locked() currently marks all sockets as ready. That is
not only inefficient, but in fact unsafe (the loop it uses to do so is
unsafe). A follow-up patch will change that, but to do so,
curl_multi_do_locked() needs to know exactly which socket is ready; and
that is accomplished by this patch here.
Cc: qemu-stable@nongnu.org
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
block/curl.c | 29 ++++++++++++++++-------------
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/block/curl.c b/block/curl.c
index 8a45b371cc..05f77a38c2 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -189,15 +189,15 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
switch (action) {
case CURL_POLL_IN:
aio_set_fd_handler(s->aio_context, fd, false,
- curl_multi_read, NULL, NULL, state);
+ curl_multi_read, NULL, NULL, socket);
break;
case CURL_POLL_OUT:
aio_set_fd_handler(s->aio_context, fd, false,
- NULL, curl_multi_do, NULL, state);
+ NULL, curl_multi_do, NULL, socket);
break;
case CURL_POLL_INOUT:
aio_set_fd_handler(s->aio_context, fd, false,
- curl_multi_read, curl_multi_do, NULL, state);
+ curl_multi_read, curl_multi_do, NULL, socket);
break;
case CURL_POLL_REMOVE:
aio_set_fd_handler(s->aio_context, fd, false,
@@ -394,9 +394,10 @@ static void curl_multi_check_completion(BDRVCURLState *s)
}
/* Called with s->mutex held. */
-static void curl_multi_do_locked(CURLState *s)
+static void curl_multi_do_locked(CURLSocket *ready_socket)
{
CURLSocket *socket, *next_socket;
+ CURLState *s = ready_socket->state;
int running;
int r;
@@ -415,21 +416,23 @@ static void curl_multi_do_locked(CURLState *s)
static void curl_multi_do(void *arg)
{
- CURLState *s = (CURLState *)arg;
+ CURLSocket *socket = arg;
+ BDRVCURLState *s = socket->state->s;
- qemu_mutex_lock(&s->s->mutex);
- curl_multi_do_locked(s);
- qemu_mutex_unlock(&s->s->mutex);
+ qemu_mutex_lock(&s->mutex);
+ curl_multi_do_locked(socket);
+ qemu_mutex_unlock(&s->mutex);
}
static void curl_multi_read(void *arg)
{
- CURLState *s = (CURLState *)arg;
+ CURLSocket *socket = arg;
+ BDRVCURLState *s = socket->state->s;
- qemu_mutex_lock(&s->s->mutex);
- curl_multi_do_locked(s);
- curl_multi_check_completion(s->s);
- qemu_mutex_unlock(&s->s->mutex);
+ qemu_mutex_lock(&s->mutex);
+ curl_multi_do_locked(socket);
+ curl_multi_check_completion(s);
+ qemu_mutex_unlock(&s->mutex);
}
static void curl_multi_timeout_do(void *arg)
--
2.21.0
On 8/27/19 12:34 PM, Max Reitz wrote:
> curl_multi_do_locked() currently marks all sockets as ready. That is
> not only inefficient, but in fact unsafe (the loop is). A follow-up
> patch will change that, but to do so, curl_multi_do_locked() needs to
> know exactly which socket is ready; and that is accomplished by this
> patch here.
>
> Cc: qemu-stable@nongnu.org
> Signed-off-by: Max Reitz <mreitz@redhat.com>
> ---
> block/curl.c | 29 ++++++++++++++++-------------
> 1 file changed, 16 insertions(+), 13 deletions(-)
>
> diff --git a/block/curl.c b/block/curl.c
> index 8a45b371cc..05f77a38c2 100644
> --- a/block/curl.c
> +++ b/block/curl.c
> @@ -189,15 +189,15 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
> switch (action) {
> case CURL_POLL_IN:
> aio_set_fd_handler(s->aio_context, fd, false,
> - curl_multi_read, NULL, NULL, state);
> + curl_multi_read, NULL, NULL, socket);
> break;
> case CURL_POLL_OUT:
> aio_set_fd_handler(s->aio_context, fd, false,
> - NULL, curl_multi_do, NULL, state);
> + NULL, curl_multi_do, NULL, socket);
> break;
> case CURL_POLL_INOUT:
> aio_set_fd_handler(s->aio_context, fd, false,
> - curl_multi_read, curl_multi_do, NULL, state);
> + curl_multi_read, curl_multi_do, NULL, socket);
> break;
> case CURL_POLL_REMOVE:
> aio_set_fd_handler(s->aio_context, fd, false,
> @@ -394,9 +394,10 @@ static void curl_multi_check_completion(BDRVCURLState *s)
> }
>
> /* Called with s->mutex held. */
> -static void curl_multi_do_locked(CURLState *s)
> +static void curl_multi_do_locked(CURLSocket *ready_socket)
> {
> CURLSocket *socket, *next_socket;
> + CURLState *s = socket->state;
Did you mean to use ready_socket here instead?
> int running;
> int r;
>
> @@ -415,21 +416,23 @@ static void curl_multi_do_locked(CURLState *s)
>
> static void curl_multi_do(void *arg)
> {
> - CURLState *s = (CURLState *)arg;
> + CURLSocket *socket = arg;
> + BDRVCURLState *s = socket->state->s;
>
> - qemu_mutex_lock(&s->s->mutex);
> - curl_multi_do_locked(s);
> - qemu_mutex_unlock(&s->s->mutex);
> + qemu_mutex_lock(&s->mutex);
> + curl_multi_do_locked(socket);
> + qemu_mutex_unlock(&s->mutex);
> }
>
> static void curl_multi_read(void *arg)
> {
> - CURLState *s = (CURLState *)arg;
> + CURLSocket *socket = arg;
> + BDRVCURLState *s = socket->state->s;
>
> - qemu_mutex_lock(&s->s->mutex);
> - curl_multi_do_locked(s);
> - curl_multi_check_completion(s->s);
> - qemu_mutex_unlock(&s->s->mutex);
bye bye &s->s->mutex ! you're very nasty !!
> + qemu_mutex_lock(&s->mutex);
> + curl_multi_do_locked(socket);
> + curl_multi_check_completion(s);
> + qemu_mutex_unlock(&s->mutex);
> }
>
> static void curl_multi_timeout_do(void *arg)
>
On 09.09.19 22:10, John Snow wrote:
>
>
> On 8/27/19 12:34 PM, Max Reitz wrote:
>> curl_multi_do_locked() currently marks all sockets as ready. That is
>> not only inefficient, but in fact unsafe (the loop is). A follow-up
>> patch will change that, but to do so, curl_multi_do_locked() needs to
>> know exactly which socket is ready; and that is accomplished by this
>> patch here.
>>
>> Cc: qemu-stable@nongnu.org
>> Signed-off-by: Max Reitz <mreitz@redhat.com>
>> ---
>> block/curl.c | 29 ++++++++++++++++-------------
>> 1 file changed, 16 insertions(+), 13 deletions(-)
>>
>> diff --git a/block/curl.c b/block/curl.c
>> index 8a45b371cc..05f77a38c2 100644
>> --- a/block/curl.c
>> +++ b/block/curl.c
>> @@ -189,15 +189,15 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
>> switch (action) {
>> case CURL_POLL_IN:
>> aio_set_fd_handler(s->aio_context, fd, false,
>> - curl_multi_read, NULL, NULL, state);
>> + curl_multi_read, NULL, NULL, socket);
>> break;
>> case CURL_POLL_OUT:
>> aio_set_fd_handler(s->aio_context, fd, false,
>> - NULL, curl_multi_do, NULL, state);
>> + NULL, curl_multi_do, NULL, socket);
>> break;
>> case CURL_POLL_INOUT:
>> aio_set_fd_handler(s->aio_context, fd, false,
>> - curl_multi_read, curl_multi_do, NULL, state);
>> + curl_multi_read, curl_multi_do, NULL, socket);
>> break;
>> case CURL_POLL_REMOVE:
>> aio_set_fd_handler(s->aio_context, fd, false,
>> @@ -394,9 +394,10 @@ static void curl_multi_check_completion(BDRVCURLState *s)
>> }
>>
>> /* Called with s->mutex held. */
>> -static void curl_multi_do_locked(CURLState *s)
>> +static void curl_multi_do_locked(CURLSocket *ready_socket)
>> {
>> CURLSocket *socket, *next_socket;
>> + CURLState *s = socket->state;
>
> Did you mean to use ready_socket here instead?
Oops... Yes, I suppose so.
(An artifact from pulling apart one large patch, sorry.)
Max
>> int running;
>> int r;
>>
>> @@ -415,21 +416,23 @@ static void curl_multi_do_locked(CURLState *s)
>>
>> static void curl_multi_do(void *arg)
>> {
>> - CURLState *s = (CURLState *)arg;
>> + CURLSocket *socket = arg;
>> + BDRVCURLState *s = socket->state->s;
>>
>> - qemu_mutex_lock(&s->s->mutex);
>> - curl_multi_do_locked(s);
>> - qemu_mutex_unlock(&s->s->mutex);
>> + qemu_mutex_lock(&s->mutex);
>> + curl_multi_do_locked(socket);
>> + qemu_mutex_unlock(&s->mutex);
>> }
>>
>> static void curl_multi_read(void *arg)
>> {
>> - CURLState *s = (CURLState *)arg;
>> + CURLSocket *socket = arg;
>> + BDRVCURLState *s = socket->state->s;
>>
>> - qemu_mutex_lock(&s->s->mutex);
>> - curl_multi_do_locked(s);
>> - curl_multi_check_completion(s->s);
>> - qemu_mutex_unlock(&s->s->mutex);
>
> bye bye &s->s->mutex ! you're very nasty !!
>
>> + qemu_mutex_lock(&s->mutex);
>> + curl_multi_do_locked(socket);
>> + curl_multi_check_completion(s);
>> + qemu_mutex_unlock(&s->mutex);
>> }
>>
>> static void curl_multi_timeout_do(void *arg)
>>
© 2016 - 2025 Red Hat, Inc.