[Qemu-devel] [PATCH 07/17] throttle-groups: do not use qemu_co_enter_next

There is a newer version of this series
Posted by Paolo Bonzini 8 years, 9 months ago
Prepare for removing this function; always restart throttled requests
from coroutine context.  This will matter when restarting throttled
requests has to acquire a CoMutex.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block/throttle-groups.c | 65 +++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 58 insertions(+), 7 deletions(-)

diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 69bfbd4..d66bf62 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -260,6 +260,18 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
     return must_wait;
 }
 
+/* Start the next pending I/O request for a BlockBackend.
+ *
+ * @blk:       the current BlockBackend
+ * @is_write:  the type of operation (read/write)
+ */
+static bool throttle_group_co_restart_queue(BlockBackend *blk, bool is_write)
+{
+    BlockBackendPublic *blkp = blk_get_public(blk);
+
+    return qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
+}
+
 /* Look for the next pending I/O request and schedule it.
  *
  * This assumes that tg->lock is held.
@@ -287,7 +299,7 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
     if (!must_wait) {
         /* Give preference to requests from the current blk */
         if (qemu_in_coroutine() &&
-            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
+            throttle_group_co_restart_queue(blk, is_write)) {
             token = blk;
         } else {
             ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
@@ -340,18 +352,57 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
     qemu_mutex_unlock(&tg->lock);
 }
 
-void throttle_group_restart_blk(BlockBackend *blk)
+typedef struct {
+    BlockBackend *blk;
+    bool is_write;
+    int ret;
+} RestartData;
+
+static void throttle_group_restart_queue_entry(void *opaque)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
+    RestartData *data = opaque;
+
+    data->ret = throttle_group_co_restart_queue(data->blk, data->is_write);
+}
+
+static int throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+{
+    Coroutine *co;
+    RestartData rd = {
+        .blk = blk,
+        .is_write = is_write
+    };
+
+    aio_context_acquire(blk_get_aio_context(blk));
+    co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
+    /* The request doesn't start until after throttle_group_restart_queue_entry
+     * returns, so the coroutine cannot yield.
+     */
+    qemu_coroutine_enter(co);
+    aio_context_release(blk_get_aio_context(blk));
+    return rd.ret;
+}
+
+static void throttle_group_restart_blk_entry(void *opaque)
+{
+    BlockBackend *blk = opaque;
     int i;
 
     for (i = 0; i < 2; i++) {
-        while (qemu_co_enter_next(&blkp->throttled_reqs[i])) {
+        while (throttle_group_co_restart_queue(blk, i)) {
             ;
         }
     }
 }
 
+void throttle_group_restart_blk(BlockBackend *blk)
+{
+    Coroutine *co;
+
+    co = qemu_coroutine_create(throttle_group_restart_blk_entry, blk);
+    qemu_coroutine_enter(co);
+}
+
 /* Update the throttle configuration for a particular group. Similar
  * to throttle_config(), but guarantees atomicity within the
  * throttling group.
@@ -376,8 +427,8 @@ void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
     throttle_config(ts, tt, cfg);
     qemu_mutex_unlock(&tg->lock);
 
-    qemu_co_enter_next(&blkp->throttled_reqs[0]);
-    qemu_co_enter_next(&blkp->throttled_reqs[1]);
+    throttle_group_restart_queue(blk, 0);
+    throttle_group_restart_queue(blk, 1);
 }
 
 /* Get the throttle configuration from a particular group. Similar to
@@ -417,7 +468,7 @@ static void timer_cb(BlockBackend *blk, bool is_write)
 
     /* Run the request that was waiting for this timer */
     aio_context_acquire(blk_get_aio_context(blk));
-    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
+    empty_queue = !throttle_group_restart_queue(blk, is_write);
     aio_context_release(blk_get_aio_context(blk));
 
     /* If the request queue was empty then we have to take care of
-- 
2.9.3



Re: [Qemu-devel] [Qemu-block] [PATCH 07/17] throttle-groups: do not use qemu_co_enter_next
Posted by Stefan Hajnoczi 8 years, 9 months ago
On Thu, Apr 20, 2017 at 02:00:48PM +0200, Paolo Bonzini wrote:
> Prepare for removing this function; always restart throttled requests
> from coroutine context.  This will matter when restarting throttled
> requests has to acquire a CoMutex.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  block/throttle-groups.c | 65 +++++++++++++++++++++++++++++++++++++++++++------
>  1 file changed, 58 insertions(+), 7 deletions(-)

I don't understand this patch :(.

> diff --git a/block/throttle-groups.c b/block/throttle-groups.c
> index 69bfbd4..d66bf62 100644
> --- a/block/throttle-groups.c
> +++ b/block/throttle-groups.c
> @@ -260,6 +260,18 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
>      return must_wait;
>  }
>  
> +/* Start the next pending I/O request for a BlockBackend.
> + *
> + * @blk:       the current BlockBackend
> + * @is_write:  the type of operation (read/write)

The return value is undocumented.
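Something along these lines added to the comment block would do (wording
is just a suggestion):

  * Returns:   true if a queued request was restarted, false if the
  *            queue was empty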

> + */
> +static bool throttle_group_co_restart_queue(BlockBackend *blk, bool is_write)

This function calls qemu_co_queue_next(), which has coroutine_fn, so
this function must also be marked coroutine_fn.
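i.e. something like (untested):

  static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
                                                            bool is_write)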

> +{
> +    BlockBackendPublic *blkp = blk_get_public(blk);
> +
> +    return qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
> +}
> +
>  /* Look for the next pending I/O request and schedule it.
>   *
>   * This assumes that tg->lock is held.
> @@ -287,7 +299,7 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
>      if (!must_wait) {
>          /* Give preference to requests from the current blk */
>          if (qemu_in_coroutine() &&
> -            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
> +            throttle_group_co_restart_queue(blk, is_write)) {
>              token = blk;
>          } else {
>              ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
> @@ -340,18 +352,57 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
>      qemu_mutex_unlock(&tg->lock);
>  }
>  
> -void throttle_group_restart_blk(BlockBackend *blk)
> +typedef struct {
> +    BlockBackend *blk;
> +    bool is_write;
> +    int ret;

s/int/bool/

> +} RestartData;
> +
> +static void throttle_group_restart_queue_entry(void *opaque)

This is a coroutine entry function, it must be marked coroutine_fn.

>  {
> -    BlockBackendPublic *blkp = blk_get_public(blk);
> +    RestartData *data = opaque;
> +
> +    data->ret = throttle_group_co_restart_queue(data->blk, data->is_write);
> +}
> +
> +static int throttle_group_restart_queue(BlockBackend *blk, bool is_write)

s/int/bool/
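i.e., together with the ret field change above, something like (untested):

  static bool throttle_group_restart_queue(BlockBackend *blk, bool is_write)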

> +{
> +    Coroutine *co;
> +    RestartData rd = {
> +        .blk = blk,
> +        .is_write = is_write
> +    };
> +
> +    aio_context_acquire(blk_get_aio_context(blk));
> +    co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
> +    /* The request doesn't start until after throttle_group_restart_queue_entry
> +     * returns, so the coroutine cannot yield.
> +     */

I don't understand this sentence.

This function assumes that throttle_group_restart_queue_entry(),
throttle_group_co_restart_queue(), and qemu_co_queue_next() do not
yield.  Making these assumptions is fragile :(.

But how does that relate to "the request doesn't start until after
throttle_group_restart_queue_entry returns"?
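If the no-yield assumption is going to stay, it would help to make it
explicit and checkable, e.g. (rough sketch, untested; this assumes ret
stays an int so -1 can serve as a sentinel):

      RestartData rd = {
          .blk = blk,
          .is_write = is_write,
          .ret = -1,
      };
      ...
      qemu_coroutine_enter(co);
      /* the entry function must not yield, so ret has been set by now */
      assert(rd.ret != -1);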

> +    qemu_coroutine_enter(co);
> +    aio_context_release(blk_get_aio_context(blk));
> +    return rd.ret;
> +}
> +
> +static void throttle_group_restart_blk_entry(void *opaque)

This is a coroutine entry function so it must be marked coroutine_fn.

> +{
> +    BlockBackend *blk = opaque;
>      int i;
>  
>      for (i = 0; i < 2; i++) {
> -        while (qemu_co_enter_next(&blkp->throttled_reqs[i])) {
> +        while (throttle_group_co_restart_queue(blk, i)) {
>              ;
>          }
>      }

Why is the throttle_group_restart_blk_entry() coroutine necessary?
throttle_group_restart_queue() already spawns a coroutine which is
used for queuing.  I think this function could be a non-coroutine
function.
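Something along these lines, perhaps (untested):

  void throttle_group_restart_blk(BlockBackend *blk)
  {
      int i;

      for (i = 0; i < 2; i++) {
          while (throttle_group_restart_queue(blk, i)) {
              ;
          }
      }
  }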

>  }
>  
> +void throttle_group_restart_blk(BlockBackend *blk)
> +{
> +    Coroutine *co;
> +
> +    co = qemu_coroutine_create(throttle_group_restart_blk_entry, blk);
> +    qemu_coroutine_enter(co);
> +}
> +
>  /* Update the throttle configuration for a particular group. Similar
>   * to throttle_config(), but guarantees atomicity within the
>   * throttling group.
> @@ -376,8 +427,8 @@ void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
>      throttle_config(ts, tt, cfg);
>      qemu_mutex_unlock(&tg->lock);
>  
> -    qemu_co_enter_next(&blkp->throttled_reqs[0]);
> -    qemu_co_enter_next(&blkp->throttled_reqs[1]);
> +    throttle_group_restart_queue(blk, 0);
> +    throttle_group_restart_queue(blk, 1);
>  }
>  
>  /* Get the throttle configuration from a particular group. Similar to
> @@ -417,7 +468,7 @@ static void timer_cb(BlockBackend *blk, bool is_write)
>  
>      /* Run the request that was waiting for this timer */
>      aio_context_acquire(blk_get_aio_context(blk));
> -    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
> +    empty_queue = !throttle_group_restart_queue(blk, is_write);
>      aio_context_release(blk_get_aio_context(blk));
>  
>      /* If the request queue was empty then we have to take care of
> -- 
> 2.9.3