[PATCH 0/3] x86/hvm: follow-up to f7039ee41b3d
Posted by Jan Beulich 3 years, 9 months ago
Much of what gets done here was discussed in the context of
"ioreq: handle pending emulation racing with ioreq server
destruction" already.

1: fold hvm_io_assist() into its only caller
2: re-work hvm_wait_for_io() a little
3: fold both instances of looking up a hvm_ioreq_vcpu with a request pending

Jan

[PATCH 1/3] x86/HVM: fold hvm_io_assist() into its only caller
Posted by Jan Beulich 3 years, 9 months ago
While there are two call sites, the function they're in can be slightly
re-arranged such that the code sequence can be added at its bottom. Note
that the function's only caller has already checked sv->pending, and
that the prior while() loop was just a slightly more fancy if()
(allowing an early break out of the construct).
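
In other words, given that the caller has already checked sv->pending and
every path through the body ends the iteration, the change boils down to the
following control-flow transformation (sketch only, bodies elided):

    /* Before: the condition is already known to be true on entry, and every
     * path either break-s out or clears sv->pending, so at most one pass. */
    while ( sv->pending )
    {
        /* ... handle one ioreq state transition ... */
        break;
    }

    /* After: execute once, still permitting an early "break". */
    do {
        /* ... handle one ioreq state transition ... */
        break;
    } while ( false );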

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -103,23 +103,12 @@ bool hvm_io_pending(struct vcpu *v)
     return false;
 }
 
-static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
-{
-    struct vcpu *v = sv->vcpu;
-    ioreq_t *ioreq = &v->arch.hvm.hvm_io.io_req;
-
-    if ( hvm_ioreq_needs_completion(ioreq) )
-        ioreq->data = data;
-
-    sv->pending = false;
-}
-
 static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
 {
     unsigned int prev_state = STATE_IOREQ_NONE;
+    uint64_t data = ~0;
 
-    while ( sv->pending )
-    {
+    do {
         unsigned int state = p->state;
 
         smp_rmb();
@@ -132,7 +121,6 @@ static bool hvm_wait_for_io(struct hvm_i
              * emulator is dying and it races with an I/O being
              * requested.
              */
-            hvm_io_assist(sv, ~0ul);
             break;
         }
 
@@ -149,7 +137,7 @@ static bool hvm_wait_for_io(struct hvm_i
         {
         case STATE_IORESP_READY: /* IORESP_READY -> NONE */
             p->state = STATE_IOREQ_NONE;
-            hvm_io_assist(sv, p->data);
+            data = p->data;
             break;
         case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
@@ -164,7 +152,13 @@ static bool hvm_wait_for_io(struct hvm_i
             domain_crash(sv->vcpu->domain);
             return false; /* bail */
         }
-    }
+    } while ( false );
+
+    p = &sv->vcpu->arch.hvm.hvm_io.io_req;
+    if ( hvm_ioreq_needs_completion(p) )
+        p->data = data;
+
+    sv->pending = false;
 
     return true;
 }


RE: [PATCH 1/3] x86/HVM: fold hvm_io_assist() into its only caller
Posted by Paul Durrant 3 years, 9 months ago
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: 15 July 2020 13:04
> To: xen-devel@lists.xenproject.org
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>; Roger Pau Monné <roger.pau@citrix.com>; Wei Liu
> <wl@xen.org>; Paul Durrant <paul@xen.org>
> Subject: [PATCH 1/3] x86/HVM: fold hvm_io_assist() into its only caller
> 
> While there are two call sites, the function they're in can be slightly
> re-arranged such that the code sequence can be added at its bottom. Note
> that the function's only caller has already checked sv->pending, and
> that the prior while() loop was just a slightly more fancy if()
> (allowing an early break out of the construct).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -103,23 +103,12 @@ bool hvm_io_pending(struct vcpu *v)
>      return false;
>  }
> 
> -static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
> -{
> -    struct vcpu *v = sv->vcpu;
> -    ioreq_t *ioreq = &v->arch.hvm.hvm_io.io_req;
> -
> -    if ( hvm_ioreq_needs_completion(ioreq) )
> -        ioreq->data = data;
> -
> -    sv->pending = false;
> -}
> -
>  static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
>  {
>      unsigned int prev_state = STATE_IOREQ_NONE;
> +    uint64_t data = ~0;
> 
> -    while ( sv->pending )
> -    {
> +    do {
>          unsigned int state = p->state;

I guess this is beneficial from the point of view of restricting scope and...

> 
>          smp_rmb();
> @@ -132,7 +121,6 @@ static bool hvm_wait_for_io(struct hvm_i
>               * emulator is dying and it races with an I/O being
>               * requested.
>               */
> -            hvm_io_assist(sv, ~0ul);
>              break;

...(as you say) allowing this early break, but a forward jump to an 'out' label would be more consistent with other code. It works though so...

Reviewed-by: Paul Durrant <paul@xen.org>


>          }
> 
> @@ -149,7 +137,7 @@ static bool hvm_wait_for_io(struct hvm_i
>          {
>          case STATE_IORESP_READY: /* IORESP_READY -> NONE */
>              p->state = STATE_IOREQ_NONE;
> -            hvm_io_assist(sv, p->data);
> +            data = p->data;
>              break;
>          case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
>          case STATE_IOREQ_INPROCESS:
> @@ -164,7 +152,13 @@ static bool hvm_wait_for_io(struct hvm_i
>              domain_crash(sv->vcpu->domain);
>              return false; /* bail */
>          }
> -    }
> +    } while ( false );
> +
> +    p = &sv->vcpu->arch.hvm.hvm_io.io_req;
> +    if ( hvm_ioreq_needs_completion(p) )
> +        p->data = data;
> +
> +    sv->pending = false;
> 
>      return true;
>  }



Re: [PATCH 1/3] x86/HVM: fold hvm_io_assist() into its only caller
Posted by Jan Beulich 3 years, 9 months ago
On 15.07.2020 14:40, Paul Durrant wrote:
>> From: Jan Beulich <jbeulich@suse.com>
>> Sent: 15 July 2020 13:04
>>
>>  static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
>>  {
>>      unsigned int prev_state = STATE_IOREQ_NONE;
>> +    uint64_t data = ~0;
>>
>> -    while ( sv->pending )
>> -    {
>> +    do {
>>          unsigned int state = p->state;
> 
> I guess this is beneficial from the point of view of restricting scope and...
> 
>>
>>          smp_rmb();
>> @@ -132,7 +121,6 @@ static bool hvm_wait_for_io(struct hvm_i
>>               * emulator is dying and it races with an I/O being
>>               * requested.
>>               */
>> -            hvm_io_assist(sv, ~0ul);
>>              break;
> 
> ...(as you say) allowing this early break, but a forward jump to an 'out' label would be more consistent with other code. It works though so...

Since this gets restructured by subsequent patches I thought I'd
avoid the introduction of a disliked by me "goto".

> Reviewed-by: Paul Durrant <paul@xen.org>

Thanks, Jan

[PATCH 2/3] x86/HVM: re-work hvm_wait_for_io() a little
Posted by Jan Beulich 3 years, 9 months ago
Convert the function's main loop to a more ordinary one, without goto
and without initial steps not needing to be inside a loop at all.
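
In outline, the resulting structure is (abridged from the diff below; error
paths reduced to their effect):

    state = p->state;
    smp_rmb();

    while ( likely(state != STATE_IOREQ_NONE) )
    {
        /* (check for a bogus backwards state transition elided: crash + bail) */

        switch ( prev_state = state )
        {
        case STATE_IORESP_READY:        /* IORESP_READY -> NONE */
            p->state = STATE_IOREQ_NONE;
            data = p->data;
            break;

        case STATE_IOREQ_READY:         /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(sv->ioreq_evtchn,
                                      ({ state = p->state;
                                         smp_rmb();
                                         state != prev_state; }));
            continue;                   /* re-evaluate the freshly read state */

        default:                        /* unknown state: crash + bail */
            domain_crash(sv->vcpu->domain);
            return false;
        }

        break;                          /* response consumed - leave the loop */
    }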

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -106,24 +106,17 @@ bool hvm_io_pending(struct vcpu *v)
 static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
 {
     unsigned int prev_state = STATE_IOREQ_NONE;
+    unsigned int state = p->state;
     uint64_t data = ~0;
 
-    do {
-        unsigned int state = p->state;
-
-        smp_rmb();
-
-    recheck:
-        if ( unlikely(state == STATE_IOREQ_NONE) )
-        {
-            /*
-             * The only reason we should see this case is when an
-             * emulator is dying and it races with an I/O being
-             * requested.
-             */
-            break;
-        }
+    smp_rmb();
 
+    /*
+     * The only reason we should see this condition be false is when an
+     * emulator dying races with I/O being requested.
+     */
+    while ( likely(state != STATE_IOREQ_NONE) )
+    {
         if ( unlikely(state < prev_state) )
         {
             gdprintk(XENLOG_ERR, "Weird HVM ioreq state transition %u -> %u\n",
@@ -139,20 +132,24 @@ static bool hvm_wait_for_io(struct hvm_i
             p->state = STATE_IOREQ_NONE;
             data = p->data;
             break;
+
         case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
             wait_on_xen_event_channel(sv->ioreq_evtchn,
                                       ({ state = p->state;
                                          smp_rmb();
                                          state != prev_state; }));
-            goto recheck;
+            continue;
+
         default:
             gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state);
             sv->pending = false;
             domain_crash(sv->vcpu->domain);
             return false; /* bail */
         }
-    } while ( false );
+
+        break;
+    }
 
     p = &sv->vcpu->arch.hvm.hvm_io.io_req;
     if ( hvm_ioreq_needs_completion(p) )


RE: [PATCH 2/3] x86/HVM: re-work hvm_wait_for_io() a little
Posted by Paul Durrant 3 years, 9 months ago
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: 15 July 2020 13:04
> To: xen-devel@lists.xenproject.org
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>; Roger Pau Monné <roger.pau@citrix.com>; Wei Liu
> <wl@xen.org>; Paul Durrant <paul@xen.org>
> Subject: [PATCH 2/3] x86/HVM: re-work hvm_wait_for_io() a little
> 
> Convert the function's main loop to a more ordinary one, without goto
> and without initial steps not needing to be inside a loop at all.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -106,24 +106,17 @@ bool hvm_io_pending(struct vcpu *v)
>  static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
>  {
>      unsigned int prev_state = STATE_IOREQ_NONE;
> +    unsigned int state = p->state;
>      uint64_t data = ~0;
> 
> -    do {
> -        unsigned int state = p->state;


Oh, you lose the loop here anyway, so the conversion in patch #1 was only transient.

> -
> -        smp_rmb();
> -
> -    recheck:
> -        if ( unlikely(state == STATE_IOREQ_NONE) )
> -        {
> -            /*
> -             * The only reason we should see this case is when an
> -             * emulator is dying and it races with an I/O being
> -             * requested.
> -             */
> -            break;
> -        }
> +    smp_rmb();
> 
> +    /*
> +     * The only reason we should see this condition be false is when an
> +     * emulator dying races with I/O being requested.
> +     */
> +    while ( likely(state != STATE_IOREQ_NONE) )
> +    {
>          if ( unlikely(state < prev_state) )
>          {
>              gdprintk(XENLOG_ERR, "Weird HVM ioreq state transition %u -> %u\n",
> @@ -139,20 +132,24 @@ static bool hvm_wait_for_io(struct hvm_i
>              p->state = STATE_IOREQ_NONE;
>              data = p->data;
>              break;
> +

Possibly mention the style fix-up in the commit comment.

>          case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
>          case STATE_IOREQ_INPROCESS:
>              wait_on_xen_event_channel(sv->ioreq_evtchn,
>                                        ({ state = p->state;
>                                           smp_rmb();
>                                           state != prev_state; }));
> -            goto recheck;
> +            continue;
> +

You could just break out of the switch now, I guess. Anyway...

Reviewed-by: Paul Durrant <paul@xen.org>

>          default:
>              gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state);
>              sv->pending = false;
>              domain_crash(sv->vcpu->domain);
>              return false; /* bail */
>          }
> -    } while ( false );
> +
> +        break;
> +    }
> 
>      p = &sv->vcpu->arch.hvm.hvm_io.io_req;
>      if ( hvm_ioreq_needs_completion(p) )



Re: [PATCH 2/3] x86/HVM: re-work hvm_wait_for_io() a little
Posted by Jan Beulich 3 years, 9 months ago
On 15.07.2020 14:47, Paul Durrant wrote:
>> From: Jan Beulich <jbeulich@suse.com>
>> Sent: 15 July 2020 13:04
>>
>> @@ -139,20 +132,24 @@ static bool hvm_wait_for_io(struct hvm_i
>>              p->state = STATE_IOREQ_NONE;
>>              data = p->data;
>>              break;
>> +
> 
> Possibly mention the style fix-up in the commit comment.

Done.

>>          case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
>>          case STATE_IOREQ_INPROCESS:
>>              wait_on_xen_event_channel(sv->ioreq_evtchn,
>>                                        ({ state = p->state;
>>                                           smp_rmb();
>>                                           state != prev_state; }));
>> -            goto recheck;
>> +            continue;
>> +
> 
> You could just break out of the switch now, I guess.

I can't, because of the trailing "break" (see below).

> Anyway...
> 
> Reviewed-by: Paul Durrant <paul@xen.org>

Thanks.

>>          default:
>>              gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state);
>>              sv->pending = false;
>>              domain_crash(sv->vcpu->domain);
>>              return false; /* bail */
>>          }
>> -    } while ( false );
>> +
>> +        break;
>> +    }

This "break" requires the use of "continue" inside the switch().

Jan

[PATCH 3/3] x86/HVM: fold both instances of looking up a hvm_ioreq_vcpu with a request pending
Posted by Jan Beulich 3 years, 9 months ago
It seems pretty likely that the "break" in the loop getting replaced in
handle_hvm_io_completion() was meant to exit both nested loops at the
same time. Re-purpose what has been hvm_io_pending() to hand back the
struct hvm_ioreq_vcpu instance found, and use it to replace the two
nested loops.
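
For reference, the construct being replaced has the shape below (abridged from
the diff); the "break" terminates only the inner list walk, so the outer
FOR_EACH_IOREQ_SERVER() loop carries on iterating, which - as said above -
quite possibly wasn't the intention:

    FOR_EACH_IOREQ_SERVER(d, id, s)              /* outer loop over servers */
    {
        list_for_each_entry ( sv, &s->ioreq_vcpu_list, list_entry )
        {                                        /* inner loop over this server's vCPU entries */
            if ( sv->vcpu == v && sv->pending )
            {
                if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
                    return false;

                break;                           /* leaves only the inner loop */
            }
        }
    }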

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -81,7 +81,8 @@ static ioreq_t *get_ioreq(struct hvm_ior
     return &p->vcpu_ioreq[v->vcpu_id];
 }
 
-bool hvm_io_pending(struct vcpu *v)
+static struct hvm_ioreq_vcpu *get_pending_vcpu(const struct vcpu *v,
+                                               struct hvm_ioreq_server **srvp)
 {
     struct domain *d = v->domain;
     struct hvm_ioreq_server *s;
@@ -96,11 +97,20 @@ bool hvm_io_pending(struct vcpu *v)
                               list_entry )
         {
             if ( sv->vcpu == v && sv->pending )
-                return true;
+            {
+                if ( srvp )
+                    *srvp = s;
+                return sv;
+            }
         }
     }
 
-    return false;
+    return NULL;
+}
+
+bool hvm_io_pending(struct vcpu *v)
+{
+    return get_pending_vcpu(v, NULL);
 }
 
 static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
@@ -165,8 +175,8 @@ bool handle_hvm_io_completion(struct vcp
     struct domain *d = v->domain;
     struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
     struct hvm_ioreq_server *s;
+    struct hvm_ioreq_vcpu *sv;
     enum hvm_io_completion io_completion;
-    unsigned int id;
 
     if ( has_vpci(d) && vpci_process_pending(v) )
     {
@@ -174,23 +184,9 @@ bool handle_hvm_io_completion(struct vcp
         return false;
     }
 
-    FOR_EACH_IOREQ_SERVER(d, id, s)
-    {
-        struct hvm_ioreq_vcpu *sv;
-
-        list_for_each_entry ( sv,
-                              &s->ioreq_vcpu_list,
-                              list_entry )
-        {
-            if ( sv->vcpu == v && sv->pending )
-            {
-                if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
-                    return false;
-
-                break;
-            }
-        }
-    }
+    sv = get_pending_vcpu(v, &s);
+    if ( sv && !hvm_wait_for_io(sv, get_ioreq(s, v)) )
+        return false;
 
     vio->io_req.state = hvm_ioreq_needs_completion(&vio->io_req) ?
         STATE_IORESP_READY : STATE_IOREQ_NONE;

RE: [PATCH 3/3] x86/HVM: fold both instances of looking up a hvm_ioreq_vcpu with a request pending
Posted by Paul Durrant 3 years, 9 months ago
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: 15 July 2020 13:05
> To: xen-devel@lists.xenproject.org
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>; Roger Pau Monné <roger.pau@citrix.com>; Wei Liu
> <wl@xen.org>; Paul Durrant <paul@xen.org>
> Subject: [PATCH 3/3] x86/HVM: fold both instances of looking up a hvm_ioreq_vcpu with a request
> pending
> 
> It seems pretty likely that the "break" in the loop getting replaced in
> handle_hvm_io_completion() was meant to exit both nested loops at the
> same time. Re-purpose what has been hvm_io_pending() to hand back the
> struct hvm_ioreq_vcpu instance found, and use it to replace the two
> nested loops.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Yes, much neater...

Reviewed-by: Paul Durrant <paul@xen.org>

> 
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -81,7 +81,8 @@ static ioreq_t *get_ioreq(struct hvm_ior
>      return &p->vcpu_ioreq[v->vcpu_id];
>  }
> 
> -bool hvm_io_pending(struct vcpu *v)
> +static struct hvm_ioreq_vcpu *get_pending_vcpu(const struct vcpu *v,
> +                                               struct hvm_ioreq_server **srvp)
>  {
>      struct domain *d = v->domain;
>      struct hvm_ioreq_server *s;
> @@ -96,11 +97,20 @@ bool hvm_io_pending(struct vcpu *v)
>                                list_entry )
>          {
>              if ( sv->vcpu == v && sv->pending )
> -                return true;
> +            {
> +                if ( srvp )
> +                    *srvp = s;
> +                return sv;
> +            }
>          }
>      }
> 
> -    return false;
> +    return NULL;
> +}
> +
> +bool hvm_io_pending(struct vcpu *v)
> +{
> +    return get_pending_vcpu(v, NULL);
>  }
> 
>  static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
> @@ -165,8 +175,8 @@ bool handle_hvm_io_completion(struct vcp
>      struct domain *d = v->domain;
>      struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
>      struct hvm_ioreq_server *s;
> +    struct hvm_ioreq_vcpu *sv;
>      enum hvm_io_completion io_completion;
> -    unsigned int id;
> 
>      if ( has_vpci(d) && vpci_process_pending(v) )
>      {
> @@ -174,23 +184,9 @@ bool handle_hvm_io_completion(struct vcp
>          return false;
>      }
> 
> -    FOR_EACH_IOREQ_SERVER(d, id, s)
> -    {
> -        struct hvm_ioreq_vcpu *sv;
> -
> -        list_for_each_entry ( sv,
> -                              &s->ioreq_vcpu_list,
> -                              list_entry )
> -        {
> -            if ( sv->vcpu == v && sv->pending )
> -            {
> -                if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
> -                    return false;
> -
> -                break;
> -            }
> -        }
> -    }
> +    sv = get_pending_vcpu(v, &s);
> +    if ( sv && !hvm_wait_for_io(sv, get_ioreq(s, v)) )
> +        return false;
> 
>      vio->io_req.state = hvm_ioreq_needs_completion(&vio->io_req) ?
>          STATE_IORESP_READY : STATE_IOREQ_NONE;