[PATCH v2 11/11] xen/arm: Process pending vPCI map/unmap operations

Posted by Oleksandr Andrushchenko 3 years, 2 months ago
From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>

vPCI may map and unmap PCI device memory (BARs) being passed through,
which may take a lot of time. For this reason those operations may be
deferred so that they can be safely preempted.
Run the corresponding vPCI code while switching a vCPU.

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>

---
Since v1:
 - Moved the check for pending vpci work from the common IOREQ code
   to hvm_do_resume on x86
 - Re-worked the code for Arm to ensure we don't miss pending vPCI work
---
 xen/arch/arm/traps.c   | 13 +++++++++++++
 xen/arch/x86/hvm/hvm.c |  6 ++++++
 xen/common/ioreq.c     |  9 ---------
 3 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 219ab3c3fbde..b246f51086e3 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -34,6 +34,7 @@
 #include <xen/symbols.h>
 #include <xen/version.h>
 #include <xen/virtual_region.h>
+#include <xen/vpci.h>
 
 #include <public/sched.h>
 #include <public/xen.h>
@@ -2304,6 +2305,18 @@ static bool check_for_vcpu_work(void)
     }
 #endif
 
+    if ( has_vpci(v->domain) )
+    {
+        bool pending;
+
+        local_irq_enable();
+        pending = vpci_process_pending(v);
+        local_irq_disable();
+
+        if ( pending )
+            return true;
+    }
+
     if ( likely(!v->arch.need_flush_to_ram) )
         return false;
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7b48a1b925bb..d32f5d572941 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -549,6 +549,12 @@ void hvm_do_resume(struct vcpu *v)
     if ( !vcpu_ioreq_handle_completion(v) )
         return;
 
+    if ( has_vpci(v->domain) && vpci_process_pending(v) )
+    {
+        raise_softirq(SCHEDULE_SOFTIRQ);
+        return;
+    }
+
     if ( unlikely(v->arch.vm_event) )
         hvm_vm_event_do_resume(v);
 
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index d732dc045df9..689d256544c8 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -25,9 +25,7 @@
 #include <xen/lib.h>
 #include <xen/paging.h>
 #include <xen/sched.h>
-#include <xen/softirq.h>
 #include <xen/trace.h>
-#include <xen/vpci.h>
 
 #include <asm/guest_atomics.h>
 #include <asm/ioreq.h>
@@ -212,19 +210,12 @@ static bool wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
 
 bool vcpu_ioreq_handle_completion(struct vcpu *v)
 {
-    struct domain *d = v->domain;
     struct vcpu_io *vio = &v->io;
     struct ioreq_server *s;
     struct ioreq_vcpu *sv;
     enum vio_completion completion;
     bool res = true;
 
-    if ( has_vpci(d) && vpci_process_pending(v) )
-    {
-        raise_softirq(SCHEDULE_SOFTIRQ);
-        return false;
-    }
-
     while ( (sv = get_pending_vcpu(v, &s)) != NULL )
         if ( !wait_for_io(sv, get_ioreq(s, v)) )
             return false;
-- 
2.25.1


Re: [PATCH v2 11/11] xen/arm: Process pending vPCI map/unmap operations
Posted by Stefano Stabellini 3 years, 2 months ago
+ x86 maintainers

On Thu, 23 Sep 2021, Oleksandr Andrushchenko wrote:
> From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
> 
> vPCI may map and unmap PCI device memory (BARs) being passed through,
> which may take a lot of time. For this reason those operations may be
> deferred so that they can be safely preempted.
> Run the corresponding vPCI code while switching a vCPU.
> 
> Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>

From an ARM point of view, I think the code change is OK.

Only one note: it would be good to add to the commit message a short
list of relevant TODOs which in particular affect this patch.

Something like:

Please be aware that there are a few outstanding TODOs affecting this
code path, see xen/drivers/vpci/header.c:map_range and
xen/drivers/vpci/header.c:vpci_process_pending.


> ---
> Since v1:
>  - Moved the check for pending vpci work from the common IOREQ code
>    to hvm_do_resume on x86
>  - Re-worked the code for Arm to ensure we don't miss pending vPCI work
> ---
>  xen/arch/arm/traps.c   | 13 +++++++++++++
>  xen/arch/x86/hvm/hvm.c |  6 ++++++
>  xen/common/ioreq.c     |  9 ---------
>  3 files changed, 19 insertions(+), 9 deletions(-)
> 
> diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> index 219ab3c3fbde..b246f51086e3 100644
> --- a/xen/arch/arm/traps.c
> +++ b/xen/arch/arm/traps.c
> @@ -34,6 +34,7 @@
>  #include <xen/symbols.h>
>  #include <xen/version.h>
>  #include <xen/virtual_region.h>
> +#include <xen/vpci.h>
>  
>  #include <public/sched.h>
>  #include <public/xen.h>
> @@ -2304,6 +2305,18 @@ static bool check_for_vcpu_work(void)
>      }
>  #endif
>  
> +    if ( has_vpci(v->domain) )
> +    {
> +        bool pending;
> +
> +        local_irq_enable();
> +        pending = vpci_process_pending(v);
> +        local_irq_disable();
> +
> +        if ( pending )
> +            return true;
> +    }
> +
>      if ( likely(!v->arch.need_flush_to_ram) )
>          return false;
>  
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 7b48a1b925bb..d32f5d572941 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -549,6 +549,12 @@ void hvm_do_resume(struct vcpu *v)
>      if ( !vcpu_ioreq_handle_completion(v) )
>          return;
>  
> +    if ( has_vpci(v->domain) && vpci_process_pending(v) )
> +    {
> +        raise_softirq(SCHEDULE_SOFTIRQ);
> +        return;
> +    }
> +
>      if ( unlikely(v->arch.vm_event) )
>          hvm_vm_event_do_resume(v);
>  
> diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
> index d732dc045df9..689d256544c8 100644
> --- a/xen/common/ioreq.c
> +++ b/xen/common/ioreq.c
> @@ -25,9 +25,7 @@
>  #include <xen/lib.h>
>  #include <xen/paging.h>
>  #include <xen/sched.h>
> -#include <xen/softirq.h>
>  #include <xen/trace.h>
> -#include <xen/vpci.h>
>  
>  #include <asm/guest_atomics.h>
>  #include <asm/ioreq.h>
> @@ -212,19 +210,12 @@ static bool wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
>  
>  bool vcpu_ioreq_handle_completion(struct vcpu *v)
>  {
> -    struct domain *d = v->domain;
>      struct vcpu_io *vio = &v->io;
>      struct ioreq_server *s;
>      struct ioreq_vcpu *sv;
>      enum vio_completion completion;
>      bool res = true;
>  
> -    if ( has_vpci(d) && vpci_process_pending(v) )
> -    {
> -        raise_softirq(SCHEDULE_SOFTIRQ);
> -        return false;
> -    }
> -
>      while ( (sv = get_pending_vcpu(v, &s)) != NULL )
>          if ( !wait_for_io(sv, get_ioreq(s, v)) )
>              return false;
> -- 
> 2.25.1
> 

Re: [PATCH v2 11/11] xen/arm: Process pending vPCI map/unmap operations
Posted by Jan Beulich 3 years, 2 months ago
+ Paul (retaining full context for this reason)

On 23.09.2021 14:54, Oleksandr Andrushchenko wrote:
> From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
> 
> vPCI may map and unmap PCI device memory (BARs) being passed through,
> which may take a lot of time. For this reason those operations may be
> deferred so that they can be safely preempted.
> Run the corresponding vPCI code while switching a vCPU.
> 
> Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
> 
> ---
> Since v1:
>  - Moved the check for pending vpci work from the common IOREQ code
>    to hvm_do_resume on x86

While perhaps obvious for Arm folks, I'd like to see the reason for this
spelled out in the description.

> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -549,6 +549,12 @@ void hvm_do_resume(struct vcpu *v)
>      if ( !vcpu_ioreq_handle_completion(v) )
>          return;
>  
> +    if ( has_vpci(v->domain) && vpci_process_pending(v) )
> +    {
> +        raise_softirq(SCHEDULE_SOFTIRQ);
> +        return;
> +    }

Note that you're altering behavior here: Originally this was done ...

> @@ -212,19 +210,12 @@ static bool wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
>  
>  bool vcpu_ioreq_handle_completion(struct vcpu *v)
>  {
> -    struct domain *d = v->domain;
>      struct vcpu_io *vio = &v->io;
>      struct ioreq_server *s;
>      struct ioreq_vcpu *sv;
>      enum vio_completion completion;
>      bool res = true;
>  
> -    if ( has_vpci(d) && vpci_process_pending(v) )
> -    {
> -        raise_softirq(SCHEDULE_SOFTIRQ);
> -        return false;
> -    }

... first thing. And I think it wants (perhaps even needs) to remain
that way; otherwise you'll need to explain why not, and why the change
is correct / safe.

Jan


Re: [PATCH v2 11/11] xen/arm: Process pending vPCI map/unmap operations
Posted by Oleksandr Andrushchenko 3 years, 2 months ago
On 27.09.21 11:06, Jan Beulich wrote:
> + Paul (retaining full context for this reason)
>
> On 23.09.2021 14:54, Oleksandr Andrushchenko wrote:
>> From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
>>
>> vPCI may map and unmap PCI device memory (BARs) being passed through,
>> which may take a lot of time. For this reason those operations may be
>> deferred so that they can be safely preempted.
>> Run the corresponding vPCI code while switching a vCPU.
>>
>> Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
>>
>> ---
>> Since v1:
>>   - Moved the check for pending vpci work from the common IOREQ code
>>     to hvm_do_resume on x86
> While perhaps obvious for Arm folks, I'd like to see the reason for this
> spelled out in the description.

I will add:

IOREQ is not enabled for Arm by default, so pending vPCI work has no
chance to be executed if the processing is left in the common IOREQ
code only. For that reason Arm processes it in arch-specific code,
which would result in the processing happening twice on Arm when IOREQ
is enabled.

For x86, the IOREQ code also doesn't seem to be the right place to
process vPCI, so move that to hvm_do_resume.
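
For context, the Arm exit path that ends up calling
check_for_vcpu_work() looks roughly like the sketch below (paraphrased
from xen/arch/arm/traps.c in the tree this series is based on, so
treat it as an illustration rather than exact code). Returning true
from check_for_vcpu_work() for pending vPCI work therefore keeps the
vCPU from resuming, and gives the scheduler a chance to preempt it,
until vpci_process_pending() has finished:

    void leave_hypervisor_to_guest(void)
    {
        local_irq_disable();

        /*
         * check_for_vcpu_work() returns true while there is still work
         * to do before the vCPU can safely resume -- now including the
         * deferred vPCI map/unmap operations.
         */
        while ( check_for_vcpu_work() )
            check_for_pcpu_work();

        check_for_pcpu_work();

        /* ... sync state to the vGIC and return to the guest ... */
    }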

>
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -549,6 +549,12 @@ void hvm_do_resume(struct vcpu *v)
>>       if ( !vcpu_ioreq_handle_completion(v) )
>>           return;
>>   
>> +    if ( has_vpci(v->domain) && vpci_process_pending(v) )
>> +    {
>> +        raise_softirq(SCHEDULE_SOFTIRQ);
>> +        return;
>> +    }
> Note that you're altering behavior here: Originally this was done ...
>
>> @@ -212,19 +210,12 @@ static bool wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
>>   
>>   bool vcpu_ioreq_handle_completion(struct vcpu *v)
>>   {
>> -    struct domain *d = v->domain;
>>       struct vcpu_io *vio = &v->io;
>>       struct ioreq_server *s;
>>       struct ioreq_vcpu *sv;
>>       enum vio_completion completion;
>>       bool res = true;
>>   
>> -    if ( has_vpci(d) && vpci_process_pending(v) )
>> -    {
>> -        raise_softirq(SCHEDULE_SOFTIRQ);
>> -        return false;
>> -    }
> ... first thing. And I think it wants (perhaps even needs) to remain
> that way;
Will make it the first operation
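
Something like the sketch below, i.e. the vPCI check moves back in
front of the ioreq completion handling in hvm_do_resume(). The snippet
only re-orders the lines already visible in the hunk above, so take it
as an illustration of the intended ordering rather than the final v3
code:

    /*
     * Run the deferred vPCI map/unmap work first, as was the case when
     * this check lived at the top of vcpu_ioreq_handle_completion().
     * If work is still pending, request a reschedule instead of
     * resuming the vCPU.
     */
    if ( has_vpci(v->domain) && vpci_process_pending(v) )
    {
        raise_softirq(SCHEDULE_SOFTIRQ);
        return;
    }

    if ( !vcpu_ioreq_handle_completion(v) )
        return;

    if ( unlikely(v->arch.vm_event) )
        hvm_vm_event_do_resume(v);
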
>   otherwise you'll need to explain why not, and why the change
> is correct / safe.
>
> Jan
>
Thank you,

Oleksandr