[XEN PATCH v3 14/16] ioreq: make arch_vcpu_ioreq_completion() an optional callback

Posted by Sergiy Kibrik 5 months, 3 weeks ago
In most cases the arch_vcpu_ioreq_completion() routine is just an empty stub,
except when handling VIO_realmode_completion, which only happens for HVM
domains running on VT-x hardware. When VT-x is disabled in the build
configuration, both the x86 and Arm versions of the routine become empty stubs.
To get rid of these useless stubs, make the call to the arch-specific ioreq
completion handler optional, i.e. only done when a handler is present, and drop
the Arm and generic x86 handlers. The actual handling of VIO_realmode_completion
can then be done by the VMX code.

Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
---
 xen/arch/arm/ioreq.c       |  6 ------
 xen/arch/x86/hvm/ioreq.c   | 23 -----------------------
 xen/arch/x86/hvm/vmx/vmx.c | 16 ++++++++++++++++
 xen/common/ioreq.c         |  5 ++++-
 xen/include/xen/ioreq.h    |  2 +-
 5 files changed, 21 insertions(+), 31 deletions(-)

diff --git a/xen/arch/arm/ioreq.c b/xen/arch/arm/ioreq.c
index 5df755b48b..2e829d2e7f 100644
--- a/xen/arch/arm/ioreq.c
+++ b/xen/arch/arm/ioreq.c
@@ -135,12 +135,6 @@ bool arch_ioreq_complete_mmio(void)
     return false;
 }
 
-bool arch_vcpu_ioreq_completion(enum vio_completion completion)
-{
-    ASSERT_UNREACHABLE();
-    return true;
-}
-
 /*
  * The "legacy" mechanism of mapping magic pages for the IOREQ servers
  * is x86 specific, so the following hooks don't need to be implemented on Arm:
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 4eb7a70182..088650e007 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -29,29 +29,6 @@ bool arch_ioreq_complete_mmio(void)
     return handle_mmio();
 }
 
-bool arch_vcpu_ioreq_completion(enum vio_completion completion)
-{
-    switch ( completion )
-    {
-    case VIO_realmode_completion:
-    {
-        struct hvm_emulate_ctxt ctxt;
-
-        hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
-        vmx_realmode_emulate_one(&ctxt);
-        hvm_emulate_writeback(&ctxt);
-
-        break;
-    }
-
-    default:
-        ASSERT_UNREACHABLE();
-        break;
-    }
-
-    return true;
-}
-
 static gfn_t hvm_alloc_legacy_ioreq_gfn(struct ioreq_server *s)
 {
     struct domain *d = s->target;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index f16faa6a61..7187d1819c 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -10,6 +10,7 @@
 #include <xen/param.h>
 #include <xen/trace.h>
 #include <xen/sched.h>
+#include <xen/ioreq.h>
 #include <xen/irq.h>
 #include <xen/softirq.h>
 #include <xen/domain_page.h>
@@ -2749,6 +2750,20 @@ static void cf_check vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
     vmx_vmcs_exit(v);
 }
 
+bool realmode_vcpu_ioreq_completion(enum vio_completion completion)
+{
+    struct hvm_emulate_ctxt ctxt;
+
+    if ( completion != VIO_realmode_completion )
+        ASSERT_UNREACHABLE();
+
+    hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
+    vmx_realmode_emulate_one(&ctxt);
+    hvm_emulate_writeback(&ctxt);
+
+    return true;
+}
+
 static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
     .name                 = "VMX",
     .cpu_up_prepare       = vmx_cpu_up_prepare,
@@ -3070,6 +3085,7 @@ const struct hvm_function_table * __init start_vmx(void)
     lbr_tsx_fixup_check();
     ler_to_fixup_check();
 
+    arch_vcpu_ioreq_completion = realmode_vcpu_ioreq_completion;
     return &vmx_function_table;
 }
 
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 1257a3d972..94fde97ece 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -33,6 +33,8 @@
 #include <public/hvm/ioreq.h>
 #include <public/hvm/params.h>
 
+bool (*arch_vcpu_ioreq_completion)(enum vio_completion completion) = NULL;
+
 void ioreq_request_mapcache_invalidate(const struct domain *d)
 {
     struct vcpu *v = current;
@@ -244,7 +246,8 @@ bool vcpu_ioreq_handle_completion(struct vcpu *v)
         break;
 
     default:
-        res = arch_vcpu_ioreq_completion(completion);
+        if ( arch_vcpu_ioreq_completion )
+            res = arch_vcpu_ioreq_completion(completion);
         break;
     }
 
diff --git a/xen/include/xen/ioreq.h b/xen/include/xen/ioreq.h
index cd399adf17..880214ec41 100644
--- a/xen/include/xen/ioreq.h
+++ b/xen/include/xen/ioreq.h
@@ -111,7 +111,7 @@ void ioreq_domain_init(struct domain *d);
 int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);
 
 bool arch_ioreq_complete_mmio(void);
-bool arch_vcpu_ioreq_completion(enum vio_completion completion);
+extern bool (*arch_vcpu_ioreq_completion)(enum vio_completion completion);
 int arch_ioreq_server_map_pages(struct ioreq_server *s);
 void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
 void arch_ioreq_server_enable(struct ioreq_server *s);
-- 
2.25.1
Re: [XEN PATCH v3 14/16] ioreq: make arch_vcpu_ioreq_completion() an optional callback
Posted by Julien Grall 5 months, 3 weeks ago
Hi Sergiy,

On 03/06/2024 12:34, Sergiy Kibrik wrote:
> In most cases the arch_vcpu_ioreq_completion() routine is just an empty stub,
> except when handling VIO_realmode_completion, which only happens for HVM
> domains running on VT-x hardware. When VT-x is disabled in the build
> configuration, both the x86 and Arm versions of the routine become empty stubs.
> To get rid of these useless stubs, make the call to the arch-specific ioreq
> completion handler optional, i.e. only done when a handler is present, and drop
> the Arm and generic x86 handlers. The actual handling of VIO_realmode_completion
> can then be done by the VMX code.
> 
> Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
> ---
>   xen/arch/arm/ioreq.c       |  6 ------
>   xen/arch/x86/hvm/ioreq.c   | 23 -----------------------
>   xen/arch/x86/hvm/vmx/vmx.c | 16 ++++++++++++++++
>   xen/common/ioreq.c         |  5 ++++-
>   xen/include/xen/ioreq.h    |  2 +-
>   5 files changed, 21 insertions(+), 31 deletions(-)
> 
> diff --git a/xen/arch/arm/ioreq.c b/xen/arch/arm/ioreq.c
> index 5df755b48b..2e829d2e7f 100644
> --- a/xen/arch/arm/ioreq.c
> +++ b/xen/arch/arm/ioreq.c
> @@ -135,12 +135,6 @@ bool arch_ioreq_complete_mmio(void)
>       return false;
>   }
>   
> -bool arch_vcpu_ioreq_completion(enum vio_completion completion)
> -{
> -    ASSERT_UNREACHABLE();
> -    return true;
> -}
> -
>   /*
>    * The "legacy" mechanism of mapping magic pages for the IOREQ servers
>    * is x86 specific, so the following hooks don't need to be implemented on Arm:
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index 4eb7a70182..088650e007 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -29,29 +29,6 @@ bool arch_ioreq_complete_mmio(void)
>       return handle_mmio();
>   }
>   
> -bool arch_vcpu_ioreq_completion(enum vio_completion completion)
> -{
> -    switch ( completion )
> -    {
> -    case VIO_realmode_completion:
> -    {
> -        struct hvm_emulate_ctxt ctxt;
> -
> -        hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
> -        vmx_realmode_emulate_one(&ctxt);
> -        hvm_emulate_writeback(&ctxt);
> -
> -        break;
> -    }
> -
> -    default:
> -        ASSERT_UNREACHABLE();
> -        break;
> -    }
> -
> -    return true;
> -}
> -
>   static gfn_t hvm_alloc_legacy_ioreq_gfn(struct ioreq_server *s)
>   {
>       struct domain *d = s->target;
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index f16faa6a61..7187d1819c 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -10,6 +10,7 @@
>   #include <xen/param.h>
>   #include <xen/trace.h>
>   #include <xen/sched.h>
> +#include <xen/ioreq.h>
>   #include <xen/irq.h>
>   #include <xen/softirq.h>
>   #include <xen/domain_page.h>
> @@ -2749,6 +2750,20 @@ static void cf_check vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
>       vmx_vmcs_exit(v);
>   }
>   
> +bool realmode_vcpu_ioreq_completion(enum vio_completion completion)

No one seems to call this function outside of vmx.c. So can it be 'static'?
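
I.e. (a sketch of just the declaration):

static bool realmode_vcpu_ioreq_completion(enum vio_completion completion)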

> +{
> +    struct hvm_emulate_ctxt ctxt;
> +
> +    if ( completion != VIO_realmode_completion )
> +        ASSERT_UNREACHABLE();
> +
> +    hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
> +    vmx_realmode_emulate_one(&ctxt);
> +    hvm_emulate_writeback(&ctxt);
> +
> +    return true;
> +}
> +
>   static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
>       .name                 = "VMX",
>       .cpu_up_prepare       = vmx_cpu_up_prepare,
> @@ -3070,6 +3085,7 @@ const struct hvm_function_table * __init start_vmx(void)
>       lbr_tsx_fixup_check();
>       ler_to_fixup_check();
>   
> +    arch_vcpu_ioreq_completion = realmode_vcpu_ioreq_completion;
>       return &vmx_function_table;
>   }
>   
> diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
> index 1257a3d972..94fde97ece 100644
> --- a/xen/common/ioreq.c
> +++ b/xen/common/ioreq.c
> @@ -33,6 +33,8 @@
>   #include <public/hvm/ioreq.h>
>   #include <public/hvm/params.h>
>   
> +bool (*arch_vcpu_ioreq_completion)(enum vio_completion completion) = NULL;

I don't think this should be allowed to be modified after boot. So I
would add __ro_after_init.
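
I.e. something along the lines of (a sketch, modulo where exactly the
attribute is placed):

bool (*arch_vcpu_ioreq_completion)(enum vio_completion completion) __ro_after_init;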

> +
>   void ioreq_request_mapcache_invalidate(const struct domain *d)
>   {
>       struct vcpu *v = current;
> @@ -244,7 +246,8 @@ bool vcpu_ioreq_handle_completion(struct vcpu *v)
>           break;
>   
>       default:
> -        res = arch_vcpu_ioreq_completion(completion);
> +        if ( arch_vcpu_ioreq_completion )
> +            res = arch_vcpu_ioreq_completion(completion);

I think this wants an:

else {
   ASSERT_UNREACHABLE();
}
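
so that the default case would become something like (sketch):

    default:
        if ( arch_vcpu_ioreq_completion )
            res = arch_vcpu_ioreq_completion(completion);
        else
            ASSERT_UNREACHABLE();
        break;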

So this matches the existing code. But I am not fully convinced that this
is the right approach. Arch_vcpu_ioreq_completion is not meant to change 
across boot (or even at compile time for Arm).

Reading the previous thread, I think something like below would work:

static arch_vcpu_ioreq_completion(...)
{
#ifdef CONFIG_VMX
/* Existing code */
#else
ASSERT_UNREACHABLE();
return true;
#endif
}

If we want to avoid the stub, then I think it would be better to use

#ifdef CONFIG_VMX
static  arch_vcpu_ioreq...
{
}
#endif /* CONFIG_VMX */

Then in the x86 header.

#ifdef CONFIG_VMX
static arch_vcpu_ioreq..();
#define arch_vcpu_ioreq...
#endif

And then in common/ioreq.c

#ifdef arch_vcpu_ioreq
res = arch_vcpu_ioreq(...)
#else
ASSERT_UNREACHABLE();
#endif
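
Putting that second option together, it could look roughly like this (only a
sketch; where exactly the handler and its declaration live, and the
define-to-itself convention, are just one possible arrangement):

/* x86, built only when CONFIG_VMX is enabled: */
bool arch_vcpu_ioreq_completion(enum vio_completion completion)
{
    /* the VIO_realmode_completion handling added by this patch */
}

/* x86 ioreq header: */
#ifdef CONFIG_VMX
bool arch_vcpu_ioreq_completion(enum vio_completion completion);
#define arch_vcpu_ioreq_completion arch_vcpu_ioreq_completion
#endif

/* common/ioreq.c: */
    default:
#ifdef arch_vcpu_ioreq_completion
        res = arch_vcpu_ioreq_completion(completion);
#else
        ASSERT_UNREACHABLE();
#endif
        break;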

>           break;
>       }
>   
> diff --git a/xen/include/xen/ioreq.h b/xen/include/xen/ioreq.h
> index cd399adf17..880214ec41 100644
> --- a/xen/include/xen/ioreq.h
> +++ b/xen/include/xen/ioreq.h
> @@ -111,7 +111,7 @@ void ioreq_domain_init(struct domain *d);
>   int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);
>   
>   bool arch_ioreq_complete_mmio(void);
> -bool arch_vcpu_ioreq_completion(enum vio_completion completion);
> +extern bool (*arch_vcpu_ioreq_completion)(enum vio_completion completion);
>   int arch_ioreq_server_map_pages(struct ioreq_server *s);
>   void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
>   void arch_ioreq_server_enable(struct ioreq_server *s);

-- 
Julien Grall
Re: [XEN PATCH v3 14/16] ioreq: make arch_vcpu_ioreq_completion() an optional callback
Posted by Sergiy Kibrik 5 months, 2 weeks ago
hi Julien,

04.06.24 14:07, Julien Grall:
> So this match the existing code. But I am not fully convinced that this 
> is the right approach. Arch_vcpu_ioreq_completion is not meant to change 
> across boot (or even at compile time for Arm).
> 
> Reading the previous thread, I think something like below would work:
> 
> static arch_vcpu_ioreq_completion(...)
> {
> #ifdef CONFIG_VMX
> /* Existing code */
> #else
> ASSERT_UNREACHABLE();
> return true;
> #endif
> }
> 
> If we want to avoid the stub, then I think it would be better to use
> 
> #ifdef CONFIG_VMX
> static  arch_vcpu_ioreq...
> {
> }
> #endif /* CONFIG_VMX */
> 
> Then in the x86 header.
> 
> #ifdef CONFIG_VMX
> static arch_vcpu_ioreq..();
> #define arch_vcpu_ioreq...
> #endif
> 
> And then in common/ioreq.c
> 
> #ifdef arch_vcpu_ioreq
> res = arch_vcpu_ioreq(...)
> #else
> ASSERT_UNREACHABLE();
> #endif

Thank you for taking a look and for the suggestion. I agree that this is
all a matter of compile-time configuration, not of runtime.

I'll rewrite the patch and we'll see how it looks.

   -Sergiy

Re: [XEN PATCH v3 14/16] ioreq: make arch_vcpu_ioreq_completion() an optional callback
Posted by Jan Beulich 5 months, 3 weeks ago
On 04.06.2024 13:07, Julien Grall wrote:
> On 03/06/2024 12:34, Sergiy Kibrik wrote:
>> @@ -2749,6 +2750,20 @@ static void cf_check vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
>>       vmx_vmcs_exit(v);
>>   }
>>   
>> +bool realmode_vcpu_ioreq_completion(enum vio_completion completion)
> 
> No one seems to call this function outside of vmx.c. So can it be 'static'?

Plus it absolutely needs to be cf_check. If it is to stay, which
it looks like it isn't, as per further comments from Julien.
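
I.e., if it were to stay as an indirect-call target (sketch only):

static bool cf_check realmode_vcpu_ioreq_completion(enum vio_completion completion)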

Jan