From: Xenia Ragiadakou <burzalodowa@gmail.com>
VIO_realmode_completion is specific to vmx realmode, so guard the completion
handling code with CONFIG_VMX. Also, guard VIO_realmode_completion itself by
CONFIG_VMX, instead of generic CONFIG_X86.
No functional change intended.
Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
---
changes in v1:
- put VIO_realmode_completion enum under #ifdef CONFIG_VMX
---
xen/arch/x86/hvm/emulate.c | 2 ++
xen/arch/x86/hvm/ioreq.c | 2 ++
xen/include/xen/sched.h | 2 +-
3 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index ab1bc51683..d60b1f6f4d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2667,7 +2667,9 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
break;
case VIO_mmio_completion:
+#ifdef CONFIG_VMX
case VIO_realmode_completion:
+#endif
BUILD_BUG_ON(sizeof(hvio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf));
hvio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes;
memcpy(hvio->mmio_insn, hvmemul_ctxt->insn_buf, hvio->mmio_insn_bytes);
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 4eb7a70182..b37bbd660b 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -33,6 +33,7 @@ bool arch_vcpu_ioreq_completion(enum vio_completion completion)
{
switch ( completion )
{
+#ifdef CONFIG_VMX
case VIO_realmode_completion:
{
struct hvm_emulate_ctxt ctxt;
@@ -43,6 +44,7 @@ bool arch_vcpu_ioreq_completion(enum vio_completion completion)
break;
}
+#endif
default:
ASSERT_UNREACHABLE();
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 132b841995..50a58fe428 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -152,7 +152,7 @@ enum vio_completion {
VIO_no_completion,
VIO_mmio_completion,
VIO_pio_completion,
-#ifdef CONFIG_X86
+#ifdef CONFIG_VMX
VIO_realmode_completion,
#endif
};
--
2.25.1
On 15.05.2024 11:24, Sergiy Kibrik wrote: > --- a/xen/arch/x86/hvm/emulate.c > +++ b/xen/arch/x86/hvm/emulate.c > @@ -2667,7 +2667,9 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt, > break; > > case VIO_mmio_completion: > +#ifdef CONFIG_VMX > case VIO_realmode_completion: > +#endif > BUILD_BUG_ON(sizeof(hvio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf)); > hvio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes; > memcpy(hvio->mmio_insn, hvmemul_ctxt->insn_buf, hvio->mmio_insn_bytes); This change doesn't buy us anything, does it? > --- a/xen/arch/x86/hvm/ioreq.c > +++ b/xen/arch/x86/hvm/ioreq.c > @@ -33,6 +33,7 @@ bool arch_vcpu_ioreq_completion(enum vio_completion completion) > { > switch ( completion ) > { > +#ifdef CONFIG_VMX > case VIO_realmode_completion: > { > struct hvm_emulate_ctxt ctxt; > @@ -43,6 +44,7 @@ bool arch_vcpu_ioreq_completion(enum vio_completion completion) > > break; > } > +#endif > > default: > ASSERT_UNREACHABLE(); And while this change is needed for the goal of the series, I wonder whether it wouldn't better be arch_vcpu_ioreq_completion() as whole that then gets stubbed out. Jan
16.05.24 15:11, Jan Beulich: > On 15.05.2024 11:24, Sergiy Kibrik wrote: >> --- a/xen/arch/x86/hvm/emulate.c >> +++ b/xen/arch/x86/hvm/emulate.c >> @@ -2667,7 +2667,9 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt, >> break; >> >> case VIO_mmio_completion: >> +#ifdef CONFIG_VMX >> case VIO_realmode_completion: >> +#endif >> BUILD_BUG_ON(sizeof(hvio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf)); >> hvio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes; >> memcpy(hvio->mmio_insn, hvmemul_ctxt->insn_buf, hvio->mmio_insn_bytes); > > This change doesn't buy us anything, does it? why not? Code won't compile w/o it. Or do you mean hiding the whole VIO_realmode_completion enum under CONFIG_VMX wasn't really useful? >> --- a/xen/arch/x86/hvm/ioreq.c >> +++ b/xen/arch/x86/hvm/ioreq.c >> @@ -33,6 +33,7 @@ bool arch_vcpu_ioreq_completion(enum vio_completion completion) >> { >> switch ( completion ) >> { >> +#ifdef CONFIG_VMX >> case VIO_realmode_completion: >> { >> struct hvm_emulate_ctxt ctxt; >> @@ -43,6 +44,7 @@ bool arch_vcpu_ioreq_completion(enum vio_completion completion) >> >> break; >> } >> +#endif >> >> default: >> ASSERT_UNREACHABLE(); > > And while this change is needed for the goal of the series, I wonder whether > it wouldn't better be arch_vcpu_ioreq_completion() as whole that then gets > stubbed out. > I'll post a patch to this thread to confirm whether I got your point correctly. -Sergiy
On 31.05.2024 10:05, Sergiy Kibrik wrote: > 16.05.24 15:11, Jan Beulich: >> On 15.05.2024 11:24, Sergiy Kibrik wrote: >>> --- a/xen/arch/x86/hvm/emulate.c >>> +++ b/xen/arch/x86/hvm/emulate.c >>> @@ -2667,7 +2667,9 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt, >>> break; >>> >>> case VIO_mmio_completion: >>> +#ifdef CONFIG_VMX >>> case VIO_realmode_completion: >>> +#endif >>> BUILD_BUG_ON(sizeof(hvio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf)); >>> hvio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes; >>> memcpy(hvio->mmio_insn, hvmemul_ctxt->insn_buf, hvio->mmio_insn_bytes); >> >> This change doesn't buy us anything, does it? > > why not? Code won't compile w/o it. > Or do you mean hiding the whole VIO_realmode_completion enum under > CONFIG_VMX wasn't really useful? That's what I meant, by implication. To me it's extra #ifdef-ary without real gain. Jan
In most cases the arch_vcpu_ioreq_completion() routine is just an empty stub,
except when handling VIO_realmode_completion, which only happens on HVM
domains running on a VT-x machine. When VT-x is disabled in the build configuration,
both the x86 & arm versions of the routine become empty stubs.
To dispose of these useless stubs we can make an optional call to an arch-specific
ioreq completion handler, if one is present, and drop the arm and generic x86 handlers.
Actual handling of VIO_realmode_completion can then be done by the VMX code.
Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
---
xen/arch/arm/ioreq.c | 6 ------
xen/arch/x86/hvm/ioreq.c | 25 -------------------------
xen/arch/x86/hvm/vmx/vmx.c | 15 +++++++++++++++
xen/common/ioreq.c | 5 ++++-
xen/include/xen/ioreq.h | 2 +-
5 files changed, 20 insertions(+), 33 deletions(-)
diff --git a/xen/arch/arm/ioreq.c b/xen/arch/arm/ioreq.c
index 5df755b48b..2e829d2e7f 100644
--- a/xen/arch/arm/ioreq.c
+++ b/xen/arch/arm/ioreq.c
@@ -135,12 +135,6 @@ bool arch_ioreq_complete_mmio(void)
return false;
}
-bool arch_vcpu_ioreq_completion(enum vio_completion completion)
-{
- ASSERT_UNREACHABLE();
- return true;
-}
-
/*
* The "legacy" mechanism of mapping magic pages for the IOREQ servers
* is x86 specific, so the following hooks don't need to be implemented on Arm:
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index b37bbd660b..088650e007 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -29,31 +29,6 @@ bool arch_ioreq_complete_mmio(void)
return handle_mmio();
}
-bool arch_vcpu_ioreq_completion(enum vio_completion completion)
-{
- switch ( completion )
- {
-#ifdef CONFIG_VMX
- case VIO_realmode_completion:
- {
- struct hvm_emulate_ctxt ctxt;
-
- hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
- vmx_realmode_emulate_one(&ctxt);
- hvm_emulate_writeback(&ctxt);
-
- break;
- }
-#endif
-
- default:
- ASSERT_UNREACHABLE();
- break;
- }
-
- return true;
-}
-
static gfn_t hvm_alloc_legacy_ioreq_gfn(struct ioreq_server *s)
{
struct domain *d = s->target;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 8ba996546f..4f9be50fe7 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2749,6 +2749,20 @@ static void cf_check vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
vmx_vmcs_exit(v);
}
+bool realmode_vcpu_ioreq_completion(enum vio_completion completion)
+{
+ struct hvm_emulate_ctxt ctxt;
+
+ if ( completion != VIO_realmode_completion )
+ ASSERT_UNREACHABLE();
+
+ hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
+ vmx_realmode_emulate_one(&ctxt);
+ hvm_emulate_writeback(&ctxt);
+
+ return true;
+}
+
static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
.name = "VMX",
.cpu_up_prepare = vmx_cpu_up_prepare,
@@ -3070,6 +3084,7 @@ const struct hvm_function_table * __init start_vmx(void)
lbr_tsx_fixup_check();
ler_to_fixup_check();
+ arch_vcpu_ioreq_completion = realmode_vcpu_ioreq_completion;
return &vmx_function_table;
}
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 1257a3d972..94fde97ece 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -33,6 +33,8 @@
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>
+bool (*arch_vcpu_ioreq_completion)(enum vio_completion completion) = NULL;
+
void ioreq_request_mapcache_invalidate(const struct domain *d)
{
struct vcpu *v = current;
@@ -244,7 +246,8 @@ bool vcpu_ioreq_handle_completion(struct vcpu *v)
break;
default:
- res = arch_vcpu_ioreq_completion(completion);
+ if ( arch_vcpu_ioreq_completion )
+ res = arch_vcpu_ioreq_completion(completion);
break;
}
diff --git a/xen/include/xen/ioreq.h b/xen/include/xen/ioreq.h
index cd399adf17..880214ec41 100644
--- a/xen/include/xen/ioreq.h
+++ b/xen/include/xen/ioreq.h
@@ -111,7 +111,7 @@ void ioreq_domain_init(struct domain *d);
int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);
bool arch_ioreq_complete_mmio(void);
-bool arch_vcpu_ioreq_completion(enum vio_completion completion);
+extern bool (*arch_vcpu_ioreq_completion)(enum vio_completion completion);
int arch_ioreq_server_map_pages(struct ioreq_server *s);
void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
void arch_ioreq_server_enable(struct ioreq_server *s);
--
2.25.1
On Wed, 15 May 2024, Sergiy Kibrik wrote: > From: Xenia Ragiadakou <burzalodowa@gmail.com> > > VIO_realmode_completion is specific to vmx realmode, so guard the completion > handling code with CONFIG_VMX. Also, guard VIO_realmode_completion itself by > CONFIG_VMX, instead of generic CONFIG_X86. > > No functional change intended. > > Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com> > Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
© 2016 - 2024 Red Hat, Inc.