[PATCH v2] x86/HVM: convert remaining hvm_funcs hook invocations to alt-call

Jan Beulich posted 1 patch 1 week, 4 days ago
Test gitlab-ci failed
Patches applied successfully
git fetch https://gitlab.com/xen-project/patchew/xen tags/patchew/ed34964c-c612-497f-0350-b9202c0ba057@suse.com

[PATCH v2] x86/HVM: convert remaining hvm_funcs hook invocations to alt-call

Posted by Jan Beulich 1 week, 4 days ago
The aim is to have as few indirect calls as possible (see [1]); during
the initial conversion, performance was the main consideration and
hence rarely used hooks didn't get converted. Apparently one use of
get_interrupt_shadow() was missed at the time.

While doing this, drop NULL checks ahead of CPU management and .nhvm_*()
calls when the hook is always present. Also convert the
.nhvm_vcpu_reset() call to alternative_vcall(), as the return value is
unused and the caller currently has no way of propagating it.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

[1] https://lists.xen.org/archives/html/xen-devel/2021-11/msg01822.html
---
v2: Convert all hook invocations. Re-base.
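
In case it helps review, the transformation applied throughout is, in
essence (an illustrative sketch, not part of the patch; cpu_up_prepare
and cpu_dead are two of the hooks actually converted below):

    /* Before: a true indirect call through the hvm_funcs table. */
    rc = hvm_funcs.cpu_up_prepare(cpu);

    /*
     * After: the compiler still sees an ordinary (type-checked) call,
     * but the alternatives framework rewrites it into a direct call
     * during boot, once hvm_funcs has been populated from the VMX or
     * SVM function table.
     */
    rc = alternative_call(hvm_funcs.cpu_up_prepare, cpu);

    /* Void variant, for hooks whose return value is ignored. */
    alternative_vcall(hvm_funcs.cpu_dead, cpu);

NULL checks remain only where a hook may legitimately be absent (e.g.
tsc_scaling.setup and the altp2m / vmtrace ones); where both VMX and
SVM always supply the hook, the check is dropped.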

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2549,7 +2549,7 @@ static int hvmemul_vmfunc(
 
     if ( !hvm_funcs.altp2m_vcpu_emulate_vmfunc )
         return X86EMUL_UNHANDLEABLE;
-    rc = hvm_funcs.altp2m_vcpu_emulate_vmfunc(ctxt->regs);
+    rc = alternative_call(hvm_funcs.altp2m_vcpu_emulate_vmfunc, ctxt->regs);
     if ( rc == X86EMUL_EXCEPTION )
         x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
 
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -126,14 +126,14 @@ static int cpu_callback(
     switch ( action )
     {
     case CPU_UP_PREPARE:
-        rc = hvm_funcs.cpu_up_prepare(cpu);
+        rc = alternative_call(hvm_funcs.cpu_up_prepare, cpu);
         break;
     case CPU_DYING:
         hvm_cpu_down();
         break;
     case CPU_UP_CANCELED:
     case CPU_DEAD:
-        hvm_funcs.cpu_dead(cpu);
+        alternative_vcall(hvm_funcs.cpu_dead, cpu);
         break;
     default:
         break;
@@ -707,7 +707,7 @@ int hvm_domain_initialise(struct domain
     if ( rc )
         goto fail2;
 
-    rc = hvm_funcs.domain_initialise(d);
+    rc = alternative_call(hvm_funcs.domain_initialise, d);
     if ( rc != 0 )
         goto fail2;
 
@@ -740,7 +740,7 @@ void hvm_domain_relinquish_resources(str
         alternative_vcall(hvm_funcs.domain_relinquish_resources, d);
 
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
-        hvm_funcs.nhvm_domain_relinquish_resources(d);
+        alternative_vcall(hvm_funcs.nhvm_domain_relinquish_resources, d);
 
     viridian_domain_deinit(d);
 
@@ -870,7 +870,7 @@ static int hvm_save_cpu_ctxt(struct vcpu
         return 0;
 
     /* Architecture-specific vmcs/vmcb bits */
-    hvm_funcs.save_cpu_ctxt(v, &ctxt);
+    alternative_vcall(hvm_funcs.save_cpu_ctxt, v, &ctxt);
 
     hvm_get_segment_register(v, x86_seg_idtr, &seg);
     ctxt.idtr_limit = seg.limit;
@@ -1093,14 +1093,14 @@ static int hvm_load_cpu_ctxt(struct doma
 #undef UNFOLD_ARBYTES
 
     /* Architecture-specific vmcs/vmcb bits */
-    if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
+    if ( alternative_call(hvm_funcs.load_cpu_ctxt, v, &ctxt) < 0 )
         return -EINVAL;
 
     v->arch.hvm.guest_cr[2] = ctxt.cr2;
     hvm_update_guest_cr(v, 2);
 
     if ( hvm_funcs.tsc_scaling.setup )
-        hvm_funcs.tsc_scaling.setup(v);
+        alternative_vcall(hvm_funcs.tsc_scaling.setup, v);
 
     v->arch.msrs->tsc_aux = ctxt.msr_tsc_aux;
 
@@ -1563,7 +1563,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
     if ( rc != 0 ) /* teardown: vlapic_destroy */
         goto fail2;
 
-    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
+    rc = alternative_call(hvm_funcs.vcpu_initialise, v);
+    if ( rc != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
         goto fail3;
 
     softirq_tasklet_init(&v->arch.hvm.assert_evtchn_irq_tasklet,
@@ -1611,7 +1612,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     free_compat_arg_xlat(v);
  fail4:
     hvmemul_cache_destroy(v);
-    hvm_funcs.vcpu_destroy(v);
+    alternative_vcall(hvm_funcs.vcpu_destroy, v);
  fail3:
     vlapic_destroy(v);
  fail2:
@@ -1635,7 +1636,7 @@ void hvm_vcpu_destroy(struct vcpu *v)
     free_compat_arg_xlat(v);
 
     tasklet_kill(&v->arch.hvm.assert_evtchn_irq_tasklet);
-    hvm_funcs.vcpu_destroy(v);
+    alternative_vcall(hvm_funcs.vcpu_destroy, v);
 
     vlapic_destroy(v);
 
@@ -3874,7 +3875,7 @@ enum hvm_intblk hvm_interrupt_blocked(st
          !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
         return hvm_intblk_rflags_ie;
 
-    intr_shadow = hvm_funcs.get_interrupt_shadow(v);
+    intr_shadow = alternative_call(hvm_funcs.get_interrupt_shadow, v);
 
     if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) )
         return hvm_intblk_shadow;
@@ -3990,7 +3991,7 @@ void hvm_vcpu_reset_state(struct vcpu *v
     hvm_set_segment_register(v, x86_seg_idtr, &reg);
 
     if ( hvm_funcs.tsc_scaling.setup )
-        hvm_funcs.tsc_scaling.setup(v);
+        alternative_vcall(hvm_funcs.tsc_scaling.setup, v);
 
     /* Sync AP's TSC with BSP's. */
     v->arch.hvm.cache_tsc_offset =
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -54,8 +54,7 @@ nestedhvm_vcpu_reset(struct vcpu *v)
 
     hvm_asid_flush_vcpu_asid(&nv->nv_n2asid);
 
-    if ( hvm_funcs.nhvm_vcpu_reset )
-        hvm_funcs.nhvm_vcpu_reset(v);
+    alternative_vcall(hvm_funcs.nhvm_vcpu_reset, v);
 
     /* vcpu is in host mode */
     nestedhvm_vcpu_exit_guestmode(v);
@@ -64,14 +63,14 @@ nestedhvm_vcpu_reset(struct vcpu *v)
 int
 nestedhvm_vcpu_initialise(struct vcpu *v)
 {
-    int rc = -EOPNOTSUPP;
+    int rc;
 
     if ( !shadow_io_bitmap[0] )
         return -ENOMEM;
 
-    if ( !hvm_funcs.nhvm_vcpu_initialise ||
-         ((rc = hvm_funcs.nhvm_vcpu_initialise(v)) != 0) )
-         return rc;
+    rc = alternative_call(hvm_funcs.nhvm_vcpu_initialise, v);
+    if ( rc )
+        return rc;
 
     nestedhvm_vcpu_reset(v);
     return 0;
@@ -80,8 +79,7 @@ nestedhvm_vcpu_initialise(struct vcpu *v
 void
 nestedhvm_vcpu_destroy(struct vcpu *v)
 {
-    if ( hvm_funcs.nhvm_vcpu_destroy )
-        hvm_funcs.nhvm_vcpu_destroy(v);
+    alternative_vcall(hvm_funcs.nhvm_vcpu_destroy, v);
 }
 
 static void
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -561,13 +561,12 @@ static inline void hvm_invlpg(struct vcp
 
 static inline int hvm_cpu_up(void)
 {
-    return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 0);
+    return alternative_call(hvm_funcs.cpu_up);
 }
 
 static inline void hvm_cpu_down(void)
 {
-    if ( hvm_funcs.cpu_down )
-        hvm_funcs.cpu_down();
+    alternative_vcall(hvm_funcs.cpu_down);
 }
 
 static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
@@ -601,7 +600,7 @@ static inline void hvm_invalidate_regs_f
 static inline int nhvm_vcpu_vmexit_event(
     struct vcpu *v, const struct x86_event *event)
 {
-    return hvm_funcs.nhvm_vcpu_vmexit_event(v, event);
+    return alternative_call(hvm_funcs.nhvm_vcpu_vmexit_event, v, event);
 }
 
 /* returns l1 guest's cr3 that points to the page table used to
@@ -609,45 +608,46 @@ static inline int nhvm_vcpu_vmexit_event
  */
 static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
 {
-    return hvm_funcs.nhvm_vcpu_p2m_base(v);
+    return alternative_call(hvm_funcs.nhvm_vcpu_p2m_base, v);
 }
 
 /* returns true, when l1 guest intercepts the specified trap */
 static inline bool_t nhvm_vmcx_guest_intercepts_event(
     struct vcpu *v, unsigned int vector, int errcode)
 {
-    return hvm_funcs.nhvm_vmcx_guest_intercepts_event(v, vector, errcode);
+    return alternative_call(hvm_funcs.nhvm_vmcx_guest_intercepts_event, v,
+                            vector, errcode);
 }
 
 /* returns true when l1 guest wants to use hap to run l2 guest */
 static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
 {
-    return hvm_funcs.nhvm_vmcx_hap_enabled(v);
+    return alternative_call(hvm_funcs.nhvm_vmcx_hap_enabled, v);
 }
 
 /* interrupt */
 static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
 {
-    return hvm_funcs.nhvm_intr_blocked(v);
+    return alternative_call(hvm_funcs.nhvm_intr_blocked, v);
 }
 
 static inline int nhvm_hap_walk_L1_p2m(
     struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
     uint8_t *p2m_acc, struct npfec npfec)
 {
-    return hvm_funcs.nhvm_hap_walk_L1_p2m(
+    return alternative_call(hvm_funcs.nhvm_hap_walk_L1_p2m,
         v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
 }
 
 static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
 {
-    hvm_funcs.enable_msr_interception(d, msr);
+    alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
 }
 
 static inline bool_t hvm_is_singlestep_supported(void)
 {
     return (hvm_funcs.is_singlestep_supported &&
-            hvm_funcs.is_singlestep_supported());
+            alternative_call(hvm_funcs.is_singlestep_supported));
 }
 
 static inline bool hvm_hap_supported(void)
@@ -665,14 +665,14 @@ static inline bool hvm_altp2m_supported(
 static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
 {
     if ( hvm_funcs.altp2m_vcpu_update_p2m )
-        hvm_funcs.altp2m_vcpu_update_p2m(v);
+        alternative_vcall(hvm_funcs.altp2m_vcpu_update_p2m, v);
 }
 
 /* updates VMCS fields related to VMFUNC and #VE */
 static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
 {
     if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
-        hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
+        alternative_vcall(hvm_funcs.altp2m_vcpu_update_vmfunc_ve, v);
 }
 
 /* emulates #VE */
@@ -680,7 +680,7 @@ static inline bool altp2m_vcpu_emulate_v
 {
     if ( hvm_funcs.altp2m_vcpu_emulate_ve )
     {
-        hvm_funcs.altp2m_vcpu_emulate_ve(v);
+        alternative_vcall(hvm_funcs.altp2m_vcpu_emulate_ve, v);
         return true;
     }
     return false;
@@ -689,7 +689,7 @@ static inline bool altp2m_vcpu_emulate_v
 static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset)
 {
     if ( hvm_funcs.vmtrace_control )
-        return hvm_funcs.vmtrace_control(v, enable, reset);
+        return alternative_call(hvm_funcs.vmtrace_control, v, enable, reset);
 
     return -EOPNOTSUPP;
 }
@@ -698,7 +698,7 @@ static inline int hvm_vmtrace_control(st
 static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos)
 {
     if ( hvm_funcs.vmtrace_output_position )
-        return hvm_funcs.vmtrace_output_position(v, pos);
+        return alternative_call(hvm_funcs.vmtrace_output_position, v, pos);
 
     return -EOPNOTSUPP;
 }
@@ -707,7 +707,7 @@ static inline int hvm_vmtrace_set_option
     struct vcpu *v, uint64_t key, uint64_t value)
 {
     if ( hvm_funcs.vmtrace_set_option )
-        return hvm_funcs.vmtrace_set_option(v, key, value);
+        return alternative_call(hvm_funcs.vmtrace_set_option, v, key, value);
 
     return -EOPNOTSUPP;
 }
@@ -716,7 +716,7 @@ static inline int hvm_vmtrace_get_option
     struct vcpu *v, uint64_t key, uint64_t *value)
 {
     if ( hvm_funcs.vmtrace_get_option )
-        return hvm_funcs.vmtrace_get_option(v, key, value);
+        return alternative_call(hvm_funcs.vmtrace_get_option, v, key, value);
 
     return -EOPNOTSUPP;
 }
@@ -724,7 +724,7 @@ static inline int hvm_vmtrace_get_option
 static inline int hvm_vmtrace_reset(struct vcpu *v)
 {
     if ( hvm_funcs.vmtrace_reset )
-        return hvm_funcs.vmtrace_reset(v);
+        return alternative_call(hvm_funcs.vmtrace_reset, v);
 
     return -EOPNOTSUPP;
 }
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -270,7 +270,8 @@ int arch_monitor_domctl_event(struct dom
         ad->monitor.descriptor_access_enabled = requested_status;
 
         for_each_vcpu ( d, v )
-            hvm_funcs.set_descriptor_access_exiting(v, requested_status);
+            alternative_vcall(hvm_funcs.set_descriptor_access_exiting, v,
+                              requested_status);
 
         domain_unpause(d);
         break;
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -204,7 +204,7 @@ void vm_event_fill_regs(vm_event_request
     ASSERT(is_hvm_vcpu(curr));
 
     /* Architecture-specific vmcs/vmcb bits */
-    hvm_funcs.save_cpu_ctxt(curr, &ctxt);
+    alternative_vcall(hvm_funcs.save_cpu_ctxt, curr, &ctxt);
 
     req->data.regs.x86.rax = regs->rax;
     req->data.regs.x86.rcx = regs->rcx;


Re: [PATCH v2] x86/HVM: convert remaining hvm_funcs hook invocations to alt-call

Posted by Tamas K Lengyel 1 week, 3 days ago
On Thu, Jan 13, 2022 at 12:02 PM Jan Beulich <jbeulich@suse.com> wrote:
>
> The aim is to have as few indirect calls as possible (see [1]); during
> the initial conversion, performance was the main consideration and
> hence rarely used hooks didn't get converted. Apparently one use of
> get_interrupt_shadow() was missed at the time.
>
> While doing this, drop NULL checks ahead of CPU management and .nhvm_*()
> calls when the hook is always present. Also convert the
> .nhvm_vcpu_reset() call to alternative_vcall(), as the return value is
> unused and the caller currently has no way of propagating it.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Tamas K Lengyel <tamas@tklengyel.com>

Re: [PATCH v2] x86/HVM: convert remaining hvm_funcs hook invocations to alt-call

Posted by Andrew Cooper 1 week, 4 days ago
On 13/01/2022 17:02, Jan Beulich wrote:
> The aim is to have as few indirect calls as possible (see [1]); during
> the initial conversion, performance was the main consideration and
> hence rarely used hooks didn't get converted. Apparently one use of
> get_interrupt_shadow() was missed at the time.
>
> While doing this, drop NULL checks ahead of CPU management and .nhvm_*()
> calls when the hook is always present. Also convert the
> .nhvm_vcpu_reset() call to alternative_vcall(), as the return value is
> unused and the caller currently has no way of propagating it.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>