[PATCH] x86/hvm: save/restore per-VCPU event channel upcall vector

[PATCH] x86/hvm: save/restore per-VCPU event channel upcall vector
Posted by David Vrabel 2 years, 3 months ago
The Windows XENBUS driver sets the per-VCPU LAPIC vector for event
channel interrupts using the HVMOP_set_evtchn_upcall_vector hypercall
(rather than using a vector-type callback in the CALLBACK_IRQ HVM
parameter since the vectors might be different for different VCPUs).

This state needs to be saved/restored or a restored guest may not be
able to get event channel interrupts.

Note that the Windows XENBUS driver works around this by reissuing the
hypercall when resuming after a migration, but this workaround would
not be possible with a guest-transparent live migration or a live
update.

Signed-off-by: David Vrabel <dvrabel@amazon.co.uk>
---
 xen/arch/x86/hvm/hvm.c                 | 50 ++++++++++++++++++++++++--
 xen/include/public/arch-x86/hvm/save.h | 12 ++++++-
 2 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 350dc396e3..be2e676c4a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4071,6 +4071,52 @@ static int hvmop_flush_tlb_all(void)
     return paging_flush_tlb(NULL) ? 0 : -ERESTART;
 }
 
+static void hvm_set_evtchn_upcall_vector(struct vcpu *v, uint8_t vector)
+{
+    printk(XENLOG_G_INFO "%pv: upcall vector %02x\n", v, vector);
+
+    v->arch.hvm.evtchn_upcall_vector = vector;
+    hvm_assert_evtchn_irq(v);
+}
+
+static int hvm_save_evtchn_upcall_vector(
+    struct vcpu *v, hvm_domain_context_t *h)
+{
+    struct hvm_evtchn_upcall_vector upcall;
+
+    /* Skip record if VCPU is down or the upcall vector is not used. */
+    if ( test_bit(_VPF_down, &v->pause_flags) )
+        return 0;
+    if ( v->arch.hvm.evtchn_upcall_vector == 0 )
+        return 0;
+
+    upcall.vector = v->arch.hvm.evtchn_upcall_vector;
+
+    return hvm_save_entry(EVTCHN_UPCALL_VECTOR, v->vcpu_id, h, &upcall);
+}
+
+static int hvm_load_evtchn_upcall_vector(
+    struct domain *d, hvm_domain_context_t *h)
+{
+    unsigned int vcpuid;
+    struct vcpu *v;
+    struct hvm_evtchn_upcall_vector upcall;
+
+    vcpuid = hvm_load_instance(h);
+    if ( (v = domain_vcpu(d, vcpuid)) == NULL )
+        return -EINVAL;
+
+    if ( hvm_load_entry(EVTCHN_UPCALL_VECTOR, h, &upcall) != 0 )
+        return -EINVAL;
+
+    hvm_set_evtchn_upcall_vector(v, upcall.vector);
+
+    return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(EVTCHN_UPCALL_VECTOR, hvm_save_evtchn_upcall_vector,
+                          hvm_load_evtchn_upcall_vector, 1, HVMSR_PER_VCPU);
+
 static int hvmop_set_evtchn_upcall_vector(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_evtchn_upcall_vector_t) uop)
 {
@@ -4090,10 +4136,8 @@ static int hvmop_set_evtchn_upcall_vector(
     if ( (v = domain_vcpu(d, op.vcpu)) == NULL )
         return -ENOENT;
 
-    printk(XENLOG_G_INFO "%pv: upcall vector %02x\n", v, op.vector);
+    hvm_set_evtchn_upcall_vector(v, op.vector);
 
-    v->arch.hvm.evtchn_upcall_vector = op.vector;
-    hvm_assert_evtchn_irq(v);
     return 0;
 }
 
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 773a380bc2..177cb09150 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -641,12 +641,22 @@ struct hvm_msr {
 
 #define CPU_MSR_CODE  20
 
+/**
+ * Per-VCPU event channel upcall vector as set by the
+ * HVMOP_set_evtchn_upcall_vector hypercall.
+ */
+struct hvm_evtchn_upcall_vector {
+    uint8_t vector;
+};
+
+DECLARE_HVM_SAVE_TYPE(EVTCHN_UPCALL_VECTOR, 21, struct hvm_evtchn_upcall_vector);
+
 /* Range 22 - 34 (inclusive) reserved for Amazon */
 
 /*
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 20
+#define HVM_SAVE_CODE_MAX 21
 
 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
 
-- 
2.30.2


Re: [PATCH] x86/hvm: save/restore per-VCPU event channel upcall vector
Posted by Jan Beulich 2 years, 3 months ago
On 06.01.2022 16:54, David Vrabel wrote:
> The Windows XENBUS driver sets the per-VCPU LAPIC vector for event
> channel interrupts using the HVMOP_set_evtchn_upcall_vector hypercall
> (rather than using a vector-type callback in the CALLBACK_IRQ HVM
> parameter since the vectors might be different for different VCPUs).
> 
> This state needs to be saved/restored or a restored guest may not be
> able to get event channel interrupts.
> 
> Note that the Windows XENBUS driver works around this by reissuing the
> hypercall when resuming after a migration, but this workaround would
> not be possible with a guest-transparent live migration or a live
> update.

Why would this be needed only for this one form of "via"? And how do
you guarantee no observable change in behavior for existing guests?
It would seem to me that this information is something to be saved /
restored _only_ during transparent migration, as aware guests may
deliberately defer re-registration.

> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -4071,6 +4071,52 @@ static int hvmop_flush_tlb_all(void)
>      return paging_flush_tlb(NULL) ? 0 : -ERESTART;
>  }
>  
> +static void hvm_set_evtchn_upcall_vector(struct vcpu *v, uint8_t vector)
> +{
> +    printk(XENLOG_G_INFO "%pv: upcall vector %02x\n", v, vector);
> +
> +    v->arch.hvm.evtchn_upcall_vector = vector;
> +    hvm_assert_evtchn_irq(v);

While asserting is needed during guest initiated registration, is
this really correct also when registering "behind the back" of the
guest?

> +}
> +
> +static int hvm_save_evtchn_upcall_vector(
> +    struct vcpu *v, hvm_domain_context_t *h)
> +{
> +    struct hvm_evtchn_upcall_vector upcall;
> +
> +    /* Skip record if VCPU is down or the upcall vector is not used. */
> +    if ( test_bit(_VPF_down, &v->pause_flags) )
> +        return 0;
> +    if ( v->arch.hvm.evtchn_upcall_vector == 0 )
> +        return 0;

Aren't you assigning meaning here to vector 0 which hasn't been
assigned so far? Shouldn't you check callback_via_type instead?

> +    upcall.vector = v->arch.hvm.evtchn_upcall_vector;
> +
> +    return hvm_save_entry(EVTCHN_UPCALL_VECTOR, v->vcpu_id, h, &upcall);
> +}
> +
> +static int hvm_load_evtchn_upcall_vector(
> +    struct domain *d, hvm_domain_context_t *h)
> +{
> +    unsigned int vcpuid;
> +    struct vcpu *v;
> +    struct hvm_evtchn_upcall_vector upcall;
> +
> +    vcpuid = hvm_load_instance(h);
> +    if ( (v = domain_vcpu(d, vcpuid)) == NULL )
> +        return -EINVAL;
> +
> +    if ( hvm_load_entry(EVTCHN_UPCALL_VECTOR, h, &upcall) != 0 )
> +        return -EINVAL;
> +
> +    hvm_set_evtchn_upcall_vector(v, upcall.vector);
> +
> +    return 0;
> +}

Don't you need to also set callback_via_type accordingly?

Jan


Re: [PATCH] x86/hvm: save/restore per-VCPU event channel upcall vector
Posted by David Vrabel 2 years, 3 months ago
On 06/01/2022 16:48, Jan Beulich wrote:
> On 06.01.2022 16:54, David Vrabel wrote:
>> The Windows XENBUS driver sets the per-VCPU LAPIC vector for event
>> channel interrupts using the HVMOP_set_evtchn_upcall_vector hypercall
>> (rather than using a vector-type callback in the CALLBACK_IRQ HVM
>> parameter since the vectors might be different for different VCPUs).
>>
>> This state needs to be saved/restored or a restored guest may not be
>> able to get event channel interrupts.
>>
>> Note that the Windows XENBUS driver works around this by reissuing the
>> hypercall when resuming after a migration, but this workaround would
>> not be possible with a guest-transparent live migration or a live
>> update.
> 
> Why would this be needed only for this one form of "via"? And how do
> you guarantee no observable change in behavior for existing guests?
> It would seem to me that this information is something to be saved /
> restored _only_ during transparent migration, as aware guests may
> deliberately defer re-registration.

I was under the impression that the HVM parameters (including
CALLBACK_IRQ) were saved/restored, so the intent here was to save/restore
all event channel delivery mechanism state consistently, but it seems I
was confused by some internal work that's not upstream yet.

So, I agree, this patch on its own doesn't make sense.

I've sent a patch reserving another save record ID instead.

>> +static int hvm_load_evtchn_upcall_vector(
>> +    struct domain *d, hvm_domain_context_t *h)
>> +{
>> +    unsigned int vcpuid;
>> +    struct vcpu *v;
>> +    struct hvm_evtchn_upcall_vector upcall;
>> +
>> +    vcpuid = hvm_load_instance(h);
>> +    if ( (v = domain_vcpu(d, vcpuid)) == NULL )
>> +        return -EINVAL;
>> +
>> +    if ( hvm_load_entry(EVTCHN_UPCALL_VECTOR, h, &upcall) != 0 )
>> +        return -EINVAL;
>> +
>> +    hvm_set_evtchn_upcall_vector(v, upcall.vector);
>> +
>> +    return 0;
>> +}
> 
> Don't you need to also set callback_via_type accordingly?

The callback_via_type is ignored if a per-vcpu upcall vector is set.

You can use a mix of CALLBACK_IRQ-defined upcalls on some VCPUs and an
HVMOP_set_evtchn_upcall_vector-defined one on others, so setting the
per-domain type wouldn't work.

I'm not sure why you would do this, but the ABI (as implemented, if not 
intentionally) allows it...

David