[PATCH v2 0/2] x86: VM assist hypercall adjustments

Jan Beulich posted 2 patches 4 years ago
Only 0 patches received!
There is a newer version of this series
[PATCH v2 0/2] x86: VM assist hypercall adjustments
Posted by Jan Beulich 4 years ago
1: HVM: expose VM assist hypercall
2: validate VM assist value in arch_set_info_guest()

Jan

[PATCH v2 1/2] x86/HVM: expose VM assist hypercall
Posted by Jan Beulich 4 years ago
In preparation for the addition of VMASST_TYPE_runstate_update_flag
commit 72c538cca957 ("arm: add support for vm_assist hypercall") enabled
the hypercall for Arm. I consider it not logical that it then isn't also
exposed to x86 HVM guests (with the same single feature permitted to be
enabled as Arm has); Linux actually tries to use it afaict.

Rather than introducing yet another thin wrapper around vm_assist(),
make that function the main handler, requiring a per-arch
arch_vm_assist_valid() definition instead.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Re-work vm_assist() handling/layering at the same time. Also adjust
    arch_set_info_guest().

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -939,6 +939,9 @@ int arch_set_info_guest(
         v->arch.dr6 = c(debugreg[6]);
         v->arch.dr7 = c(debugreg[7]);
 
+        if ( v->vcpu_id == 0 )
+            d->vm_assist = c.nat->vm_assist;
+
         hvm_set_info_guest(v);
         goto out;
     }
--- a/xen/arch/x86/hvm/hypercall.c
+++ b/xen/arch/x86/hvm/hypercall.c
@@ -128,6 +128,7 @@ static const hypercall_table_t hvm_hyper
 #ifdef CONFIG_GRANT_TABLE
     HVM_CALL(grant_table_op),
 #endif
+    HYPERCALL(vm_assist),
     COMPAT_CALL(vcpu_op),
     HVM_CALL(physdev_op),
     COMPAT_CALL(xen_version),
--- a/xen/arch/x86/pv/hypercall.c
+++ b/xen/arch/x86/pv/hypercall.c
@@ -57,7 +57,7 @@ const hypercall_table_t pv_hypercall_tab
 #ifdef CONFIG_GRANT_TABLE
     COMPAT_CALL(grant_table_op),
 #endif
-    COMPAT_CALL(vm_assist),
+    HYPERCALL(vm_assist),
     COMPAT_CALL(update_va_mapping_otherdomain),
     COMPAT_CALL(iret),
     COMPAT_CALL(vcpu_op),
--- a/xen/common/compat/kernel.c
+++ b/xen/common/compat/kernel.c
@@ -37,11 +37,6 @@ CHECK_TYPE(capabilities_info);
 
 CHECK_TYPE(domain_handle);
 
-#ifdef COMPAT_VM_ASSIST_VALID
-#undef VM_ASSIST_VALID
-#define VM_ASSIST_VALID COMPAT_VM_ASSIST_VALID
-#endif
-
 #define DO(fn) int compat_##fn
 #define COMPAT
 
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1517,20 +1517,23 @@ long do_vcpu_op(int cmd, unsigned int vc
     return rc;
 }
 
-#ifdef VM_ASSIST_VALID
-long vm_assist(struct domain *p, unsigned int cmd, unsigned int type,
-               unsigned long valid)
+#ifdef arch_vm_assist_valid
+long do_vm_assist(unsigned int cmd, unsigned int type)
 {
+    struct domain *currd = current->domain;
+    const unsigned long valid = arch_vm_assist_valid(currd);
+
     if ( type >= BITS_PER_LONG || !test_bit(type, &valid) )
         return -EINVAL;
 
     switch ( cmd )
     {
     case VMASST_CMD_enable:
-        set_bit(type, &p->vm_assist);
+        set_bit(type, &currd->vm_assist);
         return 0;
+
     case VMASST_CMD_disable:
-        clear_bit(type, &p->vm_assist);
+        clear_bit(type, &currd->vm_assist);
         return 0;
     }
 
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -566,13 +566,6 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDL
     return -ENOSYS;
 }
 
-#ifdef VM_ASSIST_VALID
-DO(vm_assist)(unsigned int cmd, unsigned int type)
-{
-    return vm_assist(current->domain, cmd, type, VM_ASSIST_VALID);
-}
-#endif
-
 /*
  * Local variables:
  * mode: C
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -195,8 +195,6 @@ extern unsigned long frametable_virt_end
 #define watchdog_disable() ((void)0)
 #define watchdog_enable()  ((void)0)
 
-#define VM_ASSIST_VALID          (1UL << VMASST_TYPE_runstate_update_flag)
-
 #endif /* __ARM_CONFIG_H__ */
 /*
  * Local variables:
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -269,6 +269,8 @@ static inline void free_vcpu_guest_conte
 
 static inline void arch_vcpu_block(struct vcpu *v) {}
 
+#define arch_vm_assist_valid(d) (1UL << VMASST_TYPE_runstate_update_flag)
+
 #endif /* __ASM_DOMAIN_H__ */
 
 /*
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -309,17 +309,6 @@ extern unsigned long xen_phys_start;
 #define ARG_XLAT_START(v)        \
     (ARG_XLAT_VIRT_START + ((v)->vcpu_id << ARG_XLAT_VA_SHIFT))
 
-#define NATIVE_VM_ASSIST_VALID   ((1UL << VMASST_TYPE_4gb_segments)        | \
-                                  (1UL << VMASST_TYPE_4gb_segments_notify) | \
-                                  (1UL << VMASST_TYPE_writable_pagetables) | \
-                                  (1UL << VMASST_TYPE_pae_extended_cr3)    | \
-                                  (1UL << VMASST_TYPE_architectural_iopl)  | \
-                                  (1UL << VMASST_TYPE_runstate_update_flag)| \
-                                  (1UL << VMASST_TYPE_m2p_strict))
-#define VM_ASSIST_VALID          NATIVE_VM_ASSIST_VALID
-#define COMPAT_VM_ASSIST_VALID   (NATIVE_VM_ASSIST_VALID & \
-                                  ((1UL << COMPAT_BITS_PER_LONG) - 1))
-
 #define ELFSIZE 64
 
 #define ARCH_CRASH_SAVE_VMCOREINFO
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -700,6 +700,20 @@ static inline void pv_inject_sw_interrup
     pv_inject_event(&event);
 }
 
+#define PV_VM_ASSIST_VALID  ((1UL << VMASST_TYPE_4gb_segments)        | \
+                             (1UL << VMASST_TYPE_4gb_segments_notify) | \
+                             (1UL << VMASST_TYPE_writable_pagetables) | \
+                             (1UL << VMASST_TYPE_pae_extended_cr3)    | \
+                             (1UL << VMASST_TYPE_architectural_iopl)  | \
+                             (1UL << VMASST_TYPE_runstate_update_flag)| \
+                             (1UL << VMASST_TYPE_m2p_strict))
+#define HVM_VM_ASSIST_VALID (1UL << VMASST_TYPE_runstate_update_flag)
+
+#define arch_vm_assist_valid(d) \
+    (is_hvm_domain(d) ? HVM_VM_ASSIST_VALID \
+                      : is_pv_32bit_domain(d) ? (uint32_t)PV_VM_ASSIST_VALID \
+                                              : PV_VM_ASSIST_VALID)
+
 #endif /* __ASM_DOMAIN_H__ */
 
 /*
--- a/xen/include/xen/hypercall.h
+++ b/xen/include/xen/hypercall.h
@@ -192,8 +192,6 @@ extern int compat_xsm_op(
 
 extern int compat_kexec_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) uarg);
 
-extern int compat_vm_assist(unsigned int cmd, unsigned int type);
-
 DEFINE_XEN_GUEST_HANDLE(multicall_entry_compat_t);
 extern int compat_multicall(
     XEN_GUEST_HANDLE_PARAM(multicall_entry_compat_t) call_list,
--- a/xen/include/xen/lib.h
+++ b/xen/include/xen/lib.h
@@ -122,8 +122,6 @@ extern void guest_printk(const struct do
     __attribute__ ((format (printf, 2, 3)));
 extern void noreturn panic(const char *format, ...)
     __attribute__ ((format (printf, 1, 2)));
-extern long vm_assist(struct domain *, unsigned int cmd, unsigned int type,
-                      unsigned long valid);
 extern int __printk_ratelimit(int ratelimit_ms, int ratelimit_burst);
 extern int printk_ratelimit(void);
 

Re: [PATCH v2 1/2] x86/HVM: expose VM assist hypercall
Posted by Julien Grall 4 years ago
Hi Jan,

On 14/04/2020 12:34, Jan Beulich wrote:
> In preparation for the addition of VMASST_TYPE_runstate_update_flag
> commit 72c538cca957 ("arm: add support for vm_assist hypercall") enabled
> the hypercall for Arm. I consider it not logical that it then isn't also
> exposed to x86 HVM guests (with the same single feature permitted to be
> enabled as Arm has); Linux actually tries to use it afaict.
> 
> Rather than introducing yet another thin wrapper around vm_assist(),
> make that function the main handler, requiring a per-arch
> arch_vm_assist_valid() definition instead.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Re-work vm_assist() handling/layering at the same time. Also adjust
>      arch_set_info_guest().
> 
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -939,6 +939,9 @@ int arch_set_info_guest(
>           v->arch.dr6 = c(debugreg[6]);
>           v->arch.dr7 = c(debugreg[7]);
>   
> +        if ( v->vcpu_id == 0 )
> +            d->vm_assist = c.nat->vm_assist;
> +
>           hvm_set_info_guest(v);
>           goto out;
>       }
> --- a/xen/arch/x86/hvm/hypercall.c
> +++ b/xen/arch/x86/hvm/hypercall.c
> @@ -128,6 +128,7 @@ static const hypercall_table_t hvm_hyper
>   #ifdef CONFIG_GRANT_TABLE
>       HVM_CALL(grant_table_op),
>   #endif
> +    HYPERCALL(vm_assist),
>       COMPAT_CALL(vcpu_op),
>       HVM_CALL(physdev_op),
>       COMPAT_CALL(xen_version),
> --- a/xen/arch/x86/pv/hypercall.c
> +++ b/xen/arch/x86/pv/hypercall.c
> @@ -57,7 +57,7 @@ const hypercall_table_t pv_hypercall_tab
>   #ifdef CONFIG_GRANT_TABLE
>       COMPAT_CALL(grant_table_op),
>   #endif
> -    COMPAT_CALL(vm_assist),
> +    HYPERCALL(vm_assist),
>       COMPAT_CALL(update_va_mapping_otherdomain),
>       COMPAT_CALL(iret),
>       COMPAT_CALL(vcpu_op),
> --- a/xen/common/compat/kernel.c
> +++ b/xen/common/compat/kernel.c
> @@ -37,11 +37,6 @@ CHECK_TYPE(capabilities_info);
>   
>   CHECK_TYPE(domain_handle);
>   
> -#ifdef COMPAT_VM_ASSIST_VALID
> -#undef VM_ASSIST_VALID
> -#define VM_ASSIST_VALID COMPAT_VM_ASSIST_VALID
> -#endif
> -
>   #define DO(fn) int compat_##fn
>   #define COMPAT
>   
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -1517,20 +1517,23 @@ long do_vcpu_op(int cmd, unsigned int vc
>       return rc;
>   }
>   
> -#ifdef VM_ASSIST_VALID
> -long vm_assist(struct domain *p, unsigned int cmd, unsigned int type,
> -               unsigned long valid)
> +#ifdef arch_vm_assist_valid

How about naming the function arch_vm_assist_valid_mask?

> +long do_vm_assist(unsigned int cmd, unsigned int type)
>   {
> +    struct domain *currd = current->domain;
> +    const unsigned long valid = arch_vm_assist_valid(currd);
> +
>       if ( type >= BITS_PER_LONG || !test_bit(type, &valid) )
>           return -EINVAL;
>   
>       switch ( cmd )
>       {
>       case VMASST_CMD_enable:
> -        set_bit(type, &p->vm_assist);
> +        set_bit(type, &currd->vm_assist);
>           return 0;
> +
>       case VMASST_CMD_disable:
> -        clear_bit(type, &p->vm_assist);
> +        clear_bit(type, &currd->vm_assist);
>           return 0;
>       }
>   
> --- a/xen/common/kernel.c
> +++ b/xen/common/kernel.c
> @@ -566,13 +566,6 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDL
>       return -ENOSYS;
>   }
>   
> -#ifdef VM_ASSIST_VALID
> -DO(vm_assist)(unsigned int cmd, unsigned int type)
> -{
> -    return vm_assist(current->domain, cmd, type, VM_ASSIST_VALID);
> -}
> -#endif
> -
>   /*
>    * Local variables:
>    * mode: C
> --- a/xen/include/asm-arm/config.h
> +++ b/xen/include/asm-arm/config.h
> @@ -195,8 +195,6 @@ extern unsigned long frametable_virt_end
>   #define watchdog_disable() ((void)0)
>   #define watchdog_enable()  ((void)0)
>   
> -#define VM_ASSIST_VALID          (1UL << VMASST_TYPE_runstate_update_flag)
> -
>   #endif /* __ARM_CONFIG_H__ */
>   /*
>    * Local variables:
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -269,6 +269,8 @@ static inline void free_vcpu_guest_conte
>   
>   static inline void arch_vcpu_block(struct vcpu *v) {}
>   
> +#define arch_vm_assist_valid(d) (1UL << VMASST_TYPE_runstate_update_flag)
> +
>   #endif /* __ASM_DOMAIN_H__ */
>   
>   /*
> --- a/xen/include/asm-x86/config.h
> +++ b/xen/include/asm-x86/config.h
> @@ -309,17 +309,6 @@ extern unsigned long xen_phys_start;
>   #define ARG_XLAT_START(v)        \
>       (ARG_XLAT_VIRT_START + ((v)->vcpu_id << ARG_XLAT_VA_SHIFT))
>   
> -#define NATIVE_VM_ASSIST_VALID   ((1UL << VMASST_TYPE_4gb_segments)        | \
> -                                  (1UL << VMASST_TYPE_4gb_segments_notify) | \
> -                                  (1UL << VMASST_TYPE_writable_pagetables) | \
> -                                  (1UL << VMASST_TYPE_pae_extended_cr3)    | \
> -                                  (1UL << VMASST_TYPE_architectural_iopl)  | \
> -                                  (1UL << VMASST_TYPE_runstate_update_flag)| \
> -                                  (1UL << VMASST_TYPE_m2p_strict))
> -#define VM_ASSIST_VALID          NATIVE_VM_ASSIST_VALID
> -#define COMPAT_VM_ASSIST_VALID   (NATIVE_VM_ASSIST_VALID & \
> -                                  ((1UL << COMPAT_BITS_PER_LONG) - 1))
> -
>   #define ELFSIZE 64
>   
>   #define ARCH_CRASH_SAVE_VMCOREINFO
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -700,6 +700,20 @@ static inline void pv_inject_sw_interrup
>       pv_inject_event(&event);
>   }
>   
> +#define PV_VM_ASSIST_VALID  ((1UL << VMASST_TYPE_4gb_segments)        | \
> +                             (1UL << VMASST_TYPE_4gb_segments_notify) | \
> +                             (1UL << VMASST_TYPE_writable_pagetables) | \
> +                             (1UL << VMASST_TYPE_pae_extended_cr3)    | \
> +                             (1UL << VMASST_TYPE_architectural_iopl)  | \
> +                             (1UL << VMASST_TYPE_runstate_update_flag)| \
> +                             (1UL << VMASST_TYPE_m2p_strict))
> +#define HVM_VM_ASSIST_VALID (1UL << VMASST_TYPE_runstate_update_flag)
> +
> +#define arch_vm_assist_valid(d) \
> +    (is_hvm_domain(d) ? HVM_VM_ASSIST_VALID \
> +                      : is_pv_32bit_domain(d) ? (uint32_t)PV_VM_ASSIST_VALID \

I understand this is matching the current code, however without looking 
at the rest of patch this is not clear why the cast. May I suggest to 
add a comment explaining the rationale?

> +                                              : PV_VM_ASSIST_VALID)
> +
>   #endif /* __ASM_DOMAIN_H__ */
>   
>   /*
> --- a/xen/include/xen/hypercall.h
> +++ b/xen/include/xen/hypercall.h
> @@ -192,8 +192,6 @@ extern int compat_xsm_op(
>   
>   extern int compat_kexec_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) uarg);
>   
> -extern int compat_vm_assist(unsigned int cmd, unsigned int type);
> -
>   DEFINE_XEN_GUEST_HANDLE(multicall_entry_compat_t);
>   extern int compat_multicall(
>       XEN_GUEST_HANDLE_PARAM(multicall_entry_compat_t) call_list,
> --- a/xen/include/xen/lib.h
> +++ b/xen/include/xen/lib.h
> @@ -122,8 +122,6 @@ extern void guest_printk(const struct do
>       __attribute__ ((format (printf, 2, 3)));
>   extern void noreturn panic(const char *format, ...)
>       __attribute__ ((format (printf, 1, 2)));
> -extern long vm_assist(struct domain *, unsigned int cmd, unsigned int type,
> -                      unsigned long valid);
>   extern int __printk_ratelimit(int ratelimit_ms, int ratelimit_burst);
>   extern int printk_ratelimit(void);
>   
> 

Cheers,

-- 
Julien Grall

Re: [PATCH v2 1/2] x86/HVM: expose VM assist hypercall
Posted by Jan Beulich 4 years ago
On 20.04.2020 19:53, Julien Grall wrote:
>> --- a/xen/common/domain.c
>> +++ b/xen/common/domain.c
>> @@ -1517,20 +1517,23 @@ long do_vcpu_op(int cmd, unsigned int vc
>>       return rc;
>>   }
>>   -#ifdef VM_ASSIST_VALID
>> -long vm_assist(struct domain *p, unsigned int cmd, unsigned int type,
>> -               unsigned long valid)
>> +#ifdef arch_vm_assist_valid
> 
> How about naming the function arch_vm_assist_valid_mask?

Certainly a possibility, albeit to me the gain would be marginal
and possibly not outweigh the growth in length. Andrew, any
preference?

>> --- a/xen/include/asm-x86/domain.h
>> +++ b/xen/include/asm-x86/domain.h
>> @@ -700,6 +700,20 @@ static inline void pv_inject_sw_interrup
>>     pv_inject_event(&event);
>> }
>> +#define PV_VM_ASSIST_VALID  ((1UL << VMASST_TYPE_4gb_segments)        | \
>> +                             (1UL << VMASST_TYPE_4gb_segments_notify) | \
>> +                             (1UL << VMASST_TYPE_writable_pagetables) | \
>> +                             (1UL << VMASST_TYPE_pae_extended_cr3)    | \
>> +                             (1UL << VMASST_TYPE_architectural_iopl)  | \
>> +                             (1UL << VMASST_TYPE_runstate_update_flag)| \
>> +                             (1UL << VMASST_TYPE_m2p_strict))
>> +#define HVM_VM_ASSIST_VALID (1UL << VMASST_TYPE_runstate_update_flag)
>> +
>> +#define arch_vm_assist_valid(d) \
>> +    (is_hvm_domain(d) ? HVM_VM_ASSIST_VALID \
>> +                      : is_pv_32bit_domain(d) ? (uint32_t)PV_VM_ASSIST_VALID \
> 
> I understand this is matching the current code, however without
> looking at the rest of patch this is not clear why the cast. May
> I suggest to add a comment explaining the rationale?

Hmm, I can state that the rationale is history. Many of the assists in
the low 32 bits are for 32-bit guests only. But we can't start refusing
a 64-bit kernel requesting them. The ones in the high 32 bits are, for
now, applicable to 64-bit guests only, and have always been refused for
32-bit ones.

Imo if anything an explanation on where new bits should be put should
go next to the VMASST_TYPE_* definitions in the public header, yet then
again the public headers aren't (imo) a good place to put
implementation detail comments.

Again, Andrew - since you've ack-ed the patch already, any thoughts
here either way?

Jan

Re: [PATCH v2 1/2] x86/HVM: expose VM assist hypercall
Posted by Julien Grall 4 years ago

On 21/04/2020 06:54, Jan Beulich wrote:
> On 20.04.2020 19:53, Julien Grall wrote:
>>> --- a/xen/common/domain.c
>>> +++ b/xen/common/domain.c
>>> @@ -1517,20 +1517,23 @@ long do_vcpu_op(int cmd, unsigned int vc
>>>        return rc;
>>>    }
>>>    -#ifdef VM_ASSIST_VALID
>>> -long vm_assist(struct domain *p, unsigned int cmd, unsigned int type,
>>> -               unsigned long valid)
>>> +#ifdef arch_vm_assist_valid
>>
>> How about naming the function arch_vm_assist_valid_mask?
> 
> Certainly a possibility, albeit to me the gain would be marginal
> and possibly not outweigh the growth in length. Andrew, any
> preference?

You have a point regarding the length of the function.

> 
>>> --- a/xen/include/asm-x86/domain.h
>>> +++ b/xen/include/asm-x86/domain.h
>>> @@ -700,6 +700,20 @@ static inline void pv_inject_sw_interrup
>>>       pv_inject_event(&event);
>>> }
>>> +#define PV_VM_ASSIST_VALID  ((1UL << VMASST_TYPE_4gb_segments)        | \
>>> +                             (1UL << VMASST_TYPE_4gb_segments_notify) | \
>>> +                             (1UL << VMASST_TYPE_writable_pagetables) | \
>>> +                             (1UL << VMASST_TYPE_pae_extended_cr3)    | \
>>> +                             (1UL << VMASST_TYPE_architectural_iopl)  | \
>>> +                             (1UL << VMASST_TYPE_runstate_update_flag)| \
>>> +                             (1UL << VMASST_TYPE_m2p_strict))
>>> +#define HVM_VM_ASSIST_VALID (1UL << VMASST_TYPE_runstate_update_flag)
>>> +
>>> +#define arch_vm_assist_valid(d) \
>>> +    (is_hvm_domain(d) ? HVM_VM_ASSIST_VALID \
>>> +                      : is_pv_32bit_domain(d) ? (uint32_t)PV_VM_ASSIST_VALID \
>>
>> I understand this is matching the current code, however without
>> looking at the rest of patch this is not clear why the cast. May
>> I suggest to add a comment explaining the rationale?
> 
> Hmm, I can state that the rationale is history. Many of the assists in
> the low 32 bits are for 32-bit guests only. But we can't start refusing
> a 64-bit kernel requesting them. The ones in the high 32 bits are, for
> now, applicable to 64-bit guests only, and have always been refused for
> 32-bit ones.
>
> Imo if anything an explanation on where new bits should be put should
> go next to the VMASST_TYPE_* definitions in the public header, yet then
> again the public headers aren't (imo) a good place to put
> implementation detail comments.

How about splitting PV_VM_ASSIST_VALID in two? One would contain 64-bit 
PV specific flags and the other common PV flags?

This should make the code more obvious and easier to read for someone 
less familiar with the area.

It also means we could have a BUILD_BUG_ON() to check at build time that 
no flags are added above 32-bit.

Cheers,

-- 
Julien Grall

Re: [PATCH v2 1/2] x86/HVM: expose VM assist hypercall
Posted by Andrew Cooper 4 years ago
On 21/04/2020 13:35, Julien Grall wrote:
>>>> --- a/xen/include/asm-x86/domain.h
>>>> +++ b/xen/include/asm-x86/domain.h
>>>> @@ -700,6 +700,20 @@ static inline void pv_inject_sw_interrup
>>>>       pv_inject_event(&event);
>>>> }
>>>> +#define PV_VM_ASSIST_VALID  ((1UL << VMASST_TYPE_4gb_segments)        | \
>>>> +                             (1UL << VMASST_TYPE_4gb_segments_notify) | \
>>>> +                             (1UL << VMASST_TYPE_writable_pagetables) | \
>>>> +                             (1UL << VMASST_TYPE_pae_extended_cr3)    | \
>>>> +                             (1UL << VMASST_TYPE_architectural_iopl)  | \
>>>> +                             (1UL << VMASST_TYPE_runstate_update_flag)| \
>>>> +                             (1UL << VMASST_TYPE_m2p_strict))
>>>> +#define HVM_VM_ASSIST_VALID (1UL << VMASST_TYPE_runstate_update_flag)
>>>> +
>>>> +#define arch_vm_assist_valid(d) \
>>>> +    (is_hvm_domain(d) ? HVM_VM_ASSIST_VALID \
>>>> +                      : is_pv_32bit_domain(d) ? (uint32_t)PV_VM_ASSIST_VALID \
>>>
>>> I understand this is matching the current code, however without
>>> looking at the rest of patch this is not clear why the cast. May
>>> I suggest to add a comment explaining the rationale?
>>
>> Hmm, I can state that the rationale is history. Many of the assists in
>> the low 32 bits are for 32-bit guests only. But we can't start refusing
>> a 64-bit kernel requesting them. The ones in the high 32 bits are, for
>> now, applicable to 64-bit guests only, and have always been refused for
>> 32-bit ones.
>>
>> Imo if anything an explanation on where new bits should be put should
>> go next to the VMASST_TYPE_* definitions in the public header, yet then
>> again the public headers aren't (imo) a good place to put
>> implementation detail comments.
>
> How about splitting PV_VM_ASSIST_VALID in two? One would contain
> 64-bit PV specific flags and the other common PV flags?
>
> This should make the code more obvious and easier to read for someone
> less familiar with the area.
>
> It also means we could have a BUILD_BUG_ON() to check at build time
> that no flags are added above 32-bit.

I like this suggestion, but would suggest a 64/32 split, rather than
64/common.  These are a total mixed bag and every new one should be
considered for all ABIs rather than falling automatically into some.

~Andrew

Re: [PATCH v2 1/2] x86/HVM: expose VM assist hypercall
Posted by Andrew Cooper 4 years ago
On 21/04/2020 06:54, Jan Beulich wrote:
> On 20.04.2020 19:53, Julien Grall wrote:
>>> --- a/xen/common/domain.c
>>> +++ b/xen/common/domain.c
>>> @@ -1517,20 +1517,23 @@ long do_vcpu_op(int cmd, unsigned int vc
>>>       return rc;
>>>   }
>>>   -#ifdef VM_ASSIST_VALID
>>> -long vm_assist(struct domain *p, unsigned int cmd, unsigned int type,
>>> -               unsigned long valid)
>>> +#ifdef arch_vm_assist_valid
>> How about naming the function arch_vm_assist_valid_mask?
> Certainly a possibility, albeit to me the gain would be marginal
> and possibly not outweigh the growth in length. Andrew, any
> preference?

I prefer Julien's suggestion overall.  It is obviously not a predicate,
whereas the shorter name could easily be one.

~Andrew

Re: [PATCH v2 1/2] x86/HVM: expose VM assist hypercall
Posted by Andrew Cooper 4 years ago
On 14/04/2020 12:34, Jan Beulich wrote:
> In preparation for the addition of VMASST_TYPE_runstate_update_flag
> commit 72c538cca957 ("arm: add support for vm_assist hypercall") enabled
> the hypercall for Arm. I consider it not logical that it then isn't also
> exposed to x86 HVM guests (with the same single feature permitted to be
> enabled as Arm has); Linux actually tries to use it afaict.
>
> Rather than introducing yet another thin wrapper around vm_assist(),
> make that function the main handler, requiring a per-arch
> arch_vm_assist_valid() definition instead.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Re-work vm_assist() handling/layering at the same time. Also adjust
>     arch_set_info_guest().

Much nicer.  Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

However, ...

> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -1517,20 +1517,23 @@ long do_vcpu_op(int cmd, unsigned int vc
>      return rc;
>  }
>  
> -#ifdef VM_ASSIST_VALID
> -long vm_assist(struct domain *p, unsigned int cmd, unsigned int type,
> -               unsigned long valid)
> +#ifdef arch_vm_assist_valid
> +long do_vm_assist(unsigned int cmd, unsigned int type)
>  {
> +    struct domain *currd = current->domain;
> +    const unsigned long valid = arch_vm_assist_valid(currd);
> +
>      if ( type >= BITS_PER_LONG || !test_bit(type, &valid) )
>          return -EINVAL;

As a thought, would it be better to have a guest_bits_per_long()
helper?  This type >= BITS_PER_LONG isn't terribly correct for 32bit
guests, and it would avoid needing the truncation in the arch helper,
which is asymmetric on the ARM side.

~Andrew

Re: [PATCH v2 1/2] x86/HVM: expose VM assist hypercall
Posted by Jan Beulich 4 years ago
On 20.04.2020 22:16, Andrew Cooper wrote:
> On 14/04/2020 12:34, Jan Beulich wrote:
>> In preparation for the addition of VMASST_TYPE_runstate_update_flag
>> commit 72c538cca957 ("arm: add support for vm_assist hypercall") enabled
>> the hypercall for Arm. I consider it not logical that it then isn't also
>> exposed to x86 HVM guests (with the same single feature permitted to be
>> enabled as Arm has); Linux actually tries to use it afaict.
>>
>> Rather than introducing yet another thin wrapper around vm_assist(),
>> make that function the main handler, requiring a per-arch
>> arch_vm_assist_valid() definition instead.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> v2: Re-work vm_assist() handling/layering at the same time. Also adjust
>>     arch_set_info_guest().
> 
> Much nicer.  Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

Thanks.

> However, ...
> 
>> --- a/xen/common/domain.c
>> +++ b/xen/common/domain.c
>> @@ -1517,20 +1517,23 @@ long do_vcpu_op(int cmd, unsigned int vc
>>      return rc;
>>  }
>>  
>> -#ifdef VM_ASSIST_VALID
>> -long vm_assist(struct domain *p, unsigned int cmd, unsigned int type,
>> -               unsigned long valid)
>> +#ifdef arch_vm_assist_valid
>> +long do_vm_assist(unsigned int cmd, unsigned int type)
>>  {
>> +    struct domain *currd = current->domain;
>> +    const unsigned long valid = arch_vm_assist_valid(currd);
>> +
>>      if ( type >= BITS_PER_LONG || !test_bit(type, &valid) )
>>          return -EINVAL;
> 
> As a thought, would it be better to have a guest_bits_per_long()
> helper?  This type >= BITS_PER_LONG isn't terribly correct for 32bit
> guests, and it would avoid needing the truncation in the arch helper,
> which is asymmetric on the ARM side.

I'd rather not - the concept of guest bitness is already fuzzy
enough for HVM (see our 32-bit shared info latching), and
introducing a generic predicate like you suggest would invite
for use of it in places where people may forget how fuzzy the
concept is.

I also don't view the BITS_PER_LONG check here as pertaining
to a guest property - all we want is to bound the test_bit().
There's nothing wrong to, in the future, define bits beyond
possible guest bitness. It's merely a "helps for now" that on
x86 we've decided to put the 1st 64-bit only assist bit in
the high 32 bits (it may well be that this was added back
when we still had 32-bit support for Xen itself).

Jan

[PATCH v2 2/2] x86: validate VM assist value in arch_set_info_guest()
Posted by Jan Beulich 4 years ago
While I can't spot anything that would go wrong, just like the
respective hypercall only permits applicable bits to be set, we should
also do so when loading guest context.

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
I'd like to note that Arm lacks a field to save/restore vm_assist.

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -932,6 +932,9 @@ int arch_set_info_guest(
         }
     }
 
+    if ( v->vcpu_id == 0 && (c(vm_assist) & ~arch_vm_assist_valid(d)) )
+        return -EINVAL;
+
     if ( is_hvm_domain(d) )
     {
         for ( i = 0; i < ARRAY_SIZE(v->arch.dr); ++i )


Re: [PATCH v2 2/2] x86: validate VM assist value in arch_set_info_guest()
Posted by Andrew Cooper 4 years ago
On 14/04/2020 12:35, Jan Beulich wrote:
> While I can't spot anything that would go wrong, just like the
> respective hypercall only permits applicable bits to be set, we should
> also do so when loading guest context.
>
> Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>