From: Grygorii Strashko <grygorii_strashko@epam.com>
Xen uses the pattern below for the raw_x_guest() functions:

#define raw_copy_to_guest(dst, src, len) \
    (is_hvm_vcpu(current) ? \
     copy_to_user_hvm((dst), (src), (len)) : \
     copy_to_guest_pv(dst, src, len))
This pattern works depending on CONFIG_PV/CONFIG_HVM as follows:
- PV=y and HVM=y
  The proper guest access function is selected depending on the domain type.
- PV=y and HVM=n
  Only PV domains are possible. is_hvm_domain/vcpu() constifies to "false",
  so the compiler optimizes the code and skips the HVM-specific part.
- PV=n and HVM=y
  Only HVM domains are possible. is_hvm_domain/vcpu() is not constified, so
  no PV-specific code gets optimized out by the compiler.
- PV=n and HVM=n
  No guests should be possible, yet the code still follows the PV path.
Rework the raw_x_guest() code to use static inline functions which account
for the above possible PV/HVM configurations, with the main intention of
optimizing the code for the (PV=n and HVM=y) case.

For the (PV=n and HVM=n) case return the "len" value, indicating a failure
(no guests should be possible in this case, which means no access to guest
memory should ever happen).
The measured (bloat-o-meter) improvement for the (PV=n and HVM=y) case is about -11K.
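
As an illustration (a minimal sketch derived from the new helpers, not part
of the patch itself), with PV=n the IS_ENABLED(CONFIG_PV) check lets the
compiler reduce each helper to the HVM path plus a failure return:

    /* What the compiler effectively sees for raw_copy_to_guest() with PV=n: */
    static inline unsigned int raw_copy_to_guest(void *dst, const void *src,
                                                 unsigned int len)
    {
        if ( is_hvm_vcpu(current) )
            return copy_to_user_hvm(dst, src, len);

        /*
         * !IS_ENABLED(CONFIG_PV) is constant-true: fail, reporting all bytes
         * as not copied; the copy_to_guest_pv() call is dropped, so no
         * reference to the PV library code remains in the object file.
         */
        return len;
    }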
[teddy.astie@vates.tech: Suggested to use static inline functions vs
macro combinations]
Suggested-by: Teddy Astie <teddy.astie@vates.tech>
Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
---
changes in v5:
- rebase
- drop moving usercopy.c as it is not needed since commit 7370966d1cb7
("x86: move / split usercopy.c to / into arch-specific library")
changes in v4:
- move usercopy.c into arch/x86/pv/
- rework to always dynamically check for HVM vcpu(domain) by using is_hvm_vcpu()
as requested by Jan Beulich
changes in v3:
- add raw_use_hvm_access() wrapper
changes in v2:
- use static inline functions instead of macro combinations
xen/arch/x86/include/asm/guest_access.h | 78 ++++++++++++++++++-------
1 file changed, 58 insertions(+), 20 deletions(-)
diff --git a/xen/arch/x86/include/asm/guest_access.h b/xen/arch/x86/include/asm/guest_access.h
index 69716c8b41bb..f0e56b112e14 100644
--- a/xen/arch/x86/include/asm/guest_access.h
+++ b/xen/arch/x86/include/asm/guest_access.h
@@ -13,26 +13,64 @@
#include <asm/hvm/guest_access.h>
/* Raw access functions: no type checking. */
-#define raw_copy_to_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
- copy_to_user_hvm((dst), (src), (len)) : \
- copy_to_guest_pv(dst, src, len))
-#define raw_copy_from_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
- copy_from_user_hvm((dst), (src), (len)) : \
- copy_from_guest_pv(dst, src, len))
-#define raw_clear_guest(dst, len) \
- (is_hvm_vcpu(current) ? \
- clear_user_hvm((dst), (len)) : \
- clear_guest_pv(dst, len))
-#define __raw_copy_to_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
- copy_to_user_hvm((dst), (src), (len)) : \
- __copy_to_guest_pv(dst, src, len))
-#define __raw_copy_from_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
- copy_from_user_hvm((dst), (src), (len)) : \
- __copy_from_guest_pv(dst, src, len))
+static inline unsigned int raw_copy_to_guest(void *dst, const void *src,
+ unsigned int len)
+{
+ if ( is_hvm_vcpu(current) )
+ return copy_to_user_hvm(dst, src, len);
+
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return len;
+
+ return copy_to_guest_pv(dst, src, len);
+}
+
+static inline unsigned int raw_copy_from_guest(void *dst, const void *src,
+ unsigned int len)
+{
+ if ( is_hvm_vcpu(current) )
+ return copy_from_user_hvm(dst, src, len);
+
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return len;
+
+ return copy_from_guest_pv(dst, src, len);
+}
+
+static inline unsigned int raw_clear_guest(void *dst, unsigned int len)
+{
+ if ( is_hvm_vcpu(current) )
+ return clear_user_hvm(dst, len);
+
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return len;
+
+ return clear_guest_pv(dst, len);
+}
+
+static inline unsigned int __raw_copy_to_guest(void *dst, const void *src,
+ unsigned int len)
+{
+ if ( is_hvm_vcpu(current) )
+ return copy_to_user_hvm(dst, src, len);
+
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return len;
+
+ return __copy_to_guest_pv(dst, src, len);
+}
+
+static inline unsigned int __raw_copy_from_guest(void *dst, const void *src,
+ unsigned int len)
+{
+ if ( is_hvm_vcpu(current) )
+ return copy_from_user_hvm(dst, src, len);
+
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return len;
+
+ return __copy_from_guest_pv(dst, src, len);
+}
/*
* Pre-validate a guest handle.
--
2.34.1
On 18.12.2025 14:59, Grygorii Strashko wrote:
> From: Grygorii Strashko <grygorii_strashko@epam.com>
>
> Xen uses the pattern below for the raw_x_guest() functions:
>
> #define raw_copy_to_guest(dst, src, len) \
>     (is_hvm_vcpu(current) ? \
>      copy_to_user_hvm((dst), (src), (len)) : \
>      copy_to_guest_pv(dst, src, len))
>
> This pattern works depending on CONFIG_PV/CONFIG_HVM as follows:
> - PV=y and HVM=y
>   The proper guest access function is selected depending on the domain type.
> - PV=y and HVM=n
>   Only PV domains are possible. is_hvm_domain/vcpu() constifies to "false",
>   so the compiler optimizes the code and skips the HVM-specific part.
> - PV=n and HVM=y
>   Only HVM domains are possible. is_hvm_domain/vcpu() is not constified, so
>   no PV-specific code gets optimized out by the compiler.
> - PV=n and HVM=n
>   No guests should be possible, yet the code still follows the PV path.
>
> Rework the raw_x_guest() code to use static inline functions which account
> for the above possible PV/HVM configurations, with the main intention of
> optimizing the code for the (PV=n and HVM=y) case.
>
> For the (PV=n and HVM=n) case return the "len" value, indicating a failure
> (no guests should be possible in this case, which means no access to guest
> memory should ever happen).
I agree with Teddy's sentiment towards HVM={y,n} not really mattering when
PV=n, as far as non-HVM domains go.
> --- a/xen/arch/x86/include/asm/guest_access.h
> +++ b/xen/arch/x86/include/asm/guest_access.h
> @@ -13,26 +13,64 @@
> #include <asm/hvm/guest_access.h>
>
> /* Raw access functions: no type checking. */
> -#define raw_copy_to_guest(dst, src, len) \
> - (is_hvm_vcpu(current) ? \
> - copy_to_user_hvm((dst), (src), (len)) : \
> - copy_to_guest_pv(dst, src, len))
> -#define raw_copy_from_guest(dst, src, len) \
> - (is_hvm_vcpu(current) ? \
> - copy_from_user_hvm((dst), (src), (len)) : \
> - copy_from_guest_pv(dst, src, len))
> -#define raw_clear_guest(dst, len) \
> - (is_hvm_vcpu(current) ? \
> - clear_user_hvm((dst), (len)) : \
> - clear_guest_pv(dst, len))
> -#define __raw_copy_to_guest(dst, src, len) \
> - (is_hvm_vcpu(current) ? \
> - copy_to_user_hvm((dst), (src), (len)) : \
> - __copy_to_guest_pv(dst, src, len))
> -#define __raw_copy_from_guest(dst, src, len) \
> - (is_hvm_vcpu(current) ? \
> - copy_from_user_hvm((dst), (src), (len)) : \
> - __copy_from_guest_pv(dst, src, len))
> +static inline unsigned int raw_copy_to_guest(void *dst, const void *src,
> + unsigned int len)
A side effect of converting to inline functions, besides being more
intrusive, is that you will now want to add proper __user modifiers.
See lib/copy-guest.c's use of them. That said, ..._user_hvm() functions
also don't have them, but imo wrongly so. Really I question the use of
pointers in that case, because they "point" into a different address
space, entirely inaccessible via use of those pointers. Hence adding
__user is going to be only a marginal improvement for the HVM case, but
is going to be wanted for the PV side of things.
Jan
On 18.12.25 17:22, Jan Beulich wrote:
> On 18.12.2025 14:59, Grygorii Strashko wrote:
>> From: Grygorii Strashko <grygorii_strashko@epam.com>
>>
>> Xen uses the pattern below for the raw_x_guest() functions:
>>
>> #define raw_copy_to_guest(dst, src, len) \
>>     (is_hvm_vcpu(current) ? \
>>      copy_to_user_hvm((dst), (src), (len)) : \
>>      copy_to_guest_pv(dst, src, len))
>>
>> This pattern works depending on CONFIG_PV/CONFIG_HVM as follows:
>> - PV=y and HVM=y
>>   The proper guest access function is selected depending on the domain type.
>> - PV=y and HVM=n
>>   Only PV domains are possible. is_hvm_domain/vcpu() constifies to "false",
>>   so the compiler optimizes the code and skips the HVM-specific part.
>> - PV=n and HVM=y
>>   Only HVM domains are possible. is_hvm_domain/vcpu() is not constified, so
>>   no PV-specific code gets optimized out by the compiler.
>> - PV=n and HVM=n
>>   No guests should be possible, yet the code still follows the PV path.
>>
>> Rework the raw_x_guest() code to use static inline functions which account
>> for the above possible PV/HVM configurations, with the main intention of
>> optimizing the code for the (PV=n and HVM=y) case.
>>
>> For the (PV=n and HVM=n) case return the "len" value, indicating a failure
>> (no guests should be possible in this case, which means no access to guest
>> memory should ever happen).
>
> I agree with Teddy's sentiment towards HVM={y,n} not really mattering when
> PV=n, as far as non-HVM domains go.
>
>> --- a/xen/arch/x86/include/asm/guest_access.h
>> +++ b/xen/arch/x86/include/asm/guest_access.h
>> @@ -13,26 +13,64 @@
>> #include <asm/hvm/guest_access.h>
>>
>> /* Raw access functions: no type checking. */
>> -#define raw_copy_to_guest(dst, src, len) \
>> - (is_hvm_vcpu(current) ? \
>> - copy_to_user_hvm((dst), (src), (len)) : \
>> - copy_to_guest_pv(dst, src, len))
>> -#define raw_copy_from_guest(dst, src, len) \
>> - (is_hvm_vcpu(current) ? \
>> - copy_from_user_hvm((dst), (src), (len)) : \
>> - copy_from_guest_pv(dst, src, len))
>> -#define raw_clear_guest(dst, len) \
>> - (is_hvm_vcpu(current) ? \
>> - clear_user_hvm((dst), (len)) : \
>> - clear_guest_pv(dst, len))
>> -#define __raw_copy_to_guest(dst, src, len) \
>> - (is_hvm_vcpu(current) ? \
>> - copy_to_user_hvm((dst), (src), (len)) : \
>> - __copy_to_guest_pv(dst, src, len))
>> -#define __raw_copy_from_guest(dst, src, len) \
>> - (is_hvm_vcpu(current) ? \
>> - copy_from_user_hvm((dst), (src), (len)) : \
>> - __copy_from_guest_pv(dst, src, len))
>> +static inline unsigned int raw_copy_to_guest(void *dst, const void *src,
>> + unsigned int len)
>
> A side effect of converting to inline functions, besides being more
> intrusive, is that you will now want to add proper __user modifiers.
> See lib/copy-guest.c's use of them. That said, ..._user_hvm() functions
> also don't have them, but imo wrongly so. Really I question the use of
> pointers in that case, because they "point" into a different address
> space, entirely inaccessible via use of those pointers. Hence adding
> __user is going to be only a marginal improvement for the HVM case, but
> is going to be wanted for the PV side of things.
OK, so it needs to be like this in all functions:
-static inline unsigned int raw_copy_to_guest(void *dst, const void *src,
+static inline unsigned int raw_copy_to_guest(void __user *dst, const void *src,
unsigned int len)
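
and, for the "from" variants, presumably the guest-side pointer is the
source (a sketch only, assuming the same annotation style as lib/copy-guest.c):

-static inline unsigned int raw_copy_from_guest(void *dst, const void *src,
+static inline unsigned int raw_copy_from_guest(void *dst,
+                                               const void __user *src,
                                                unsigned int len)
-static inline unsigned int raw_clear_guest(void *dst, unsigned int len)
+static inline unsigned int raw_clear_guest(void __user *dst, unsigned int len)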
--
Best regards,
-grygorii
On 18/12/2025 at 15:01, Grygorii Strashko wrote:
> From: Grygorii Strashko <grygorii_strashko@epam.com>
>
> Xen uses the pattern below for the raw_x_guest() functions:
>
> #define raw_copy_to_guest(dst, src, len) \
>     (is_hvm_vcpu(current) ? \
>      copy_to_user_hvm((dst), (src), (len)) : \
>      copy_to_guest_pv(dst, src, len))
>
> This pattern works depending on CONFIG_PV/CONFIG_HVM as follows:
> - PV=y and HVM=y
>   The proper guest access function is selected depending on the domain type.
> - PV=y and HVM=n
>   Only PV domains are possible. is_hvm_domain/vcpu() constifies to "false",
>   so the compiler optimizes the code and skips the HVM-specific part.
> - PV=n and HVM=y
>   Only HVM domains are possible. is_hvm_domain/vcpu() is not constified, so
>   no PV-specific code gets optimized out by the compiler.
> - PV=n and HVM=n
>   No guests should be possible, yet the code still follows the PV path.
>
^ regarding this
> For the (PV=n and HVM=n) case return the "len" value, indicating a failure
> (no guests should be possible in this case, which means no access to guest
> memory should ever happen).
>
^ and this
AFAIU it is the same for PV=n and HVM=y for non-HVM domains (the few
that "exist"), so we should rather say that these functions fail on
non-HVM domains in PV=n configurations (since no actual PV domain
exists in these cases).
IOW, there is no PV path in PV=n configurations, and for !HVM domains the
function fails instead (as we would expect).
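
In a PV=n build a non-HVM caller then gets everything reported back as
uncopied, which callers already treat as failure; an illustrative sketch
(not from the patch):

    if ( raw_copy_to_guest(dst, src, len) ) /* non-zero: bytes not copied */
        return -EFAULT;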
Once the commit message is adjusted accordingly:
Reviewed-by: Teddy Astie <teddy.astie@vates.tech>
> The measured (bloat-o-meter) improvement for the (PV=n and HVM=y) case is about -11K.
>
> [teddy.astie@vates.tech: Suggested to use static inline functions vs
> macro combinations]
> Suggested-by: Teddy Astie <teddy.astie@vates.tech>
> Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
> ---
> changes in v5:
> - rebase
> - drop moving usercopy.c as it is not needed since commit 7370966d1cb7
> ("x86: move / split usercopy.c to / into arch-specific library")
>
> changes in v4:
> - move usercopy.c into arch/x86/pv/
> - rework to always dynamically check for HVM vcpu(domain) by using is_hvm_vcpu()
> as requested by Jan Beulich
>
> changes in v3:
> - add raw_use_hvm_access() wrapper
>
> changes in v2:
> - use static inline functions instead of macro combinations
>
> xen/arch/x86/include/asm/guest_access.h | 78 ++++++++++++++++++-------
> 1 file changed, 58 insertions(+), 20 deletions(-)
>
> diff --git a/xen/arch/x86/include/asm/guest_access.h b/xen/arch/x86/include/asm/guest_access.h
> index 69716c8b41bb..f0e56b112e14 100644
> --- a/xen/arch/x86/include/asm/guest_access.h
> +++ b/xen/arch/x86/include/asm/guest_access.h
> @@ -13,26 +13,64 @@
> #include <asm/hvm/guest_access.h>
>
> /* Raw access functions: no type checking. */
> -#define raw_copy_to_guest(dst, src, len) \
> - (is_hvm_vcpu(current) ? \
> - copy_to_user_hvm((dst), (src), (len)) : \
> - copy_to_guest_pv(dst, src, len))
> -#define raw_copy_from_guest(dst, src, len) \
> - (is_hvm_vcpu(current) ? \
> - copy_from_user_hvm((dst), (src), (len)) : \
> - copy_from_guest_pv(dst, src, len))
> -#define raw_clear_guest(dst, len) \
> - (is_hvm_vcpu(current) ? \
> - clear_user_hvm((dst), (len)) : \
> - clear_guest_pv(dst, len))
> -#define __raw_copy_to_guest(dst, src, len) \
> - (is_hvm_vcpu(current) ? \
> - copy_to_user_hvm((dst), (src), (len)) : \
> - __copy_to_guest_pv(dst, src, len))
> -#define __raw_copy_from_guest(dst, src, len) \
> - (is_hvm_vcpu(current) ? \
> - copy_from_user_hvm((dst), (src), (len)) : \
> - __copy_from_guest_pv(dst, src, len))
> +static inline unsigned int raw_copy_to_guest(void *dst, const void *src,
> + unsigned int len)
> +{
> + if ( is_hvm_vcpu(current) )
> + return copy_to_user_hvm(dst, src, len);
> +
> + if ( !IS_ENABLED(CONFIG_PV) )
> + return len;
> +
> + return copy_to_guest_pv(dst, src, len);
> +}
> +
> +static inline unsigned int raw_copy_from_guest(void *dst, const void *src,
> + unsigned int len)
> +{
> + if ( is_hvm_vcpu(current) )
> + return copy_from_user_hvm(dst, src, len);
> +
> + if ( !IS_ENABLED(CONFIG_PV) )
> + return len;
> +
> + return copy_from_guest_pv(dst, src, len);
> +}
> +
> +static inline unsigned int raw_clear_guest(void *dst, unsigned int len)
> +{
> + if ( is_hvm_vcpu(current) )
> + return clear_user_hvm(dst, len);
> +
> + if ( !IS_ENABLED(CONFIG_PV) )
> + return len;
> +
> + return clear_guest_pv(dst, len);
> +}
> +
> +static inline unsigned int __raw_copy_to_guest(void *dst, const void *src,
> + unsigned int len)
> +{
> + if ( is_hvm_vcpu(current) )
> + return copy_to_user_hvm(dst, src, len);
> +
> + if ( !IS_ENABLED(CONFIG_PV) )
> + return len;
> +
> + return __copy_to_guest_pv(dst, src, len);
> +}
> +
> +static inline unsigned int __raw_copy_from_guest(void *dst, const void *src,
> + unsigned int len)
> +{
> + if ( is_hvm_vcpu(current) )
> + return copy_from_user_hvm(dst, src, len);
> +
> + if ( !IS_ENABLED(CONFIG_PV) )
> + return len;
> +
> + return __copy_from_guest_pv(dst, src, len);
> +}
>
> /*
> * Pre-validate a guest handle.
Teddy
--
Teddy Astie | Vates XCP-ng Developer
XCP-ng & Xen Orchestra - Vates solutions
web: https://vates.tech