[PATCH v2 2/5] x86/HVM: allocate emulation cache entries dynamically

Posted by Jan Beulich 1 year, 4 months ago
Both caches may need higher capacity, and the upper bound will need to
be determined dynamically based on CPUID policy (for AMX's TILELOAD /
TILESTORE at least).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
This is a patch taken from the AMX series, but wasn't part of the v3
submission. All I did is strip out the actual AMX bits (from
hvmemul_cache_init()), plus of course change the description. As a
result some local variables there may look unnecessary, but this way
it's going to be less churn when the AMX bits are added. The next patch
pretty strongly depends on the changed approach (contextually, not so
much functionally), and I'd really like to avoid rebasing that one ahead
of this one, and then this one on top of that.

TBD: For AMX hvmemul_cache_init() will become CPUID policy dependent. We
     could of course take the opportunity and also reduce overhead when
     AVX-512 (and maybe even AVX) is unavailable (in the future: to the
     guest).
---
v2: Split off cache bounds check fix.

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -26,6 +26,18 @@
 #include <asm/iocap.h>
 #include <asm/vm_event.h>
 
+/*
+ * We may read or write up to m512 or up to a tile row as a number of
+ * device-model transactions.
+ */
+struct hvm_mmio_cache {
+    unsigned long gla;
+    unsigned int size;
+    unsigned int space:31;
+    unsigned int dir:1;
+    uint8_t buffer[] __aligned(sizeof(long));
+};
+
 struct hvmemul_cache
 {
     /* The cache is disabled as long as num_ents > max_ents. */
@@ -935,7 +947,7 @@ static int hvmemul_phys_mmio_access(
     }
 
     /* Accesses must not overflow the cache's buffer. */
-    if ( offset + size > sizeof(cache->buffer) )
+    if ( offset + size > cache->space )
     {
         ASSERT_UNREACHABLE();
         return X86EMUL_UNHANDLEABLE;
@@ -1011,7 +1023,7 @@ static struct hvm_mmio_cache *hvmemul_fi
 
     for ( i = 0; i < hvio->mmio_cache_count; i ++ )
     {
-        cache = &hvio->mmio_cache[i];
+        cache = hvio->mmio_cache[i];
 
         if ( gla == cache->gla &&
              dir == cache->dir )
@@ -1027,10 +1039,11 @@ static struct hvm_mmio_cache *hvmemul_fi
 
     ++hvio->mmio_cache_count;
 
-    cache = &hvio->mmio_cache[i];
-    memset(cache, 0, sizeof (*cache));
+    cache = hvio->mmio_cache[i];
+    memset(cache->buffer, 0, cache->space);
 
     cache->gla = gla;
+    cache->size = 0;
     cache->dir = dir;
 
     return cache;
@@ -2978,16 +2991,21 @@ void hvm_dump_emulation_state(const char
 int hvmemul_cache_init(struct vcpu *v)
 {
     /*
-     * No insn can access more than 16 independent linear addresses (AVX512F
-     * scatters/gathers being the worst). Each such linear range can span a
-     * page boundary, i.e. may require two page walks. Account for each insn
-     * byte individually, for simplicity.
+     * AVX512F scatter/gather insns can access up to 16 independent linear
+     * addresses, up to 8 bytes size. Each such linear range can span a page
+     * boundary, i.e. may require two page walks.
+     */
+    unsigned int nents = 16 * 2 * (CONFIG_PAGING_LEVELS + 1);
+    unsigned int i, max_bytes = 64;
+    struct hvmemul_cache *cache;
+
+    /*
+     * Account for each insn byte individually, both for simplicity and to
+     * leave some slack space.
      */
-    const unsigned int nents = (CONFIG_PAGING_LEVELS + 1) *
-                               (MAX_INST_LEN + 16 * 2);
-    struct hvmemul_cache *cache = xmalloc_flex_struct(struct hvmemul_cache,
-                                                      ents, nents);
+    nents += MAX_INST_LEN * (CONFIG_PAGING_LEVELS + 1);
 
+    cache = xvmalloc_flex_struct(struct hvmemul_cache, ents, nents);
     if ( !cache )
         return -ENOMEM;
 
@@ -2997,6 +3015,15 @@ int hvmemul_cache_init(struct vcpu *v)
 
     v->arch.hvm.hvm_io.cache = cache;
 
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.hvm_io.mmio_cache); ++i )
+    {
+        v->arch.hvm.hvm_io.mmio_cache[i] =
+            xmalloc_flex_struct(struct hvm_mmio_cache, buffer, max_bytes);
+        if ( !v->arch.hvm.hvm_io.mmio_cache[i] )
+            return -ENOMEM;
+        v->arch.hvm.hvm_io.mmio_cache[i]->space = max_bytes;
+    }
+
     return 0;
 }
 
--- a/xen/arch/x86/include/asm/hvm/emulate.h
+++ b/xen/arch/x86/include/asm/hvm/emulate.h
@@ -15,6 +15,7 @@
 #include <xen/err.h>
 #include <xen/mm.h>
 #include <xen/sched.h>
+#include <xen/xvmalloc.h>
 #include <asm/hvm/hvm.h>
 #include <asm/x86_emulate.h>
 
@@ -119,7 +120,11 @@ int hvmemul_do_pio_buffer(uint16_t port,
 int __must_check hvmemul_cache_init(struct vcpu *v);
 static inline void hvmemul_cache_destroy(struct vcpu *v)
 {
-    XFREE(v->arch.hvm.hvm_io.cache);
+    unsigned int i;
+
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.hvm_io.mmio_cache); ++i )
+        XFREE(v->arch.hvm.hvm_io.mmio_cache[i]);
+    XVFREE(v->arch.hvm.hvm_io.cache);
 }
 bool hvmemul_read_cache(const struct vcpu *v, paddr_t gpa,
                         void *buffer, unsigned int size);
--- a/xen/arch/x86/include/asm/hvm/vcpu.h
+++ b/xen/arch/x86/include/asm/hvm/vcpu.h
@@ -22,17 +22,6 @@ struct hvm_vcpu_asid {
     uint32_t asid;
 };
 
-/*
- * We may read or write up to m512 as a number of device-model
- * transactions.
- */
-struct hvm_mmio_cache {
-    unsigned long gla;
-    unsigned int size;
-    uint8_t dir;
-    uint8_t buffer[64] __aligned(sizeof(long));
-};
-
 struct hvm_vcpu_io {
     /*
      * HVM emulation:
@@ -48,7 +37,7 @@ struct hvm_vcpu_io {
      * We may need to handle up to 3 distinct memory accesses per
      * instruction.
      */
-    struct hvm_mmio_cache mmio_cache[3];
+    struct hvm_mmio_cache *mmio_cache[3];
     unsigned int mmio_cache_count;
 
     /* For retries we shouldn't re-fetch the instruction. */
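
For readers less familiar with the pattern: replacing the fixed buffer[64]
with a trailing flexible array member is what lets each cache entry's
capacity be chosen at allocation time, with the new 31-bit "space" field
recording that capacity so hvmemul_phys_mmio_access() can bounds-check
against it. A minimal, self-contained sketch of the idea in plain C
(illustration only, not Xen code; the xmalloc_flex_struct() /
xvmalloc_flex_struct() helpers are assumed to reduce to essentially this
sizing computation):

    /* Illustration of the flexible-array sizing used for the MMIO cache. */
    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    struct mmio_cache {
        unsigned long gla;
        unsigned int size;      /* bytes of buffer[] currently in use */
        unsigned int space:31;  /* allocated capacity of buffer[] */
        unsigned int dir:1;
        unsigned char buffer[]; /* storage follows the header */
    };

    static struct mmio_cache *cache_alloc(unsigned int bytes)
    {
        /* Header size plus payload size, obtained in one allocation. */
        size_t total = offsetof(struct mmio_cache, buffer) + bytes;
        struct mmio_cache *c = malloc(total);

        if ( c )
        {
            memset(c, 0, total);
            c->space = bytes;
        }

        return c;
    }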
Re: [PATCH v2 2/5] x86/HVM: allocate emulation cache entries dynamically
Posted by Roger Pau Monné 1 year ago
On Tue, Oct 01, 2024 at 10:49:10AM +0200, Jan Beulich wrote:
> Both caches may need higher capacity, and the upper bound will need to
> be determined dynamically based on CPUID policy (for AMX's TILELOAD /
> TILESTORE at least).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Just a couple of comments below.

> ---
> This is a patch taken from the AMX series, but wasn't part of the v3
> submission. All I did is strip out the actual AMX bits (from
> hvmemul_cache_init()), plus of course change the description. As a
> result some local variables there may look unnecessary, but this way
> it's going to be less churn when the AMX bits are added. The next patch
> pretty strongly depends on the changed approach (contextually, not so
> much functionally), and I'd really like to avoid rebasing that one ahead
> of this one, and then this one on top of that.

Oh, I was just going to ask about the weirdness of nents compared to
what was there previously.

> 
> TBD: For AMX hvmemul_cache_init() will become CPUID policy dependent. We
>      could of course take the opportunity and also reduce overhead when
>      AVX-512 (and maybe even AVX) is unavailable (in the future: to the
>      guest).
> ---
> v2: Split off cache bounds check fix.
> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -26,6 +26,18 @@
>  #include <asm/iocap.h>
>  #include <asm/vm_event.h>
>  
> +/*
> + * We may read or write up to m512 or up to a tile row as a number of
> + * device-model transactions.
> + */
> +struct hvm_mmio_cache {
> +    unsigned long gla;
> +    unsigned int size;
> +    unsigned int space:31;

Having size and space is kind of confusing, would you mind adding a
comment that size is the runtime consumed buffer space, while space is
the total allocated buffer size (and hence not supposed to change
during usage)?

> +    unsigned int dir:1;
> +    uint8_t buffer[] __aligned(sizeof(long));
> +};
> +
>  struct hvmemul_cache
>  {
>      /* The cache is disabled as long as num_ents > max_ents. */
> @@ -935,7 +947,7 @@ static int hvmemul_phys_mmio_access(
>      }
>  
>      /* Accesses must not overflow the cache's buffer. */
> -    if ( offset + size > sizeof(cache->buffer) )
> +    if ( offset + size > cache->space )
>      {
>          ASSERT_UNREACHABLE();
>          return X86EMUL_UNHANDLEABLE;
> @@ -1011,7 +1023,7 @@ static struct hvm_mmio_cache *hvmemul_fi
>  
>      for ( i = 0; i < hvio->mmio_cache_count; i ++ )
>      {
> -        cache = &hvio->mmio_cache[i];
> +        cache = hvio->mmio_cache[i];
>  
>          if ( gla == cache->gla &&
>               dir == cache->dir )
> @@ -1027,10 +1039,11 @@ static struct hvm_mmio_cache *hvmemul_fi
>  
>      ++hvio->mmio_cache_count;
>  
> -    cache = &hvio->mmio_cache[i];
> -    memset(cache, 0, sizeof (*cache));
> +    cache = hvio->mmio_cache[i];
> +    memset(cache->buffer, 0, cache->space);
>  
>      cache->gla = gla;
> +    cache->size = 0;
>      cache->dir = dir;
>  
>      return cache;
> @@ -2978,16 +2991,21 @@ void hvm_dump_emulation_state(const char
>  int hvmemul_cache_init(struct vcpu *v)
>  {
>      /*
> -     * No insn can access more than 16 independent linear addresses (AVX512F
> -     * scatters/gathers being the worst). Each such linear range can span a
> -     * page boundary, i.e. may require two page walks. Account for each insn
> -     * byte individually, for simplicity.
> +     * AVX512F scatter/gather insns can access up to 16 independent linear
> +     * addresses, up to 8 bytes size. Each such linear range can span a page
> +     * boundary, i.e. may require two page walks.
> +     */
> +    unsigned int nents = 16 * 2 * (CONFIG_PAGING_LEVELS + 1);
> +    unsigned int i, max_bytes = 64;
> +    struct hvmemul_cache *cache;
> +
> +    /*
> +     * Account for each insn byte individually, both for simplicity and to
> +     * leave some slack space.
>       */
> -    const unsigned int nents = (CONFIG_PAGING_LEVELS + 1) *
> -                               (MAX_INST_LEN + 16 * 2);
> -    struct hvmemul_cache *cache = xmalloc_flex_struct(struct hvmemul_cache,
> -                                                      ents, nents);
> +    nents += MAX_INST_LEN * (CONFIG_PAGING_LEVELS + 1);
>  
> +    cache = xvmalloc_flex_struct(struct hvmemul_cache, ents, nents);

Change here seems completely unrelated, but I guess this is what you
refer to in the post-commit remark.  IOW: the split of the nents
variable setup, plus the change of xmalloc_flex_struct() ->
xvmalloc_flex_struct() don't seem to be related to the change at
hand.

>      if ( !cache )
>          return -ENOMEM;
>  
> @@ -2997,6 +3015,15 @@ int hvmemul_cache_init(struct vcpu *v)
>  
>      v->arch.hvm.hvm_io.cache = cache;
>  
> +    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.hvm_io.mmio_cache); ++i )
> +    {
> +        v->arch.hvm.hvm_io.mmio_cache[i] =
> +            xmalloc_flex_struct(struct hvm_mmio_cache, buffer, max_bytes);

TBH I would be tempted to just use xvmalloc here also, even if the
structure is never going to be > PAGE_SIZE, it's more consistent IMO.
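
Concretely, something along these lines on top of this patch (sketch only,
untested), including the matching XVFREE on the teardown path since
xvmalloc-ed memory should be released via the xv* variant:

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ hvmemul_cache_init()
         v->arch.hvm.hvm_io.mmio_cache[i] =
-            xmalloc_flex_struct(struct hvm_mmio_cache, buffer, max_bytes);
+            xvmalloc_flex_struct(struct hvm_mmio_cache, buffer, max_bytes);
--- a/xen/arch/x86/include/asm/hvm/emulate.h
+++ b/xen/arch/x86/include/asm/hvm/emulate.h
@@ hvmemul_cache_destroy()
     for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.hvm_io.mmio_cache); ++i )
-        XFREE(v->arch.hvm.hvm_io.mmio_cache[i]);
+        XVFREE(v->arch.hvm.hvm_io.mmio_cache[i]);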

Thanks, Roger.
Re: [PATCH v2 2/5] x86/HVM: allocate emulation cache entries dynamically
Posted by Jan Beulich 1 year ago
On 22.01.2025 13:00, Roger Pau Monné wrote:
> On Tue, Oct 01, 2024 at 10:49:10AM +0200, Jan Beulich wrote:
>> Both caches may need higher capacity, and the upper bound will need to
>> be determined dynamically based on CPUID policy (for AMX's TILELOAD /
>> TILESTORE at least).
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> Just a couple of comments below.
> 
>> ---
>> This is a patch taken from the AMX series, but wasn't part of the v3
>> submission. All I did is strip out the actual AMX bits (from
>> hvmemul_cache_init()), plus of course change the description. As a
>> result some local variables there may look unnecessary, but this way
>> it's going to be less churn when the AMX bits are added. The next patch
>> pretty strongly depends on the changed approach (contextually, not so
>> much functionally), and I'd really like to avoid rebasing that one ahead
>> of this one, and then this one on top of that.
> 
> Oh, I was just going to ask about the weirdness of nents compared to
> what was there previously.

And then you did ask; I'll comment on that below.

>> --- a/xen/arch/x86/hvm/emulate.c
>> +++ b/xen/arch/x86/hvm/emulate.c
>> @@ -26,6 +26,18 @@
>>  #include <asm/iocap.h>
>>  #include <asm/vm_event.h>
>>  
>> +/*
>> + * We may read or write up to m512 or up to a tile row as a number of
>> + * device-model transactions.
>> + */
>> +struct hvm_mmio_cache {
>> +    unsigned long gla;
>> +    unsigned int size;
>> +    unsigned int space:31;
> 
> Having size and space is kind of confusing, would you mind adding a
> comment that size is the runtime consumed buffer space, while space is
> the total allocated buffer size (and hence not supposed to change
> during usage)?

Sure; I thought the two names would be clear enough when sitting side by
side, but here you go:

    unsigned int size;     /* Amount of buffer[] actually used. */
    unsigned int space:31; /* Allocated size of buffer[]. */
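
Folding those comments into the structure added by the patch, the
declaration would then read roughly:

    /*
     * We may read or write up to m512 or up to a tile row as a number of
     * device-model transactions.
     */
    struct hvm_mmio_cache {
        unsigned long gla;
        unsigned int size;     /* Amount of buffer[] actually used. */
        unsigned int space:31; /* Allocated size of buffer[]. */
        unsigned int dir:1;
        uint8_t buffer[] __aligned(sizeof(long));
    };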


>> @@ -2978,16 +2991,21 @@ void hvm_dump_emulation_state(const char
>>  int hvmemul_cache_init(struct vcpu *v)
>>  {
>>      /*
>> -     * No insn can access more than 16 independent linear addresses (AVX512F
>> -     * scatters/gathers being the worst). Each such linear range can span a
>> -     * page boundary, i.e. may require two page walks. Account for each insn
>> -     * byte individually, for simplicity.
>> +     * AVX512F scatter/gather insns can access up to 16 independent linear
>> +     * addresses, up to 8 bytes size. Each such linear range can span a page
>> +     * boundary, i.e. may require two page walks.
>> +     */
>> +    unsigned int nents = 16 * 2 * (CONFIG_PAGING_LEVELS + 1);
>> +    unsigned int i, max_bytes = 64;
>> +    struct hvmemul_cache *cache;
>> +
>> +    /*
>> +     * Account for each insn byte individually, both for simplicity and to
>> +     * leave some slack space.
>>       */
>> -    const unsigned int nents = (CONFIG_PAGING_LEVELS + 1) *
>> -                               (MAX_INST_LEN + 16 * 2);
>> -    struct hvmemul_cache *cache = xmalloc_flex_struct(struct hvmemul_cache,
>> -                                                      ents, nents);
>> +    nents += MAX_INST_LEN * (CONFIG_PAGING_LEVELS + 1);
>>  
>> +    cache = xvmalloc_flex_struct(struct hvmemul_cache, ents, nents);
> 
> Change here seems completely unrelated, but I guess this is what you
> refer to in the post-commit remark.  IOW: the split of the nents
> variable setup, plus the change of xmalloc_flex_struct() ->
> xvmalloc_flex_struct() don't seem to be related to the change at
> hand.

See the post-commit-message remark that you commented on. To repeat:
It'll be quite a bit easier for me if the seemingly unrelated adjustments
could be kept like that. Unless of course there's something wrong with
them.
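
(As an aside, the reshuffled expression is arithmetically identical to the
one it replaces, so no behavioural change is hiding in the split. A quick
standalone check, assuming the typical x86-64 values of 4 paging levels and
a 15-byte MAX_INST_LEN; adjust if your configuration differs:)

    /* Sanity check only; not Xen code. */
    #include <assert.h>

    #define CONFIG_PAGING_LEVELS 4
    #define MAX_INST_LEN        15

    int main(void)
    {
        /* Original, single-expression form. */
        unsigned int old_nents = (CONFIG_PAGING_LEVELS + 1) *
                                 (MAX_INST_LEN + 16 * 2);
        /* New, split form used by the patch. */
        unsigned int new_nents = 16 * 2 * (CONFIG_PAGING_LEVELS + 1);

        new_nents += MAX_INST_LEN * (CONFIG_PAGING_LEVELS + 1);

        assert(old_nents == new_nents); /* both evaluate to 235 */

        return 0;
    }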

>> @@ -2997,6 +3015,15 @@ int hvmemul_cache_init(struct vcpu *v)
>>  
>>      v->arch.hvm.hvm_io.cache = cache;
>>  
>> +    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.hvm_io.mmio_cache); ++i )
>> +    {
>> +        v->arch.hvm.hvm_io.mmio_cache[i] =
>> +            xmalloc_flex_struct(struct hvm_mmio_cache, buffer, max_bytes);
> 
> TBH I would be tempted to just use xvmalloc here also, even if the
> structure is never going to be > PAGE_SIZE, it's more consistent IMO.

Oh, absolutely under the current rules (which weren't in effect yet back
when all of this was written).

Jan

Re: [PATCH v2 2/5] x86/HVM: allocate emulation cache entries dynamically
Posted by Roger Pau Monné 1 year ago
On Wed, Jan 22, 2025 at 02:39:43PM +0100, Jan Beulich wrote:
> On 22.01.2025 13:00, Roger Pau Monné wrote:
> > On Tue, Oct 01, 2024 at 10:49:10AM +0200, Jan Beulich wrote:
> >> Both caches may need higher capacity, and the upper bound will need to
> >> be determined dynamically based on CPUID policy (for AMX's TILELOAD /
> >> TILESTORE at least).
> >>
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> > 
> > Just a couple of comments below.
> > 
> >> ---
> >> This is a patch taken from the AMX series, but wasn't part of the v3
> >> submission. All I did is strip out the actual AMX bits (from
> >> hvmemul_cache_init()), plus of course change the description. As a
> >> result some local variables there may look unnecessary, but this way
> >> it's going to be less churn when the AMX bits are added. The next patch
> >> pretty strongly depends on the changed approach (contextually, not so
> >> much functionally), and I'd really like to avoid rebasing that one ahead
> >> of this one, and then this one on top of that.
> > 
> > Oh, I was just going to ask about the weirdness of nents compared to
> > what was there previously.
> 
> And then you did ask; I'll comment on that below.
> 
> >> --- a/xen/arch/x86/hvm/emulate.c
> >> +++ b/xen/arch/x86/hvm/emulate.c
> >> @@ -26,6 +26,18 @@
> >>  #include <asm/iocap.h>
> >>  #include <asm/vm_event.h>
> >>  
> >> +/*
> >> + * We may read or write up to m512 or up to a tile row as a number of
> >> + * device-model transactions.
> >> + */
> >> +struct hvm_mmio_cache {
> >> +    unsigned long gla;
> >> +    unsigned int size;
> >> +    unsigned int space:31;
> > 
> > Having size and space is kind of confusing, would you mind adding a
> > comment that size is the runtime consumed buffer space, while space is
> > the total allocated buffer size (and hence not supposed to change
> > during usage)?
> 
> Sure; I thought the two names would be clear enough when sitting side by
> side, but here you go:
> 
>     unsigned int size;     /* Amount of buffer[] actually used. */
>     unsigned int space:31; /* Allocated size of buffer[]. */
> 
> 
> >> @@ -2978,16 +2991,21 @@ void hvm_dump_emulation_state(const char
> >>  int hvmemul_cache_init(struct vcpu *v)
> >>  {
> >>      /*
> >> -     * No insn can access more than 16 independent linear addresses (AVX512F
> >> -     * scatters/gathers being the worst). Each such linear range can span a
> >> -     * page boundary, i.e. may require two page walks. Account for each insn
> >> -     * byte individually, for simplicity.
> >> +     * AVX512F scatter/gather insns can access up to 16 independent linear
> >> +     * addresses, up to 8 bytes size. Each such linear range can span a page
> >> +     * boundary, i.e. may require two page walks.
> >> +     */
> >> +    unsigned int nents = 16 * 2 * (CONFIG_PAGING_LEVELS + 1);
> >> +    unsigned int i, max_bytes = 64;
> >> +    struct hvmemul_cache *cache;
> >> +
> >> +    /*
> >> +     * Account for each insn byte individually, both for simplicity and to
> >> +     * leave some slack space.
> >>       */
> >> -    const unsigned int nents = (CONFIG_PAGING_LEVELS + 1) *
> >> -                               (MAX_INST_LEN + 16 * 2);
> >> -    struct hvmemul_cache *cache = xmalloc_flex_struct(struct hvmemul_cache,
> >> -                                                      ents, nents);
> >> +    nents += MAX_INST_LEN * (CONFIG_PAGING_LEVELS + 1);
> >>  
> >> +    cache = xvmalloc_flex_struct(struct hvmemul_cache, ents, nents);
> > 
> > Change here seems completely unrelated, but I guess this is what you
> > refer to in the post-commit remark.  IOW: the split of the nents
> > variable setup, plus the change of xmalloc_flex_struct() ->
> > xvmalloc_flex_struct() don't seem to be related to the change at
> > hand.
> 
> See the post-commit-message remark that you commented on. To repeat:
> It'll be quite a bit easier for me if the seemingly unrelated adjustments
> could be kept like that. Unless of course there's something wrong with
> them.
> 
> >> @@ -2997,6 +3015,15 @@ int hvmemul_cache_init(struct vcpu *v)
> >>  
> >>      v->arch.hvm.hvm_io.cache = cache;
> >>  
> >> +    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.hvm_io.mmio_cache); ++i )
> >> +    {
> >> +        v->arch.hvm.hvm_io.mmio_cache[i] =
> >> +            xmalloc_flex_struct(struct hvm_mmio_cache, buffer, max_bytes);
> > 
> > TBH I would be tempted to just use xvmalloc here also, even if the
> > structure is never going to be > PAGE_SIZE, it's more consistent IMO.
> 
> Oh, absolutely under the current rules (which weren't in effect yet back
> when all of this was written).

With the two items above fixed (not the nents-related change; that's
fine as-is):

Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

Thanks, Roger.