Both caches may need higher capacity, and the upper bound will need to
be determined dynamically based on CPUID policy (for AMX's TILELOAD /
TILESTORE at least).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
This is a patch taken from the AMX series, but it wasn't part of the v3
submission. All I did was strip out the actual AMX bits (from
hvmemul_cache_init()) and, of course, change the description. As a
result some local variables there may look unnecessary, but this way
there will be less churn when the AMX bits are added. The next patch
depends pretty strongly on the changed approach (contextually, not so
much functionally), and I'd really like to avoid rebasing that one ahead
of this one, and then this one on top of that.
TBD: For AMX, hvmemul_cache_init() will become CPUID-policy dependent.
We could of course take the opportunity to also reduce overhead when
AVX-512 (and maybe even AVX) is unavailable (in the future: to the
guest).
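
A very rough sketch of what such a policy-dependent bound might look
like (purely illustrative, not part of this patch; the struct and field
names below are stand-ins, not the real cpu-policy ones):

    #include <stdbool.h>

    /* Illustrative stand-ins for the relevant CPUID policy bits. */
    struct policy_sketch {
        bool amx_tile;                   /* CPUID.7.0:EDX AMX-TILE */
        unsigned int tile_bytes_per_row; /* from the leaf 0x1d palette info */
    };

    /* Upper bound on a single MMIO cache buffer, derived from policy. */
    static unsigned int mmio_cache_bytes(const struct policy_sketch *p)
    {
        unsigned int bytes = 64;         /* an m512 access (AVX-512) */

        if ( p->amx_tile && p->tile_bytes_per_row > bytes )
            bytes = p->tile_bytes_per_row;

        return bytes;
    }

hvmemul_cache_init() could then use such a result in place of the
constant max_bytes when sizing the individual mmio_cache[] entries.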
---
v2: Split off cache bounds check fix.
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -26,6 +26,18 @@
#include <asm/iocap.h>
#include <asm/vm_event.h>
+/*
+ * We may read or write up to m512 or up to a tile row as a number of
+ * device-model transactions.
+ */
+struct hvm_mmio_cache {
+ unsigned long gla;
+ unsigned int size;
+ unsigned int space:31;
+ unsigned int dir:1;
+ uint8_t buffer[] __aligned(sizeof(long));
+};
+
struct hvmemul_cache
{
/* The cache is disabled as long as num_ents > max_ents. */
@@ -935,7 +947,7 @@ static int hvmemul_phys_mmio_access(
}
/* Accesses must not overflow the cache's buffer. */
- if ( offset + size > sizeof(cache->buffer) )
+ if ( offset + size > cache->space )
{
ASSERT_UNREACHABLE();
return X86EMUL_UNHANDLEABLE;
@@ -1011,7 +1023,7 @@ static struct hvm_mmio_cache *hvmemul_fi
for ( i = 0; i < hvio->mmio_cache_count; i ++ )
{
- cache = &hvio->mmio_cache[i];
+ cache = hvio->mmio_cache[i];
if ( gla == cache->gla &&
dir == cache->dir )
@@ -1027,10 +1039,11 @@ static struct hvm_mmio_cache *hvmemul_fi
++hvio->mmio_cache_count;
- cache = &hvio->mmio_cache[i];
- memset(cache, 0, sizeof (*cache));
+ cache = hvio->mmio_cache[i];
+ memset(cache->buffer, 0, cache->space);
cache->gla = gla;
+ cache->size = 0;
cache->dir = dir;
return cache;
@@ -2978,16 +2991,21 @@ void hvm_dump_emulation_state(const char
int hvmemul_cache_init(struct vcpu *v)
{
/*
- * No insn can access more than 16 independent linear addresses (AVX512F
- * scatters/gathers being the worst). Each such linear range can span a
- * page boundary, i.e. may require two page walks. Account for each insn
- * byte individually, for simplicity.
+ * AVX512F scatter/gather insns can access up to 16 independent linear
+ * addresses, up to 8 bytes size. Each such linear range can span a page
+ * boundary, i.e. may require two page walks.
+ */
+ unsigned int nents = 16 * 2 * (CONFIG_PAGING_LEVELS + 1);
+ unsigned int i, max_bytes = 64;
+ struct hvmemul_cache *cache;
+
+ /*
+ * Account for each insn byte individually, both for simplicity and to
+ * leave some slack space.
*/
- const unsigned int nents = (CONFIG_PAGING_LEVELS + 1) *
- (MAX_INST_LEN + 16 * 2);
- struct hvmemul_cache *cache = xmalloc_flex_struct(struct hvmemul_cache,
- ents, nents);
+ nents += MAX_INST_LEN * (CONFIG_PAGING_LEVELS + 1);
+ cache = xvmalloc_flex_struct(struct hvmemul_cache, ents, nents);
if ( !cache )
return -ENOMEM;
@@ -2997,6 +3015,15 @@ int hvmemul_cache_init(struct vcpu *v)
v->arch.hvm.hvm_io.cache = cache;
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.hvm_io.mmio_cache); ++i )
+ {
+ v->arch.hvm.hvm_io.mmio_cache[i] =
+ xmalloc_flex_struct(struct hvm_mmio_cache, buffer, max_bytes);
+ if ( !v->arch.hvm.hvm_io.mmio_cache[i] )
+ return -ENOMEM;
+ v->arch.hvm.hvm_io.mmio_cache[i]->space = max_bytes;
+ }
+
return 0;
}
--- a/xen/arch/x86/include/asm/hvm/emulate.h
+++ b/xen/arch/x86/include/asm/hvm/emulate.h
@@ -15,6 +15,7 @@
#include <xen/err.h>
#include <xen/mm.h>
#include <xen/sched.h>
+#include <xen/xvmalloc.h>
#include <asm/hvm/hvm.h>
#include <asm/x86_emulate.h>
@@ -119,7 +120,11 @@ int hvmemul_do_pio_buffer(uint16_t port,
int __must_check hvmemul_cache_init(struct vcpu *v);
static inline void hvmemul_cache_destroy(struct vcpu *v)
{
- XFREE(v->arch.hvm.hvm_io.cache);
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.hvm_io.mmio_cache); ++i )
+ XFREE(v->arch.hvm.hvm_io.mmio_cache[i]);
+ XVFREE(v->arch.hvm.hvm_io.cache);
}
bool hvmemul_read_cache(const struct vcpu *v, paddr_t gpa,
void *buffer, unsigned int size);
--- a/xen/arch/x86/include/asm/hvm/vcpu.h
+++ b/xen/arch/x86/include/asm/hvm/vcpu.h
@@ -22,17 +22,6 @@ struct hvm_vcpu_asid {
uint32_t asid;
};
-/*
- * We may read or write up to m512 as a number of device-model
- * transactions.
- */
-struct hvm_mmio_cache {
- unsigned long gla;
- unsigned int size;
- uint8_t dir;
- uint8_t buffer[64] __aligned(sizeof(long));
-};
-
struct hvm_vcpu_io {
/*
* HVM emulation:
@@ -48,7 +37,7 @@ struct hvm_vcpu_io {
* We may need to handle up to 3 distinct memory accesses per
* instruction.
*/
- struct hvm_mmio_cache mmio_cache[3];
+ struct hvm_mmio_cache *mmio_cache[3];
unsigned int mmio_cache_count;
/* For retries we shouldn't re-fetch the instruction. */