This logic is pulled out of xc_cpuid_apply_policy and placed into a
separate helper. Note the 'legacy' part of the introduced function
name: long term Xen will require a proper topology setter capable of
expressing a more diverse set of topologies.
No functional change intended.
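
For illustration, callers are expected to wrap the cpuid_policy they
operate on, roughly as xc_cpuid_apply_policy now does (this simply
mirrors the hunk below):

    struct xc_cpu_policy policy = { };

    policy.cpuid = *p;
    rc = xc_cpu_policy_legacy_topology(xch, &policy, di.hvm);
    if ( rc )
        goto out;
    *p = policy.cpuid;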
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
- s/xc_cpu_policy_topology/xc_cpu_policy_legacy_topology/
---
tools/include/xenguest.h | 4 +
tools/libs/guest/xg_cpuid_x86.c | 186 +++++++++++++++++---------------
2 files changed, 105 insertions(+), 85 deletions(-)
diff --git a/tools/include/xenguest.h b/tools/include/xenguest.h
index 281454dc60..bea02cb542 100644
--- a/tools/include/xenguest.h
+++ b/tools/include/xenguest.h
@@ -821,6 +821,10 @@ bool xc_cpu_policy_is_compatible(xc_interface *xch, xc_cpu_policy_t *host,
int xc_cpu_policy_make_compat_4_12(xc_interface *xch, xc_cpu_policy_t *policy,
bool hvm);
+/* Setup the legacy policy topology. */
+int xc_cpu_policy_legacy_topology(xc_interface *xch, xc_cpu_policy_t *policy,
+ bool hvm);
+
int xc_get_cpu_levelling_caps(xc_interface *xch, uint32_t *caps);
int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
uint32_t *nr_features, uint32_t *featureset);
diff --git a/tools/libs/guest/xg_cpuid_x86.c b/tools/libs/guest/xg_cpuid_x86.c
index bcbf9576c4..eafc1ec7c1 100644
--- a/tools/libs/guest/xg_cpuid_x86.c
+++ b/tools/libs/guest/xg_cpuid_x86.c
@@ -429,13 +429,11 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
{
int rc;
xc_dominfo_t di;
- unsigned int i, nr_leaves, nr_msrs;
+ unsigned int nr_leaves, nr_msrs;
xen_cpuid_leaf_t *leaves = NULL;
struct cpuid_policy *p = NULL;
struct xc_cpu_policy policy = { };
uint32_t err_leaf = -1, err_subleaf = -1, err_msr = -1;
- uint32_t host_featureset[FEATURESET_NR_ENTRIES] = {};
- uint32_t len = ARRAY_SIZE(host_featureset);
if ( xc_domain_getinfo(xch, domid, 1, &di) != 1 ||
di.domid != domid )
@@ -458,22 +456,6 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
(p = calloc(1, sizeof(*p))) == NULL )
goto out;
- /* Get the host policy. */
- rc = xc_get_cpu_featureset(xch, XEN_SYSCTL_cpu_featureset_host,
- &len, host_featureset);
- if ( rc )
- {
- /* Tolerate "buffer too small", as we've got the bits we need. */
- if ( errno == ENOBUFS )
- rc = 0;
- else
- {
- PERROR("Failed to obtain host featureset");
- rc = -errno;
- goto out;
- }
- }
-
/* Get the domain's default policy. */
nr_msrs = 0;
rc = get_system_cpu_policy(xch, di.hvm ? XEN_SYSCTL_cpu_policy_hvm_default
@@ -557,72 +539,11 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
}
}
- if ( !di.hvm )
- {
- /*
- * On hardware without CPUID Faulting, PV guests see real topology.
- * As a consequence, they also need to see the host htt/cmp fields.
- */
- p->basic.htt = test_bit(X86_FEATURE_HTT, host_featureset);
- p->extd.cmp_legacy = test_bit(X86_FEATURE_CMP_LEGACY, host_featureset);
- }
- else
- {
- /*
- * Topology for HVM guests is entirely controlled by Xen. For now, we
- * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
- */
- p->basic.htt = true;
- p->extd.cmp_legacy = false;
-
- /*
- * Leaf 1 EBX[23:16] is Maximum Logical Processors Per Package.
- * Update to reflect vLAPIC_ID = vCPU_ID * 2, but make sure to avoid
- * overflow.
- */
- if ( !p->basic.lppp )
- p->basic.lppp = 2;
- else if ( !(p->basic.lppp & 0x80) )
- p->basic.lppp *= 2;
-
- switch ( p->x86_vendor )
- {
- case X86_VENDOR_INTEL:
- for ( i = 0; (p->cache.subleaf[i].type &&
- i < ARRAY_SIZE(p->cache.raw)); ++i )
- {
- p->cache.subleaf[i].cores_per_package =
- (p->cache.subleaf[i].cores_per_package << 1) | 1;
- p->cache.subleaf[i].threads_per_cache = 0;
- }
- break;
-
- case X86_VENDOR_AMD:
- case X86_VENDOR_HYGON:
- /*
- * Leaf 0x80000008 ECX[15:12] is ApicIdCoreSize.
- * Leaf 0x80000008 ECX[7:0] is NumberOfCores (minus one).
- * Update to reflect vLAPIC_ID = vCPU_ID * 2. But avoid
- * - overflow,
- * - going out of sync with leaf 1 EBX[23:16],
- * - incrementing ApicIdCoreSize when it's zero (which changes the
- * meaning of bits 7:0).
- *
- * UPDATE: I addition to avoiding overflow, some
- * proprietary operating systems have trouble with
- * apic_id_size values greater than 7. Limit the value to
- * 7 for now.
- */
- if ( p->extd.nc < 0x7f )
- {
- if ( p->extd.apic_id_size != 0 && p->extd.apic_id_size < 0x7 )
- p->extd.apic_id_size++;
-
- p->extd.nc = (p->extd.nc << 1) | 1;
- }
- break;
- }
- }
+ policy.cpuid = *p;
+ rc = xc_cpu_policy_legacy_topology(xch, &policy, di.hvm);
+ if ( rc )
+ goto out;
+ *p = policy.cpuid;
rc = x86_cpuid_copy_to_buffer(p, leaves, &nr_leaves);
if ( rc )
@@ -944,3 +865,98 @@ int xc_cpu_policy_make_compat_4_12(xc_interface *xch, xc_cpu_policy_t *policy,
xc_cpu_policy_destroy(host);
return rc;
}
+
+int xc_cpu_policy_legacy_topology(xc_interface *xch, xc_cpu_policy_t *policy,
+ bool hvm)
+{
+ if ( !hvm )
+ {
+ xc_cpu_policy_t *host;
+ int rc;
+
+ host = xc_cpu_policy_init();
+ if ( !host )
+ {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ rc = xc_cpu_policy_get_system(xch, XEN_SYSCTL_cpu_policy_host, host);
+ if ( rc )
+ {
+ ERROR("Failed to get host policy");
+ xc_cpu_policy_destroy(host);
+ return rc;
+ }
+
+
+ /*
+ * On hardware without CPUID Faulting, PV guests see real topology.
+ * As a consequence, they also need to see the host htt/cmp fields.
+ */
+ policy->cpuid.basic.htt = host->cpuid.basic.htt;
+ policy->cpuid.extd.cmp_legacy = host->cpuid.extd.cmp_legacy;
+ }
+ else
+ {
+ unsigned int i;
+
+ /*
+ * Topology for HVM guests is entirely controlled by Xen. For now, we
+ * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
+ */
+ policy->cpuid.basic.htt = true;
+ policy->cpuid.extd.cmp_legacy = false;
+
+ /*
+ * Leaf 1 EBX[23:16] is Maximum Logical Processors Per Package.
+ * Update to reflect vLAPIC_ID = vCPU_ID * 2, but make sure to avoid
+ * overflow.
+ */
+ if ( !policy->cpuid.basic.lppp )
+ policy->cpuid.basic.lppp = 2;
+ else if ( !(policy->cpuid.basic.lppp & 0x80) )
+ policy->cpuid.basic.lppp *= 2;
+
+ switch ( policy->cpuid.x86_vendor )
+ {
+ case X86_VENDOR_INTEL:
+ for ( i = 0; (policy->cpuid.cache.subleaf[i].type &&
+ i < ARRAY_SIZE(policy->cpuid.cache.raw)); ++i )
+ {
+ policy->cpuid.cache.subleaf[i].cores_per_package =
+ (policy->cpuid.cache.subleaf[i].cores_per_package << 1) | 1;
+ policy->cpuid.cache.subleaf[i].threads_per_cache = 0;
+ }
+ break;
+
+ case X86_VENDOR_AMD:
+ case X86_VENDOR_HYGON:
+ /*
+ * Leaf 0x80000008 ECX[15:12] is ApicIdCoreSize.
+ * Leaf 0x80000008 ECX[7:0] is NumberOfCores (minus one).
+ * Update to reflect vLAPIC_ID = vCPU_ID * 2. But avoid
+ * - overflow,
+ * - going out of sync with leaf 1 EBX[23:16],
+ * - incrementing ApicIdCoreSize when it's zero (which changes the
+ * meaning of bits 7:0).
+ *
+ * UPDATE: I addition to avoiding overflow, some
+ * proprietary operating systems have trouble with
+ * apic_id_size values greater than 7. Limit the value to
+ * 7 for now.
+ */
+ if ( policy->cpuid.extd.nc < 0x7f )
+ {
+ if ( policy->cpuid.extd.apic_id_size != 0 &&
+ policy->cpuid.extd.apic_id_size < 0x7 )
+ policy->cpuid.extd.apic_id_size++;
+
+ policy->cpuid.extd.nc = (policy->cpuid.extd.nc << 1) | 1;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
--
2.33.0
On 29.11.2021 16:33, Roger Pau Monne wrote:
> @@ -458,22 +456,6 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
> (p = calloc(1, sizeof(*p))) == NULL )
> goto out;
>
> - /* Get the host policy. */
> - rc = xc_get_cpu_featureset(xch, XEN_SYSCTL_cpu_featureset_host,
> - &len, host_featureset);

You go from retrieving the host featureset to ...

> @@ -944,3 +865,98 @@ int xc_cpu_policy_make_compat_4_12(xc_interface *xch, xc_cpu_policy_t *policy,
> xc_cpu_policy_destroy(host);
> return rc;
> }
> +
> +int xc_cpu_policy_legacy_topology(xc_interface *xch, xc_cpu_policy_t *policy,
> + bool hvm)
> +{
> + if ( !hvm )
> + {
> + xc_cpu_policy_t *host;
> + int rc;
> +
> + host = xc_cpu_policy_init();
> + if ( !host )
> + {
> + errno = ENOMEM;
> + return -1;
> + }
> +
> + rc = xc_cpu_policy_get_system(xch, XEN_SYSCTL_cpu_policy_host, host);

... retrieving the host policy, which afaict is a larger blob of data.
Is there a particular reason for doing so?

> + if ( rc )
> + {
> + ERROR("Failed to get host policy");
> + xc_cpu_policy_destroy(host);
> + return rc;
> + }
> +
> +
> + /*
> + * On hardware without CPUID Faulting, PV guests see real topology.
> + * As a consequence, they also need to see the host htt/cmp fields.
> + */
> + policy->cpuid.basic.htt = host->cpuid.basic.htt;
> + policy->cpuid.extd.cmp_legacy = host->cpuid.extd.cmp_legacy;
> + }
> + else
> + {
> + unsigned int i;
> +
> + /*
> + * Topology for HVM guests is entirely controlled by Xen. For now, we
> + * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
> + */
> + policy->cpuid.basic.htt = true;
> + policy->cpuid.extd.cmp_legacy = false;
> +
> + /*
> + * Leaf 1 EBX[23:16] is Maximum Logical Processors Per Package.
> + * Update to reflect vLAPIC_ID = vCPU_ID * 2, but make sure to avoid
> + * overflow.
> + */
> + if ( !policy->cpuid.basic.lppp )
> + policy->cpuid.basic.lppp = 2;
> + else if ( !(policy->cpuid.basic.lppp & 0x80) )
> + policy->cpuid.basic.lppp *= 2;
> +
> + switch ( policy->cpuid.x86_vendor )
> + {
> + case X86_VENDOR_INTEL:
> + for ( i = 0; (policy->cpuid.cache.subleaf[i].type &&
> + i < ARRAY_SIZE(policy->cpuid.cache.raw)); ++i )
> + {
> + policy->cpuid.cache.subleaf[i].cores_per_package =
> + (policy->cpuid.cache.subleaf[i].cores_per_package << 1) | 1;
> + policy->cpuid.cache.subleaf[i].threads_per_cache = 0;
> + }
> + break;
> +
> + case X86_VENDOR_AMD:
> + case X86_VENDOR_HYGON:
> + /*
> + * Leaf 0x80000008 ECX[15:12] is ApicIdCoreSize.
> + * Leaf 0x80000008 ECX[7:0] is NumberOfCores (minus one).
> + * Update to reflect vLAPIC_ID = vCPU_ID * 2. But avoid
> + * - overflow,
> + * - going out of sync with leaf 1 EBX[23:16],
> + * - incrementing ApicIdCoreSize when it's zero (which changes the
> + * meaning of bits 7:0).
> + *
> + * UPDATE: I addition to avoiding overflow, some

Nit: Would you mind switching "I" to "In" at this occasion?

Jan
On Mon, Dec 06, 2021 at 05:09:42PM +0100, Jan Beulich wrote:
> On 29.11.2021 16:33, Roger Pau Monne wrote:
> > @@ -458,22 +456,6 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
> > (p = calloc(1, sizeof(*p))) == NULL )
> > goto out;
> >
> > - /* Get the host policy. */
> > - rc = xc_get_cpu_featureset(xch, XEN_SYSCTL_cpu_featureset_host,
> > - &len, host_featureset);
>
> You go from retrieving the host featureset to ...
>
> > @@ -944,3 +865,98 @@ int xc_cpu_policy_make_compat_4_12(xc_interface *xch, xc_cpu_policy_t *policy,
> > xc_cpu_policy_destroy(host);
> > return rc;
> > }
> > +
> > +int xc_cpu_policy_legacy_topology(xc_interface *xch, xc_cpu_policy_t *policy,
> > + bool hvm)
> > +{
> > + if ( !hvm )
> > + {
> > + xc_cpu_policy_t *host;
> > + int rc;
> > +
> > + host = xc_cpu_policy_init();
> > + if ( !host )
> > + {
> > + errno = ENOMEM;
> > + return -1;
> > + }
> > +
> > + rc = xc_cpu_policy_get_system(xch, XEN_SYSCTL_cpu_policy_host, host);
>
> ... retrieving the host policy, which afaict is a larger blob of data.
> Is there a particular reason for doing so?

I did that so I could assign from one CPUID policy to another, but
will revert back to using a featureset since it's indeed smaller.

> > + if ( rc )
> > + {
> > + ERROR("Failed to get host policy");
> > + xc_cpu_policy_destroy(host);
> > + return rc;
> > + }
> > +
> > +
> > + /*
> > + * On hardware without CPUID Faulting, PV guests see real topology.
> > + * As a consequence, they also need to see the host htt/cmp fields.
> > + */
> > + policy->cpuid.basic.htt = host->cpuid.basic.htt;
> > + policy->cpuid.extd.cmp_legacy = host->cpuid.extd.cmp_legacy;
> > + }
> > + else
> > + {
> > + unsigned int i;
> > +
> > + /*
> > + * Topology for HVM guests is entirely controlled by Xen. For now, we
> > + * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
> > + */
> > + policy->cpuid.basic.htt = true;
> > + policy->cpuid.extd.cmp_legacy = false;
> > +
> > + /*
> > + * Leaf 1 EBX[23:16] is Maximum Logical Processors Per Package.
> > + * Update to reflect vLAPIC_ID = vCPU_ID * 2, but make sure to avoid
> > + * overflow.
> > + */
> > + if ( !policy->cpuid.basic.lppp )
> > + policy->cpuid.basic.lppp = 2;
> > + else if ( !(policy->cpuid.basic.lppp & 0x80) )
> > + policy->cpuid.basic.lppp *= 2;
> > +
> > + switch ( policy->cpuid.x86_vendor )
> > + {
> > + case X86_VENDOR_INTEL:
> > + for ( i = 0; (policy->cpuid.cache.subleaf[i].type &&
> > + i < ARRAY_SIZE(policy->cpuid.cache.raw)); ++i )
> > + {
> > + policy->cpuid.cache.subleaf[i].cores_per_package =
> > + (policy->cpuid.cache.subleaf[i].cores_per_package << 1) | 1;
> > + policy->cpuid.cache.subleaf[i].threads_per_cache = 0;
> > + }
> > + break;
> > +
> > + case X86_VENDOR_AMD:
> > + case X86_VENDOR_HYGON:
> > + /*
> > + * Leaf 0x80000008 ECX[15:12] is ApicIdCoreSize.
> > + * Leaf 0x80000008 ECX[7:0] is NumberOfCores (minus one).
> > + * Update to reflect vLAPIC_ID = vCPU_ID * 2. But avoid
> > + * - overflow,
> > + * - going out of sync with leaf 1 EBX[23:16],
> > + * - incrementing ApicIdCoreSize when it's zero (which changes the
> > + * meaning of bits 7:0).
> > + *
> > + * UPDATE: I addition to avoiding overflow, some
>
> Nit: Would you mind switching "I" to "In" at this occasion?

Will do.

Thanks, Roger.
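
For reference, the featureset-based variant mentioned above would
essentially reinstate the logic removed from xc_cpuid_apply_policy(),
now inside the new helper. A minimal, untested sketch of the PV branch
follows; the helper name is illustrative only, and the featureset,
test_bit and error-handling idioms are the ones already used in
xg_cpuid_x86.c:

    /*
     * Sketch only: PV branch of the legacy topology helper using the
     * host featureset instead of the full host policy.
     */
    static int legacy_topology_pv_sketch(xc_interface *xch,
                                         xc_cpu_policy_t *policy)
    {
        uint32_t host_featureset[FEATURESET_NR_ENTRIES] = {};
        uint32_t len = ARRAY_SIZE(host_featureset);
        int rc;

        rc = xc_get_cpu_featureset(xch, XEN_SYSCTL_cpu_featureset_host,
                                   &len, host_featureset);
        if ( rc )
        {
            /* Tolerate "buffer too small", as we've got the bits we need. */
            if ( errno == ENOBUFS )
                rc = 0;
            else
            {
                PERROR("Failed to obtain host featureset");
                return -errno;
            }
        }

        /*
         * On hardware without CPUID Faulting, PV guests see real topology.
         * As a consequence, they also need to see the host htt/cmp fields.
         */
        policy->cpuid.basic.htt = test_bit(X86_FEATURE_HTT, host_featureset);
        policy->cpuid.extd.cmp_legacy = test_bit(X86_FEATURE_CMP_LEGACY,
                                                 host_featureset);

        return rc;
    }

This avoids fetching the whole system policy just to read the two
feature bits the PV case needs.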