From: Sean Christopherson <sean.j.christopherson@intel.com>
Move the architectural (for lack of a better term) CPUID leaf generation
to a separate helper so that the generation code can be reused by TDX,
which needs to generate a canonical VM-scoped configuration.
For now this is just a cleanup, so keep the function static.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Message-ID: <20240229063726.610065-23-xiaoyao.li@intel.com>
[Unify error reporting, rename function. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
target/i386/kvm/kvm.c | 446 +++++++++++++++++++++---------------------
1 file changed, 224 insertions(+), 222 deletions(-)
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 2577e345502..eab6261e1f5 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -1752,6 +1752,228 @@ static void kvm_init_nested_state(CPUX86State *env)
}
}
+static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
+ struct kvm_cpuid_entry2 *entries,
+ uint32_t cpuid_i)
+{
+ uint32_t limit, i, j;
+ uint32_t unused;
+ struct kvm_cpuid_entry2 *c;
+
+ cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
+
+ for (i = 0; i <= limit; i++) {
+ j = 0;
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ goto full;
+ }
+ c = &entries[cpuid_i++];
+ switch (i) {
+ case 2: {
+ /* Keep reading function 2 till all the input is received */
+ int times;
+
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
+ KVM_CPUID_FLAG_STATE_READ_NEXT;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ times = c->eax & 0xff;
+
+ for (j = 1; j < times; ++j) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ goto full;
+ }
+ c = &entries[cpuid_i++];
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+ break;
+ }
+ case 0x1f:
+ if (env->nr_dies < 2) {
+ cpuid_i--;
+ break;
+ }
+ /* fallthrough */
+ case 4:
+ case 0xb:
+ case 0xd:
+ for (j = 0; ; j++) {
+ if (i == 0xd && j == 64) {
+ break;
+ }
+
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ c->index = j;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+ if (i == 4 && c->eax == 0) {
+ break;
+ }
+ if (i == 0xb && !(c->ecx & 0xff00)) {
+ break;
+ }
+ if (i == 0x1f && !(c->ecx & 0xff00)) {
+ break;
+ }
+ if (i == 0xd && c->eax == 0) {
+ continue;
+ }
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ goto full;
+ }
+ c = &entries[cpuid_i++];
+ }
+ break;
+ case 0x12:
+ for (j = 0; ; j++) {
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ c->index = j;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+ if (j > 1 && (c->eax & 0xf) != 1) {
+ break;
+ }
+
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ goto full;
+ }
+ c = &entries[cpuid_i++];
+ }
+ break;
+ case 0x7:
+ case 0x14:
+ case 0x1d:
+ case 0x1e: {
+ uint32_t times;
+
+ c->function = i;
+ c->index = 0;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ times = c->eax;
+
+ for (j = 1; j <= times; ++j) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ goto full;
+ }
+ c = &entries[cpuid_i++];
+ c->function = i;
+ c->index = j;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+ break;
+ }
+ default:
+ c->function = i;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
+ /*
+ * KVM already returns all zeroes if a CPUID entry is missing,
+ * so we can omit it and avoid hitting KVM's 80-entry limit.
+ */
+ cpuid_i--;
+ }
+ break;
+ }
+ }
+
+ if (limit >= 0x0a) {
+ uint32_t eax, edx;
+
+ cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
+
+ has_architectural_pmu_version = eax & 0xff;
+ if (has_architectural_pmu_version > 0) {
+ num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
+
+ /* Shouldn't be more than 32, since that's the number of bits
+ * available in EBX to tell us _which_ counters are available.
+ * Play it safe.
+ */
+ if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
+ num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
+ }
+
+ if (has_architectural_pmu_version > 1) {
+ num_architectural_pmu_fixed_counters = edx & 0x1f;
+
+ if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
+ num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
+ }
+ }
+ }
+ }
+
+ cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
+
+ for (i = 0x80000000; i <= limit; i++) {
+ j = 0;
+ c = &entries[cpuid_i++];
+
+ switch (i) {
+ case 0x8000001d:
+ /* Query for all AMD cache information leaves */
+ for (j = 0; ; j++) {
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ c->index = j;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+ if (c->eax == 0) {
+ break;
+ }
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ goto full;
+ }
+ c = &entries[cpuid_i++];
+ }
+ break;
+ default:
+ c->function = i;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
+ /*
+ * KVM already returns all zeroes if a CPUID entry is missing,
+ * so we can omit it and avoid hitting KVM's 80-entry limit.
+ */
+ cpuid_i--;
+ }
+ break;
+ }
+ }
+
+ /* Call Centaur's CPUID instructions they are supported. */
+ if (env->cpuid_xlevel2 > 0) {
+ cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
+
+ for (i = 0xC0000000; i <= limit; i++) {
+ j = 0;
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ goto full;
+ }
+ c = &entries[cpuid_i++];
+
+ c->function = i;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+ }
+
+ return cpuid_i;
+
+full:
+ fprintf(stderr, "cpuid_data is full, no space for "
+ "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
+ abort();
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
struct {
@@ -1768,8 +1990,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
- uint32_t limit, i, j, cpuid_i;
- uint32_t unused;
+ uint32_t cpuid_i;
struct kvm_cpuid_entry2 *c;
uint32_t signature[3];
int kvm_base = KVM_CPUID_SIGNATURE;
@@ -1922,8 +2143,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
c->edx = env->features[FEAT_KVM_HINTS];
}
- cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
-
if (cpu->kvm_pv_enforce_cpuid) {
r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
if (r < 0) {
@@ -1934,224 +2153,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
}
- for (i = 0; i <= limit; i++) {
- if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
- fprintf(stderr, "unsupported level value: 0x%x\n", limit);
- abort();
- }
- c = &cpuid_data.entries[cpuid_i++];
-
- switch (i) {
- case 2: {
- /* Keep reading function 2 till all the input is received */
- int times;
-
- c->function = i;
- c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
- KVM_CPUID_FLAG_STATE_READ_NEXT;
- cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
- times = c->eax & 0xff;
-
- for (j = 1; j < times; ++j) {
- if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
- fprintf(stderr, "cpuid_data is full, no space for "
- "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
- abort();
- }
- c = &cpuid_data.entries[cpuid_i++];
- c->function = i;
- c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
- cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
- }
- break;
- }
- case 0x1f:
- if (env->nr_dies < 2) {
- cpuid_i--;
- break;
- }
- /* fallthrough */
- case 4:
- case 0xb:
- case 0xd:
- for (j = 0; ; j++) {
- if (i == 0xd && j == 64) {
- break;
- }
-
- c->function = i;
- c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- c->index = j;
- cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
-
- if (i == 4 && c->eax == 0) {
- break;
- }
- if (i == 0xb && !(c->ecx & 0xff00)) {
- break;
- }
- if (i == 0x1f && !(c->ecx & 0xff00)) {
- break;
- }
- if (i == 0xd && c->eax == 0) {
- continue;
- }
- if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
- fprintf(stderr, "cpuid_data is full, no space for "
- "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
- abort();
- }
- c = &cpuid_data.entries[cpuid_i++];
- }
- break;
- case 0x12:
- for (j = 0; ; j++) {
- c->function = i;
- c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- c->index = j;
- cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
-
- if (j > 1 && (c->eax & 0xf) != 1) {
- break;
- }
-
- if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
- fprintf(stderr, "cpuid_data is full, no space for "
- "cpuid(eax:0x12,ecx:0x%x)\n", j);
- abort();
- }
- c = &cpuid_data.entries[cpuid_i++];
- }
- break;
- case 0x7:
- case 0x14:
- case 0x1d:
- case 0x1e: {
- uint32_t times;
-
- c->function = i;
- c->index = 0;
- c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
- times = c->eax;
-
- for (j = 1; j <= times; ++j) {
- if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
- fprintf(stderr, "cpuid_data is full, no space for "
- "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
- abort();
- }
- c = &cpuid_data.entries[cpuid_i++];
- c->function = i;
- c->index = j;
- c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
- }
- break;
- }
- default:
- c->function = i;
- c->flags = 0;
- cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
- if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
- /*
- * KVM already returns all zeroes if a CPUID entry is missing,
- * so we can omit it and avoid hitting KVM's 80-entry limit.
- */
- cpuid_i--;
- }
- break;
- }
- }
-
- if (limit >= 0x0a) {
- uint32_t eax, edx;
-
- cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
-
- has_architectural_pmu_version = eax & 0xff;
- if (has_architectural_pmu_version > 0) {
- num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
-
- /* Shouldn't be more than 32, since that's the number of bits
- * available in EBX to tell us _which_ counters are available.
- * Play it safe.
- */
- if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
- num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
- }
-
- if (has_architectural_pmu_version > 1) {
- num_architectural_pmu_fixed_counters = edx & 0x1f;
-
- if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
- num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
- }
- }
- }
- }
-
- cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
-
- for (i = 0x80000000; i <= limit; i++) {
- if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
- fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
- abort();
- }
- c = &cpuid_data.entries[cpuid_i++];
-
- switch (i) {
- case 0x8000001d:
- /* Query for all AMD cache information leaves */
- for (j = 0; ; j++) {
- c->function = i;
- c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- c->index = j;
- cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
-
- if (c->eax == 0) {
- break;
- }
- if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
- fprintf(stderr, "cpuid_data is full, no space for "
- "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
- abort();
- }
- c = &cpuid_data.entries[cpuid_i++];
- }
- break;
- default:
- c->function = i;
- c->flags = 0;
- cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
- if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
- /*
- * KVM already returns all zeroes if a CPUID entry is missing,
- * so we can omit it and avoid hitting KVM's 80-entry limit.
- */
- cpuid_i--;
- }
- break;
- }
- }
-
- /* Call Centaur's CPUID instructions they are supported. */
- if (env->cpuid_xlevel2 > 0) {
- cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
-
- for (i = 0xC0000000; i <= limit; i++) {
- if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
- fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
- abort();
- }
- c = &cpuid_data.entries[cpuid_i++];
-
- c->function = i;
- c->flags = 0;
- cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
- }
- }
-
+ cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i);
cpuid_data.cpuid.nent = cpuid_i;
if (((env->cpuid_version >> 8)&0xF) >= 6
--
2.44.0
On 3/23/2024 2:11 AM, Paolo Bonzini wrote:
> From: Sean Christopherson <sean.j.christopherson@intel.com>
>
> Move the architectural (for lack of a better term) CPUID leaf generation
> to a separate helper so that the generation code can be reused by TDX,
> which needs to generate a canonical VM-scoped configuration.
>
> For now this is just a cleanup, so keep the function static.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
> Message-ID: <20240229063726.610065-23-xiaoyao.li@intel.com>
> [Unify error reporting, rename function. - Paolo]
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> target/i386/kvm/kvm.c | 446 +++++++++++++++++++++---------------------
> 1 file changed, 224 insertions(+), 222 deletions(-)
>
> diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
> index 2577e345502..eab6261e1f5 100644
> --- a/target/i386/kvm/kvm.c
> +++ b/target/i386/kvm/kvm.c
> @@ -1752,6 +1752,228 @@ static void kvm_init_nested_state(CPUX86State *env)
> }
> }
>
> +static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
> + struct kvm_cpuid_entry2 *entries,
> + uint32_t cpuid_i)
> +{
> + uint32_t limit, i, j;
> + uint32_t unused;
> + struct kvm_cpuid_entry2 *c;
> +
> + cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
> +
> + for (i = 0; i <= limit; i++) {
> + j = 0;
> + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> + goto full;
> + }
> + c = &entries[cpuid_i++];
> + switch (i) {
> + case 2: {
> + /* Keep reading function 2 till all the input is received */
> + int times;
> +
> + c->function = i;
> + c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
> + KVM_CPUID_FLAG_STATE_READ_NEXT;
> + cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> + times = c->eax & 0xff;
> +
> + for (j = 1; j < times; ++j) {
> + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> + goto full;
> + }
> + c = &entries[cpuid_i++];
> + c->function = i;
> + c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
> + cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> + }
> + break;
> + }
> + case 0x1f:
> + if (env->nr_dies < 2) {
> + cpuid_i--;
> + break;
> + }
> + /* fallthrough */
> + case 4:
> + case 0xb:
> + case 0xd:
> + for (j = 0; ; j++) {
> + if (i == 0xd && j == 64) {
> + break;
> + }
> +
> + c->function = i;
> + c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
> + c->index = j;
> + cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
> +
> + if (i == 4 && c->eax == 0) {
> + break;
> + }
> + if (i == 0xb && !(c->ecx & 0xff00)) {
> + break;
> + }
> + if (i == 0x1f && !(c->ecx & 0xff00)) {
> + break;
> + }
> + if (i == 0xd && c->eax == 0) {
> + continue;
> + }
> + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> + goto full;
> + }
> + c = &entries[cpuid_i++];
> + }
> + break;
> + case 0x12:
> + for (j = 0; ; j++) {
> + c->function = i;
> + c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
> + c->index = j;
> + cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
> +
> + if (j > 1 && (c->eax & 0xf) != 1) {
> + break;
> + }
> +
> + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> + goto full;
> + }
> + c = &entries[cpuid_i++];
> + }
> + break;
> + case 0x7:
> + case 0x14:
> + case 0x1d:
> + case 0x1e: {
> + uint32_t times;
> +
> + c->function = i;
> + c->index = 0;
> + c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
> + cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> + times = c->eax;
> +
> + for (j = 1; j <= times; ++j) {
> + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> + goto full;
> + }
> + c = &entries[cpuid_i++];
> + c->function = i;
> + c->index = j;
> + c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
> + cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
> + }
> + break;
> + }
> + default:
> + c->function = i;
> + c->flags = 0;
> + cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> + if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
> + /*
> + * KVM already returns all zeroes if a CPUID entry is missing,
> + * so we can omit it and avoid hitting KVM's 80-entry limit.
> + */
> + cpuid_i--;
> + }
> + break;
> + }
> + }
> +
> + if (limit >= 0x0a) {
> + uint32_t eax, edx;
> +
> + cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
> +
> + has_architectural_pmu_version = eax & 0xff;
> + if (has_architectural_pmu_version > 0) {
> + num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
> +
> + /* Shouldn't be more than 32, since that's the number of bits
> + * available in EBX to tell us _which_ counters are available.
> + * Play it safe.
> + */
> + if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
> + num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
> + }
> +
> + if (has_architectural_pmu_version > 1) {
> + num_architectural_pmu_fixed_counters = edx & 0x1f;
> +
> + if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
> + num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
> + }
> + }
> + }
> + }
> +
> + cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
> +
> + for (i = 0x80000000; i <= limit; i++) {
> + j = 0;
The following check is missing here:

    if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
        goto full;
    }

Otherwise, it looks good to me.
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
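
For reference, a sketch of how the start of that loop would presumably look once the missing bounds check is added (mirroring the check used in the basic-leaf loop above; not the final patch):

    for (i = 0x80000000; i <= limit; i++) {
        j = 0;
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            goto full;
        }
        c = &entries[cpuid_i++];
        /* existing switch (i) handling unchanged */
    }
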
> + c = &entries[cpuid_i++];
> +
> + switch (i) {
> + case 0x8000001d:
> + /* Query for all AMD cache information leaves */
> + for (j = 0; ; j++) {
> + c->function = i;
> + c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
> + c->index = j;
> + cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
> +
> + if (c->eax == 0) {
> + break;
> + }
> + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> + goto full;
> + }
> + c = &entries[cpuid_i++];
> + }
> + break;
> + default:
> + c->function = i;
> + c->flags = 0;
> + cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> + if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
> + /*
> + * KVM already returns all zeroes if a CPUID entry is missing,
> + * so we can omit it and avoid hitting KVM's 80-entry limit.
> + */
> + cpuid_i--;
> + }
> + break;
> + }
> + }
> +
> + /* Call Centaur's CPUID instructions they are supported. */
> + if (env->cpuid_xlevel2 > 0) {
> + cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
> +
> + for (i = 0xC0000000; i <= limit; i++) {
> + j = 0;
> + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> + goto full;
> + }
> + c = &entries[cpuid_i++];
> +
> + c->function = i;
> + c->flags = 0;
> + cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> + }
> + }
> +
> + return cpuid_i;
> +
> +full:
> + fprintf(stderr, "cpuid_data is full, no space for "
> + "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
> + abort();
> +}
> +
> int kvm_arch_init_vcpu(CPUState *cs)
> {
> struct {
> @@ -1768,8 +1990,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
>
> X86CPU *cpu = X86_CPU(cs);
> CPUX86State *env = &cpu->env;
> - uint32_t limit, i, j, cpuid_i;
> - uint32_t unused;
> + uint32_t cpuid_i;
> struct kvm_cpuid_entry2 *c;
> uint32_t signature[3];
> int kvm_base = KVM_CPUID_SIGNATURE;
> @@ -1922,8 +2143,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
> c->edx = env->features[FEAT_KVM_HINTS];
> }
>
> - cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
> -
> if (cpu->kvm_pv_enforce_cpuid) {
> r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
> if (r < 0) {
> @@ -1934,224 +2153,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
> }
> }
>
> - for (i = 0; i <= limit; i++) {
> - if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> - fprintf(stderr, "unsupported level value: 0x%x\n", limit);
> - abort();
> - }
> - c = &cpuid_data.entries[cpuid_i++];
> -
> - switch (i) {
> - case 2: {
> - /* Keep reading function 2 till all the input is received */
> - int times;
> -
> - c->function = i;
> - c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
> - KVM_CPUID_FLAG_STATE_READ_NEXT;
> - cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> - times = c->eax & 0xff;
> -
> - for (j = 1; j < times; ++j) {
> - if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> - fprintf(stderr, "cpuid_data is full, no space for "
> - "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
> - abort();
> - }
> - c = &cpuid_data.entries[cpuid_i++];
> - c->function = i;
> - c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
> - cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> - }
> - break;
> - }
> - case 0x1f:
> - if (env->nr_dies < 2) {
> - cpuid_i--;
> - break;
> - }
> - /* fallthrough */
> - case 4:
> - case 0xb:
> - case 0xd:
> - for (j = 0; ; j++) {
> - if (i == 0xd && j == 64) {
> - break;
> - }
> -
> - c->function = i;
> - c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
> - c->index = j;
> - cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
> -
> - if (i == 4 && c->eax == 0) {
> - break;
> - }
> - if (i == 0xb && !(c->ecx & 0xff00)) {
> - break;
> - }
> - if (i == 0x1f && !(c->ecx & 0xff00)) {
> - break;
> - }
> - if (i == 0xd && c->eax == 0) {
> - continue;
> - }
> - if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> - fprintf(stderr, "cpuid_data is full, no space for "
> - "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
> - abort();
> - }
> - c = &cpuid_data.entries[cpuid_i++];
> - }
> - break;
> - case 0x12:
> - for (j = 0; ; j++) {
> - c->function = i;
> - c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
> - c->index = j;
> - cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
> -
> - if (j > 1 && (c->eax & 0xf) != 1) {
> - break;
> - }
> -
> - if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> - fprintf(stderr, "cpuid_data is full, no space for "
> - "cpuid(eax:0x12,ecx:0x%x)\n", j);
> - abort();
> - }
> - c = &cpuid_data.entries[cpuid_i++];
> - }
> - break;
> - case 0x7:
> - case 0x14:
> - case 0x1d:
> - case 0x1e: {
> - uint32_t times;
> -
> - c->function = i;
> - c->index = 0;
> - c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
> - cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> - times = c->eax;
> -
> - for (j = 1; j <= times; ++j) {
> - if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> - fprintf(stderr, "cpuid_data is full, no space for "
> - "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
> - abort();
> - }
> - c = &cpuid_data.entries[cpuid_i++];
> - c->function = i;
> - c->index = j;
> - c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
> - cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
> - }
> - break;
> - }
> - default:
> - c->function = i;
> - c->flags = 0;
> - cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> - if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
> - /*
> - * KVM already returns all zeroes if a CPUID entry is missing,
> - * so we can omit it and avoid hitting KVM's 80-entry limit.
> - */
> - cpuid_i--;
> - }
> - break;
> - }
> - }
> -
> - if (limit >= 0x0a) {
> - uint32_t eax, edx;
> -
> - cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
> -
> - has_architectural_pmu_version = eax & 0xff;
> - if (has_architectural_pmu_version > 0) {
> - num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
> -
> - /* Shouldn't be more than 32, since that's the number of bits
> - * available in EBX to tell us _which_ counters are available.
> - * Play it safe.
> - */
> - if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
> - num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
> - }
> -
> - if (has_architectural_pmu_version > 1) {
> - num_architectural_pmu_fixed_counters = edx & 0x1f;
> -
> - if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
> - num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
> - }
> - }
> - }
> - }
> -
> - cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
> -
> - for (i = 0x80000000; i <= limit; i++) {
> - if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> - fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
> - abort();
> - }
> - c = &cpuid_data.entries[cpuid_i++];
> -
> - switch (i) {
> - case 0x8000001d:
> - /* Query for all AMD cache information leaves */
> - for (j = 0; ; j++) {
> - c->function = i;
> - c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
> - c->index = j;
> - cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
> -
> - if (c->eax == 0) {
> - break;
> - }
> - if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> - fprintf(stderr, "cpuid_data is full, no space for "
> - "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
> - abort();
> - }
> - c = &cpuid_data.entries[cpuid_i++];
> - }
> - break;
> - default:
> - c->function = i;
> - c->flags = 0;
> - cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> - if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
> - /*
> - * KVM already returns all zeroes if a CPUID entry is missing,
> - * so we can omit it and avoid hitting KVM's 80-entry limit.
> - */
> - cpuid_i--;
> - }
> - break;
> - }
> - }
> -
> - /* Call Centaur's CPUID instructions they are supported. */
> - if (env->cpuid_xlevel2 > 0) {
> - cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
> -
> - for (i = 0xC0000000; i <= limit; i++) {
> - if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
> - fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
> - abort();
> - }
> - c = &cpuid_data.entries[cpuid_i++];
> -
> - c->function = i;
> - c->flags = 0;
> - cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
> - }
> - }
> -
> + cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i);
> cpuid_data.cpuid.nent = cpuid_i;
>
> if (((env->cpuid_version >> 8)&0xF) >= 6