The VirtMachineClass::no_cpu_topology field was
only used by the virt-6.1 machine, which has been removed.
Remove the field as it is now unused.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
include/hw/arm/virt.h | 2 -
hw/arm/virt-acpi-build.c | 9 ++---
hw/arm/virt.c | 85 +++++++++++++++++++---------------------
3 files changed, 43 insertions(+), 53 deletions(-)
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 21d91a43d8b..ca2e59ec569 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -122,8 +122,6 @@ typedef enum VirtGICType {
struct VirtMachineClass {
MachineClass parent;
bool no_highmem_compact;
- /* Machines < 6.2 have no support for describing cpu topology to guest */
- bool no_cpu_topology;
bool no_tcg_lpa2;
bool no_ns_el2_virt_timer_irq;
bool no_nested_smmu;
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 5db5baa7cf3..8d40570c1e3 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -1110,7 +1110,6 @@ static const AcpiNotificationSourceId hest_ghes_notify_10_0[] = {
static
void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
{
- VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
GArray *table_offsets;
unsigned dsdt, xsdt;
GArray *tables_blob = tables->table_data;
@@ -1134,11 +1133,9 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
acpi_add_table(table_offsets, tables_blob);
build_madt(tables_blob, tables->linker, vms);
- if (!vmc->no_cpu_topology) {
- acpi_add_table(table_offsets, tables_blob);
- build_pptt(tables_blob, tables->linker, ms,
- vms->oem_id, vms->oem_table_id);
- }
+ acpi_add_table(table_offsets, tables_blob);
+ build_pptt(tables_blob, tables->linker, ms,
+ vms->oem_id, vms->oem_table_id);
acpi_add_table(table_offsets, tables_blob);
build_gtdt(tables_blob, tables->linker, vms);
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 3e7858d6e11..4243da3c87e 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -431,7 +431,6 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
int cpu;
int addr_cells = 1;
const MachineState *ms = MACHINE(vms);
- const VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
int smp_cpus = ms->smp.cpus;
/*
@@ -488,57 +487,53 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
ms->possible_cpus->cpus[cs->cpu_index].props.node_id);
}
- if (!vmc->no_cpu_topology) {
- qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
- qemu_fdt_alloc_phandle(ms->fdt));
- }
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
+ qemu_fdt_alloc_phandle(ms->fdt));
g_free(nodename);
}
- if (!vmc->no_cpu_topology) {
- /*
- * Add vCPU topology description through fdt node cpu-map.
- *
- * See Linux Documentation/devicetree/bindings/cpu/cpu-topology.txt
- * In a SMP system, the hierarchy of CPUs can be defined through
- * four entities that are used to describe the layout of CPUs in
- * the system: socket/cluster/core/thread.
- *
- * A socket node represents the boundary of system physical package
- * and its child nodes must be one or more cluster nodes. A system
- * can contain several layers of clustering within a single physical
- * package and cluster nodes can be contained in parent cluster nodes.
- *
- * Note: currently we only support one layer of clustering within
- * each physical package.
- */
- qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");
+ /*
+ * Add vCPU topology description through fdt node cpu-map.
+ *
+ * See Linux Documentation/devicetree/bindings/cpu/cpu-topology.txt
+ * In a SMP system, the hierarchy of CPUs can be defined through
+ * four entities that are used to describe the layout of CPUs in
+ * the system: socket/cluster/core/thread.
+ *
+ * A socket node represents the boundary of system physical package
+ * and its child nodes must be one or more cluster nodes. A system
+ * can contain several layers of clustering within a single physical
+ * package and cluster nodes can be contained in parent cluster nodes.
+ *
+ * Note: currently we only support one layer of clustering within
+ * each physical package.
+ */
+ qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");
- for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
- char *cpu_path = g_strdup_printf("/cpus/cpu@%d", cpu);
- char *map_path;
+ for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
+ char *cpu_path = g_strdup_printf("/cpus/cpu@%d", cpu);
+ char *map_path;
- if (ms->smp.threads > 1) {
- map_path = g_strdup_printf(
- "/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d",
- cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads),
- (cpu / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters,
- (cpu / ms->smp.threads) % ms->smp.cores,
- cpu % ms->smp.threads);
- } else {
- map_path = g_strdup_printf(
- "/cpus/cpu-map/socket%d/cluster%d/core%d",
- cpu / (ms->smp.clusters * ms->smp.cores),
- (cpu / ms->smp.cores) % ms->smp.clusters,
- cpu % ms->smp.cores);
- }
- qemu_fdt_add_path(ms->fdt, map_path);
- qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path);
-
- g_free(map_path);
- g_free(cpu_path);
+ if (ms->smp.threads > 1) {
+ map_path = g_strdup_printf(
+ "/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d",
+ cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads),
+ (cpu / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters,
+ (cpu / ms->smp.threads) % ms->smp.cores,
+ cpu % ms->smp.threads);
+ } else {
+ map_path = g_strdup_printf(
+ "/cpus/cpu-map/socket%d/cluster%d/core%d",
+ cpu / (ms->smp.clusters * ms->smp.cores),
+ (cpu / ms->smp.cores) % ms->smp.clusters,
+ cpu % ms->smp.cores);
}
+ qemu_fdt_add_path(ms->fdt, map_path);
+ qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path);
+
+ g_free(map_path);
+ g_free(cpu_path);
}
}
--
2.51.0