This shall be used to store the user-specified topology {socket, cluster, core, thread}
and shall be converted to a unique 'vcpu-id' which is used as slot-index during
hot(un)plug of vCPU.
Co-developed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
---
hw/arm/virt.c | 10 ++++++++++
include/hw/arm/virt.h | 28 ++++++++++++++++++++++++++++
target/arm/cpu.c | 4 ++++
target/arm/cpu.h | 4 ++++
4 files changed, 46 insertions(+)
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 3c93c0c0a6..11fc7fc318 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -2215,6 +2215,14 @@ static void machvirt_init(MachineState *machine)
&error_fatal);
aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL);
+ object_property_set_int(cpuobj, "socket-id",
+ virt_get_socket_id(machine, n), NULL);
+ object_property_set_int(cpuobj, "cluster-id",
+ virt_get_cluster_id(machine, n), NULL);
+ object_property_set_int(cpuobj, "core-id",
+ virt_get_core_id(machine, n), NULL);
+ object_property_set_int(cpuobj, "thread-id",
+ virt_get_thread_id(machine, n), NULL);
if (!vms->secure) {
object_property_set_bool(cpuobj, "has_el3", false, NULL);
@@ -2708,6 +2716,7 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
{
int n;
unsigned int max_cpus = ms->smp.max_cpus;
+ unsigned int smp_threads = ms->smp.threads;
VirtMachineState *vms = VIRT_MACHINE(ms);
MachineClass *mc = MACHINE_GET_CLASS(vms);
@@ -2721,6 +2730,7 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
ms->possible_cpus->len = max_cpus;
for (n = 0; n < ms->possible_cpus->len; n++) {
ms->possible_cpus->cpus[n].type = ms->cpu_type;
+ ms->possible_cpus->cpus[n].vcpus_count = smp_threads;
ms->possible_cpus->cpus[n].arch_id =
virt_cpu_mp_affinity(vms, n);
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index bb486d36b1..6f9a7bb60b 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -209,4 +209,32 @@ static inline int virt_gicv3_redist_region_count(VirtMachineState *vms)
vms->highmem_redists) ? 2 : 1;
}
+static inline int virt_get_socket_id(const MachineState *ms, int cpu_index)
+{
+ assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len);
+
+ return ms->possible_cpus->cpus[cpu_index].props.socket_id;
+}
+
+static inline int virt_get_cluster_id(const MachineState *ms, int cpu_index)
+{
+ assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len);
+
+ return ms->possible_cpus->cpus[cpu_index].props.cluster_id;
+}
+
+static inline int virt_get_core_id(const MachineState *ms, int cpu_index)
+{
+ assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len);
+
+ return ms->possible_cpus->cpus[cpu_index].props.core_id;
+}
+
+static inline int virt_get_thread_id(const MachineState *ms, int cpu_index)
+{
+ assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len);
+
+ return ms->possible_cpus->cpus[cpu_index].props.thread_id;
+}
+
#endif /* QEMU_ARM_VIRT_H */
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 77f8c9c748..abc4ed0842 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2582,6 +2582,10 @@ static Property arm_cpu_properties[] = {
DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
mp_affinity, ARM64_AFFINITY_INVALID),
DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
+ DEFINE_PROP_INT32("socket-id", ARMCPU, socket_id, 0),
+ DEFINE_PROP_INT32("cluster-id", ARMCPU, cluster_id, 0),
+ DEFINE_PROP_INT32("core-id", ARMCPU, core_id, 0),
+ DEFINE_PROP_INT32("thread-id", ARMCPU, thread_id, 0),
DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
/* True to default to the backward-compat old CNTFRQ rather than 1Ghz */
DEFINE_PROP_BOOL("backcompat-cntfrq", ARMCPU, backcompat_cntfrq, false),
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index c17264c239..208c719db3 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1076,6 +1076,10 @@ struct ArchCPU {
QLIST_HEAD(, ARMELChangeHook) el_change_hooks;
int32_t node_id; /* NUMA node this CPU belongs to */
+ int32_t socket_id;
+ int32_t cluster_id;
+ int32_t core_id;
+ int32_t thread_id;
/* Used to synchronize KVM and QEMU in-kernel device levels */
uint8_t device_irq_level;
--
2.34.1
On 6/14/24 9:36 AM, Salil Mehta wrote: > This shall be used to store user specified topology{socket,cluster,core,thread} > and shall be converted to a unique 'vcpu-id' which is used as slot-index during > hot(un)plug of vCPU. > > Co-developed-by: Keqian Zhu <zhukeqian1@huawei.com> > Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com> > Signed-off-by: Salil Mehta <salil.mehta@huawei.com> > --- > hw/arm/virt.c | 10 ++++++++++ > include/hw/arm/virt.h | 28 ++++++++++++++++++++++++++++ > target/arm/cpu.c | 4 ++++ > target/arm/cpu.h | 4 ++++ > 4 files changed, 46 insertions(+) > Those 4 properties are introduced to determine the vCPU's slot, which is the index to MachineState::possible_cpus::cpus[]. From there, the CPU object or instance is referenced and then the CPU's state can be further determined. It sounds reasonable to use the CPU's topology to determine the index. However, I'm wondering if this can be simplified to use 'cpu-index' or 'index' for a couple of facts: (1) 'cpu-index' or 'index' is simplified. Users have to provide 4 parameters in order to determine its index in the extreme case, for example "device_add host-arm-cpu, id=cpu7,socket-id=1, cluster-id=1,core-id=1,thread-id=1". With 'cpu-index' or 'index', it can be simplified to 'index=7'. (2) The cold-booted and hotpluggable CPUs are determined by their index instead of their topology. For example, CPU0/1/2/3 are cold-booted CPUs while CPU4/5/6/7 are hotpluggable CPUs with command lines '-smp maxcpus=8,cpus=4'. So 'index' makes more sense to identify a vCPU's slot. 
> diff --git a/hw/arm/virt.c b/hw/arm/virt.c > index 3c93c0c0a6..11fc7fc318 100644 > --- a/hw/arm/virt.c > +++ b/hw/arm/virt.c > @@ -2215,6 +2215,14 @@ static void machvirt_init(MachineState *machine) > &error_fatal); > > aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL); > + object_property_set_int(cpuobj, "socket-id", > + virt_get_socket_id(machine, n), NULL); > + object_property_set_int(cpuobj, "cluster-id", > + virt_get_cluster_id(machine, n), NULL); > + object_property_set_int(cpuobj, "core-id", > + virt_get_core_id(machine, n), NULL); > + object_property_set_int(cpuobj, "thread-id", > + virt_get_thread_id(machine, n), NULL); > > if (!vms->secure) { > object_property_set_bool(cpuobj, "has_el3", false, NULL); > @@ -2708,6 +2716,7 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) > { > int n; > unsigned int max_cpus = ms->smp.max_cpus; > + unsigned int smp_threads = ms->smp.threads; > VirtMachineState *vms = VIRT_MACHINE(ms); > MachineClass *mc = MACHINE_GET_CLASS(vms); > > @@ -2721,6 +2730,7 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) > ms->possible_cpus->len = max_cpus; > for (n = 0; n < ms->possible_cpus->len; n++) { > ms->possible_cpus->cpus[n].type = ms->cpu_type; > + ms->possible_cpus->cpus[n].vcpus_count = smp_threads; > ms->possible_cpus->cpus[n].arch_id = > virt_cpu_mp_affinity(vms, n); > Why @vcpus_count is initialized to @smp_threads? it needs to be documented in the commit log. > diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h > index bb486d36b1..6f9a7bb60b 100644 > --- a/include/hw/arm/virt.h > +++ b/include/hw/arm/virt.h > @@ -209,4 +209,32 @@ static inline int virt_gicv3_redist_region_count(VirtMachineState *vms) > vms->highmem_redists) ? 
2 : 1; > } > > +static inline int virt_get_socket_id(const MachineState *ms, int cpu_index) > +{ > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > + > + return ms->possible_cpus->cpus[cpu_index].props.socket_id; > +} > + > +static inline int virt_get_cluster_id(const MachineState *ms, int cpu_index) > +{ > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > + > + return ms->possible_cpus->cpus[cpu_index].props.cluster_id; > +} > + > +static inline int virt_get_core_id(const MachineState *ms, int cpu_index) > +{ > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > + > + return ms->possible_cpus->cpus[cpu_index].props.core_id; > +} > + > +static inline int virt_get_thread_id(const MachineState *ms, int cpu_index) > +{ > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > + > + return ms->possible_cpus->cpus[cpu_index].props.thread_id; > +} > + > #endif /* QEMU_ARM_VIRT_H */ > diff --git a/target/arm/cpu.c b/target/arm/cpu.c > index 77f8c9c748..abc4ed0842 100644 > --- a/target/arm/cpu.c > +++ b/target/arm/cpu.c > @@ -2582,6 +2582,10 @@ static Property arm_cpu_properties[] = { > DEFINE_PROP_UINT64("mp-affinity", ARMCPU, > mp_affinity, ARM64_AFFINITY_INVALID), > DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID), > + DEFINE_PROP_INT32("socket-id", ARMCPU, socket_id, 0), > + DEFINE_PROP_INT32("cluster-id", ARMCPU, cluster_id, 0), > + DEFINE_PROP_INT32("core-id", ARMCPU, core_id, 0), > + DEFINE_PROP_INT32("thread-id", ARMCPU, thread_id, 0), > DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1), > /* True to default to the backward-compat old CNTFRQ rather than 1Ghz */ > DEFINE_PROP_BOOL("backcompat-cntfrq", ARMCPU, backcompat_cntfrq, false), > diff --git a/target/arm/cpu.h b/target/arm/cpu.h > index c17264c239..208c719db3 100644 > --- a/target/arm/cpu.h > +++ b/target/arm/cpu.h > @@ -1076,6 +1076,10 @@ struct ArchCPU { > QLIST_HEAD(, ARMELChangeHook) el_change_hooks; > > int32_t node_id; 
/* NUMA node this CPU belongs to */ > + int32_t socket_id; > + int32_t cluster_id; > + int32_t core_id; > + int32_t thread_id; > > /* Used to synchronize KVM and QEMU in-kernel device levels */ > uint8_t device_irq_level; Thanks, Gavin
Hi Gavin, Sorry, I was away for almost entire last week. Joined back today. Thanks for taking out time to review. > From: Gavin Shan <gshan@redhat.com> > Sent: Monday, August 12, 2024 5:36 AM > To: Salil Mehta <salil.mehta@huawei.com>; qemu-devel@nongnu.org; > qemu-arm@nongnu.org; mst@redhat.com > > On 6/14/24 9:36 AM, Salil Mehta wrote: > > This shall be used to store user specified > > topology{socket,cluster,core,thread} > > and shall be converted to a unique 'vcpu-id' which is used as > > slot-index during hot(un)plug of vCPU. > > > > Co-developed-by: Keqian Zhu <zhukeqian1@huawei.com> > > Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com> > > Signed-off-by: Salil Mehta <salil.mehta@huawei.com> > > --- > > hw/arm/virt.c | 10 ++++++++++ > > include/hw/arm/virt.h | 28 ++++++++++++++++++++++++++++ > > target/arm/cpu.c | 4 ++++ > > target/arm/cpu.h | 4 ++++ > > 4 files changed, 46 insertions(+) > > > > Those 4 properties are introduced to determine the vCPU's slot, which is the > index to MachineState::possible_cpus::cpus[]. Correct. From there, the CPU object > or instance is referenced and then the CPU's state can be further > determined. It sounds reasonable to use the CPU's topology to determine > the index. However, I'm wandering if this can be simplified to use 'cpu- > index' or 'index' Are you suggesting to use CPU index while specifying vCPUs through command line and I'm not even sure how it will simplify CPU naming? CPU index is internal index to QOM. The closest thing which you can have is the 'slot-id' and later can have mapping to the CPU index internally but I'm not sure how much useful it is to introduce this slot abstraction. I did raise this in the original RFC I posted in 2020. for a couple of facts: (1) 'cpu-index' > or 'index' is simplified. Users have to provide 4 parameters in order to > determine its index in the extreme case, for example "device_add host- > arm-cpu, id=cpu7,socket-id=1, cluster-id=1,core-id=1,thread-id=1". 
With > 'cpu-index' or 'index', it can be simplified to 'index=7'. (2) The cold-booted > and hotpluggable CPUs are determined by their index instead of their > topology. For example, CPU0/1/2/3 are cold-booted CPUs while CPU4/5/6/7 > are hotpluggable CPUs with command lines '-smp maxcpus=8,cpus=4'. So > 'index' makes more sense to identify a vCPU's slot. I'm not sure if anybody wants to use it this way. People want to specify topology i.e. where the vCPU fits. Internally it's up to QOM to translate that topology to some index. > > > diff --git a/hw/arm/virt.c b/hw/arm/virt.c index > > 3c93c0c0a6..11fc7fc318 100644 > > --- a/hw/arm/virt.c > > +++ b/hw/arm/virt.c > > @@ -2215,6 +2215,14 @@ static void machvirt_init(MachineState > *machine) > > &error_fatal); > > > > aarch64 &= object_property_get_bool(cpuobj, "aarch64", > > NULL); > > + object_property_set_int(cpuobj, "socket-id", > > + virt_get_socket_id(machine, n), NULL); > > + object_property_set_int(cpuobj, "cluster-id", > > + virt_get_cluster_id(machine, n), NULL); > > + object_property_set_int(cpuobj, "core-id", > > + virt_get_core_id(machine, n), NULL); > > + object_property_set_int(cpuobj, "thread-id", > > + virt_get_thread_id(machine, n), > > + NULL); > > > > if (!vms->secure) { > > object_property_set_bool(cpuobj, "has_el3", false, > > NULL); @@ -2708,6 +2716,7 @@ static const CPUArchIdList > *virt_possible_cpu_arch_ids(MachineState *ms) > > { > > int n; > > unsigned int max_cpus = ms->smp.max_cpus; > > + unsigned int smp_threads = ms->smp.threads; > > VirtMachineState *vms = VIRT_MACHINE(ms); > > MachineClass *mc = MACHINE_GET_CLASS(vms); > > > > @@ -2721,6 +2730,7 @@ static const CPUArchIdList > *virt_possible_cpu_arch_ids(MachineState *ms) > > ms->possible_cpus->len = max_cpus; > > for (n = 0; n < ms->possible_cpus->len; n++) { > > ms->possible_cpus->cpus[n].type = ms->cpu_type; > > + ms->possible_cpus->cpus[n].vcpus_count = smp_threads; > > ms->possible_cpus->cpus[n].arch_id = > > 
virt_cpu_mp_affinity(vms, n); > > > > Why @vcpus_count is initialized to @smp_threads? it needs to be > documented in the commit log. Because every thread internally amounts to a vCPU in QOM and which is in 1:1 relationship with KVM vCPU. AFAIK, QOM does not strictly follow any architecture. Once you start to get into details of threads there are many aspects of shared resources one will have to consider and these can vary across different implementations of architecture. It is a bigger problem than you think, which I've touched at very nascent stages while doing POC of vCPU hotplug but tried to avoid till now. But I would like to hear other community members' views on this. Hi Igor/Peter, What is your take on this? Thanks Salil. > > diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index > > bb486d36b1..6f9a7bb60b 100644 > > --- a/include/hw/arm/virt.h > > +++ b/include/hw/arm/virt.h > > @@ -209,4 +209,32 @@ static inline int > virt_gicv3_redist_region_count(VirtMachineState *vms) > > vms->highmem_redists) ? 
2 : 1; > > } > > > > +static inline int virt_get_socket_id(const MachineState *ms, int > > +cpu_index) { > > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > > + > > + return ms->possible_cpus->cpus[cpu_index].props.socket_id; > > +} > > + > > +static inline int virt_get_cluster_id(const MachineState *ms, int > > +cpu_index) { > > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > > + > > + return ms->possible_cpus->cpus[cpu_index].props.cluster_id; > > +} > > + > > +static inline int virt_get_core_id(const MachineState *ms, int > > +cpu_index) { > > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > > + > > + return ms->possible_cpus->cpus[cpu_index].props.core_id; > > +} > > + > > +static inline int virt_get_thread_id(const MachineState *ms, int > > +cpu_index) { > > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > > + > > + return ms->possible_cpus->cpus[cpu_index].props.thread_id; > > +} > > + > > #endif /* QEMU_ARM_VIRT_H */ > > diff --git a/target/arm/cpu.c b/target/arm/cpu.c index > > 77f8c9c748..abc4ed0842 100644 > > --- a/target/arm/cpu.c > > +++ b/target/arm/cpu.c > > @@ -2582,6 +2582,10 @@ static Property arm_cpu_properties[] = { > > DEFINE_PROP_UINT64("mp-affinity", ARMCPU, > > mp_affinity, ARM64_AFFINITY_INVALID), > > DEFINE_PROP_INT32("node-id", ARMCPU, node_id, > > CPU_UNSET_NUMA_NODE_ID), > > + DEFINE_PROP_INT32("socket-id", ARMCPU, socket_id, 0), > > + DEFINE_PROP_INT32("cluster-id", ARMCPU, cluster_id, 0), > > + DEFINE_PROP_INT32("core-id", ARMCPU, core_id, 0), > > + DEFINE_PROP_INT32("thread-id", ARMCPU, thread_id, 0), > > DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1), > > /* True to default to the backward-compat old CNTFRQ rather than > 1Ghz */ > > DEFINE_PROP_BOOL("backcompat-cntfrq", ARMCPU, > backcompat_cntfrq, > > false), diff --git a/target/arm/cpu.h b/target/arm/cpu.h index > > c17264c239..208c719db3 100644 > > --- a/target/arm/cpu.h > > +++ b/target/arm/cpu.h > > 
@@ -1076,6 +1076,10 @@ struct ArchCPU { > > QLIST_HEAD(, ARMELChangeHook) el_change_hooks; > > > > int32_t node_id; /* NUMA node this CPU belongs to */ > > + int32_t socket_id; > > + int32_t cluster_id; > > + int32_t core_id; > > + int32_t thread_id; > > > > /* Used to synchronize KVM and QEMU in-kernel device levels */ > > uint8_t device_irq_level; > > Thanks, > Gavin >
Hi Salil, On Mon, Aug 19, 2024 at 11:53:52AM +0000, Salil Mehta wrote: > Date: Mon, 19 Aug 2024 11:53:52 +0000 > From: Salil Mehta <salil.mehta@huawei.com> > Subject: RE: [PATCH RFC V3 01/29] arm/virt,target/arm: Add new ARMCPU > {socket,cluster,core,thread}-id property [snip] > > > NULL); @@ -2708,6 +2716,7 @@ static const CPUArchIdList > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > { > > > int n; > > > unsigned int max_cpus = ms->smp.max_cpus; > > > + unsigned int smp_threads = ms->smp.threads; > > > VirtMachineState *vms = VIRT_MACHINE(ms); > > > MachineClass *mc = MACHINE_GET_CLASS(vms); > > > > > > @@ -2721,6 +2730,7 @@ static const CPUArchIdList > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > ms->possible_cpus->len = max_cpus; > > > for (n = 0; n < ms->possible_cpus->len; n++) { > > > ms->possible_cpus->cpus[n].type = ms->cpu_type; > > > + ms->possible_cpus->cpus[n].vcpus_count = smp_threads; > > > ms->possible_cpus->cpus[n].arch_id = > > > virt_cpu_mp_affinity(vms, n); > > > > > > > Why @vcpus_count is initialized to @smp_threads? it needs to be > > documented in the commit log. > > > Because every thread internally amounts to a vCPU in QOM and which > is in 1:1 relationship with KVM vCPU. AFAIK, QOM does not strictly follows > any architecture. Once you start to get into details of threads there > are many aspects of shared resources one will have to consider and > these can vary across different implementations of architecture. For SPAPR CPU, the granularity of >possible_cpus->cpus[] is "core", and for x86, it's "thread" granularity. And smp.threads means how many threads in one core, so for x86, the vcpus_count of a "thread" is 1, and for spapr, the vcpus_count of a "core" equals to smp.threads. IIUC, your granularity is still "thread", so that this field should be 1. -Zhao > It is a bigger problem than you think, which I've touched at very nascent > stages while doing POC of vCPU hotplug but tried to avoid till now. 
> > > But I would like to hear other community members views on this. > > Hi Igor/Peter, > > What is your take on this? > > Thanks > Salil.
Hi Zhao, > From: zhao1.liu@intel.com <zhao1.liu@intel.com> > Sent: Wednesday, September 4, 2024 3:43 PM > To: Salil Mehta <salil.mehta@huawei.com> > > Hi Salil, > > On Mon, Aug 19, 2024 at 11:53:52AM +0000, Salil Mehta wrote: > > Date: Mon, 19 Aug 2024 11:53:52 +0000 > > From: Salil Mehta <salil.mehta@huawei.com> > > Subject: RE: [PATCH RFC V3 01/29] arm/virt,target/arm: Add new ARMCPU > > {socket,cluster,core,thread}-id property > > [snip] > > > > > NULL); @@ -2708,6 +2716,7 @@ static const CPUArchIdList > > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > > { > > > > int n; > > > > unsigned int max_cpus = ms->smp.max_cpus; > > > > + unsigned int smp_threads = ms->smp.threads; > > > > VirtMachineState *vms = VIRT_MACHINE(ms); > > > > MachineClass *mc = MACHINE_GET_CLASS(vms); > > > > > > > > @@ -2721,6 +2730,7 @@ static const CPUArchIdList > > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > > ms->possible_cpus->len = max_cpus; > > > > for (n = 0; n < ms->possible_cpus->len; n++) { > > > > ms->possible_cpus->cpus[n].type = ms->cpu_type; > > > > + ms->possible_cpus->cpus[n].vcpus_count = smp_threads; > > > > ms->possible_cpus->cpus[n].arch_id = > > > > virt_cpu_mp_affinity(vms, n); > > > > > > > > > > Why @vcpus_count is initialized to @smp_threads? it needs to be > > > documented in the commit log. > > > > > > Because every thread internally amounts to a vCPU in QOM and which is > > in 1:1 relationship with KVM vCPU. AFAIK, QOM does not strictly > > follows any architecture. Once you start to get into details of > > threads there are many aspects of shared resources one will have to > > consider and these can vary across different implementations of > architecture. > > For SPAPR CPU, the granularity of >possible_cpus->cpus[] is "core", and for > x86, it's "thread" granularity. We have threads per-core at microarchitecture level in ARM as well. But each thread appears like a vCPU to OS and AFAICS there are no special attributes attached to it. 
SMT can be enabled/disabled at firmware and should get reflected in the configuration accordingly i.e. value of *threads-per-core* changes between 1 and 'N'. This means 'vcpus_count' has to reflect the correct configuration. But I think threads lack proper representation in Qemu QOM. In Qemu, each vCPU reflects an execution context (which gets uniquely mapped to KVM vCPU). AFAICS, we only have *CPUState* (Struct ArchCPU) as a placeholder for this execution context and there is no *ThreadState* (derived out of Struct CPUState). Hence, we've to map all the threads as QOM vCPUs. This means the array of present or possible CPUs represented by 'struct CPUArchIdList' contains all execution contexts which actually might be vCPU or a thread. Hence, usage of *vcpus_count* seems quite superficial to me frankly. Also, AFAICS, KVM does not have the concept of the threads and only has KVM vCPUs, but you are still allowed to specify the topology with sockets, dies, clusters, cores, threads in most architectures. > > And smp.threads means how many threads in one core, so for x86, the > vcpus_count of a "thread" is 1, and for spapr, the vcpus_count of a "core" > equals to smp.threads. Sure, but does KVM specify this? And how do these threads map to the QOM vCPU objects or execution context? AFAICS there is nothing but 'CPUState' which will be made part of the possible vCPU list 'struct CPUArchIdList'. > > IIUC, your granularity is still "thread", so that this filed should be 1. Well, again we need more discussion on this. I've stated my concerns against doing this. User should be allowed to create virtual topology which will include 'threads' as one of the parameters. > > -Zhao > > > It is a bigger problem than you think, which I've touched at very > > nascent stages while doing POC of vCPU hotplug but tried to avoid till now. 
> > > > Thanks > > Salil.
On Wed, Sep 04, 2024 at 05:37:21PM +0000, Salil Mehta wrote: > Date: Wed, 4 Sep 2024 17:37:21 +0000 > From: Salil Mehta <salil.mehta@huawei.com> > Subject: RE: [PATCH RFC V3 01/29] arm/virt,target/arm: Add new ARMCPU > {socket,cluster,core,thread}-id property > > Hi Zhao, > > > From: zhao1.liu@intel.com <zhao1.liu@intel.com> > > Sent: Wednesday, September 4, 2024 3:43 PM > > To: Salil Mehta <salil.mehta@huawei.com> > > > > Hi Salil, > > > > On Mon, Aug 19, 2024 at 11:53:52AM +0000, Salil Mehta wrote: > > > Date: Mon, 19 Aug 2024 11:53:52 +0000 > > > From: Salil Mehta <salil.mehta@huawei.com> > > > Subject: RE: [PATCH RFC V3 01/29] arm/virt,target/arm: Add new ARMCPU > > > {socket,cluster,core,thread}-id property > > > > [snip] > > > > > > > NULL); @@ -2708,6 +2716,7 @@ static const CPUArchIdList > > > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > > > { > > > > > int n; > > > > > unsigned int max_cpus = ms->smp.max_cpus; > > > > > + unsigned int smp_threads = ms->smp.threads; > > > > > VirtMachineState *vms = VIRT_MACHINE(ms); > > > > > MachineClass *mc = MACHINE_GET_CLASS(vms); > > > > > > > > > > @@ -2721,6 +2730,7 @@ static const CPUArchIdList > > > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > > > ms->possible_cpus->len = max_cpus; > > > > > for (n = 0; n < ms->possible_cpus->len; n++) { > > > > > ms->possible_cpus->cpus[n].type = ms->cpu_type; > > > > > + ms->possible_cpus->cpus[n].vcpus_count = smp_threads; > > > > > ms->possible_cpus->cpus[n].arch_id = > > > > > virt_cpu_mp_affinity(vms, n); > > > > > > > > > > > > > Why @vcpus_count is initialized to @smp_threads? it needs to be > > > > documented in the commit log. > > > > > > > > > Because every thread internally amounts to a vCPU in QOM and which is > > > in 1:1 relationship with KVM vCPU. AFAIK, QOM does not strictly > > > follows any architecture. 
Once you start to get into details of > > > threads there are many aspects of shared resources one will have to > > > consider and these can vary across different implementations of > > architecture. > > > > For SPAPR CPU, the granularity of >possible_cpus->cpus[] is "core", and for > > x86, it's "thread" granularity. > > > We have threads per-core at microarchitecture level in ARM as well. But each > thread appears like a vCPU to OS and AFAICS there are no special attributes > attached to it. SMT can be enabled/disabled at firmware and should get > reflected in the configuration accordingly i.e. value of *threads-per-core* > changes between 1 and 'N'. This means 'vcpus_count' has to reflect the > correct configuration. But I think threads lack proper representation > in Qemu QOM. In topology related part, SMT (of x86) usually represents the logical processor level. And thread has the same meaning. To change these meanings is also possible, but I think it should be based on the actual use case. we can consider the complexity of the implementation when there is a need. > In Qemu, each vCPU reflects an execution context (which gets uniquely mapped > to KVM vCPU). AFAICS, we only have *CPUState* (Struct ArchCPU) as a placeholder > for this execution context and there is no *ThreadState* (derived out of > Struct CPUState). Hence, we've to map all the threads as QOM vCPUs. This means > the array of present or possible CPUs represented by 'struct CPUArchIdList' contains > all execution contexts which actually might be vCPU or a thread. Hence, usage of > *vcpus_count* seems quite superficial to me frankly. > > Also, AFAICS, KVM does not have the concept of the threads and only has > KVM vCPUs, but you are still allowed to specify the topology with sockets, dies, > clusters, cores, threads in most architectures. There are some uses for topology, such as it affects scheduling behavior, and it affects feature emulation, etc. 
> > And smp.threads means how many threads in one core, so for x86, the > > vcpus_count of a "thread" is 1, and for spapr, the vcpus_count of a "core" > > equals to smp.threads. > > > Sure, but does the KVM specifies this? At least as you said, KVM (for x86) doesn't consider higher-level topologies at the moment, but that's not to say that it won't in the future, as certain registers do have topology dependencies. > and how does these threads map to the QOM vCPU objects or execution context? Each CPU object will create a (software) thread, you can refer the function "kvm_start_vcpu_thread(CPUState *cpu)", which will be called when CPU object realizes. > AFAICS there is nothing but 'CPUState' > which will be made part of the possible vCPU list 'struct CPUArchIdList'. As I said, an example is spapr ("spapr_possible_cpu_arch_ids()"), which maps possible_cpu to core object. However, this is a very specific example, and like Igor's slides said, I understand it's an architectural requirement. > > > > IIUC, your granularity is still "thread", so that this filed should be 1. > > > Well, again we need more discussion on this. I've stated my concerns against > doing this. User should be allowed to create virtual topology which will > include 'threads' as one of the parameter. > I don't seem to understand...There is a “threads” parameter in -smp, does this not satisfy your use case? Regards, Zhao
HI Zhao, > From: Zhao Liu <zhao1.liu@intel.com> > Sent: Monday, September 9, 2024 4:28 PM > To: Salil Mehta <salil.mehta@huawei.com> > > On Wed, Sep 04, 2024 at 05:37:21PM +0000, Salil Mehta wrote: > > Date: Wed, 4 Sep 2024 17:37:21 +0000 > > From: Salil Mehta <salil.mehta@huawei.com> > > Subject: RE: [PATCH RFC V3 01/29] arm/virt,target/arm: Add new ARMCPU > > {socket,cluster,core,thread}-id property > > > > Hi Zhao, > > > > > From: zhao1.liu@intel.com <zhao1.liu@intel.com> > > > Sent: Wednesday, September 4, 2024 3:43 PM > > > To: Salil Mehta <salil.mehta@huawei.com> > > > > > > Hi Salil, > > > > > > On Mon, Aug 19, 2024 at 11:53:52AM +0000, Salil Mehta wrote: > > > > Date: Mon, 19 Aug 2024 11:53:52 +0000 > From: Salil Mehta > > > <salil.mehta@huawei.com> > Subject: RE: [PATCH RFC V3 01/29] > > > arm/virt,target/arm: Add new ARMCPU > > > > {socket,cluster,core,thread}-id property > > > > > > [snip] > > > > > > > > > NULL); @@ -2708,6 +2716,7 @@ static const CPUArchIdList > > > > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > > > > { > > > > > > int n; > > > > > > unsigned int max_cpus = ms->smp.max_cpus; > > > > > > + unsigned int smp_threads = ms->smp.threads; > > > > > > VirtMachineState *vms = VIRT_MACHINE(ms); > > > > > > MachineClass *mc = MACHINE_GET_CLASS(vms); > > > > > > > > > > > > @@ -2721,6 +2730,7 @@ static const CPUArchIdList > > > > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > > > > ms->possible_cpus->len = max_cpus; > > > > > > for (n = 0; n < ms->possible_cpus->len; n++) { > > > > > > ms->possible_cpus->cpus[n].type = ms->cpu_type; > > > > > > + ms->possible_cpus->cpus[n].vcpus_count = smp_threads; > > > > > > ms->possible_cpus->cpus[n].arch_id = > > > > > > virt_cpu_mp_affinity(vms, n); > > > > > > > > > > > > > > > > Why @vcpus_count is initialized to @smp_threads? it needs to > > > be > > documented in the commit log. 
> > > > > > > > > > > > Because every thread internally amounts to a vCPU in QOM and > > > which is > in 1:1 relationship with KVM vCPU. AFAIK, QOM does not > > > strictly > follows any architecture. Once you start to get into > > > details of > threads there are many aspects of shared resources one > > > will have to > consider and these can vary across different > > > implementations of architecture. > > > > > > For SPAPR CPU, the granularity of >possible_cpus->cpus[] is "core", > > > and for x86, it's "thread" granularity. > > > > > > We have threads per-core at microarchitecture level in ARM as well. > > But each thread appears like a vCPU to OS and AFAICS there are no > > special attributes attached to it. SMT can be enabled/disabled at > > firmware and should get reflected in the configuration accordingly > > i.e. value of *threads-per-core* changes between 1 and 'N'. This > > means 'vcpus_count' has to reflect the correct configuration. But I > > think threads lack proper representation in Qemu QOM. > > In topology related part, SMT (of x86) usually represents the logical > processor level. And thread has the same meaning. Agreed. It is same in ARM as well. The difference could be in how hardware threads are implemented at microarchitecture level. Nevertheless, we do have such virtual configurations, and the meaning of *threads* as-in QOM topology (socket,cluster,core,thread) is virtualized similar to the hardware threads in host. And One should be able to configure threads support in the virtual environment, regardless whether or not underlying hardware supports threads. That's my take. Other aspect is how we then expose these threads to the guest. The guest kernel (just like host kernel) should gather topology information using ACPI PPTT Table (This is ARM specific?). Later is populated by the Qemu (just like by firmware for the host kernel) by making use of the virtual topology. 
ARM guest kernel, in absence of PPTT support can detect presence of hardware threads by reading MT Bit within the MPIDR_EL1 register. Every property in 'ms->possible_cpus->cpus[n].props should be exactly same as finalized and part of the MachineState::CpuTopology. Hence, number of threads-per-core 'vcpus_count' should not be treated differently. But there is a catch! (I explained that earlier) To change these > meanings is also possible, but I think it should be based on the actual use > case. we can consider the complexity of the implementation when there is a > need. Agreed. There is no ambiguity in the meaning of hardware threads or the virtualized MachineState::CpuTopology. Properties of all the possible vCPUs should exactly be same as part of MachineState. This includes the number of threads-per-core. You mentioned 'vcpus_count' should be 1 but does that mean user can never specify threads > 1 in virtual configuration for x86? > > > In Qemu, each vCPU reflects an execution context (which gets uniquely > > mapped to KVM vCPU). AFAICS, we only have *CPUState* (Struct > ArchCPU) > > as a placeholder for this execution context and there is no > > *ThreadState* (derived out of Struct CPUState). Hence, we've to map > > all the threads as QOM vCPUs. This means the array of present or > > possible CPUs represented by 'struct CPUArchIdList' contains all > > execution contexts which actually might be vCPU or a thread. Hence, > > usage of > > *vcpus_count* seems quite superficial to me frankly. > > > > Also, AFAICS, KVM does not have the concept of the threads and only > > has KVM vCPUs, but you are still allowed to specify the topology with > > sockets, dies, clusters, cores, threads in most architectures. > > There are some uses for topology, such as it affects scheduling behavior, > and it affects feature emulation, etc. True. And we should be flexible at the VMM level. 
We should let Admin of the VMM control how he creates the virtual topology which best fits on the underlying hardware features of the host. This includes, NUMA, sub-NUMA, cores, hardware, threads, cache topology etc. > > > > And smp.threads means how many threads in one core, so for x86, the > > > vcpus_count of a "thread" is 1, and for spapr, the vcpus_count of a > "core" equals to smp.threads. > > > > > > Sure, but does the KVM specifies this? > > At least as you said, KVM (for x86) doesn't consider higher-level topologies > at the moment, but that's not to say that it won't in the future, as certain > registers do have topology dependencies. sure. so you mean for x86 virtual topology, smp.threads = 1 always? > > > and how does these threads map to the QOM vCPU objects or execution > context? > > Each CPU object will create a (software) thread, you can refer the function > "kvm_start_vcpu_thread(CPUState *cpu)", which will be called when CPU > object realizes. Yes, sure, and each such QOM vCPU thread and 'struct CPUState' is mapped to the lowest granularity of execution specified within the QOM virtual topology. It could be a 'thread' or a 'core'. And all these will run as a KVM vCPU scheduled on some hardware core and maybe hardware thread (if enabled). So there is no difference across architectures regarding this part. I was trying to point that in QOM, even the threads will have their own 'struct CPUState' and each one will be part of the "CPUArchIdList *possible_cpus" maintained at the MachineState. At this level we loose the relationship information of the cores and their corresponding threads (given by 'vcpus_count'). > > AFAICS there is nothing but 'CPUState' > > which will be made part of the possible vCPU list 'struct CPUArchIdList'. > > As I said, an example is spapr ("spapr_possible_cpu_arch_ids()"), which > maps possible_cpu to core object. 
However, this is a very specific example, > and like Igor's slides said, I understand it's an architectural requirement. I'm sure there must have been some. I'm trying to understand it. Can you share the slides? > > > > > > > IIUC, your granularity is still "thread", so that this filed should be 1. > > > > > > Well, again we need more discussion on this. I've stated my concerns > > against doing this. User should be allowed to create virtual topology > > which will include 'threads' as one of the parameter. > > > > I don't seem to understand...There is a "threads" parameter in -smp, does > this not satisfy your use case? It certainly does. But this is what should get reflected in the 'vcpus_count' as well? Best regards Salil > > Regards, > Zhao >
On Tue, 10 Sep 2024 12:01:05 +0100 Salil Mehta <salil.mehta@huawei.com> wrote: > HI Zhao, A few trivial comments inline. > > > From: Zhao Liu <zhao1.liu@intel.com> > > Sent: Monday, September 9, 2024 4:28 PM > > To: Salil Mehta <salil.mehta@huawei.com> > > > > On Wed, Sep 04, 2024 at 05:37:21PM +0000, Salil Mehta wrote: > > > Date: Wed, 4 Sep 2024 17:37:21 +0000 > > > From: Salil Mehta <salil.mehta@huawei.com> > > > Subject: RE: [PATCH RFC V3 01/29] arm/virt,target/arm: Add new ARMCPU > > > {socket,cluster,core,thread}-id property > > > > > > Hi Zhao, > > > > > > > From: zhao1.liu@intel.com <zhao1.liu@intel.com> > > > > Sent: Wednesday, September 4, 2024 3:43 PM > > > > To: Salil Mehta <salil.mehta@huawei.com> > > > > > > > > Hi Salil, > > > > > > > > On Mon, Aug 19, 2024 at 11:53:52AM +0000, Salil Mehta wrote: > > > > > Date: Mon, 19 Aug 2024 11:53:52 +0000 > From: Salil Mehta > > > > <salil.mehta@huawei.com> > Subject: RE: [PATCH RFC V3 01/29] > > > > arm/virt,target/arm: Add new ARMCPU > > > > > {socket,cluster,core,thread}-id property > > > > > > > > [snip] > > > > > > > > > > > NULL); @@ -2708,6 +2716,7 @@ static const CPUArchIdList > > > > > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > > > > > { > > > > > > > int n; > > > > > > > unsigned int max_cpus = ms->smp.max_cpus; > > > > > > > + unsigned int smp_threads = ms->smp.threads; > > > > > > > VirtMachineState *vms = VIRT_MACHINE(ms); > > > > > > > MachineClass *mc = MACHINE_GET_CLASS(vms); > > > > > > > > > > > > > > @@ -2721,6 +2730,7 @@ static const CPUArchIdList > > > > > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > > > > > ms->possible_cpus->len = max_cpus; > > > > > > > for (n = 0; n < ms->possible_cpus->len; n++) { > > > > > > > ms->possible_cpus->cpus[n].type = ms->cpu_type; > > > > > > > + ms->possible_cpus->cpus[n].vcpus_count = smp_threads; > > > > > > > ms->possible_cpus->cpus[n].arch_id = > > > > > > > virt_cpu_mp_affinity(vms, n); > > > > > > > > > > > > > > > > > > > Why 
@vcpus_count is initialized to @smp_threads? it needs to > > > > be > > documented in the commit log. > > > > > > > > > > > > > > > Because every thread internally amounts to a vCPU in QOM and > > > > which is > in 1:1 relationship with KVM vCPU. AFAIK, QOM does not > > > > strictly > follows any architecture. Once you start to get into > > > > details of > threads there are many aspects of shared resources one > > > > will have to > consider and these can vary across different > > > > implementations of architecture. > > > > > > > > For SPAPR CPU, the granularity of >possible_cpus->cpus[] is "core", > > > > and for x86, it's "thread" granularity. > > > > > > > > > We have threads per-core at microarchitecture level in ARM as well. > > > But each thread appears like a vCPU to OS and AFAICS there are no > > > special attributes attached to it. SMT can be enabled/disabled at > > > firmware and should get reflected in the configuration accordingly > > > i.e. value of *threads-per-core* changes between 1 and 'N'. This > > > means 'vcpus_count' has to reflect the correct configuration. But I > > > think threads lack proper representation in Qemu QOM. > > > > In topology related part, SMT (of x86) usually represents the logical > > processor level. And thread has the same meaning. > > > Agreed. It is same in ARM as well. The difference could be in how hardware > threads are implemented at microarchitecture level. Nevertheless, we do > have such virtual configurations, and the meaning of *threads* as-in QOM > topology (socket,cluster,core,thread) is virtualized similar to the hardware > threads in host. And One should be able to configure threads support in the > virtual environment, regardless whether or not underlying hardware > supports threads. That's my take. > > Other aspect is how we then expose these threads to the guest. The guest > kernel (just like host kernel) should gather topology information using > ACPI PPTT Table (This is ARM specific?). 
Not ARM specific, but not used by x86 in practice (I believe some risc-v boards use it). https://lore.kernel.org/linux-riscv/20240617131425.7526-3-cuiyunhui@bytedance.com/ > Later is populated by the Qemu > (just like by firmware for the host kernel) by making use of the virtual > topology. ARM guest kernel, in absence of PPTT support can detect > presence of hardware threads by reading MT Bit within the MPIDR_EL1 > register. Sadly no it can't. Lots of CPUs cores that are single thread set that bit anyway (so it's garbage and PPTT / DT is the only source of truth) https://lore.kernel.org/all/CAFfO_h7vUEhqV15epf+_zVrbDhc3JrejkkOVsHzHgCXNk+nDdg@mail.gmail.com/T/ Jonathan
On Wed, Sep 11, 2024 at 11:35 AM Jonathan Cameron < Jonathan.Cameron@huawei.com> wrote: > On Tue, 10 Sep 2024 12:01:05 +0100 > Salil Mehta <salil.mehta@huawei.com> wrote: > > > HI Zhao, > > A few trivial comments inline. > > > > > > From: Zhao Liu <zhao1.liu@intel.com> > > > Sent: Monday, September 9, 2024 4:28 PM > > > To: Salil Mehta <salil.mehta@huawei.com> > > > > > > On Wed, Sep 04, 2024 at 05:37:21PM +0000, Salil Mehta wrote: > > > > Date: Wed, 4 Sep 2024 17:37:21 +0000 > > > > From: Salil Mehta <salil.mehta@huawei.com> > > > > Subject: RE: [PATCH RFC V3 01/29] arm/virt,target/arm: Add new > ARMCPU > > > > {socket,cluster,core,thread}-id property > > > > > > > > Hi Zhao, > > > > > > > > > From: zhao1.liu@intel.com <zhao1.liu@intel.com> > > > > > Sent: Wednesday, September 4, 2024 3:43 PM > > > > > To: Salil Mehta <salil.mehta@huawei.com> > > > > > > > > > > Hi Salil, > > > > > > > > > > On Mon, Aug 19, 2024 at 11:53:52AM +0000, Salil Mehta wrote: > > > > > > Date: Mon, 19 Aug 2024 11:53:52 +0000 > From: Salil Mehta > > > > > <salil.mehta@huawei.com> > Subject: RE: [PATCH RFC V3 01/29] > > > > > arm/virt,target/arm: Add new ARMCPU > > > > > > {socket,cluster,core,thread}-id property > > > > > > > > > > [snip] > > > > > > > > > > > > > NULL); @@ -2708,6 +2716,7 @@ static const CPUArchIdList > > > > > > > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > > > > > > { > > > > > > > > int n; > > > > > > > > unsigned int max_cpus = ms->smp.max_cpus; > > > > > > > > + unsigned int smp_threads = ms->smp.threads; > > > > > > > > VirtMachineState *vms = VIRT_MACHINE(ms); > > > > > > > > MachineClass *mc = MACHINE_GET_CLASS(vms); > > > > > > > > > > > > > > > > @@ -2721,6 +2730,7 @@ static const CPUArchIdList > > > > > > > *virt_possible_cpu_arch_ids(MachineState *ms) > > > > > > > > ms->possible_cpus->len = max_cpus; > > > > > > > > for (n = 0; n < ms->possible_cpus->len; n++) { > > > > > > > > ms->possible_cpus->cpus[n].type = ms->cpu_type; > > > > > > > > + 
ms->possible_cpus->cpus[n].vcpus_count = > smp_threads; > > > > > > > > ms->possible_cpus->cpus[n].arch_id = > > > > > > > > virt_cpu_mp_affinity(vms, n); > > > > > > > > > > > > > > > > > > > > > > Why @vcpus_count is initialized to @smp_threads? it needs > to > > > > > be > > documented in the commit log. > > > > > > > > > > > > > > > > > > Because every thread internally amounts to a vCPU in QOM and > > > > > which is > in 1:1 relationship with KVM vCPU. AFAIK, QOM does not > > > > > strictly > follows any architecture. Once you start to get into > > > > > details of > threads there are many aspects of shared resources > one > > > > > will have to > consider and these can vary across different > > > > > implementations of architecture. > > > > > > > > > > For SPAPR CPU, the granularity of >possible_cpus->cpus[] is > "core", > > > > > and for x86, it's "thread" granularity. > > > > > > > > > > > > We have threads per-core at microarchitecture level in ARM as well. > > > > But each thread appears like a vCPU to OS and AFAICS there are no > > > > special attributes attached to it. SMT can be enabled/disabled at > > > > firmware and should get reflected in the configuration accordingly > > > > i.e. value of *threads-per-core* changes between 1 and 'N'. This > > > > means 'vcpus_count' has to reflect the correct configuration. But I > > > > think threads lack proper representation in Qemu QOM. > > > > > > In topology related part, SMT (of x86) usually represents the logical > > > processor level. And thread has the same meaning. > > > > > > Agreed. It is same in ARM as well. The difference could be in how > hardware > > threads are implemented at microarchitecture level. Nevertheless, we do > > have such virtual configurations, and the meaning of *threads* as-in QOM > > topology (socket,cluster,core,thread) is virtualized similar to the > hardware > > threads in host. 
And One should be able to configure threads support in > the > > virtual environment, regardless whether or not underlying hardware > > supports threads. That's my take. > > > > Other aspect is how we then expose these threads to the guest. The guest > > kernel (just like host kernel) should gather topology information using > > ACPI PPTT Table (This is ARM specific?). > > Not ARM specific, but not used by x86 in practice (I believe some risc-v > boards > use it). > > https://lore.kernel.org/linux-riscv/20240617131425.7526-3-cuiyunhui@bytedance.com/ > > > Later is populated by the Qemu > > (just like by firmware for the host kernel) by making use of the virtual > > topology. ARM guest kernel, in absence of PPTT support can detect > > presence of hardware threads by reading MT Bit within the MPIDR_EL1 > > register. > > Sadly no it can't. Lots of CPUs cores that are single thread set that > bit anyway (so it's garbage and PPTT / DT is the only source of truth) > > https://lore.kernel.org/all/CAFfO_h7vUEhqV15epf+_zVrbDhc3JrejkkOVsHzHgCXNk+nDdg@mail.gmail.com/T/ Yes, agreed, this last explanation was not completely correct. IICRC, Marc did point out in RFC V1 of June 2020 that value MT=0 is set by KVM to tell the guest kernel that vCPUs at the same affinity-1 fields are independent. Problem was with the interpretation of non-zero MT Bit and it was not consistent. The key thing is we should not rely on the value of the MT Bit in MPIDR to know if multithreading exists. So yes, to know the exact status of the multithreading on ARM systems parsing PPTT Table is the only way. I mentioned that because handling still exists in the kernel code but I think it exists for handling those other cases. Maybe a comment is required here (?): https://elixir.bootlin.com/linux/v6.11-rc7/source/arch/arm64/kernel/topology.c#L34 Thanks for pointing this out. Thanks Salil. > > > Jonathan > >
On Mon, 12 Aug 2024 14:35:56 +1000 Gavin Shan <gshan@redhat.com> wrote: > On 6/14/24 9:36 AM, Salil Mehta wrote: > > This shall be used to store user specified topology{socket,cluster,core,thread} > > and shall be converted to a unique 'vcpu-id' which is used as slot-index during > > hot(un)plug of vCPU. > > > > Co-developed-by: Keqian Zhu <zhukeqian1@huawei.com> > > Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com> > > Signed-off-by: Salil Mehta <salil.mehta@huawei.com> > > --- > > hw/arm/virt.c | 10 ++++++++++ > > include/hw/arm/virt.h | 28 ++++++++++++++++++++++++++++ > > target/arm/cpu.c | 4 ++++ > > target/arm/cpu.h | 4 ++++ > > 4 files changed, 46 insertions(+) > > > > Those 4 properties are introduced to determine the vCPU's slot, which is the index > to MachineState::possible_cpus::cpus[]. From there, the CPU object or instance is > referenced and then the CPU's state can be further determined. It sounds reasonable > to use the CPU's topology to determine the index. However, I'm wandering if this can > be simplified to use 'cpu-index' or 'index' for a couple of facts: (1) 'cpu-index' Please, don't. We've spent a bunch of time to get rid of cpu-index in user visible interface (well, old NUMA CLI is still there along with 'new' topology based one, but that's the last one). > or 'index' is simplified. Users have to provide 4 parameters in order to determine > its index in the extreme case, for example "device_add host-arm-cpu, id=cpu7,socket-id=1, > cluster-id=1,core-id=1,thread-id=1". With 'cpu-index' or 'index', it can be simplified > to 'index=7'. (2) The cold-booted and hotpluggable CPUs are determined by their index > instead of their topology. For example, CPU0/1/2/3 are cold-booted CPUs while CPU4/5/6/7 > are hotpluggable CPUs with command lines '-smp maxcpus=8,cpus=4'. So 'index' makes > more sense to identify a vCPU's slot. 
cpu-index have been used for hotplug with x86 machines as a starting point to implement hotplug as it was easy to hack and it has already existed in QEMU. But that didn't scale as was desired and had its own issues. Hence the current interface that majority agreed upon. I don't remember exact arguments anymore (they could be found qemu-devel if needed) Here is a link to the talk that tried to explain why topo based was introduced. http://events17.linuxfoundation.org/sites/events/files/slides/CPU%20Hot-plug%20support%20in%20QEMU.pdf > > diff --git a/hw/arm/virt.c b/hw/arm/virt.c > > index 3c93c0c0a6..11fc7fc318 100644 > > --- a/hw/arm/virt.c > > +++ b/hw/arm/virt.c > > @@ -2215,6 +2215,14 @@ static void machvirt_init(MachineState *machine) > > &error_fatal); > > > > aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL); > > + object_property_set_int(cpuobj, "socket-id", > > + virt_get_socket_id(machine, n), NULL); > > + object_property_set_int(cpuobj, "cluster-id", > > + virt_get_cluster_id(machine, n), NULL); > > + object_property_set_int(cpuobj, "core-id", > > + virt_get_core_id(machine, n), NULL); > > + object_property_set_int(cpuobj, "thread-id", > > + virt_get_thread_id(machine, n), NULL); > > > > if (!vms->secure) { > > object_property_set_bool(cpuobj, "has_el3", false, NULL); > > @@ -2708,6 +2716,7 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) > > { > > int n; > > unsigned int max_cpus = ms->smp.max_cpus; > > + unsigned int smp_threads = ms->smp.threads; > > VirtMachineState *vms = VIRT_MACHINE(ms); > > MachineClass *mc = MACHINE_GET_CLASS(vms); > > > > @@ -2721,6 +2730,7 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) > > ms->possible_cpus->len = max_cpus; > > for (n = 0; n < ms->possible_cpus->len; n++) { > > ms->possible_cpus->cpus[n].type = ms->cpu_type; > > + ms->possible_cpus->cpus[n].vcpus_count = smp_threads; > > ms->possible_cpus->cpus[n].arch_id = > > virt_cpu_mp_affinity(vms, n); > > 
> > Why @vcpus_count is initialized to @smp_threads? it needs to be documented in > the commit log. > > > diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h > > index bb486d36b1..6f9a7bb60b 100644 > > --- a/include/hw/arm/virt.h > > +++ b/include/hw/arm/virt.h > > @@ -209,4 +209,32 @@ static inline int virt_gicv3_redist_region_count(VirtMachineState *vms) > > vms->highmem_redists) ? 2 : 1; > > } > > > > +static inline int virt_get_socket_id(const MachineState *ms, int cpu_index) > > +{ > > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > > + > > + return ms->possible_cpus->cpus[cpu_index].props.socket_id; > > +} > > + > > +static inline int virt_get_cluster_id(const MachineState *ms, int cpu_index) > > +{ > > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > > + > > + return ms->possible_cpus->cpus[cpu_index].props.cluster_id; > > +} > > + > > +static inline int virt_get_core_id(const MachineState *ms, int cpu_index) > > +{ > > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > > + > > + return ms->possible_cpus->cpus[cpu_index].props.core_id; > > +} > > + > > +static inline int virt_get_thread_id(const MachineState *ms, int cpu_index) > > +{ > > + assert(cpu_index >= 0 && cpu_index < ms->possible_cpus->len); > > + > > + return ms->possible_cpus->cpus[cpu_index].props.thread_id; > > +} > > + > > #endif /* QEMU_ARM_VIRT_H */ > > diff --git a/target/arm/cpu.c b/target/arm/cpu.c > > index 77f8c9c748..abc4ed0842 100644 > > --- a/target/arm/cpu.c > > +++ b/target/arm/cpu.c > > @@ -2582,6 +2582,10 @@ static Property arm_cpu_properties[] = { > > DEFINE_PROP_UINT64("mp-affinity", ARMCPU, > > mp_affinity, ARM64_AFFINITY_INVALID), > > DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID), > > + DEFINE_PROP_INT32("socket-id", ARMCPU, socket_id, 0), > > + DEFINE_PROP_INT32("cluster-id", ARMCPU, cluster_id, 0), > > + DEFINE_PROP_INT32("core-id", ARMCPU, core_id, 0), > > + DEFINE_PROP_INT32("thread-id", 
ARMCPU, thread_id, 0), > > DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1), > > /* True to default to the backward-compat old CNTFRQ rather than 1Ghz */ > > DEFINE_PROP_BOOL("backcompat-cntfrq", ARMCPU, backcompat_cntfrq, false), > > diff --git a/target/arm/cpu.h b/target/arm/cpu.h > > index c17264c239..208c719db3 100644 > > --- a/target/arm/cpu.h > > +++ b/target/arm/cpu.h > > @@ -1076,6 +1076,10 @@ struct ArchCPU { > > QLIST_HEAD(, ARMELChangeHook) el_change_hooks; > > > > int32_t node_id; /* NUMA node this CPU belongs to */ > > + int32_t socket_id; > > + int32_t cluster_id; > > + int32_t core_id; > > + int32_t thread_id; > > > > /* Used to synchronize KVM and QEMU in-kernel device levels */ > > uint8_t device_irq_level; > > Thanks, > Gavin >
> From: Igor Mammedov <imammedo@redhat.com> > Sent: Monday, August 12, 2024 9:16 AM > To: Gavin Shan <gshan@redhat.com> > > On Mon, 12 Aug 2024 14:35:56 +1000 > Gavin Shan <gshan@redhat.com> wrote: > > > On 6/14/24 9:36 AM, Salil Mehta wrote: > > > This shall be used to store user specified > > > topology{socket,cluster,core,thread} > > > and shall be converted to a unique 'vcpu-id' which is used as > > > slot-index during hot(un)plug of vCPU. > > > > > > Co-developed-by: Keqian Zhu <zhukeqian1@huawei.com> > > > Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com> > > > Signed-off-by: Salil Mehta <salil.mehta@huawei.com> > > > --- > > > hw/arm/virt.c | 10 ++++++++++ > > > include/hw/arm/virt.h | 28 ++++++++++++++++++++++++++++ > > > target/arm/cpu.c | 4 ++++ > > > target/arm/cpu.h | 4 ++++ > > > 4 files changed, 46 insertions(+) > > > > > > > Those 4 properties are introduced to determine the vCPU's slot, which > > is the index to MachineState::possible_cpus::cpus[]. From there, the > > CPU object or instance is referenced and then the CPU's state can be > > further determined. It sounds reasonable to use the CPU's topology to > > determine the index. However, I'm wandering if this can be simplified to > use 'cpu-index' or 'index' for a couple of facts: (1) 'cpu-index' > > Please, don't. We've spent a bunch of time to get rid of cpu-index in user > visible interface (well, old NUMA CLI is still there along with 'new' topology > based one, but that's the last one). Agreed. We shouldn't expose CPU index to user. > > > or 'index' is simplified. Users have to provide 4 parameters in order > > to determine its index in the extreme case, for example "device_add > > host-arm-cpu, id=cpu7,socket-id=1, > > cluster-id=1,core-id=1,thread-id=1". With 'cpu-index' or 'index', it > > can be simplified to 'index=7'. (2) The cold-booted and hotpluggable > > CPUs are determined by their index instead of their topology. 
For > > example, CPU0/1/2/3 are cold-booted CPUs while CPU4/5/6/7 are > hotpluggable CPUs with command lines '-smp maxcpus=8,cpus=4'. So 'index' > makes more sense to identify a vCPU's slot. > cpu-index have been used for hotplug with x86 machines as a starting point > to implement hotplug as it was easy to hack and it has already existed in > QEMU. > > But that didn't scale as was desired and had its own issues. > Hence the current interface that majority agreed upon. > I don't remember exact arguments anymore (they could be found qemu- > devel if needed) Here is a link to the talk that tried to explain why topo > based was introduced. > > http://events17.linuxfoundation.org/sites/events/files/slides/CPU%20Hot- > plug%20support%20in%20QEMU.pdf I think you are referring to slide-19 of above presentation? Thanks Salil.
On 8/12/24 6:15 PM, Igor Mammedov wrote: > On Mon, 12 Aug 2024 14:35:56 +1000 > Gavin Shan <gshan@redhat.com> wrote: > >> On 6/14/24 9:36 AM, Salil Mehta wrote: >>> This shall be used to store user specified topology{socket,cluster,core,thread} >>> and shall be converted to a unique 'vcpu-id' which is used as slot-index during >>> hot(un)plug of vCPU. >>> >>> Co-developed-by: Keqian Zhu <zhukeqian1@huawei.com> >>> Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com> >>> Signed-off-by: Salil Mehta <salil.mehta@huawei.com> >>> --- >>> hw/arm/virt.c | 10 ++++++++++ >>> include/hw/arm/virt.h | 28 ++++++++++++++++++++++++++++ >>> target/arm/cpu.c | 4 ++++ >>> target/arm/cpu.h | 4 ++++ >>> 4 files changed, 46 insertions(+) >>> >> >> Those 4 properties are introduced to determine the vCPU's slot, which is the index >> to MachineState::possible_cpus::cpus[]. From there, the CPU object or instance is >> referenced and then the CPU's state can be further determined. It sounds reasonable >> to use the CPU's topology to determine the index. However, I'm wandering if this can >> be simplified to use 'cpu-index' or 'index' for a couple of facts: (1) 'cpu-index' > > Please, don't. We've spent a bunch of time to get rid of cpu-index in user > visible interface (well, old NUMA CLI is still there along with 'new' topology > based one, but that's the last one). > Ok, thanks for the hints. It's a question I had from the beginning. I didn't dig into the historic background. From the vCPU hotplug document (cpu-hotplug.rst), the CPU topology is used to identify hot-added vCPU on x86 and it's reasonable for ARM to follow this mechanism. >> or 'index' is simplified. Users have to provide 4 parameters in order to determine >> its index in the extreme case, for example "device_add host-arm-cpu, id=cpu7,socket-id=1, >> cluster-id=1,core-id=1,thread-id=1". With 'cpu-index' or 'index', it can be simplified >> to 'index=7'. 
(2) The cold-booted and hotpluggable CPUs are determined by their index >> instead of their topology. For example, CPU0/1/2/3 are cold-booted CPUs while CPU4/5/6/7 >> are hotpluggable CPUs with command lines '-smp maxcpus=8,cpus=4'. So 'index' makes >> more sense to identify a vCPU's slot. > cpu-index have been used for hotplug with x86 machines as a starting point > to implement hotplug as it was easy to hack and it has already existed in QEMU. > > But that didn't scale as was desired and had its own issues. > Hence the current interface that majority agreed upon. > I don't remember exact arguments anymore (they could be found qemu-devel if needed) > Here is a link to the talk that tried to explain why topo based was introduced. > http://events17.linuxfoundation.org/sites/events/files/slides/CPU%20Hot-plug%20support%20in%20QEMU.pdf > Right, I overlooked the migration case where the source and destination vCPU have to be strictly correlation. This strict correlation can become broken with 'index' or 'cpu-index', but it's ensured with the CPU topology as stated on page-19. Thanks, Gavin
© 2016 - 2024 Red Hat, Inc.