This patch updates the check rules on legacy -smp parsing from the user command line
and it's designed to obey the same restrictions as the socket/core/thread model.
Signed-off-by: Like Xu <like.xu@linux.intel.com>
---
hmp.c | 3 +++
hw/core/machine.c | 12 ++++++++++++
vl.c | 33 ++++++++++++++++++++-------------
3 files changed, 35 insertions(+), 13 deletions(-)
diff --git a/hmp.c b/hmp.c
index 80aa5ab..05ac133 100644
--- a/hmp.c
+++ b/hmp.c
@@ -3013,6 +3013,9 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict)
if (c->has_socket_id) {
monitor_printf(mon, " socket-id: \"%" PRIu64 "\"\n", c->socket_id);
}
+ if (c->has_die_id) {
+ monitor_printf(mon, " die-id: \"%" PRIu64 "\"\n", c->die_id);
+ }
if (c->has_core_id) {
monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id);
}
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 95dc7c3..05bc545 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -601,6 +601,11 @@ void machine_set_cpu_numa_node(MachineState *machine,
return;
}
+ if (props->has_die_id && !slot->props.has_die_id) {
+ error_setg(errp, "die-id is not supported");
+ return;
+ }
+
if (props->has_socket_id && !slot->props.has_socket_id) {
error_setg(errp, "socket-id is not supported");
return;
@@ -615,6 +620,10 @@ void machine_set_cpu_numa_node(MachineState *machine,
continue;
}
+ if (props->has_die_id && props->die_id != slot->props.die_id) {
+ continue;
+ }
+
if (props->has_socket_id && props->socket_id != slot->props.socket_id) {
continue;
}
@@ -849,6 +858,9 @@ static char *cpu_slot_to_string(const CPUArchId *cpu)
if (cpu->props.has_socket_id) {
g_string_append_printf(s, "socket-id: %"PRId64, cpu->props.socket_id);
}
+ if (cpu->props.has_die_id) {
+ g_string_append_printf(s, "die-id: %"PRId64, cpu->props.die_id);
+ }
if (cpu->props.has_core_id) {
if (s->len) {
g_string_append_printf(s, ", ");
diff --git a/vl.c b/vl.c
index 9b8ea3f..72be689 100644
--- a/vl.c
+++ b/vl.c
@@ -169,6 +169,7 @@ int win2k_install_hack = 0;
int singlestep = 0;
int smp_cpus;
unsigned int max_cpus;
+int smp_dies = 1;
int smp_cores = 1;
int smp_threads = 1;
int acpi_enabled = 1;
@@ -1208,6 +1209,9 @@ static QemuOptsList qemu_smp_opts = {
.name = "sockets",
.type = QEMU_OPT_NUMBER,
}, {
+ .name = "dies",
+ .type = QEMU_OPT_NUMBER,
+ }, {
.name = "cores",
.type = QEMU_OPT_NUMBER,
}, {
@@ -1226,32 +1230,34 @@ static void smp_parse(QemuOpts *opts)
if (opts) {
unsigned cpus = qemu_opt_get_number(opts, "cpus", 0);
unsigned sockets = qemu_opt_get_number(opts, "sockets", 0);
+ unsigned dies = qemu_opt_get_number(opts, "dies", 0);
unsigned cores = qemu_opt_get_number(opts, "cores", 0);
unsigned threads = qemu_opt_get_number(opts, "threads", 0);
/* compute missing values, prefer sockets over cores over threads */
+ dies = dies > 0 ? dies : 1;
if (cpus == 0 || sockets == 0) {
cores = cores > 0 ? cores : 1;
threads = threads > 0 ? threads : 1;
if (cpus == 0) {
sockets = sockets > 0 ? sockets : 1;
- cpus = cores * threads * sockets;
+ cpus = cores * threads * dies * sockets;
} else {
max_cpus = qemu_opt_get_number(opts, "maxcpus", cpus);
- sockets = max_cpus / (cores * threads);
+ sockets = max_cpus / (cores * threads * dies);
}
} else if (cores == 0) {
threads = threads > 0 ? threads : 1;
- cores = cpus / (sockets * threads);
+ cores = cpus / (sockets * dies * threads);
cores = cores > 0 ? cores : 1;
} else if (threads == 0) {
- threads = cpus / (cores * sockets);
+ threads = cpus / (cores * dies * sockets);
threads = threads > 0 ? threads : 1;
- } else if (sockets * cores * threads < cpus) {
+ } else if (sockets * dies * cores * threads < cpus) {
error_report("cpu topology: "
- "sockets (%u) * cores (%u) * threads (%u) < "
+ "sockets (%u) * dies (%u) * cores (%u) * threads (%u) < "
"smp_cpus (%u)",
- sockets, cores, threads, cpus);
+ sockets, dies, cores, threads, cpus);
exit(1);
}
@@ -1262,22 +1268,23 @@ static void smp_parse(QemuOpts *opts)
exit(1);
}
- if (sockets * cores * threads > max_cpus) {
+ if (sockets * dies * cores * threads > max_cpus) {
error_report("cpu topology: "
- "sockets (%u) * cores (%u) * threads (%u) > "
+ "sockets (%u) * dies (%u) * cores (%u) * threads (%u) > "
"maxcpus (%u)",
- sockets, cores, threads, max_cpus);
+ sockets, dies, cores, threads, max_cpus);
exit(1);
}
- if (sockets * cores * threads != max_cpus) {
+ if (sockets * dies * cores * threads != max_cpus) {
warn_report("Invalid CPU topology deprecated: "
- "sockets (%u) * cores (%u) * threads (%u) "
+ "sockets (%u) * dies (%u) * cores (%u) * threads (%u) "
"!= maxcpus (%u)",
- sockets, cores, threads, max_cpus);
+ sockets, dies, cores, threads, max_cpus);
}
smp_cpus = cpus;
+ smp_dies = dies;
smp_cores = cores;
smp_threads = threads;
}
--
1.8.3.1
On Mon, Jan 14, 2019 at 08:24:56PM +0800, Like Xu wrote:
> This patch updates the check rules on legeacy -smp parse from user command
> and it's designed to obey the same restrictions as socket/core/thread model.
>
> Signed-off-by: Like Xu <like.xu@linux.intel.com>
This would require the documentation for -smp to be updated.
qemu-options.hx still says that "cores=" is the number of cores
per socket.
Also, I'm not completely sure we should change the meaning of
"cores=" and smp_cores to be per-die instead of per-socket. Most
machines won't have any code for tracking dies, so we probably
shouldn't make the extra complexity affect all machines.[1]
What would be the disadvantages of a simple -machine
"dies-per-socket" option, specific for PC?
Keeping core-id and smp_cores per-socket instead of per-die also
seems necessary to keep backwards compatibility on the interface
for identifying CPU hotplug slots. Igor, what do you think?
[1] I would even argue that the rest of the -smp options belong
to the machine object, and topology rules should be
machine-specific, but cleaning this up will require
additional work.
> ---
> hmp.c | 3 +++
> hw/core/machine.c | 12 ++++++++++++
> vl.c | 33 ++++++++++++++++++++-------------
> 3 files changed, 35 insertions(+), 13 deletions(-)
>
> diff --git a/hmp.c b/hmp.c
> index 80aa5ab..05ac133 100644
> --- a/hmp.c
> +++ b/hmp.c
> @@ -3013,6 +3013,9 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict)
> if (c->has_socket_id) {
> monitor_printf(mon, " socket-id: \"%" PRIu64 "\"\n", c->socket_id);
> }
> + if (c->has_die_id) {
> + monitor_printf(mon, " die-id: \"%" PRIu64 "\"\n", c->die_id);
> + }
> if (c->has_core_id) {
> monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id);
> }
> diff --git a/hw/core/machine.c b/hw/core/machine.c
> index 95dc7c3..05bc545 100644
> --- a/hw/core/machine.c
> +++ b/hw/core/machine.c
> @@ -601,6 +601,11 @@ void machine_set_cpu_numa_node(MachineState *machine,
> return;
> }
>
> + if (props->has_die_id && !slot->props.has_die_id) {
> + error_setg(errp, "die-id is not supported");
> + return;
> + }
> +
> if (props->has_socket_id && !slot->props.has_socket_id) {
> error_setg(errp, "socket-id is not supported");
> return;
> @@ -615,6 +620,10 @@ void machine_set_cpu_numa_node(MachineState *machine,
> continue;
> }
>
> + if (props->has_die_id && props->die_id != slot->props.die_id) {
> + continue;
> + }
> +
> if (props->has_socket_id && props->socket_id != slot->props.socket_id) {
> continue;
> }
> @@ -849,6 +858,9 @@ static char *cpu_slot_to_string(const CPUArchId *cpu)
> if (cpu->props.has_socket_id) {
> g_string_append_printf(s, "socket-id: %"PRId64, cpu->props.socket_id);
> }
> + if (cpu->props.has_die_id) {
> + g_string_append_printf(s, "die-id: %"PRId64, cpu->props.die_id);
> + }
> if (cpu->props.has_core_id) {
> if (s->len) {
> g_string_append_printf(s, ", ");
> diff --git a/vl.c b/vl.c
> index 9b8ea3f..72be689 100644
> --- a/vl.c
> +++ b/vl.c
> @@ -169,6 +169,7 @@ int win2k_install_hack = 0;
> int singlestep = 0;
> int smp_cpus;
> unsigned int max_cpus;
> +int smp_dies = 1;
> int smp_cores = 1;
> int smp_threads = 1;
> int acpi_enabled = 1;
> @@ -1208,6 +1209,9 @@ static QemuOptsList qemu_smp_opts = {
> .name = "sockets",
> .type = QEMU_OPT_NUMBER,
> }, {
> + .name = "dies",
> + .type = QEMU_OPT_NUMBER,
> + }, {
> .name = "cores",
> .type = QEMU_OPT_NUMBER,
> }, {
> @@ -1226,32 +1230,34 @@ static void smp_parse(QemuOpts *opts)
> if (opts) {
> unsigned cpus = qemu_opt_get_number(opts, "cpus", 0);
> unsigned sockets = qemu_opt_get_number(opts, "sockets", 0);
> + unsigned dies = qemu_opt_get_number(opts, "dies", 0);
> unsigned cores = qemu_opt_get_number(opts, "cores", 0);
> unsigned threads = qemu_opt_get_number(opts, "threads", 0);
>
> /* compute missing values, prefer sockets over cores over threads */
> + dies = dies > 0 ? dies : 1;
> if (cpus == 0 || sockets == 0) {
> cores = cores > 0 ? cores : 1;
> threads = threads > 0 ? threads : 1;
> if (cpus == 0) {
> sockets = sockets > 0 ? sockets : 1;
> - cpus = cores * threads * sockets;
> + cpus = cores * threads * dies * sockets;
> } else {
> max_cpus = qemu_opt_get_number(opts, "maxcpus", cpus);
> - sockets = max_cpus / (cores * threads);
> + sockets = max_cpus / (cores * threads * dies);
> }
> } else if (cores == 0) {
> threads = threads > 0 ? threads : 1;
> - cores = cpus / (sockets * threads);
> + cores = cpus / (sockets * dies * threads);
> cores = cores > 0 ? cores : 1;
> } else if (threads == 0) {
> - threads = cpus / (cores * sockets);
> + threads = cpus / (cores * dies * sockets);
> threads = threads > 0 ? threads : 1;
> - } else if (sockets * cores * threads < cpus) {
> + } else if (sockets * dies * cores * threads < cpus) {
> error_report("cpu topology: "
> - "sockets (%u) * cores (%u) * threads (%u) < "
> + "sockets (%u) * dies (%u) * cores (%u) * threads (%u) < "
> "smp_cpus (%u)",
> - sockets, cores, threads, cpus);
> + sockets, dies, cores, threads, cpus);
> exit(1);
> }
>
> @@ -1262,22 +1268,23 @@ static void smp_parse(QemuOpts *opts)
> exit(1);
> }
>
> - if (sockets * cores * threads > max_cpus) {
> + if (sockets * dies * cores * threads > max_cpus) {
> error_report("cpu topology: "
> - "sockets (%u) * cores (%u) * threads (%u) > "
> + "sockets (%u) * dies (%u) * cores (%u) * threads (%u) > "
> "maxcpus (%u)",
> - sockets, cores, threads, max_cpus);
> + sockets, dies, cores, threads, max_cpus);
> exit(1);
> }
>
> - if (sockets * cores * threads != max_cpus) {
> + if (sockets * dies * cores * threads != max_cpus) {
> warn_report("Invalid CPU topology deprecated: "
> - "sockets (%u) * cores (%u) * threads (%u) "
> + "sockets (%u) * dies (%u) * cores (%u) * threads (%u) "
> "!= maxcpus (%u)",
> - sockets, cores, threads, max_cpus);
> + sockets, dies, cores, threads, max_cpus);
> }
>
> smp_cpus = cpus;
> + smp_dies = dies;
> smp_cores = cores;
> smp_threads = threads;
> }
> --
> 1.8.3.1
>
--
Eduardo
> -----Original Message-----
> From: Eduardo Habkost [mailto:ehabkost@redhat.com]
> Sent: Tuesday, January 15, 2019 4:52 AM
> To: Like Xu <like.xu@linux.intel.com>
> Cc: qemu-devel@nongnu.org; Xu, Like <like.xu@intel.com>;
> imammedo@redhat.com; drjones@redhat.com; Michael S. Tsirkin
> <mst@redhat.com>; Marcelo Tosatti <mtosatti@redhat.com>; Marcel
> Apfelbaum <marcel.apfelbaum@gmail.com>; Paolo Bonzini
> <pbonzini@redhat.com>; Peter Crosthwaite
> <crosthwaite.peter@gmail.com>; Richard Henderson <rth@twiddle.net>
> Subject: Re: [Qemu-devel] [PATCH v1 2/5] vl.c: add -smp,dies=* command
> line support
>
> On Mon, Jan 14, 2019 at 08:24:56PM +0800, Like Xu wrote:
> > This patch updates the check rules on legeacy -smp parse from user
> > command and it's designed to obey the same restrictions as
> socket/core/thread model.
> >
> > Signed-off-by: Like Xu <like.xu@linux.intel.com>
>
> This would require the documentation for -smp to be updated.
> qemu-options.hx still says that "cores=" is the number of cores per socket.
[Xu, Like] I'll add more docs in next version and thanks.
>
> Also, I'm not completely sure we should change the meaning of "cores="
> and smp_cores to be per-die instead of per-socket. Most machines won't
> have any code for tracking dies, so we probably shouldn't make the extra
> complexity affect all machines.[1]
[Xu, Like] I'd prefer to apply the die level in a general way without extra impact.
>
> What would be the disadvantages of a simple -machine "dies-per-socket"
> option, specific for PC?
[Xu, Like] It may not be a good choice to cut up the cpu topology parser logic, and
the die level is so generic that, as far as I know, any machine provided by qemu
could benefit from a potential socket/die/core/thread model.
>
> Keeping core-id and smp_cores per-socket instead of per-die also seems
> necessary to keep backwards compatibility on the interface for identifying
> CPU hotplug slots. Igor, what do you think?
[Xu, Like] The compatibility issue on hotplug from MCP challenge is still being
evaluated and Igor, what do you think :D ?
>
>
> [1] I would even argue that the rest of the -smp options belong
> to the machine object, and topology rules should be
> machine-specific, but cleaning this up will require
> additional work.
[Xu, Like] I agree and Intel may have another
two cpu topo levels named module and tile from SDM spec for packaging
and that should be machine-specific as proposal if any. However
the die level I believe is much more generic just like core or thread.
>
> > ---
> > hmp.c | 3 +++
> > hw/core/machine.c | 12 ++++++++++++
> > vl.c | 33 ++++++++++++++++++++-------------
> > 3 files changed, 35 insertions(+), 13 deletions(-)
> >
> > diff --git a/hmp.c b/hmp.c
> > index 80aa5ab..05ac133 100644
> > --- a/hmp.c
> > +++ b/hmp.c
> > @@ -3013,6 +3013,9 @@ void hmp_hotpluggable_cpus(Monitor *mon,
> const QDict *qdict)
> > if (c->has_socket_id) {
> > monitor_printf(mon, " socket-id: \"%" PRIu64 "\"\n", c-
> >socket_id);
> > }
> > + if (c->has_die_id) {
> > + monitor_printf(mon, " die-id: \"%" PRIu64 "\"\n", c->die_id);
> > + }
> > if (c->has_core_id) {
> > monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id);
> > }
> > diff --git a/hw/core/machine.c b/hw/core/machine.c index
> > 95dc7c3..05bc545 100644
> > --- a/hw/core/machine.c
> > +++ b/hw/core/machine.c
> > @@ -601,6 +601,11 @@ void
> machine_set_cpu_numa_node(MachineState *machine,
> > return;
> > }
> >
> > + if (props->has_die_id && !slot->props.has_die_id) {
> > + error_setg(errp, "die-id is not supported");
> > + return;
> > + }
> > +
> > if (props->has_socket_id && !slot->props.has_socket_id) {
> > error_setg(errp, "socket-id is not supported");
> > return;
> > @@ -615,6 +620,10 @@ void
> machine_set_cpu_numa_node(MachineState *machine,
> > continue;
> > }
> >
> > + if (props->has_die_id && props->die_id != slot->props.die_id) {
> > + continue;
> > + }
> > +
> > if (props->has_socket_id && props->socket_id != slot-
> >props.socket_id) {
> > continue;
> > }
> > @@ -849,6 +858,9 @@ static char *cpu_slot_to_string(const CPUArchId
> *cpu)
> > if (cpu->props.has_socket_id) {
> > g_string_append_printf(s, "socket-id: %"PRId64, cpu-
> >props.socket_id);
> > }
> > + if (cpu->props.has_die_id) {
> > + g_string_append_printf(s, "die-id: %"PRId64, cpu->props.die_id);
> > + }
> > if (cpu->props.has_core_id) {
> > if (s->len) {
> > g_string_append_printf(s, ", "); diff --git a/vl.c b/vl.c
> > index 9b8ea3f..72be689 100644
> > --- a/vl.c
> > +++ b/vl.c
> > @@ -169,6 +169,7 @@ int win2k_install_hack = 0; int singlestep = 0;
> > int smp_cpus; unsigned int max_cpus;
> > +int smp_dies = 1;
> > int smp_cores = 1;
> > int smp_threads = 1;
> > int acpi_enabled = 1;
> > @@ -1208,6 +1209,9 @@ static QemuOptsList qemu_smp_opts = {
> > .name = "sockets",
> > .type = QEMU_OPT_NUMBER,
> > }, {
> > + .name = "dies",
> > + .type = QEMU_OPT_NUMBER,
> > + }, {
> > .name = "cores",
> > .type = QEMU_OPT_NUMBER,
> > }, {
> > @@ -1226,32 +1230,34 @@ static void smp_parse(QemuOpts *opts)
> > if (opts) {
> > unsigned cpus = qemu_opt_get_number(opts, "cpus", 0);
> > unsigned sockets = qemu_opt_get_number(opts, "sockets", 0);
> > + unsigned dies = qemu_opt_get_number(opts, "dies", 0);
> > unsigned cores = qemu_opt_get_number(opts, "cores", 0);
> > unsigned threads = qemu_opt_get_number(opts, "threads", 0);
> >
> > /* compute missing values, prefer sockets over cores over
> > threads */
> > + dies = dies > 0 ? dies : 1;
> > if (cpus == 0 || sockets == 0) {
> > cores = cores > 0 ? cores : 1;
> > threads = threads > 0 ? threads : 1;
> > if (cpus == 0) {
> > sockets = sockets > 0 ? sockets : 1;
> > - cpus = cores * threads * sockets;
> > + cpus = cores * threads * dies * sockets;
> > } else {
> > max_cpus = qemu_opt_get_number(opts, "maxcpus", cpus);
> > - sockets = max_cpus / (cores * threads);
> > + sockets = max_cpus / (cores * threads * dies);
> > }
> > } else if (cores == 0) {
> > threads = threads > 0 ? threads : 1;
> > - cores = cpus / (sockets * threads);
> > + cores = cpus / (sockets * dies * threads);
> > cores = cores > 0 ? cores : 1;
> > } else if (threads == 0) {
> > - threads = cpus / (cores * sockets);
> > + threads = cpus / (cores * dies * sockets);
> > threads = threads > 0 ? threads : 1;
> > - } else if (sockets * cores * threads < cpus) {
> > + } else if (sockets * dies * cores * threads < cpus) {
> > error_report("cpu topology: "
> > - "sockets (%u) * cores (%u) * threads (%u) < "
> > + "sockets (%u) * dies (%u) * cores (%u) * threads (%u) < "
> > "smp_cpus (%u)",
> > - sockets, cores, threads, cpus);
> > + sockets, dies, cores, threads, cpus);
> > exit(1);
> > }
> >
> > @@ -1262,22 +1268,23 @@ static void smp_parse(QemuOpts *opts)
> > exit(1);
> > }
> >
> > - if (sockets * cores * threads > max_cpus) {
> > + if (sockets * dies * cores * threads > max_cpus) {
> > error_report("cpu topology: "
> > - "sockets (%u) * cores (%u) * threads (%u) > "
> > + "sockets (%u) * dies (%u) * cores (%u) * threads (%u) > "
> > "maxcpus (%u)",
> > - sockets, cores, threads, max_cpus);
> > + sockets, dies, cores, threads, max_cpus);
> > exit(1);
> > }
> >
> > - if (sockets * cores * threads != max_cpus) {
> > + if (sockets * dies * cores * threads != max_cpus) {
> > warn_report("Invalid CPU topology deprecated: "
> > - "sockets (%u) * cores (%u) * threads (%u) "
> > + "sockets (%u) * dies (%u) * cores (%u) * threads (%u) "
> > "!= maxcpus (%u)",
> > - sockets, cores, threads, max_cpus);
> > + sockets, dies, cores, threads, max_cpus);
> > }
> >
> > smp_cpus = cpus;
> > + smp_dies = dies;
> > smp_cores = cores;
> > smp_threads = threads;
> > }
> > --
> > 1.8.3.1
> >
>
> --
> Eduardo
On Mon, Jan 14, 2019 at 06:51:34PM -0200, Eduardo Habkost wrote:
> On Mon, Jan 14, 2019 at 08:24:56PM +0800, Like Xu wrote:
> > This patch updates the check rules on legeacy -smp parse from user command
> > and it's designed to obey the same restrictions as socket/core/thread model.
> >
> > Signed-off-by: Like Xu <like.xu@linux.intel.com>
>
> This would require the documentation for -smp to be updated.
> qemu-options.hx still says that "cores=" is the number of cores
> per socket.
>
> Also, I'm not completely sure we should change the meaning of
> "cores=" and smp_cores to be per-die instead of per-socket. Most
> machines won't have any code for tracking dies, so we probably
> shouldn't make the extra complexity affect all machines.[1]
Could we not simply have a 'max-dies' property against the machine
base class which defaults to 1. Then no existing machine types
need any changes unless they want to opt-in to supporting
"dies > 1".
> What would be the disadvantages of a simple -machine
> "dies-per-socket" option, specific for PC?
Libvirt currently has
<cpu>
<topology sockets='1' cores='2' threads='1'/>
</cpu>
To me the natural way to expand that is to use
<cpu>
<topology sockets='1' dies='2' cores='2' threads='1'/>
</cpu>
but this rather implies dies-per-socket + cores-per-die
not cores-per-socket. Libvirt could of course convert
its value from cores-per-die into cores-per-socket
before giving it to QEMU, albeit with the potential
for confusion from people comparing the libvirt and QEMU
level configs
> Keeping core-id and smp_cores per-socket instead of per-die also
> seems necessary to keep backwards compatibility on the interface
> for identifying CPU hotplug slots. Igor, what do you think?
Is there really a backwards compatibility problem, given that
no existing mgmt app will have created a VM with "dies != 1".
IOW, if an application adds logic to support configuring a
VM with "dies > 1" it seems fine that they should need to
understand how this impacts the way you identify CPUs for
hotplug.
> [1] I would even argue that the rest of the -smp options belong
> to the machine object, and topology rules should be
> machine-specific, but cleaning this up will require
> additional work.
If we ever expect to support non-homogenous CPUs then our
modelling of topology is fatally flawed, as it doesn't allow
us to specify creating a VM with 1 socket containing 2
cores and a second socket containing 4 cores. Fixing that
might require modelling each socket, die, and core as a
distinct set of nested QOM objects which gets real fun.
Regards,
Daniel
--
|: https://berrange.com -o- https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o- https://fstop138.berrange.com :|
|: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|
On 2019/1/17 2:26, Daniel P. Berrangé wrote: > On Mon, Jan 14, 2019 at 06:51:34PM -0200, Eduardo Habkost wrote: >> On Mon, Jan 14, 2019 at 08:24:56PM +0800, Like Xu wrote: >>> This patch updates the check rules on legeacy -smp parse from user command >>> and it's designed to obey the same restrictions as socket/core/thread model. >>> >>> Signed-off-by: Like Xu <like.xu@linux.intel.com> >> >> This would require the documentation for -smp to be updated. >> qemu-options.hx still says that "cores=" is the number of cores >> per socket. >> >> Also, I'm not completely sure we should change the meaning of >> "cores=" and smp_cores to be per-die instead of per-socket. Most >> machines won't have any code for tracking dies, so we probably >> shouldn't make the extra complexity affect all machines.[1] > > Could we not simply have a 'max-dies' property against the machine > base class which defaults to 1. Then no existing machine types > need any changes unless they want to opt-in to supporting > "dies > 1". It's nice to have max-dies for machine base class. > >> What would be the disadvantages of a simple -machine >> "dies-per-socket" option, specific for PC? > > Libvirt currently has > > <cpu> > <topology sockets='1' cores='2' threads='1'/> > </cpu> > > To me the natural way to expand that is to use > > <cpu> > <topology sockets='1' dies='2' cores='2' threads='1'/> > </cpu> > > but this rather implies dies-per-socket + cores-per-die > not cores-per-socket. Libvirt could of course convert > its value from cores-per-die into cores-per-socket > before giving it to QEMU, albeit with the potential > for confusion from people comparing the libvirt and QEMU > level configs It is a recommended update on cpu topo configuration of libvirt as well as other upper layer apps. > >> Keeping core-id and smp_cores per-socket instead of per-die also >> seems necessary to keep backwards compatibility on the interface >> for identifying CPU hotplug slots. Igor, what do you think? 
> > Is there really a backwards compatibility problem, given that > no existing mgmt app will have created a VM with "dies != 1". > IOW, if an application adds logic to support configuring a > VM with "dies > 1" it seems fine that they should need to > understand how this impacts the way you identify CPUs for > hotplug. The impacts from MCP model will be documented continuously. Any concerns for hot-plugging CPUs in MCP socket is welcomed. > >> [1] I would even argue that the rest of the -smp options belong >> to the machine object, and topology rules should be >> machine-specific, but cleaning this up will require >> additional work. > > If we ever expect to support non-homogenous CPUs then our > modelling of topology is fatally flawed, as it doesm't allow > us to specify creating a VM with 1 socket containing 2 > cores and a second socket containing 4 cores. Fixing that > might require modelling each socket, die, and core as a > distinct set of nested QOM objects which gets real fun. Do we really need to go out of this non-homogeneous step? Currently there is no support on physical host AFAIK. Is there enough benefit? > > > Regards, > Daniel >
On Thu, Jan 17, 2019 at 09:18:29AM +0800, Like Xu wrote: > On 2019/1/17 2:26, Daniel P. Berrangé wrote: > > On Mon, Jan 14, 2019 at 06:51:34PM -0200, Eduardo Habkost wrote: > > > On Mon, Jan 14, 2019 at 08:24:56PM +0800, Like Xu wrote: > > > > This patch updates the check rules on legeacy -smp parse from user command > > > > and it's designed to obey the same restrictions as socket/core/thread model. > > > > > > > > Signed-off-by: Like Xu <like.xu@linux.intel.com> > > > > > > This would require the documentation for -smp to be updated. > > > qemu-options.hx still says that "cores=" is the number of cores > > > per socket. > > > > > > Also, I'm not completely sure we should change the meaning of > > > "cores=" and smp_cores to be per-die instead of per-socket. Most > > > machines won't have any code for tracking dies, so we probably > > > shouldn't make the extra complexity affect all machines.[1] > > > > Could we not simply have a 'max-dies' property against the machine > > base class which defaults to 1. Then no existing machine types > > need any changes unless they want to opt-in to supporting > > "dies > 1". > It's nice to have max-dies for machine base class. > > > > > What would be the disadvantages of a simple -machine > > > "dies-per-socket" option, specific for PC? > > > > Libvirt currently has > > > > <cpu> > > <topology sockets='1' cores='2' threads='1'/> > > </cpu> > > > > To me the natural way to expand that is to use > > > > <cpu> > > <topology sockets='1' dies='2' cores='2' threads='1'/> > > </cpu> > > > > but this rather implies dies-per-socket + cores-per-die > > not cores-per-socket. Libvirt could of course convert > > its value from cores-per-die into cores-per-socket > > before giving it to QEMU, albeit with the potential > > for confusion from people comparing the libvirt and QEMU > > level configs > It is a recommended update on cpu topo configuration of libvirt > as well as other upper layer apps. 
> > > > > Keeping core-id and smp_cores per-socket instead of per-die also > > > seems necessary to keep backwards compatibility on the interface > > > for identifying CPU hotplug slots. Igor, what do you think? > > > > Is there really a backwards compatibility problem, given that > > no existing mgmt app will have created a VM with "dies != 1". > > IOW, if an application adds logic to support configuring a > > VM with "dies > 1" it seems fine that they should need to > > understand how this impacts the way you identify CPUs for > > hotplug. > The impacts from MCP model will be documented continuously. > Any concerns for hot-plugging CPUs in MCP socket is welcomed. > > > > > [1] I would even argue that the rest of the -smp options belong > > > to the machine object, and topology rules should be > > > machine-specific, but cleaning this up will require > > > additional work. > > > > If we ever expect to support non-homogenous CPUs then our > > modelling of topology is fatally flawed, as it doesm't allow > > us to specify creating a VM with 1 socket containing 2 > > cores and a second socket containing 4 cores. Fixing that > > might require modelling each socket, die, and core as a > > distinct set of nested QOM objects which gets real fun. > Do we really need to go out of this non-homogeneous step? > Currently there is no support on physical host AFAIK. > Is there enough benefit? I'm not suggesting we need to solve this now - I just meant to indicate that we shouldn't over-think representing of the 'dies' parameter today, because any problems with the simple solution you proposed are negligible compared to the problem of non-homogeneous CPUs. IOW, I think it is fine to keep your simple proposal now. Worry about the hard problems later when we'll need better modelling of everything. 
Regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|
© 2016 - 2025 Red Hat, Inc.