The work to be done in h_home_node_associativity() intersects
with what is already done in spapr_numa_fixup_cpu_dt(). This
patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
be used for both spapr_numa_fixup_cpu_dt() and
h_home_node_associativity().
While we're at it, use memcpy() instead of loop assignment
to create the returned array.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
---
hw/ppc/spapr_numa.c | 30 ++++++++++++++++++++----------
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index 368c1a494d..980a6488bf 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -71,13 +71,15 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
sizeof(spapr->numa_assoc_array[nodeid]))));
}
-int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
- int offset, PowerPCCPU *cpu)
+static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
+ PowerPCCPU *cpu,
+ uint *vcpu_assoc_size)
{
- uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
- uint32_t vcpu_assoc[vcpu_assoc_size];
+ uint32_t *vcpu_assoc = NULL;
int index = spapr_get_vcpu_id(cpu);
- int i;
+
+ *vcpu_assoc_size = (NUMA_ASSOC_SIZE + 1) * sizeof(uint32_t);
+ vcpu_assoc = g_malloc(*vcpu_assoc_size);
/*
* VCPUs have an extra 'cpu_id' value in ibm,associativity
@@ -86,16 +88,24 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
* cpu_id last.
*/
vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
+ memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id],
+ MAX_DISTANCE_REF_POINTS);
+ vcpu_assoc[MAX_DISTANCE_REF_POINTS + 1] = cpu_to_be32(index);
- for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
- vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
- }
+ return vcpu_assoc;
+}
+
+int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
+ int offset, PowerPCCPU *cpu)
+{
+ g_autofree uint32_t *vcpu_assoc = NULL;
+ uint vcpu_assoc_size;
- vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
+ vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu, &vcpu_assoc_size);
/* Advertise NUMA via ibm,associativity */
return fdt_setprop(fdt, offset, "ibm,associativity",
- vcpu_assoc, sizeof(vcpu_assoc));
+ vcpu_assoc, vcpu_assoc_size);
}
--
2.26.2
On Thu, Sep 03, 2020 at 10:04:38PM -0300, Daniel Henrique Barboza wrote:
> The work to be done in h_home_node_associativity() intersects
> with what is already done in spapr_numa_fixup_cpu_dt(). This
> patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
> be used for both spapr_numa_fixup_cpu_dt() and
> h_home_node_associativity().
>
> While we're at it, use memcpy() instead of loop assignment
> to created the returned array.
>
> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
> ---
> hw/ppc/spapr_numa.c | 30 ++++++++++++++++++++----------
> 1 file changed, 20 insertions(+), 10 deletions(-)
>
> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
> index 368c1a494d..980a6488bf 100644
> --- a/hw/ppc/spapr_numa.c
> +++ b/hw/ppc/spapr_numa.c
> @@ -71,13 +71,15 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
> sizeof(spapr->numa_assoc_array[nodeid]))));
> }
>
> -int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
> - int offset, PowerPCCPU *cpu)
> +static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
> + PowerPCCPU *cpu,
> + uint *vcpu_assoc_size)
> {
> - uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
> - uint32_t vcpu_assoc[vcpu_assoc_size];
> + uint32_t *vcpu_assoc = NULL;
> int index = spapr_get_vcpu_id(cpu);
> - int i;
> +
> + *vcpu_assoc_size = (NUMA_ASSOC_SIZE + 1) * sizeof(uint32_t);
> + vcpu_assoc = g_malloc(*vcpu_assoc_size);
>
> /*
> * VCPUs have an extra 'cpu_id' value in ibm,associativity
> @@ -86,16 +88,24 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
> * cpu_id last.
> */
> vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
> + memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id],
> + MAX_DISTANCE_REF_POINTS);
That needs to be MAX_DISTANCE_REF_POINTS * sizeof(uint32_t), doesn't it?
> + vcpu_assoc[MAX_DISTANCE_REF_POINTS + 1] = cpu_to_be32(index);
>
> - for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
> - vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
> - }
> + return vcpu_assoc;
> +}
> +
> +int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
> + int offset, PowerPCCPU *cpu)
> +{
> + g_autofree uint32_t *vcpu_assoc = NULL;
> + uint vcpu_assoc_size;
>
> - vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
> + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu, &vcpu_assoc_size);
>
> /* Advertise NUMA via ibm,associativity */
> return fdt_setprop(fdt, offset, "ibm,associativity",
> - vcpu_assoc, sizeof(vcpu_assoc));
> + vcpu_assoc, vcpu_assoc_size);
> }
>
>
--
David Gibson | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you. NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson
On 9/4/20 1:10 AM, David Gibson wrote:
> On Thu, Sep 03, 2020 at 10:04:38PM -0300, Daniel Henrique Barboza wrote:
>> The work to be done in h_home_node_associativity() intersects
>> with what is already done in spapr_numa_fixup_cpu_dt(). This
>> patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
>> be used for both spapr_numa_fixup_cpu_dt() and
>> h_home_node_associativity().
>>
>> While we're at it, use memcpy() instead of loop assignment
>> to created the returned array.
>>
>> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
>> ---
>> hw/ppc/spapr_numa.c | 30 ++++++++++++++++++++----------
>> 1 file changed, 20 insertions(+), 10 deletions(-)
>>
>> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
>> index 368c1a494d..980a6488bf 100644
>> --- a/hw/ppc/spapr_numa.c
>> +++ b/hw/ppc/spapr_numa.c
>> @@ -71,13 +71,15 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
>> sizeof(spapr->numa_assoc_array[nodeid]))));
>> }
>>
>> -int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>> - int offset, PowerPCCPU *cpu)
>> +static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
>> + PowerPCCPU *cpu,
>> + uint *vcpu_assoc_size)
>> {
>> - uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
>> - uint32_t vcpu_assoc[vcpu_assoc_size];
>> + uint32_t *vcpu_assoc = NULL;
>> int index = spapr_get_vcpu_id(cpu);
>> - int i;
>> +
>> + *vcpu_assoc_size = (NUMA_ASSOC_SIZE + 1) * sizeof(uint32_t);
>> + vcpu_assoc = g_malloc(*vcpu_assoc_size);
>>
>> /*
>> * VCPUs have an extra 'cpu_id' value in ibm,associativity
>> @@ -86,16 +88,24 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>> * cpu_id last.
>> */
>> vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
>> + memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id],
>> + MAX_DISTANCE_REF_POINTS);
>
> That needs to be MAX_DISTANCE_REF_POINTS * sizeof(uint32_t), doesn't it?
Hmmmm yeah it does. Even if this didn't break spectacularly in my guest (not
sure why), we're doing a similar operation in spapr_numa_write_assoc_lookup_arrays()
using 'sizeof(uint32_t)'. Might as well do the same here.
Thanks,
DHB
>
>> + vcpu_assoc[MAX_DISTANCE_REF_POINTS + 1] = cpu_to_be32(index);
>>
>> - for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
>> - vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
>> - }
>> + return vcpu_assoc;
>> +}
>> +
>> +int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>> + int offset, PowerPCCPU *cpu)
>> +{
>> + g_autofree uint32_t *vcpu_assoc = NULL;
>> + uint vcpu_assoc_size;
>>
>> - vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
>> + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu, &vcpu_assoc_size);
>>
>> /* Advertise NUMA via ibm,associativity */
>> return fdt_setprop(fdt, offset, "ibm,associativity",
>> - vcpu_assoc, sizeof(vcpu_assoc));
>> + vcpu_assoc, vcpu_assoc_size);
>> }
>>
>>
>
On Thu, 3 Sep 2020 22:04:38 -0300
Daniel Henrique Barboza <danielhb413@gmail.com> wrote:
> The work to be done in h_home_node_associativity() intersects
> with what is already done in spapr_numa_fixup_cpu_dt(). This
> patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
> be used for both spapr_numa_fixup_cpu_dt() and
> h_home_node_associativity().
>
> While we're at it, use memcpy() instead of loop assignment
> to created the returned array.
>
> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
> ---
Hi Daniel,
A few comments below.
> hw/ppc/spapr_numa.c | 30 ++++++++++++++++++++----------
> 1 file changed, 20 insertions(+), 10 deletions(-)
>
> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
> index 368c1a494d..980a6488bf 100644
> --- a/hw/ppc/spapr_numa.c
> +++ b/hw/ppc/spapr_numa.c
> @@ -71,13 +71,15 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
> sizeof(spapr->numa_assoc_array[nodeid]))));
> }
>
> -int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
> - int offset, PowerPCCPU *cpu)
> +static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
> + PowerPCCPU *cpu,
> + uint *vcpu_assoc_size)
> {
> - uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
> - uint32_t vcpu_assoc[vcpu_assoc_size];
> + uint32_t *vcpu_assoc = NULL;
You don't need to initialize this pointer since it is assigned a value
unconditionally just below.
> int index = spapr_get_vcpu_id(cpu);
> - int i;
> +
> + *vcpu_assoc_size = (NUMA_ASSOC_SIZE + 1) * sizeof(uint32_t);
It's a bit weird to return something that is definitely a compile-time
constant by reference... What about introducing a macro?
#define VCPU_NUMA_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1)
> + vcpu_assoc = g_malloc(*vcpu_assoc_size);
>
vcpu_assoc = g_new(uint32_t, VCPU_NUMA_ASSOC_SIZE);
> /*
> * VCPUs have an extra 'cpu_id' value in ibm,associativity
> @@ -86,16 +88,24 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
> * cpu_id last.
> */
> vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
> + memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id],
> + MAX_DISTANCE_REF_POINTS);
> + vcpu_assoc[MAX_DISTANCE_REF_POINTS + 1] = cpu_to_be32(index);
>
memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id],
(VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));
vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
I personally find this clearer than using MAX_DISTANCE_REF_POINTS in an array
that was just allocated with NUMA_ASSOC_SIZE... one has to check spapr.h
to see that NUMA_ASSOC_SIZE == MAX_DISTANCE_REF_POINTS + 1
> - for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
> - vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
> - }
> + return vcpu_assoc;
> +}
> +
> +int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
> + int offset, PowerPCCPU *cpu)
> +{
> + g_autofree uint32_t *vcpu_assoc = NULL;
> + uint vcpu_assoc_size;
>
> - vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
> + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu, &vcpu_assoc_size);
>
> /* Advertise NUMA via ibm,associativity */
> return fdt_setprop(fdt, offset, "ibm,associativity",
> - vcpu_assoc, sizeof(vcpu_assoc));
> + vcpu_assoc, vcpu_assoc_size);
return fdt_setprop(fdt, offset, "ibm,associativity",
vcpu_assoc, VCPU_NUMA_ASSOC_SIZE * sizeof(uint32_t));
> }
>
>
On 9/4/20 7:02 AM, Greg Kurz wrote:
> On Thu, 3 Sep 2020 22:04:38 -0300
> Daniel Henrique Barboza <danielhb413@gmail.com> wrote:
>
>> The work to be done in h_home_node_associativity() intersects
>> with what is already done in spapr_numa_fixup_cpu_dt(). This
>> patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
>> be used for both spapr_numa_fixup_cpu_dt() and
>> h_home_node_associativity().
>>
>> While we're at it, use memcpy() instead of loop assignment
>> to created the returned array.
>>
>> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
>> ---
>
> Hi Daniel,
>
> A few comments below.
>
>> hw/ppc/spapr_numa.c | 30 ++++++++++++++++++++----------
>> 1 file changed, 20 insertions(+), 10 deletions(-)
>>
>> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
>> index 368c1a494d..980a6488bf 100644
>> --- a/hw/ppc/spapr_numa.c
>> +++ b/hw/ppc/spapr_numa.c
>> @@ -71,13 +71,15 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
>> sizeof(spapr->numa_assoc_array[nodeid]))));
>> }
>>
>> -int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>> - int offset, PowerPCCPU *cpu)
>> +static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
>> + PowerPCCPU *cpu,
>> + uint *vcpu_assoc_size)
>> {
>> - uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
>> - uint32_t vcpu_assoc[vcpu_assoc_size];
>> + uint32_t *vcpu_assoc = NULL;
>
> You don't need to initialize this pointer since it is assigned a value
> unconditionally just below.
>
>> int index = spapr_get_vcpu_id(cpu);
>> - int i;
>> +
>> + *vcpu_assoc_size = (NUMA_ASSOC_SIZE + 1) * sizeof(uint32_t);
>
> It's a bit weird to return something that is definitely a compile
> time constant by reference... What about introducing a macro ?
>
> #define VCPU_NUMA_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1)
>
>> + vcpu_assoc = g_malloc(*vcpu_assoc_size);
>>
>
> vcpu_assoc = g_new(uint32_t, VCPU_NUMA_ASSOC_SIZE);
>
>> /*
>> * VCPUs have an extra 'cpu_id' value in ibm,associativity
>> @@ -86,16 +88,24 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>> * cpu_id last.
>> */
>> vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
>> + memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id],
>> + MAX_DISTANCE_REF_POINTS);
>> + vcpu_assoc[MAX_DISTANCE_REF_POINTS + 1] = cpu_to_be32(index);
>>
>
> memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id],
> (VPCU_ASSOC_SIZE - 2) * sizeof(uint32_t));
> vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
>
> I personally find more clear than using MAX_DISTANCE_REF_POINTS in an array
> that was just allocated with NUMA_ASSOC_SIZE... one has to check spapr.h
> to see that NUMA_ASSOC_SIZE == MAX_DISTANCE_REF_POINTS + 1
That all makes sense to me. I'll introduce a VCPU_ASSOC_SIZE in spapr_numa.h
and use it when operating on the associativity arrays for vcpus, both in this
patch and also in patch 3.
Thanks,
DHB
>
>> - for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
>> - vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
>> - }
>> + return vcpu_assoc;
>> +}
>> +
>> +int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>> + int offset, PowerPCCPU *cpu)
>> +{
>> + g_autofree uint32_t *vcpu_assoc = NULL;
>> + uint vcpu_assoc_size;
>>
>> - vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
>> + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu, &vcpu_assoc_size);
>>
>> /* Advertise NUMA via ibm,associativity */
>> return fdt_setprop(fdt, offset, "ibm,associativity",
>> - vcpu_assoc, sizeof(vcpu_assoc));
>> + vcpu_assoc, vcpu_assoc_size);
>
> return fdt_setprop(fdt, offset, "ibm,associativity",
> vcpu_assoc, VCPU_NUMA_ASSOC_SIZE * sizeof(uint32_t));
>
>> }
>>
>>
>
© 2016 - 2026 Red Hat, Inc.