The work to be done in h_home_node_associativity() intersects
with what is already done in spapr_numa_fixup_cpu_dt(). This
patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
be used for both spapr_numa_fixup_cpu_dt() and
h_home_node_associativity().
While we're at it, use memcpy() instead of a loop assignment
to create the returned array.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
---
hw/ppc/spapr_numa.c | 33 ++++++++++++++++++++-------------
include/hw/ppc/spapr.h | 7 ++++++-
2 files changed, 26 insertions(+), 14 deletions(-)
diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index 368c1a494d..674d2ee86d 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -71,31 +71,38 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
sizeof(spapr->numa_assoc_array[nodeid]))));
}
-int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
- int offset, PowerPCCPU *cpu)
+static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
+ PowerPCCPU *cpu)
{
- uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
- uint32_t vcpu_assoc[vcpu_assoc_size];
+ uint32_t *vcpu_assoc = g_malloc(VCPU_ASSOC_SIZE * sizeof(uint32_t));
int index = spapr_get_vcpu_id(cpu);
- int i;
+
+ g_assert(vcpu_assoc != NULL);
/*
* VCPUs have an extra 'cpu_id' value in ibm,associativity
* compared to other resources. Increment the size at index
- * 0, copy all associativity domains already set, then put
- * cpu_id last.
+ * 0, put cpu_id last, then copy the remaining associativity
+ * domains.
*/
vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
+ vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
+ memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1,
+ (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));
- for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
- vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
- }
+ return vcpu_assoc;
+}
+
+int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
+ int offset, PowerPCCPU *cpu)
+{
+ g_autofree uint32_t *vcpu_assoc = NULL;
- vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
+ vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu);
/* Advertise NUMA via ibm,associativity */
- return fdt_setprop(fdt, offset, "ibm,associativity",
- vcpu_assoc, sizeof(vcpu_assoc));
+ return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc,
+ VCPU_ASSOC_SIZE * sizeof(uint32_t));
}
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 9a63380801..e50a2672e3 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -107,13 +107,18 @@ typedef enum {
/*
* NUMA related macros. MAX_DISTANCE_REF_POINTS was taken
- * from Taken from Linux kernel arch/powerpc/mm/numa.h.
+ * from Linux kernel arch/powerpc/mm/numa.h. It represents the
+ * amount of associativity domains for non-CPU resources.
*
* NUMA_ASSOC_SIZE is the base array size of an ibm,associativity
* array for any non-CPU resource.
+ *
+ * VCPU_ASSOC_SIZE represents the size of ibm,associativity array
+ * for CPUs, which has an extra element (vcpu_id) in the end.
*/
#define MAX_DISTANCE_REF_POINTS 4
#define NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1)
+#define VCPU_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1)
typedef struct SpaprCapabilities SpaprCapabilities;
struct SpaprCapabilities {
--
2.26.2
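For reference, here is a minimal standalone sketch of the layout the new
helper builds, using a hypothetical per-node associativity array and
vcpu_id. The real code stores every element big-endian via cpu_to_be32();
plain host-endian values are used here to keep the example short:

#include <glib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_DISTANCE_REF_POINTS 4
#define NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1)   /* 5 */
#define VCPU_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1)           /* 6 */

int main(void)
{
    /* Hypothetical per-node array: domain count, then four domains */
    uint32_t node_assoc[NUMA_ASSOC_SIZE] = {
        MAX_DISTANCE_REF_POINTS, 0, 0, 0, 1
    };
    uint32_t vcpu_id = 2;
    uint32_t *vcpu_assoc = g_new(uint32_t, VCPU_ASSOC_SIZE);
    int i;

    /* Count goes up by one, vcpu_id goes last, the rest is copied */
    vcpu_assoc[0] = MAX_DISTANCE_REF_POINTS + 1;
    vcpu_assoc[VCPU_ASSOC_SIZE - 1] = vcpu_id;
    memcpy(vcpu_assoc + 1, node_assoc + 1,
           (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));

    for (i = 0; i < VCPU_ASSOC_SIZE; i++) {
        printf("vcpu_assoc[%d] = %u\n", i, vcpu_assoc[i]);
    }
    g_free(vcpu_assoc);
    return 0;
}

This prints 5, 0, 0, 0, 1, 2: the incremented count, the copied domains,
and the vcpu_id appended at the end.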
On Fri, 4 Sep 2020 10:56:30 -0300
Daniel Henrique Barboza <danielhb413@gmail.com> wrote:
> The work to be done in h_home_node_associativity() intersects
> with what is already done in spapr_numa_fixup_cpu_dt(). This
> patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
> be used for both spapr_numa_fixup_cpu_dt() and
> h_home_node_associativity().
>
> While we're at it, use memcpy() instead of a loop assignment
> to create the returned array.
>
> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
> ---
> hw/ppc/spapr_numa.c | 33 ++++++++++++++++++++-------------
> include/hw/ppc/spapr.h | 7 ++++++-
> 2 files changed, 26 insertions(+), 14 deletions(-)
>
> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
> index 368c1a494d..674d2ee86d 100644
> --- a/hw/ppc/spapr_numa.c
> +++ b/hw/ppc/spapr_numa.c
> @@ -71,31 +71,38 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
> sizeof(spapr->numa_assoc_array[nodeid]))));
> }
>
> -int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
> - int offset, PowerPCCPU *cpu)
> +static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
> + PowerPCCPU *cpu)
> {
> - uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
> - uint32_t vcpu_assoc[vcpu_assoc_size];
> + uint32_t *vcpu_assoc = g_malloc(VCPU_ASSOC_SIZE * sizeof(uint32_t));
CODING_STYLE recommends g_new(uint32_t, VCPU_ASSOC_SIZE)
> int index = spapr_get_vcpu_id(cpu);
> - int i;
> +
> + g_assert(vcpu_assoc != NULL);
>
g_malloc() and friends only return NULL when passed a zero size,
which is obviously not the case here.
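To illustrate the distinction (a sketch with a hypothetical helper name,
not part of the patch): g_malloc() and g_new() abort the whole process
when an allocation fails, so checking their result is dead code; only the
g_try_*() variants report failure through NULL:

#include <glib.h>
#include <stdint.h>

static uint32_t *alloc_assoc_checked(gsize n)
{
    /*
     * g_malloc()/g_new() abort the process if the allocation fails,
     * so their result never needs a NULL check for a non-zero size.
     * g_try_new() is the variant that reports failure via NULL.
     */
    uint32_t *buf = g_try_new(uint32_t, n);

    if (buf == NULL) {
        return NULL;   /* recoverable error path, unlike g_new() */
    }
    return buf;
}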
> /*
> * VCPUs have an extra 'cpu_id' value in ibm,associativity
> * compared to other resources. Increment the size at index
> - * 0, copy all associativity domains already set, then put
> - * cpu_id last.
> + * 0, put cpu_id last, then copy the remaining associativity
> + * domains.
> */
> vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
> + vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
> + memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1,
> + (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));
>
> - for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
> - vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
> - }
> + return vcpu_assoc;
> +}
> +
> +int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
> + int offset, PowerPCCPU *cpu)
> +{
> + g_autofree uint32_t *vcpu_assoc = NULL;
>
> - vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
> + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu);
>
> /* Advertise NUMA via ibm,associativity */
> - return fdt_setprop(fdt, offset, "ibm,associativity",
> - vcpu_assoc, sizeof(vcpu_assoc));
> + return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc,
> + VCPU_ASSOC_SIZE * sizeof(uint32_t));
> }
>
>
> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
> index 9a63380801..e50a2672e3 100644
> --- a/include/hw/ppc/spapr.h
> +++ b/include/hw/ppc/spapr.h
> @@ -107,13 +107,18 @@ typedef enum {
>
> /*
> * NUMA related macros. MAX_DISTANCE_REF_POINTS was taken
> - * from Taken from Linux kernel arch/powerpc/mm/numa.h.
Heh a good opportunity to fix the "from Taken from" typo I guess ;)
> + * from Linux kernel arch/powerpc/mm/numa.h. It represents the
> + * amount of associativity domains for non-CPU resources.
> *
> * NUMA_ASSOC_SIZE is the base array size of an ibm,associativity
> * array for any non-CPU resource.
> + *
> + * VCPU_ASSOC_SIZE represents the size of ibm,associativity array
> + * for CPUs, which has an extra element (vcpu_id) in the end.
> */
> #define MAX_DISTANCE_REF_POINTS 4
> #define NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1)
> +#define VCPU_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1)
>
> typedef struct SpaprCapabilities SpaprCapabilities;
> struct SpaprCapabilities {
With the comments on g_malloc() addressed, feel free to add:
Reviewed-by: Greg Kurz <groug@kaod.org>
On 9/4/20 11:35 AM, Greg Kurz wrote:
> On Fri, 4 Sep 2020 10:56:30 -0300
> Daniel Henrique Barboza <danielhb413@gmail.com> wrote:
>
>> The work to be done in h_home_node_associativity() intersects
>> with what is already done in spapr_numa_fixup_cpu_dt(). This
>> patch creates a new helper, spapr_numa_get_vcpu_assoc(), to
>> be used for both spapr_numa_fixup_cpu_dt() and
>> h_home_node_associativity().
>>
>> While we're at it, use memcpy() instead of a loop assignment
>> to create the returned array.
>>
>> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
>> ---
>> hw/ppc/spapr_numa.c | 33 ++++++++++++++++++++-------------
>> include/hw/ppc/spapr.h | 7 ++++++-
>> 2 files changed, 26 insertions(+), 14 deletions(-)
>>
>> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
>> index 368c1a494d..674d2ee86d 100644
>> --- a/hw/ppc/spapr_numa.c
>> +++ b/hw/ppc/spapr_numa.c
>> @@ -71,31 +71,38 @@ void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
>> sizeof(spapr->numa_assoc_array[nodeid]))));
>> }
>>
>> -int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>> - int offset, PowerPCCPU *cpu)
>> +static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
>> + PowerPCCPU *cpu)
>> {
>> - uint vcpu_assoc_size = NUMA_ASSOC_SIZE + 1;
>> - uint32_t vcpu_assoc[vcpu_assoc_size];
>> + uint32_t *vcpu_assoc = g_malloc(VCPU_ASSOC_SIZE * sizeof(uint32_t));
>
> CODING_STYLE recommends g_new(uint32_t, VCPU_ASSOC_SIZE)
Guess I can't rely solely on scripts/checkpatch.pl anymore...
>
>> int index = spapr_get_vcpu_id(cpu);
>> - int i;
>> +
>> + g_assert(vcpu_assoc != NULL);
>>
>
> g_malloc() and friends only return NULL when passed a zero size,
> which is obviously not the case here.
This was my attempt to cover the case where g_malloc() might fail,
but reading the docs again I believe a failure in g_malloc() would
result in application termination. What I did there makes sense for
g_try_malloc().
I'll change it to g_new() and drop the g_assert().
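That is, roughly (a sketch of the planned v6 helper with both
suggestions applied, not the final patch):

static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu)
{
    /* g_new() aborts on allocation failure, so no NULL check needed */
    uint32_t *vcpu_assoc = g_new(uint32_t, VCPU_ASSOC_SIZE);
    int index = spapr_get_vcpu_id(cpu);

    /*
     * VCPUs have an extra 'cpu_id' value in ibm,associativity
     * compared to other resources. Increment the size at index
     * 0, put cpu_id last, then copy the remaining associativity
     * domains.
     */
    vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
    vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
    memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1,
           (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));

    return vcpu_assoc;
}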
>
>> /*
>> * VCPUs have an extra 'cpu_id' value in ibm,associativity
>> * compared to other resources. Increment the size at index
>> - * 0, copy all associativity domains already set, then put
>> - * cpu_id last.
>> + * 0, put cpu_id last, then copy the remaining associativity
>> + * domains.
>> */
>> vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
>> + vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
>> + memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1,
>> + (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));
>>
>> - for (i = 1; i <= MAX_DISTANCE_REF_POINTS; i++) {
>> - vcpu_assoc[i] = spapr->numa_assoc_array[cpu->node_id][i];
>> - }
>> + return vcpu_assoc;
>> +}
>> +
>> +int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>> + int offset, PowerPCCPU *cpu)
>> +{
>> + g_autofree uint32_t *vcpu_assoc = NULL;
>>
>> - vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index);
>> + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu);
>>
>> /* Advertise NUMA via ibm,associativity */
>> - return fdt_setprop(fdt, offset, "ibm,associativity",
>> - vcpu_assoc, sizeof(vcpu_assoc));
>> + return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc,
>> + VCPU_ASSOC_SIZE * sizeof(uint32_t));
>> }
>>
>>
>> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
>> index 9a63380801..e50a2672e3 100644
>> --- a/include/hw/ppc/spapr.h
>> +++ b/include/hw/ppc/spapr.h
>> @@ -107,13 +107,18 @@ typedef enum {
>>
>> /*
>> * NUMA related macros. MAX_DISTANCE_REF_POINTS was taken
>> - * from Taken from Linux kernel arch/powerpc/mm/numa.h.
>
> Heh a good opportunity to fix the "from Taken from" typo I guess ;)
>
>> + * from Linux kernel arch/powerpc/mm/numa.h. It represents the
>> + * amount of associativity domains for non-CPU resources.
>> *
>> * NUMA_ASSOC_SIZE is the base array size of an ibm,associativity
>> * array for any non-CPU resource.
>> + *
>> + * VCPU_ASSOC_SIZE represents the size of ibm,associativity array
>> + * for CPUs, which has an extra element (vcpu_id) in the end.
>> */
>> #define MAX_DISTANCE_REF_POINTS 4
>> #define NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1)
>> +#define VCPU_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1)
>>
>> typedef struct SpaprCapabilities SpaprCapabilities;
>> struct SpaprCapabilities {
>
> With the comments on g_malloc() addressed, feel free to add:
>
> Reviewed-by: Greg Kurz <groug@kaod.org>
Thanks! I'll change g_malloc() and so on as you suggested and send
this as v6.
DHB
>