Legacy resctrl features operated on subsets of CPUs in the system with
the defining attribute of each subset being an instance of a particular
level of cache. E.g. all CPUs sharing an L3 cache would be part of the
same domain.
In preparation for features that are scoped at the NUMA node level
change the code from explicit references to "cache_level" to a more
generic scope. At this point the only options for this scope are groups
of CPUs that share an L2 cache or L3 cache.
No functional change.
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
include/linux/resctrl.h | 9 ++++++--
arch/x86/kernel/cpu/resctrl/core.c | 27 ++++++++++++++++++-----
arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 15 ++++++++++++-
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 ++++++++++++-
4 files changed, 56 insertions(+), 10 deletions(-)
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 8334eeacfec5..2db1244ae642 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -144,13 +144,18 @@ struct resctrl_membw {
struct rdt_parse_data;
struct resctrl_schema;
+enum resctrl_scope {
+ RESCTRL_L3_CACHE,
+ RESCTRL_L2_CACHE,
+};
+
/**
* struct rdt_resource - attributes of a resctrl resource
* @rid: The index of the resource
* @alloc_capable: Is allocation available on this machine
* @mon_capable: Is monitor feature available on this machine
* @num_rmid: Number of RMIDs available
- * @cache_level: Which cache level defines scope of this resource
+ * @scope: Scope of this resource
* @cache: Cache allocation related data
* @membw: If the component has bandwidth controls, their properties.
* @domains: All domains for this resource
@@ -168,7 +173,7 @@ struct rdt_resource {
bool alloc_capable;
bool mon_capable;
int num_rmid;
- int cache_level;
+ enum resctrl_scope scope;
struct resctrl_cache cache;
struct resctrl_membw membw;
struct list_head domains;
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 030d3b409768..0d3bae523ecb 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -65,7 +65,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
.r_resctrl = {
.rid = RDT_RESOURCE_L3,
.name = "L3",
- .cache_level = 3,
+ .scope = RESCTRL_L3_CACHE,
.domains = domain_init(RDT_RESOURCE_L3),
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
@@ -79,7 +79,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
.r_resctrl = {
.rid = RDT_RESOURCE_L2,
.name = "L2",
- .cache_level = 2,
+ .scope = RESCTRL_L2_CACHE,
.domains = domain_init(RDT_RESOURCE_L2),
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
@@ -93,7 +93,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
.r_resctrl = {
.rid = RDT_RESOURCE_MBA,
.name = "MB",
- .cache_level = 3,
+ .scope = RESCTRL_L3_CACHE,
.domains = domain_init(RDT_RESOURCE_MBA),
.parse_ctrlval = parse_bw,
.format_str = "%d=%*u",
@@ -105,7 +105,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
.r_resctrl = {
.rid = RDT_RESOURCE_SMBA,
.name = "SMBA",
- .cache_level = 3,
+ .scope = RESCTRL_L3_CACHE,
.domains = domain_init(RDT_RESOURCE_SMBA),
.parse_ctrlval = parse_bw,
.format_str = "%d=%*u",
@@ -487,6 +487,21 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
return 0;
}
+static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
+{
+ switch (scope) {
+ case RESCTRL_L3_CACHE:
+ return get_cpu_cacheinfo_id(cpu, 3);
+ case RESCTRL_L2_CACHE:
+ return get_cpu_cacheinfo_id(cpu, 2);
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return -1;
+}
+
/*
* domain_add_cpu - Add a cpu to a resource's domain list.
*
@@ -502,7 +517,7 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
*/
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
- int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
+ int id = get_domain_id_from_scope(cpu, r->scope);
struct list_head *add_pos = NULL;
struct rdt_hw_domain *hw_dom;
struct rdt_domain *d;
@@ -552,7 +567,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
- int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
+ int id = get_domain_id_from_scope(cpu, r->scope);
struct rdt_hw_domain *hw_dom;
struct rdt_domain *d;
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 458cb7419502..e79324676f57 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -279,6 +279,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
{
struct cpu_cacheinfo *ci;
+ int cache_level;
int ret;
int i;
@@ -296,8 +297,20 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
+ switch (plr->s->res->scope) {
+ case RESCTRL_L3_CACHE:
+ cache_level = 3;
+ break;
+ case RESCTRL_L2_CACHE:
+ cache_level = 2;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -ENODEV;
+ }
+
for (i = 0; i < ci->num_leaves; i++) {
- if (ci->info_list[i].level == plr->s->res->cache_level) {
+ if (ci->info_list[i].level == cache_level) {
plr->line_size = ci->info_list[i].coherency_line_size;
return 0;
}
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 725344048f85..f510414bf6ce 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1343,12 +1343,25 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
{
struct cpu_cacheinfo *ci;
unsigned int size = 0;
+ int cache_level;
int num_b, i;
+ switch (r->scope) {
+ case RESCTRL_L3_CACHE:
+ cache_level = 3;
+ break;
+ case RESCTRL_L2_CACHE:
+ cache_level = 2;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return size;
+ }
+
num_b = bitmap_weight(&cbm, r->cache.cbm_len);
ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
for (i = 0; i < ci->num_leaves; i++) {
- if (ci->info_list[i].level == r->cache_level) {
+ if (ci->info_list[i].level == cache_level) {
size = ci->info_list[i].size / r->cache.cbm_len * num_b;
break;
}
--
2.41.0
Hi Tony,
On 8/29/23 18:44, Tony Luck wrote:
> Legacy resctrl features operated on subsets of CPUs in the system with
> the defining attribute of each subset being an instance of a particular
> level of cache. E.g. all CPUs sharing an L3 cache would be part of the
> same domain.
>
> In preparation for features that are scoped at the NUMA node level
> change the code from explicit references to "cache_level" to a more
> generic scope. At this point the only options for this scope are groups
> of CPUs that share an L2 cache or L3 cache.
>
> No functional change.
>
> Signed-off-by: Tony Luck <tony.luck@intel.com>
> ---
> include/linux/resctrl.h | 9 ++++++--
> arch/x86/kernel/cpu/resctrl/core.c | 27 ++++++++++++++++++-----
> arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 15 ++++++++++++-
> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 ++++++++++++-
> 4 files changed, 56 insertions(+), 10 deletions(-)
>
> diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
> index 8334eeacfec5..2db1244ae642 100644
> --- a/include/linux/resctrl.h
> +++ b/include/linux/resctrl.h
> @@ -144,13 +144,18 @@ struct resctrl_membw {
> struct rdt_parse_data;
> struct resctrl_schema;
>
> +enum resctrl_scope {
> + RESCTRL_L3_CACHE,
> + RESCTRL_L2_CACHE,
> +};
How about?
enum resctrl_scope {
RESCTRL_L2_CACHE = 2,
RESCTRL_L3_CACHE,
};
I think this will simplify a lot of your code below. You can directly use
scope as it is done now.
> +
> /**
> * struct rdt_resource - attributes of a resctrl resource
> * @rid: The index of the resource
> * @alloc_capable: Is allocation available on this machine
> * @mon_capable: Is monitor feature available on this machine
> * @num_rmid: Number of RMIDs available
> - * @cache_level: Which cache level defines scope of this resource
> + * @scope: Scope of this resource
> * @cache: Cache allocation related data
> * @membw: If the component has bandwidth controls, their properties.
> * @domains: All domains for this resource
> @@ -168,7 +173,7 @@ struct rdt_resource {
> bool alloc_capable;
> bool mon_capable;
> int num_rmid;
> - int cache_level;
> + enum resctrl_scope scope;
> struct resctrl_cache cache;
> struct resctrl_membw membw;
> struct list_head domains;
> diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
> index 030d3b409768..0d3bae523ecb 100644
> --- a/arch/x86/kernel/cpu/resctrl/core.c
> +++ b/arch/x86/kernel/cpu/resctrl/core.c
> @@ -65,7 +65,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> .r_resctrl = {
> .rid = RDT_RESOURCE_L3,
> .name = "L3",
> - .cache_level = 3,
> + .scope = RESCTRL_L3_CACHE,
> .domains = domain_init(RDT_RESOURCE_L3),
> .parse_ctrlval = parse_cbm,
> .format_str = "%d=%0*x",
> @@ -79,7 +79,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> .r_resctrl = {
> .rid = RDT_RESOURCE_L2,
> .name = "L2",
> - .cache_level = 2,
> + .scope = RESCTRL_L2_CACHE,
> .domains = domain_init(RDT_RESOURCE_L2),
> .parse_ctrlval = parse_cbm,
> .format_str = "%d=%0*x",
> @@ -93,7 +93,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> .r_resctrl = {
> .rid = RDT_RESOURCE_MBA,
> .name = "MB",
> - .cache_level = 3,
> + .scope = RESCTRL_L3_CACHE,
> .domains = domain_init(RDT_RESOURCE_MBA),
> .parse_ctrlval = parse_bw,
> .format_str = "%d=%*u",
> @@ -105,7 +105,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> .r_resctrl = {
> .rid = RDT_RESOURCE_SMBA,
> .name = "SMBA",
> - .cache_level = 3,
> + .scope = RESCTRL_L3_CACHE,
> .domains = domain_init(RDT_RESOURCE_SMBA),
> .parse_ctrlval = parse_bw,
> .format_str = "%d=%*u",
> @@ -487,6 +487,21 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
> return 0;
> }
>
> +static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
> +{
> + switch (scope) {
> + case RESCTRL_L3_CACHE:
> + return get_cpu_cacheinfo_id(cpu, 3);
> + case RESCTRL_L2_CACHE:
> + return get_cpu_cacheinfo_id(cpu, 2);
> + default:
> + WARN_ON_ONCE(1);
> + break;
> + }
> +
> + return -1;
> +}
> +
You may not need this code with new definition of resctrl_scope.
> /*
> * domain_add_cpu - Add a cpu to a resource's domain list.
> *
> @@ -502,7 +517,7 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
> */
> static void domain_add_cpu(int cpu, struct rdt_resource *r)
> {
> - int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
> + int id = get_domain_id_from_scope(cpu, r->scope);
You can directly call
int id = get_cpu_cacheinfo_id(cpu, r->scope);
> struct list_head *add_pos = NULL;
> struct rdt_hw_domain *hw_dom;
> struct rdt_domain *d;
> @@ -552,7 +567,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
>
> static void domain_remove_cpu(int cpu, struct rdt_resource *r)
> {
> - int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
> + int id = get_domain_id_from_scope(cpu, r->scope);
> struct rdt_hw_domain *hw_dom;
> struct rdt_domain *d;
>
> diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> index 458cb7419502..e79324676f57 100644
> --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> @@ -279,6 +279,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
> static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
> {
> struct cpu_cacheinfo *ci;
> + int cache_level;
> int ret;
> int i;
>
> @@ -296,8 +297,20 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
>
> plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
>
> + switch (plr->s->res->scope) {
> + case RESCTRL_L3_CACHE:
> + cache_level = 3;
> + break;
> + case RESCTRL_L2_CACHE:
> + cache_level = 2;
> + break;
> + default:
> + WARN_ON_ONCE(1);
> + return -ENODEV;
> + }
Again this may not be required.
> +
> for (i = 0; i < ci->num_leaves; i++) {
> - if (ci->info_list[i].level == plr->s->res->cache_level) {
> + if (ci->info_list[i].level == cache_level) {
> plr->line_size = ci->info_list[i].coherency_line_size;
> return 0;
> }
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 725344048f85..f510414bf6ce 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -1343,12 +1343,25 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
> {
> struct cpu_cacheinfo *ci;
> unsigned int size = 0;
> + int cache_level;
> int num_b, i;
>
> + switch (r->scope) {
> + case RESCTRL_L3_CACHE:
> + cache_level = 3;
> + break;
> + case RESCTRL_L2_CACHE:
> + cache_level = 2;
> + break;
> + default:
> + WARN_ON_ONCE(1);
> + return size;
> + }
Again this may not be required.
> +
> num_b = bitmap_weight(&cbm, r->cache.cbm_len);
> ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
> for (i = 0; i < ci->num_leaves; i++) {
> - if (ci->info_list[i].level == r->cache_level) {
> + if (ci->info_list[i].level == cache_level) {
> size = ci->info_list[i].size / r->cache.cbm_len * num_b;
> break;
> }
--
Thanks
Babu Moger
>> +enum resctrl_scope {
>> + RESCTRL_L3_CACHE,
>> + RESCTRL_L2_CACHE,
>> +};
>
> How about?
>
> enum resctrl_scope {
> RESCTRL_L2_CACHE = 2,
> RESCTRL_L3_CACHE,
> };
Babu. Thanks for the review.
Reinette made the same observation. I'm updating the
patch to do this. With small extra defensive step to explicitly define
RESCTRL_L3_CACHE = 3,
rather than relying on the compiler picking the next integer ... just in
case somebody adds another enum between the L2 and L3 lines.
-Tony
Hi Tony,
On 8/29/2023 4:44 PM, Tony Luck wrote:
> Legacy resctrl features operated on subsets of CPUs in the system with
What is a "legacy" resctrl feature? I am not aware of anything in resctrl
that distinguishes a feature as legacy. Could "resctrl resource" be more
appropriate to match the resctrl implementation?
Please use "operate on" instead of "operated on".
> the defining attribute of each subset being an instance of a particular
> level of cache. E.g. all CPUs sharing an L3 cache would be part of the
> same domain.
>
> In preparation for features that are scoped at the NUMA node level
> change the code from explicit references to "cache_level" to a more
> generic scope. At this point the only options for this scope are groups
> of CPUs that share an L2 cache or L3 cache.
>
> No functional change.
>
> Signed-off-by: Tony Luck <tony.luck@intel.com>
> ---
> include/linux/resctrl.h | 9 ++++++--
> arch/x86/kernel/cpu/resctrl/core.c | 27 ++++++++++++++++++-----
> arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 15 ++++++++++++-
> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 ++++++++++++-
> 4 files changed, 56 insertions(+), 10 deletions(-)
>
> diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
> index 8334eeacfec5..2db1244ae642 100644
> --- a/include/linux/resctrl.h
> +++ b/include/linux/resctrl.h
> @@ -144,13 +144,18 @@ struct resctrl_membw {
> struct rdt_parse_data;
> struct resctrl_schema;
>
> +enum resctrl_scope {
> + RESCTRL_L3_CACHE,
> + RESCTRL_L2_CACHE,
> +};
I'm curious why L3 appears before L2? This is not a big deal but
I think the additional indirection that this introduces is
not necessary. As you had in an earlier version this could be
RESCTRL_L2_CACHE = 2 and then the value can just be used directly
(after ensuring it is used in a valid context).
> +
> /**
> * struct rdt_resource - attributes of a resctrl resource
> * @rid: The index of the resource
> * @alloc_capable: Is allocation available on this machine
> * @mon_capable: Is monitor feature available on this machine
> * @num_rmid: Number of RMIDs available
> - * @cache_level: Which cache level defines scope of this resource
> + * @scope: Scope of this resource
> * @cache: Cache allocation related data
> * @membw: If the component has bandwidth controls, their properties.
> * @domains: All domains for this resource
> @@ -168,7 +173,7 @@ struct rdt_resource {
> bool alloc_capable;
> bool mon_capable;
> int num_rmid;
> - int cache_level;
> + enum resctrl_scope scope;
> struct resctrl_cache cache;
> struct resctrl_membw membw;
> struct list_head domains;
> diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
> index 030d3b409768..0d3bae523ecb 100644
> --- a/arch/x86/kernel/cpu/resctrl/core.c
> +++ b/arch/x86/kernel/cpu/resctrl/core.c
> @@ -65,7 +65,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> .r_resctrl = {
> .rid = RDT_RESOURCE_L3,
> .name = "L3",
> - .cache_level = 3,
> + .scope = RESCTRL_L3_CACHE,
> .domains = domain_init(RDT_RESOURCE_L3),
> .parse_ctrlval = parse_cbm,
> .format_str = "%d=%0*x",
> @@ -79,7 +79,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> .r_resctrl = {
> .rid = RDT_RESOURCE_L2,
> .name = "L2",
> - .cache_level = 2,
> + .scope = RESCTRL_L2_CACHE,
> .domains = domain_init(RDT_RESOURCE_L2),
> .parse_ctrlval = parse_cbm,
> .format_str = "%d=%0*x",
> @@ -93,7 +93,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> .r_resctrl = {
> .rid = RDT_RESOURCE_MBA,
> .name = "MB",
> - .cache_level = 3,
> + .scope = RESCTRL_L3_CACHE,
> .domains = domain_init(RDT_RESOURCE_MBA),
> .parse_ctrlval = parse_bw,
> .format_str = "%d=%*u",
> @@ -105,7 +105,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> .r_resctrl = {
> .rid = RDT_RESOURCE_SMBA,
> .name = "SMBA",
> - .cache_level = 3,
> + .scope = RESCTRL_L3_CACHE,
> .domains = domain_init(RDT_RESOURCE_SMBA),
> .parse_ctrlval = parse_bw,
> .format_str = "%d=%*u",
> @@ -487,6 +487,21 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
> return 0;
> }
>
> +static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
> +{
> + switch (scope) {
> + case RESCTRL_L3_CACHE:
> + return get_cpu_cacheinfo_id(cpu, 3);
> + case RESCTRL_L2_CACHE:
> + return get_cpu_cacheinfo_id(cpu, 2);
> + default:
> + WARN_ON_ONCE(1);
> + break;
> + }
> +
> + return -1;
> +}
Looking ahead at the next patch some members of rdt_resources_all[]
are left with a default "0" initialization of mon_scope that is a
valid scope of RESCTRL_L3_CACHE in this implementation that would
not be caught with defensive code like above. For the above to catch
a case like this I think that there should be some default
initialization - but if you do move to something like you
had in v3 then this may not be necessary. If the L2 scope is 2,
L3 scope is 3, node scope is 4, then no initialization will be zero
and the above default can more accurately catch failure cases.
> +
> /*
> * domain_add_cpu - Add a cpu to a resource's domain list.
> *
> @@ -502,7 +517,7 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
> */
> static void domain_add_cpu(int cpu, struct rdt_resource *r)
> {
> - int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
> + int id = get_domain_id_from_scope(cpu, r->scope);
> struct list_head *add_pos = NULL;
> struct rdt_hw_domain *hw_dom;
> struct rdt_domain *d;
> @@ -552,7 +567,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
>
> static void domain_remove_cpu(int cpu, struct rdt_resource *r)
> {
> - int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
> + int id = get_domain_id_from_scope(cpu, r->scope);
> struct rdt_hw_domain *hw_dom;
> struct rdt_domain *d;
>
> diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> index 458cb7419502..e79324676f57 100644
> --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> @@ -279,6 +279,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
> static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
> {
> struct cpu_cacheinfo *ci;
> + int cache_level;
> int ret;
> int i;
>
> @@ -296,8 +297,20 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
>
> plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
>
> + switch (plr->s->res->scope) {
> + case RESCTRL_L3_CACHE:
> + cache_level = 3;
> + break;
> + case RESCTRL_L2_CACHE:
> + cache_level = 2;
> + break;
> + default:
> + WARN_ON_ONCE(1);
> + return -ENODEV;
> + }
I think this can be simplified without the indirection - a simplified
WARN can just test for valid values of plr->s->res->scope directly
(exit on failure) and then it can be used directly in the code below
when the enum value corresponds to a cache level.
> +
> for (i = 0; i < ci->num_leaves; i++) {
> - if (ci->info_list[i].level == plr->s->res->cache_level) {
> + if (ci->info_list[i].level == cache_level) {
> plr->line_size = ci->info_list[i].coherency_line_size;
> return 0;
> }
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 725344048f85..f510414bf6ce 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -1343,12 +1343,25 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
> {
> struct cpu_cacheinfo *ci;
> unsigned int size = 0;
> + int cache_level;
> int num_b, i;
>
> + switch (r->scope) {
> + case RESCTRL_L3_CACHE:
> + cache_level = 3;
> + break;
> + case RESCTRL_L2_CACHE:
> + cache_level = 2;
> + break;
> + default:
> + WARN_ON_ONCE(1);
> + return size;
> + }
> +
Same here.
> num_b = bitmap_weight(&cbm, r->cache.cbm_len);
> ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
> for (i = 0; i < ci->num_leaves; i++) {
> - if (ci->info_list[i].level == r->cache_level) {
> + if (ci->info_list[i].level == cache_level) {
> size = ci->info_list[i].size / r->cache.cbm_len * num_b;
> break;
> }
Reinette
On Mon, Sep 25, 2023 at 04:22:29PM -0700, Reinette Chatre wrote:
> Hi Tony,
>
> On 8/29/2023 4:44 PM, Tony Luck wrote:
> > Legacy resctrl features operated on subsets of CPUs in the system with
>
> What is a "legacy" resctrl feature? I am not aware of anything in resctrl
> that distinguishes a feature as legacy. Could "resctrl resource" be more
> appropriate to match the resctrl implementation?
Ok. Will change.
>
> Please use "operate on" instead of "operated on".
Ditto.
>
> > the defining attribute of each subset being an instance of a particular
> > level of cache. E.g. all CPUs sharing an L3 cache would be part of the
> > same domain.
> >
> > In preparation for features that are scoped at the NUMA node level
> > change the code from explicit references to "cache_level" to a more
> > generic scope. At this point the only options for this scope are groups
> > of CPUs that share an L2 cache or L3 cache.
> >
> > No functional change.
> >
> > Signed-off-by: Tony Luck <tony.luck@intel.com>
> > ---
> > include/linux/resctrl.h | 9 ++++++--
> > arch/x86/kernel/cpu/resctrl/core.c | 27 ++++++++++++++++++-----
> > arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 15 ++++++++++++-
> > arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 ++++++++++++-
> > 4 files changed, 56 insertions(+), 10 deletions(-)
> >
> > diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
> > index 8334eeacfec5..2db1244ae642 100644
> > --- a/include/linux/resctrl.h
> > +++ b/include/linux/resctrl.h
> > @@ -144,13 +144,18 @@ struct resctrl_membw {
> > struct rdt_parse_data;
> > struct resctrl_schema;
> >
> > +enum resctrl_scope {
> > + RESCTRL_L3_CACHE,
> > + RESCTRL_L2_CACHE,
> > +};
>
> I'm curious why L3 appears before L2? This is not a big deal but
> I think the additional indirection that this introduces is
> not necessary. As you had in an earlier version this could be
> RESCTRL_L2_CACHE = 2 and then the value can just be used directly
> (after ensuring it is used in a valid context).
I appear to have misunderstood your earlier comments. I thought you
didn't like my use of RESCTRL_L2_CACHE = 2, and RESCTRL_L3_CACHE = 3
so that they could be passed directly to get_cpu_cacheinfo_id().
But I see now the issue was "after ensuring it is used in a valid
context". Are you looking for something like this:
enum resctrl_scope {
RESCTRL_UNINITIALIZED_SCOPE = 0,
RESCTRL_L2_CACHE = 2,
RESCTRL_L3_CACHE = 3,
RESCTRL_L3_NODE = 4,
};
static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
{
switch (scope) {
case RESCTRL_L2_CACHE:
case RESCTRL_L3_CACHE:
return get_cpu_cacheinfo_id(cpu, scope);
case RESCTRL_NODE:
return cpu_to_node(cpu);
default:
case RESCTRL_UNINITIALIZED_SCOPE:
WARN_ON_ONCE(1);
return -1;
}
return -1;
}
>
> > +
> > /**
> > * struct rdt_resource - attributes of a resctrl resource
> > * @rid: The index of the resource
> > * @alloc_capable: Is allocation available on this machine
> > * @mon_capable: Is monitor feature available on this machine
> > * @num_rmid: Number of RMIDs available
> > - * @cache_level: Which cache level defines scope of this resource
> > + * @scope: Scope of this resource
> > * @cache: Cache allocation related data
> > * @membw: If the component has bandwidth controls, their properties.
> > * @domains: All domains for this resource
> > @@ -168,7 +173,7 @@ struct rdt_resource {
> > bool alloc_capable;
> > bool mon_capable;
> > int num_rmid;
> > - int cache_level;
> > + enum resctrl_scope scope;
> > struct resctrl_cache cache;
> > struct resctrl_membw membw;
> > struct list_head domains;
> > diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
> > index 030d3b409768..0d3bae523ecb 100644
> > --- a/arch/x86/kernel/cpu/resctrl/core.c
> > +++ b/arch/x86/kernel/cpu/resctrl/core.c
> > @@ -65,7 +65,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> > .r_resctrl = {
> > .rid = RDT_RESOURCE_L3,
> > .name = "L3",
> > - .cache_level = 3,
> > + .scope = RESCTRL_L3_CACHE,
> > .domains = domain_init(RDT_RESOURCE_L3),
> > .parse_ctrlval = parse_cbm,
> > .format_str = "%d=%0*x",
> > @@ -79,7 +79,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> > .r_resctrl = {
> > .rid = RDT_RESOURCE_L2,
> > .name = "L2",
> > - .cache_level = 2,
> > + .scope = RESCTRL_L2_CACHE,
> > .domains = domain_init(RDT_RESOURCE_L2),
> > .parse_ctrlval = parse_cbm,
> > .format_str = "%d=%0*x",
> > @@ -93,7 +93,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> > .r_resctrl = {
> > .rid = RDT_RESOURCE_MBA,
> > .name = "MB",
> > - .cache_level = 3,
> > + .scope = RESCTRL_L3_CACHE,
> > .domains = domain_init(RDT_RESOURCE_MBA),
> > .parse_ctrlval = parse_bw,
> > .format_str = "%d=%*u",
> > @@ -105,7 +105,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
> > .r_resctrl = {
> > .rid = RDT_RESOURCE_SMBA,
> > .name = "SMBA",
> > - .cache_level = 3,
> > + .scope = RESCTRL_L3_CACHE,
> > .domains = domain_init(RDT_RESOURCE_SMBA),
> > .parse_ctrlval = parse_bw,
> > .format_str = "%d=%*u",
> > @@ -487,6 +487,21 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
> > return 0;
> > }
> >
> > +static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
> > +{
> > + switch (scope) {
> > + case RESCTRL_L3_CACHE:
> > + return get_cpu_cacheinfo_id(cpu, 3);
> > + case RESCTRL_L2_CACHE:
> > + return get_cpu_cacheinfo_id(cpu, 2);
> > + default:
> > + WARN_ON_ONCE(1);
> > + break;
> > + }
> > +
> > + return -1;
> > +}
>
> Looking ahead at the next patch some members of rdt_resources_all[]
> are left with a default "0" initialization of mon_scope that is a
> valid scope of RESCTRL_L3_CACHE in this implementation that would
> not be caught with defensive code like above. For the above to catch
> a case like this I think that there should be some default
> initialization - but if you do move to something like you
> had in v3 then this may not be necessary. If the L2 scope is 2,
> L3 scope is 3, node scope is 4, then no initialization will be zero
> and the above default can more accurately catch failure cases.
See above (with a defensive enum constant that has the value 0).
>
> > +
> > /*
> > * domain_add_cpu - Add a cpu to a resource's domain list.
> > *
> > @@ -502,7 +517,7 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
> > */
> > static void domain_add_cpu(int cpu, struct rdt_resource *r)
> > {
> > - int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
> > + int id = get_domain_id_from_scope(cpu, r->scope);
> > struct list_head *add_pos = NULL;
> > struct rdt_hw_domain *hw_dom;
> > struct rdt_domain *d;
> > @@ -552,7 +567,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
> >
> > static void domain_remove_cpu(int cpu, struct rdt_resource *r)
> > {
> > - int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
> > + int id = get_domain_id_from_scope(cpu, r->scope);
> > struct rdt_hw_domain *hw_dom;
> > struct rdt_domain *d;
> >
> > diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> > index 458cb7419502..e79324676f57 100644
> > --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> > +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> > @@ -279,6 +279,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
> > static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
> > {
> > struct cpu_cacheinfo *ci;
> > + int cache_level;
> > int ret;
> > int i;
> >
> > @@ -296,8 +297,20 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
> >
> > plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
> >
> > + switch (plr->s->res->scope) {
> > + case RESCTRL_L3_CACHE:
> > + cache_level = 3;
> > + break;
> > + case RESCTRL_L2_CACHE:
> > + cache_level = 2;
> > + break;
> > + default:
> > + WARN_ON_ONCE(1);
> > + return -ENODEV;
> > + }
>
> I think this can be simplified without the indirection - a simplified
> WARN can just test for valid values of plr->s->res->scope directly
> (exit on failure) and then it can be used directly in the code below
> when the enum value corresponds to a cache level.
Is this what you want here?
if (plr->s->res->scope != RESCTRL_L2_CACHE &&
plr->s->res->scope != RESCTRL_L3_CACHE) {
WARN_ON_ONCE(1);
return -ENODEV;
}
>
> > +
> > for (i = 0; i < ci->num_leaves; i++) {
> > - if (ci->info_list[i].level == plr->s->res->cache_level) {
> > + if (ci->info_list[i].level == cache_level) {
then drop this change to keep using plr->s->res->cache_level (and
delete the now unused local variable cache_level).
> > plr->line_size = ci->info_list[i].coherency_line_size;
> > return 0;
> > }
> > diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> > index 725344048f85..f510414bf6ce 100644
> > --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> > +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> > @@ -1343,12 +1343,25 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
> > {
> > struct cpu_cacheinfo *ci;
> > unsigned int size = 0;
> > + int cache_level;
> > int num_b, i;
> >
> > + switch (r->scope) {
> > + case RESCTRL_L3_CACHE:
> > + cache_level = 3;
> > + break;
> > + case RESCTRL_L2_CACHE:
> > + cache_level = 2;
> > + break;
> > + default:
> > + WARN_ON_ONCE(1);
> > + return size;
> > + }
> > +
>
> Same here.
If above it what you want, will do it here too.
>
> > num_b = bitmap_weight(&cbm, r->cache.cbm_len);
> > ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
> > for (i = 0; i < ci->num_leaves; i++) {
> > - if (ci->info_list[i].level == r->cache_level) {
> > + if (ci->info_list[i].level == cache_level) {
> > size = ci->info_list[i].size / r->cache.cbm_len * num_b;
> > break;
> > }
>
>
> Reinette
-Tony
Hi Tony,
On 9/25/2023 5:56 PM, Tony Luck wrote:
> On Mon, Sep 25, 2023 at 04:22:29PM -0700, Reinette Chatre wrote:
>> On 8/29/2023 4:44 PM, Tony Luck wrote:
...
>>> diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
>>> index 8334eeacfec5..2db1244ae642 100644
>>> --- a/include/linux/resctrl.h
>>> +++ b/include/linux/resctrl.h
>>> @@ -144,13 +144,18 @@ struct resctrl_membw {
>>> struct rdt_parse_data;
>>> struct resctrl_schema;
>>>
>>> +enum resctrl_scope {
>>> + RESCTRL_L3_CACHE,
>>> + RESCTRL_L2_CACHE,
>>> +};
>>
>> I'm curious why L3 appears before L2? This is not a big deal but
>> I think the additional indirection that this introduces is
>> not necessary. As you had in an earlier version this could be
>> RESCTRL_L2_CACHE = 2 and then the value can just be used directly
>> (after ensuring it is used in a valid context).
>
> I appear to have misunderstood your earlier comments. I thought you
> didn't like my use of RESCTRL_L2_CACHE = 2, and RESCTRL_L3_CACHE = 3
> so that they could be passed directly to get_cpu_cacheinfo_id().
>
> But I see now the issue was "after ensuring it is used in a valid
> context". Are you looking for something like this:
Indeed. My concern with the earlier version was seeing a variable that
may contain any scope be used in a context where it can only represent
a cache level.
>
> enum resctrl_scope {
> RESCTRL_UNINITIALIZED_SCOPE = 0,
> RESCTRL_L2_CACHE = 2,
> RESCTRL_L3_CACHE = 3,
> RESCTRL_L3_NODE = 4,
> };
I do not think RESCTRL_UNINITIALIZED_SCOPE is required (perhaps getting too
defensive?) but adding it would surely not do harm and indeed make it
obvious how the uninitialized case is handled. I am not familiar with
customs in this regard and would be ok either way. You can decide what
works best for you.
About the new name RESCTRL_L3_NODE. It is not clear to me why "L3" is
in the name.
>
> static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
> {
> switch (scope) {
> case RESCTRL_L2_CACHE:
> case RESCTRL_L3_CACHE:
> return get_cpu_cacheinfo_id(cpu, scope);
> case RESCTRL_NODE:
> return cpu_to_node(cpu);
(oh - maybe the earlier RESCTRL_L3_NODE was a typo?)
> default:
> case RESCTRL_UNINITIALIZED_SCOPE:
> WARN_ON_ONCE(1);
> return -1;
> }
>
> return -1;
> }
>
I'll leave it to you to decide if you want to use
RESCTRL_UNINITIALIZED_SCOPE. If you do decide to keep it
could you please let the "default" case be the last one?
>>
>>> +
>>> /**
>>> * struct rdt_resource - attributes of a resctrl resource
>>> * @rid: The index of the resource
>>> * @alloc_capable: Is allocation available on this machine
>>> * @mon_capable: Is monitor feature available on this machine
>>> * @num_rmid: Number of RMIDs available
>>> - * @cache_level: Which cache level defines scope of this resource
>>> + * @scope: Scope of this resource
>>> * @cache: Cache allocation related data
>>> * @membw: If the component has bandwidth controls, their properties.
>>> * @domains: All domains for this resource
>>> @@ -168,7 +173,7 @@ struct rdt_resource {
>>> bool alloc_capable;
>>> bool mon_capable;
>>> int num_rmid;
>>> - int cache_level;
>>> + enum resctrl_scope scope;
>>> struct resctrl_cache cache;
>>> struct resctrl_membw membw;
>>> struct list_head domains;
>>> diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
>>> index 030d3b409768..0d3bae523ecb 100644
>>> --- a/arch/x86/kernel/cpu/resctrl/core.c
>>> +++ b/arch/x86/kernel/cpu/resctrl/core.c
>>> @@ -65,7 +65,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
>>> .r_resctrl = {
>>> .rid = RDT_RESOURCE_L3,
>>> .name = "L3",
>>> - .cache_level = 3,
>>> + .scope = RESCTRL_L3_CACHE,
>>> .domains = domain_init(RDT_RESOURCE_L3),
>>> .parse_ctrlval = parse_cbm,
>>> .format_str = "%d=%0*x",
>>> @@ -79,7 +79,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
>>> .r_resctrl = {
>>> .rid = RDT_RESOURCE_L2,
>>> .name = "L2",
>>> - .cache_level = 2,
>>> + .scope = RESCTRL_L2_CACHE,
>>> .domains = domain_init(RDT_RESOURCE_L2),
>>> .parse_ctrlval = parse_cbm,
>>> .format_str = "%d=%0*x",
>>> @@ -93,7 +93,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
>>> .r_resctrl = {
>>> .rid = RDT_RESOURCE_MBA,
>>> .name = "MB",
>>> - .cache_level = 3,
>>> + .scope = RESCTRL_L3_CACHE,
>>> .domains = domain_init(RDT_RESOURCE_MBA),
>>> .parse_ctrlval = parse_bw,
>>> .format_str = "%d=%*u",
>>> @@ -105,7 +105,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
>>> .r_resctrl = {
>>> .rid = RDT_RESOURCE_SMBA,
>>> .name = "SMBA",
>>> - .cache_level = 3,
>>> + .scope = RESCTRL_L3_CACHE,
>>> .domains = domain_init(RDT_RESOURCE_SMBA),
>>> .parse_ctrlval = parse_bw,
>>> .format_str = "%d=%*u",
>>> @@ -487,6 +487,21 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
>>> return 0;
>>> }
>>>
>>> +static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
>>> +{
>>> + switch (scope) {
>>> + case RESCTRL_L3_CACHE:
>>> + return get_cpu_cacheinfo_id(cpu, 3);
>>> + case RESCTRL_L2_CACHE:
>>> + return get_cpu_cacheinfo_id(cpu, 2);
>>> + default:
>>> + WARN_ON_ONCE(1);
>>> + break;
>>> + }
>>> +
>>> + return -1;
>>> +}
>>
>> Looking ahead at the next patch some members of rdt_resources_all[]
>> are left with a default "0" initialization of mon_scope that is a
>> valid scope of RESCTRL_L3_CACHE in this implementation that would
>> not be caught with defensive code like above. For the above to catch
>> a case like this I think that there should be some default
>> initialization - but if you do move to something like you
>> had in v3 then this may not be necessary. If the L2 scope is 2,
>> L3 scope is 3, node scope is 4, then no initialization will be zero
>> and the above default can more accurately catch failure cases.
>
> See above (with a defensive enum constant that has the value 0).
>
>>
>>> +
>>> /*
>>> * domain_add_cpu - Add a cpu to a resource's domain list.
>>> *
>>> @@ -502,7 +517,7 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
>>> */
>>> static void domain_add_cpu(int cpu, struct rdt_resource *r)
>>> {
>>> - int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
>>> + int id = get_domain_id_from_scope(cpu, r->scope);
>>> struct list_head *add_pos = NULL;
>>> struct rdt_hw_domain *hw_dom;
>>> struct rdt_domain *d;
>>> @@ -552,7 +567,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
>>>
>>> static void domain_remove_cpu(int cpu, struct rdt_resource *r)
>>> {
>>> - int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
>>> + int id = get_domain_id_from_scope(cpu, r->scope);
>>> struct rdt_hw_domain *hw_dom;
>>> struct rdt_domain *d;
>>>
>>> diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
>>> index 458cb7419502..e79324676f57 100644
>>> --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
>>> +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
>>> @@ -279,6 +279,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
>>> static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
>>> {
>>> struct cpu_cacheinfo *ci;
>>> + int cache_level;
>>> int ret;
>>> int i;
>>>
>>> @@ -296,8 +297,20 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
>>>
>>> plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
>>>
>>> + switch (plr->s->res->scope) {
>>> + case RESCTRL_L3_CACHE:
>>> + cache_level = 3;
>>> + break;
>>> + case RESCTRL_L2_CACHE:
>>> + cache_level = 2;
>>> + break;
>>> + default:
>>> + WARN_ON_ONCE(1);
>>> + return -ENODEV;
>>> + }
>>
>> I think this can be simplified without the indirection - a simplified
>> WARN can just test for valid values of plr->s->res->scope directly
>> (exit on failure) and then it can be used directly in the code below
>> when the enum value corresponds to a cache level.
>
> Is this what you want here?
>
> if (plr->s->res->scope != RESCTRL_L2_CACHE &&
> plr->s->res->scope != RESCTRL_L3_CACHE) {
> WARN_ON_ONCE(1);
> return -ENODEV;
> }
Something like this, yes. It could be simplified more with a:
/* Just to get line length shorter: */
enum resctrl_scope scope = plr->s->res->scope; /* or cache_level? */
if (WARN_ON_ONCE(scope != RESCTRL_L2_CACHE && scope != RESCTRL_L3_CACHE))
return -ENODEV;
Reinette
Hello,
On 2023-08-29 at 16:44:19 -0700, Tony Luck wrote:
>diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
>index 030d3b409768..0d3bae523ecb 100644
>--- a/arch/x86/kernel/cpu/resctrl/core.c
>+++ b/arch/x86/kernel/cpu/resctrl/core.c
>@@ -487,6 +487,21 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
> return 0;
> }
>
>+static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
>+{
>+ switch (scope) {
>+ case RESCTRL_L3_CACHE:
>+ return get_cpu_cacheinfo_id(cpu, 3);
>+ case RESCTRL_L2_CACHE:
>+ return get_cpu_cacheinfo_id(cpu, 2);
>+ default:
>+ WARN_ON_ONCE(1);
>+ break;
>+ }
>+
>+ return -1;
>+}
Is there some reason the "return -1" is outside of the default switch
case?
Other switch statements in this patch do have returns inside the default
case, is this one different in some way?
--
Kind regards
Maciej Wieczór-Retman
> >+static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
> >+{
> >+ switch (scope) {
> >+ case RESCTRL_L3_CACHE:
> >+ return get_cpu_cacheinfo_id(cpu, 3);
> >+ case RESCTRL_L2_CACHE:
> >+ return get_cpu_cacheinfo_id(cpu, 2);
> >+ default:
> >+ WARN_ON_ONCE(1);
> >+ break;
> >+ }
> >+
> >+ return -1;
> >+}
>
> Is there some reason the "return -1" is outside of the default switch
> case?
>
> Other switch statements in this patch do have returns inside the default
> case, is this one different in some way?
I've sometimes had compilers complain about code written:
static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
{
switch (scope) {
case RESCTRL_L3_CACHE:
return get_cpu_cacheinfo_id(cpu, 3);
case RESCTRL_L2_CACHE:
return get_cpu_cacheinfo_id(cpu, 2);
default:
WARN_ON_ONCE(1);
return -1;
}
}
because they failed to notice that every path in the switch does a "return" and they
issue a warning that the function has no return value because they don't realize
that the end of the function can't be reached.
So it's defensive programming against possible compiler issues.
-Tony
On 2023-08-30 at 14:11:14 +0000, Luck, Tony wrote:
>> >+static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
>> >+{
>> >+ switch (scope) {
>> >+ case RESCTRL_L3_CACHE:
>> >+ return get_cpu_cacheinfo_id(cpu, 3);
>> >+ case RESCTRL_L2_CACHE:
>> >+ return get_cpu_cacheinfo_id(cpu, 2);
>> >+ default:
>> >+ WARN_ON_ONCE(1);
>> >+ break;
>> >+ }
>> >+
>> >+ return -1;
>> >+}
>>
>> Is there some reason the "return -1" is outside of the default switch
>> case?
>>
>> Other switch statements in this patch do have returns inside the default
>> case, is this one different in some way?
>
>I've sometimes had compilers complain about code written:
>
>static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
>{
> switch (scope) {
> case RESCTRL_L3_CACHE:
> return get_cpu_cacheinfo_id(cpu, 3);
> case RESCTRL_L2_CACHE:
> return get_cpu_cacheinfo_id(cpu, 2);
> default:
> WARN_ON_ONCE(1);
> return -1;
> }
>}
>
>because they failed to notice that every path in the switch does a "return" and they
>issue a warning that the function has no return value because they don't realize
>that the end of the function can't be reached.
>
>So it's defensive programming against possible compiler issues.
I recall getting that error somewhere while playing around with a
language server protocol for neovim a while ago but I tried to cause
it today with gcc and clang and with some different flags and couldn't.
Are there some particular compilers or compiler flags that trigger
that?
--
Kind regards
Maciej Wieczór-Retman
> >I've sometimes had compilers complain about code written:
> >
> >static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
> >{
> > switch (scope) {
> > case RESCTRL_L3_CACHE:
> > return get_cpu_cacheinfo_id(cpu, 3);
> > case RESCTRL_L2_CACHE:
> > return get_cpu_cacheinfo_id(cpu, 2);
> > default:
> > WARN_ON_ONCE(1);
> > return -1;
> > }
> >}
> >
> >because they failed to notice that every path in the switch does a "return" and they
> >issue a warning that the function has no return value because they don't realize
> >that the end of the function can't be reached.
> >
> >So it's defensive programming against possible compiler issues.
>
> I recall getting that error somewhere while playing around with a
> language server protocol for neovim a while ago but I tried to cause
> it today with gcc and clang and with some different flags and couldn't.
> Are there some particular compilers or compiler flags that trigger
> that?
I think I saw this from an lkp report using clang. But it's quite possible
that the exact code construct was different in some way.
-Tony
© 2016 - 2025 Red Hat, Inc.