When Sub-NUMA Cluster mode is enabled (snc_ways > 1), use RDT_RESOURCE_NODE
instead of RDT_RESOURCE_L3 for all monitoring operations.
The mon_scale and num_rmid values, reported by CPUID(0xf,0x1) in EBX and
ECX respectively, must be scaled down by the number of Sub-NUMA Clusters,
since each SNC node gets only its share of the L3 cache's RMIDs.
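For example, on a hypothetical two-cluster SNC system where CPUID(0xf,0x1)
reports 256 RMIDs (ECX = 255), each node-scoped monitoring domain would be
left with 128 RMIDs, with mon_scale likewise divided by two.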
A subsequent change will detect Sub-NUMA Cluster mode and set "snc_ways"
accordingly. For now it is set to one (meaning each L3 cache spans one
node).
Signed-off-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Peter Newman <peternewman@google.com>
---
arch/x86/kernel/cpu/resctrl/internal.h | 7 +++++++
arch/x86/kernel/cpu/resctrl/core.c | 7 ++++++-
arch/x86/kernel/cpu/resctrl/monitor.c | 4 ++--
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 2 +-
4 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 243017096ddf..38bac0062c82 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -430,6 +430,8 @@ DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
extern struct dentry *debugfs_resctrl;
+extern int snc_ways;
+
enum resctrl_res_level {
RDT_RESOURCE_L3,
RDT_RESOURCE_L2,
@@ -447,6 +449,11 @@ enum resctrl_scope {
SCOPE_NODE,
};
+static inline int get_mbm_res_level(void)
+{
+ return snc_ways > 1 ? RDT_RESOURCE_NODE : RDT_RESOURCE_L3;
+}
+
static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(res);
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index e4bd3072927c..6fe9f87d4403 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -48,6 +48,11 @@ int max_name_width, max_data_width;
*/
bool rdt_alloc_capable;
+/*
+ * How many Sub-Numa Cluster nodes share a single L3 cache
+ */
+int snc_ways = 1;
+
static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
struct rdt_resource *r);
@@ -831,7 +836,7 @@ static __init bool get_rdt_alloc_resources(void)
static __init bool get_rdt_mon_resources(void)
{
- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ struct rdt_resource *r = &rdt_resources_all[get_mbm_res_level()].r_resctrl;
if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 9be6ffdd01ae..da3f36212898 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -787,8 +787,8 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
int ret;
resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024;
- hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
- r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
+ hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale / snc_ways;
+ r->num_rmid = (boot_cpu_data.x86_cache_max_rmid + 1) / snc_ways;
hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;
if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 418658f0a9ad..d037f3da9e55 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2524,7 +2524,7 @@ static int rdt_get_tree(struct fs_context *fc)
static_branch_enable_cpuslocked(&rdt_enable_key);
if (is_mbm_enabled()) {
- r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ r = &rdt_resources_all[get_mbm_res_level()].r_resctrl;
list_for_each_entry(dom, &r->domains, list)
mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
}
--
2.40.1
Hi Tony,

Regarding subject: "Add code" is not necessary.

On 7/13/2023 9:32 AM, Tony Luck wrote:
> When Sub-NUMA Cluster mode is enabled (snc_ways > 1), use RDT_RESOURCE_NODE
> instead of RDT_RESOURCE_L3 for all monitoring operations.

This duplication of resource does not look right to me. RDT_RESOURCE_NODE
now contains the monitoring data for RDT_RESOURCE_L3 with related
structures within RDT_RESOURCE_L3 going unused.

> +static inline int get_mbm_res_level(void)
> +{
> +	return snc_ways > 1 ? RDT_RESOURCE_NODE : RDT_RESOURCE_L3;
> +}

Need to return the enum here? It may be simpler for this helper to just
return a pointer to the resource. (Although the need for a separate
resource is still not clear to me.)

> +/*
> + * How many Sub-Numa Cluster nodes share a single L3 cache
> + */
> +int snc_ways = 1;

Since snc_ways is always used I think the comment should provide more
detail on the possible values it may have. For example, to a reader it
may not be obvious what the value of snc_ways should be if SNC is
disabled. Also, what does "ways" refer to? (Also mentioned later.)
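For illustration only, the extra detail could look something like this
(the exact wording is just a sketch):

	/*
	 * Number of SNC nodes that share each L3 cache.  Set to 1 when
	 * Sub-NUMA Cluster mode is disabled or not supported, i.e. when
	 * each L3 cache spans a single NUMA node.
	 */
	int snc_ways = 1;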
>  	if (is_mbm_enabled()) {
> -		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
> +		r = &rdt_resources_all[get_mbm_res_level()].r_resctrl;
>  		list_for_each_entry(dom, &r->domains, list)
>  			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
>  	}

This final hunk makes me wonder why the monitor.c:mon_resource is
necessary at all. A single helper used everywhere may be simpler.
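A minimal sketch of what such a single helper could look like, assuming it
returns a pointer to the resource as suggested above (the name
get_mbm_resource() is purely illustrative):

	static inline struct rdt_resource *get_mbm_resource(void)
	{
		enum resctrl_res_level lvl;

		/* Use the node-scoped resource when SNC is active */
		lvl = snc_ways > 1 ? RDT_RESOURCE_NODE : RDT_RESOURCE_L3;
		return &rdt_resources_all[lvl].r_resctrl;
	}

Callers in core.c and rdtgroup.c would then just do
"r = get_mbm_resource();".

Reinette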