Now that the x86 topology code has a sensible nodes-per-package
measure, that does not depend on the online status of CPUs, use this
to divinate the SNC mode.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
arch/x86/kernel/cpu/resctrl/monitor.c | 44 ----------------------------------
1 file changed, 1 insertion(+), 43 deletions(-)
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -364,51 +364,9 @@ void arch_mon_domain_online(struct rdt_r
msr_clear_bit(MSR_RMID_SNC_CONFIG, 0);
}
-/* CPU models that support MSR_RMID_SNC_CONFIG */
-static const struct x86_cpu_id snc_cpu_ids[] __initconst = {
- X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
- X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
- X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0),
- X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, 0),
- X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, 0),
- X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, 0),
- {}
-};
-
-/*
- * There isn't a simple hardware bit that indicates whether a CPU is running
- * in Sub-NUMA Cluster (SNC) mode. Infer the state by comparing the
- * number of CPUs sharing the L3 cache with CPU0 to the number of CPUs in
- * the same NUMA node as CPU0.
- * It is not possible to accurately determine SNC state if the system is
- * booted with a maxcpus=N parameter. That distorts the ratio of SNC nodes
- * to L3 caches. It will be OK if system is booted with hyperthreading
- * disabled (since this doesn't affect the ratio).
- */
static __init int snc_get_config(void)
{
- struct cacheinfo *ci = get_cpu_cacheinfo_level(0, RESCTRL_L3_CACHE);
- const cpumask_t *node0_cpumask;
- int cpus_per_node, cpus_per_l3;
- int ret;
-
- if (!x86_match_cpu(snc_cpu_ids) || !ci)
- return 1;
-
- cpus_read_lock();
- if (num_online_cpus() != num_present_cpus())
- pr_warn("Some CPUs offline, SNC detection may be incorrect\n");
- cpus_read_unlock();
-
- node0_cpumask = cpumask_of_node(cpu_to_node(0));
-
- cpus_per_node = cpumask_weight(node0_cpumask);
- cpus_per_l3 = cpumask_weight(&ci->shared_cpu_map);
-
- if (!cpus_per_node || !cpus_per_l3)
- return 1;
-
- ret = cpus_per_l3 / cpus_per_node;
+ int ret = __num_nodes_per_package;
/* sanity check: Only valid results are 1, 2, 3, 4, 6 */
switch (ret) {
On Thu, Feb 26, 2026 at 11:49:15AM +0100, Peter Zijlstra wrote:
> Now that the x86 topology code has a sensible nodes-per-package
> measure, that does not depend on the online status of CPUs, use this
> to divinate the SNC mode.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
> arch/x86/kernel/cpu/resctrl/monitor.c | 44 ----------------------------------
> 1 file changed, 1 insertion(+), 43 deletions(-)
>
> --- a/arch/x86/kernel/cpu/resctrl/monitor.c
> +++ b/arch/x86/kernel/cpu/resctrl/monitor.c
> @@ -364,51 +364,9 @@ void arch_mon_domain_online(struct rdt_r
> msr_clear_bit(MSR_RMID_SNC_CONFIG, 0);
> }
>
> -/* CPU models that support MSR_RMID_SNC_CONFIG */
> -static const struct x86_cpu_id snc_cpu_ids[] __initconst = {
> - X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
> - X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
> - X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0),
> - X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, 0),
> - X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, 0),
> - X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, 0),
> - {}
> -};
It isn't safe to drop this and the x86_match_cpu() check.
These are the CPUs that implement SNC and MSR_RMID_SNC_CONFIG. So if you
set __num_nodes_per_package > 1 on an older CoD system Linux will
think this is SNC and poke this MSR (and get #GP).
-Tony
I think this resctrl patch should look like this:
-Tony
From c1a1b6c681a3a466c6c0bb4ba5a2cdd4dbefdb60 Mon Sep 17 00:00:00 2001
From: Tony Luck <tony.luck@intel.com>
Date: Thu, 26 Feb 2026 12:35:54 -0800
Subject: [PATCH] x86/resctrl: Fix SNC detection
Now that the x86 topology code has a sensible nodes-per-package
measure, that does not depend on the online status of CPUs, use this
to divinate the SNC mode.
Note that when Cluster on Die (CoD) is configured on older systems this
will also show multiple NUMA nodes per package. Intel Resource Director
Technology is incompatible with CoD. Print a warning and do not use the
fixup MSR_RMID_SNC_CONFIG.
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
arch/x86/kernel/cpu/resctrl/monitor.c | 36 ++++-----------------------
1 file changed, 5 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index e6a154240b8d..8ff0f78b8658 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -364,7 +364,7 @@ void arch_mon_domain_online(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
msr_clear_bit(MSR_RMID_SNC_CONFIG, 0);
}
-/* CPU models that support MSR_RMID_SNC_CONFIG */
+/* CPU models that support SNC and MSR_RMID_SNC_CONFIG */
static const struct x86_cpu_id snc_cpu_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
@@ -375,40 +375,14 @@ static const struct x86_cpu_id snc_cpu_ids[] __initconst = {
{}
};
-/*
- * There isn't a simple hardware bit that indicates whether a CPU is running
- * in Sub-NUMA Cluster (SNC) mode. Infer the state by comparing the
- * number of CPUs sharing the L3 cache with CPU0 to the number of CPUs in
- * the same NUMA node as CPU0.
- * It is not possible to accurately determine SNC state if the system is
- * booted with a maxcpus=N parameter. That distorts the ratio of SNC nodes
- * to L3 caches. It will be OK if system is booted with hyperthreading
- * disabled (since this doesn't affect the ratio).
- */
static __init int snc_get_config(void)
{
- struct cacheinfo *ci = get_cpu_cacheinfo_level(0, RESCTRL_L3_CACHE);
- const cpumask_t *node0_cpumask;
- int cpus_per_node, cpus_per_l3;
- int ret;
-
- if (!x86_match_cpu(snc_cpu_ids) || !ci)
- return 1;
+ int ret = __num_nodes_per_package;
- cpus_read_lock();
- if (num_online_cpus() != num_present_cpus())
- pr_warn("Some CPUs offline, SNC detection may be incorrect\n");
- cpus_read_unlock();
-
- node0_cpumask = cpumask_of_node(cpu_to_node(0));
-
- cpus_per_node = cpumask_weight(node0_cpumask);
- cpus_per_l3 = cpumask_weight(&ci->shared_cpu_map);
-
- if (!cpus_per_node || !cpus_per_l3)
+ if (__num_nodes_per_package > 1 && !x86_match_cpu(snc_cpu_ids)) {
+ pr_warn("CoD enabled system? Resctrl not supported\n");
return 1;
-
- ret = cpus_per_l3 / cpus_per_node;
+ }
/* sanity check: Only valid results are 1, 2, 3, 4, 6 */
switch (ret) {
--
2.53.0
On Thu, Feb 26, 2026 at 12:47:41PM -0800, Luck, Tony wrote:
> I think this resctrl patch should look like this:
Ah, great. I was a little heavy on the delete button indeed.
Also, I hope you told the RDT guys what you think about being required to
know the SNC number, but them not providing it anywhere ;-)
© 2016 - 2026 Red Hat, Inc.