[PATCH v2 2/5] workqueue: add WQ_AFFN_CACHE_SHARD affinity scope

Breno Leitao posted 5 patches 2 weeks, 2 days ago
There is a newer version of this series
[PATCH v2 2/5] workqueue: add WQ_AFFN_CACHE_SHARD affinity scope
Posted by Breno Leitao 2 weeks, 2 days ago
On systems where many CPUs share one LLC, unbound workqueues using
WQ_AFFN_CACHE collapse to a single worker pool, causing heavy spinlock
contention on pool->lock. For example, Chuck Lever measured 39% of
cycles lost to native_queued_spin_lock_slowpath on a 12-core shared-L3
NFS-over-RDMA system.

The existing affinity hierarchy (cpu, smt, cache, numa, system) offers
no intermediate option between per-LLC and per-SMT-core granularity.

Add WQ_AFFN_CACHE_SHARD, which subdivides each LLC into groups of at
most wq_cache_shard_size cores (default 8, tunable via boot parameter).
Shards are always split on core (SMT group) boundaries so that
Hyper-Threading siblings are never placed in different pods. Cores are
distributed across shards as evenly as possible -- for example, 36 cores
in a single LLC with max shard size 8 produces 5 shards of 8+7+7+7+7
cores.

The implementation follows the same comparator pattern as other affinity
scopes: cpu_cache_shard_id() computes a per-CPU shard index on the fly
from the already-initialized WQ_AFFN_CACHE and WQ_AFFN_SMT topology,
and cpus_share_cache_shard() is passed to init_pod_type().

Benchmarks on NVIDIA Grace (72 CPUs, single LLC, 50k items/thread) show
cache_shard delivers ~5x the throughput and ~6.5x lower p50 latency
compared to cache scope on this 72-core single-LLC system.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Breno Leitao <leitao@debian.org>
---
 include/linux/workqueue.h |   1 +
 kernel/workqueue.c        | 108 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 109 insertions(+)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 17543aec2a6e1..50bdb7e30d35f 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -133,6 +133,7 @@ enum wq_affn_scope {
 	WQ_AFFN_CPU,			/* one pod per CPU */
 	WQ_AFFN_SMT,			/* one pod per SMT */
 	WQ_AFFN_CACHE,			/* one pod per LLC */
+	WQ_AFFN_CACHE_SHARD,		/* synthetic sub-LLC shards */
 	WQ_AFFN_NUMA,			/* one pod per NUMA node */
 	WQ_AFFN_SYSTEM,			/* one pod across the whole system */
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a050971393f1f..ebbc7971b4fa6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -409,6 +409,7 @@ static const char * const wq_affn_names[WQ_AFFN_NR_TYPES] = {
 	[WQ_AFFN_CPU]		= "cpu",
 	[WQ_AFFN_SMT]		= "smt",
 	[WQ_AFFN_CACHE]		= "cache",
+	[WQ_AFFN_CACHE_SHARD]	= "cache_shard",
 	[WQ_AFFN_NUMA]		= "numa",
 	[WQ_AFFN_SYSTEM]	= "system",
 };
@@ -431,6 +432,9 @@ module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
+static unsigned int wq_cache_shard_size = 8;
+module_param_named(cache_shard_size, wq_cache_shard_size, uint, 0444);
+
 static bool wq_online;			/* can kworkers be created yet? */
 static bool wq_topo_initialized __read_mostly = false;
 
@@ -8107,6 +8111,104 @@ static bool __init cpus_share_numa(int cpu0, int cpu1)
 	return cpu_to_node(cpu0) == cpu_to_node(cpu1);
 }
 
+/**
+ * llc_count_cores - count distinct cores (SMT groups) within a cpumask
+ * @pod_cpus: the cpumask to scan (typically an LLC pod)
+ * @smt_pt:   the SMT pod type, used to identify sibling groups
+ *
+ * A core is represented by the lowest-numbered CPU in its SMT group. Returns
+ * the number of distinct cores found in @pod_cpus.
+ */
+static int __init llc_count_cores(const struct cpumask *pod_cpus,
+				  struct wq_pod_type *smt_pt)
+{
+	const struct cpumask *smt_cpus;
+	int nr_cores = 0, c;
+
+	for_each_cpu(c, pod_cpus) {
+		smt_cpus = smt_pt->pod_cpus[smt_pt->cpu_pod[c]];
+		if (cpumask_first(smt_cpus) == c)
+			nr_cores++;
+	}
+
+	return nr_cores;
+}
+
+/**
+ * llc_cpu_core_pos - find a CPU's core position within a cpumask
+ * @cpu:      the CPU to locate
+ * @pod_cpus: the cpumask to scan (typically an LLC pod)
+ * @smt_pt:   the SMT pod type, used to identify sibling groups
+ *
+ * Returns the zero-based index of @cpu's core among the distinct cores in
+ * @pod_cpus, ordered by lowest CPU number in each SMT group.
+ */
+static int __init llc_cpu_core_pos(int cpu, const struct cpumask *pod_cpus,
+				   struct wq_pod_type *smt_pt)
+{
+	const struct cpumask *smt_cpus;
+	int core_pos = 0, c;
+
+	for_each_cpu(c, pod_cpus) {
+		smt_cpus = smt_pt->pod_cpus[smt_pt->cpu_pod[c]];
+		if (cpumask_test_cpu(cpu, smt_cpus))
+			break;
+		if (cpumask_first(smt_cpus) == c)
+			core_pos++;
+	}
+
+	return core_pos;
+}
+
+/**
+ * cpu_cache_shard_id - compute the shard index for a CPU within its LLC pod
+ * @cpu: the CPU to look up
+ *
+ * Returns a shard index that is unique within the CPU's LLC pod. The LLC is
+ * divided into shards of at most wq_cache_shard_size cores, always split on
+ * core (SMT group) boundaries so that SMT siblings are never placed in
+ * different shards. Cores are distributed across shards as evenly as possible.
+ *
+ * Example: 36 cores with wq_cache_shard_size=8 gives 5 shards of
+ * 8+7+7+7+7 cores.
+ */
+static int __init cpu_cache_shard_id(int cpu)
+{
+	struct wq_pod_type *cache_pt = &wq_pod_types[WQ_AFFN_CACHE];
+	struct wq_pod_type *smt_pt = &wq_pod_types[WQ_AFFN_SMT];
+	const struct cpumask *pod_cpus;
+	int nr_cores, nr_shards, cores_per_shard, remainder, core_pos;
+
+	/* CPUs in the same LLC as @cpu */
+	pod_cpus = cache_pt->pod_cpus[cache_pt->cpu_pod[cpu]];
+	nr_cores = llc_count_cores(pod_cpus, smt_pt);
+
+	/* Compute number of shards from the max cores per shard */
+	nr_shards = DIV_ROUND_UP(nr_cores, wq_cache_shard_size);
+	/* Distribute cores as evenly as possible across shards */
+	cores_per_shard = nr_cores / nr_shards;
+	remainder = nr_cores % nr_shards;
+
+	core_pos = llc_cpu_core_pos(cpu, pod_cpus, smt_pt);
+
+	/*
+	 * Map core position to shard index. The first @remainder shards have
+	 * (cores_per_shard + 1) cores, the rest have @cores_per_shard cores.
+	 */
+	if (core_pos < remainder * (cores_per_shard + 1))
+		return core_pos / (cores_per_shard + 1);
+
+	return remainder + (core_pos - remainder * (cores_per_shard + 1)) / cores_per_shard;
+}
+
+static bool __init cpus_share_cache_shard(int cpu0, int cpu1)
+{
+	if (!cpus_share_cache(cpu0, cpu1))
+		return false;
+
+	return cpu_cache_shard_id(cpu0) == cpu_cache_shard_id(cpu1);
+}
+
 /**
  * workqueue_init_topology - initialize CPU pods for unbound workqueues
  *
@@ -8119,9 +8221,15 @@ void __init workqueue_init_topology(void)
 	struct workqueue_struct *wq;
 	int cpu;
 
+	if (!wq_cache_shard_size) {
+		pr_warn("workqueue: cache_shard_size must be > 0, setting to 1\n");
+		wq_cache_shard_size = 1;
+	}
+
 	init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
 	init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
 	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
+	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE_SHARD], cpus_share_cache_shard);
 	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
 
 	wq_topo_initialized = true;

-- 
2.52.0
Re: [PATCH v2 2/5] workqueue: add WQ_AFFN_CACHE_SHARD affinity scope
Posted by Tejun Heo 1 week, 6 days ago
Hello,

On Fri, Mar 20, 2026 at 10:56:28AM -0700, Breno Leitao wrote:
> +/**
> + * llc_count_cores - count distinct cores (SMT groups) within a cpumask
> + * @pod_cpus: the cpumask to scan (typically an LLC pod)
> + * @smt_pt:   the SMT pod type, used to identify sibling groups
> + *
> + * A core is represented by the lowest-numbered CPU in its SMT group. Returns
> + * the number of distinct cores found in @pod_cpus.
> + */
> +static int __init llc_count_cores(const struct cpumask *pod_cpus,
> +				  struct wq_pod_type *smt_pt)
> +{
> +	const struct cpumask *smt_cpus;
> +	int nr_cores = 0, c;
> +
> +	for_each_cpu(c, pod_cpus) {
> +		smt_cpus = smt_pt->pod_cpus[smt_pt->cpu_pod[c]];
> +		if (cpumask_first(smt_cpus) == c)
> +			nr_cores++;
> +	}
> +
> +	return nr_cores;
> +}
> +
> +/**
> + * llc_cpu_core_pos - find a CPU's core position within a cpumask
> + * @cpu:      the CPU to locate
> + * @pod_cpus: the cpumask to scan (typically an LLC pod)
> + * @smt_pt:   the SMT pod type, used to identify sibling groups
> + *
> + * Returns the zero-based index of @cpu's core among the distinct cores in
> + * @pod_cpus, ordered by lowest CPU number in each SMT group.
> + */
> +static int __init llc_cpu_core_pos(int cpu, const struct cpumask *pod_cpus,
> +				   struct wq_pod_type *smt_pt)
> +{
> +	const struct cpumask *smt_cpus;
> +	int core_pos = 0, c;
> +
> +	for_each_cpu(c, pod_cpus) {
> +		smt_cpus = smt_pt->pod_cpus[smt_pt->cpu_pod[c]];
> +		if (cpumask_test_cpu(cpu, smt_cpus))
> +			break;
> +		if (cpumask_first(smt_cpus) == c)
> +			core_pos++;
> +	}
> +
> +	return core_pos;
> +}

Can you do the above two in a separate pass and record the results and then
use that to implement cpu_cache_shard_id()? Doing all of it on the fly makes
it unnecessarily difficult to follow and init_pod_type() is already O(N^2)
and the above makes it O(N^4). Make the machine large enough and this may
become noticeable.

> +/**
> + * cpu_cache_shard_id - compute the shard index for a CPU within its LLC pod
> + * @cpu: the CPU to look up
> + *
> + * Returns a shard index that is unique within the CPU's LLC pod. The LLC is
> + * divided into shards of at most wq_cache_shard_size cores, always split on
> + * core (SMT group) boundaries so that SMT siblings are never placed in
> + * different shards. Cores are distributed across shards as evenly as possible.
> + *
> + * Example: 36 cores with wq_cache_shard_size=8 gives 5 shards of
> + * 8+7+7+7+7 cores.
> + */

I always feel a bit uneasy about using max number as split point in cases
like this because the reason why you picked 8 as the default was that
testing showed shard sizes close to 8 seems to behave the best (or at least
acceptably in most cases). However, setting max number to 8 doesn't
necessarily keep you close to that. e.g. If there are 9 cores, you end up
with 5 and 4 even though 9 is a lot closer to the 8 that we picked as the
default. Can the sharding logic updated so that "whatever sharding that gets
the system closest to the config target?".

Thanks.

-- 
tejun
Re: [PATCH v2 2/5] workqueue: add WQ_AFFN_CACHE_SHARD affinity scope
Posted by Breno Leitao 1 week, 3 days ago
Hello Tejun,

On Mon, Mar 23, 2026 at 12:43:31PM -1000, Tejun Heo wrote:
> On Fri, Mar 20, 2026 at 10:56:28AM -0700, Breno Leitao wrote:
> > +static int __init llc_cpu_core_pos(int cpu, const struct cpumask *pod_cpus,
> > +				   struct wq_pod_type *smt_pt)
> > +{
> > +	const struct cpumask *smt_cpus;
> > +	int core_pos = 0, c;
> > +
> > +	for_each_cpu(c, pod_cpus) {
> > +		smt_cpus = smt_pt->pod_cpus[smt_pt->cpu_pod[c]];
> > +		if (cpumask_test_cpu(cpu, smt_cpus))
> > +			break;
> > +		if (cpumask_first(smt_cpus) == c)
> > +			core_pos++;
> > +	}
> > +
> > +	return core_pos;
> > +}
> 
> Can you do the above two in a separate pass and record the results and then
> use that to implement cpu_cache_shard_id()? Doing all of it on the fly makes
> it unnecessarily difficult to follow and init_pod_type() is already O(N^2)
> and the above makes it O(N^4). Make the machine large enough and this may
> become noticeable.

Ack. I am planning to create a __initdata per-CPU array to host the
shard per CPU, and query it instead.

	/* Per-CPU shard index within its LLC pod; populated by precompute_cache_shard_ids() */
	static int __initdata cpu_shard_id[NR_CPUS];



> > + * cpu_cache_shard_id - compute the shard index for a CPU within its LLC pod
> > + * @cpu: the CPU to look up
> > + *
> > + * Returns a shard index that is unique within the CPU's LLC pod. The LLC is
> > + * divided into shards of at most wq_cache_shard_size cores, always split on
> > + * core (SMT group) boundaries so that SMT siblings are never placed in
> > + * different shards. Cores are distributed across shards as evenly as possible.
> > + *
> > + * Example: 36 cores with wq_cache_shard_size=8 gives 5 shards of
> > + * 8+7+7+7+7 cores.
> > + */
> 
> I always feel a bit uneasy about using max number as split point in cases
> like this because the reason why you picked 8 as the default was that
> testing showed shard sizes close to 8 seems to behave the best (or at least
> acceptably in most cases). However, setting max number to 8 doesn't
> necessarily keep you close to that. e.g. If there are 9 cores, you end up
> with 5 and 4 even though 9 is a lot closer to the 8 that we picked as the
> default. Can the sharding logic updated so that "whatever sharding that gets
> the system closest to the config target?".

I think DIV_ROUND_CLOSEST will do what we want, something as:

	nr_shards       = max(1, DIV_ROUND_CLOSEST(nr_cores, wq_cache_shard_size))
	cores_per_shard = nr_cores / nr_shards
	remainder       = nr_cores % nr_shards

The first remainder shards get cores_per_shard+1 cores (large shards),
the rest get cores_per_shard.

Assuming wq_cache_shard_size = 8, we would have the following number of pools
per number of CPUs (not vCPUs):

  - 1–11 CPUs → DIV_ROUND_CLOSEST(n, 8) ≤ 1 → 1 pool containing all CPUs.
  - 12 CPUs → DIV_ROUND_CLOSEST(12, 8) = 2 → 2 pools of 6 cores each. This is the first split.
  - 12–19 → 2 pools
  - 20–27 → 3 pools
  - 28–35 → 4 pools
  - 36–43 → 5 pools
  - 44–51 → 6 pools
  - 52–59 → 7 pools
  - 60–67 → 8 pools
  - 68–75 → 9 pools (e.g. 72-CPU NVIDIA Grace → 9×8)
  - 76–83 → 10 pools
  - 84–91 → 11 pools
  - 92–99 → 12 pools
  - 100 → 13 pools (9×8 + 4×7)


Is this what you meant?

This is the current code I have been testing with the changes above:

commit ff6c6272e5925d3099109107789e685f58bd4c1e
Author: Breno Leitao <leitao@debian.org>
Date:   Mon Mar 9 08:39:52 2026 -0700

    workqueue: add WQ_AFFN_CACHE_SHARD affinity scope
    
    On systems where many CPUs share one LLC, unbound workqueues using
    WQ_AFFN_CACHE collapse to a single worker pool, causing heavy spinlock
    contention on pool->lock. For example, Chuck Lever measured 39% of
    cycles lost to native_queued_spin_lock_slowpath on a 12-core shared-L3
    NFS-over-RDMA system.
    
    The existing affinity hierarchy (cpu, smt, cache, numa, system) offers
    no intermediate option between per-LLC and per-SMT-core granularity.
    
    Add WQ_AFFN_CACHE_SHARD, which subdivides each LLC into groups of at
    most wq_cache_shard_size cores (default 8, tunable via boot parameter).
    Shards are always split on core (SMT group) boundaries so that
    Hyper-Threading siblings are never placed in different pods. Cores are
    distributed across shards as evenly as possible -- for example, 36 cores
    in a single LLC with max shard size 8 produces 5 shards of 8+7+7+7+7
    cores.
    
    The implementation follows the same comparator pattern as other affinity
    scopes: cpu_cache_shard_id() computes a per-CPU shard index on the fly
    from the already-initialized WQ_AFFN_CACHE and WQ_AFFN_SMT topology,
    and cpus_share_cache_shard() is passed to init_pod_type().
    
    Benchmark on NVIDIA Grace (72 CPUs, single LLC, 50k items/thread), show
    cache_shard delivers ~5x the throughput and ~6.5x lower p50 latency
    compared to cache scope on this 72-core single-LLC system.
    
    Suggested-by: Tejun Heo <tj@kernel.org>
    Signed-off-by: Breno Leitao <leitao@debian.org>

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 17543aec2a6e1..50bdb7e30d35f 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -133,6 +133,7 @@ enum wq_affn_scope {
 	WQ_AFFN_CPU,			/* one pod per CPU */
 	WQ_AFFN_SMT,			/* one pod per SMT */
 	WQ_AFFN_CACHE,			/* one pod per LLC */
+	WQ_AFFN_CACHE_SHARD,		/* synthetic sub-LLC shards */
 	WQ_AFFN_NUMA,			/* one pod per NUMA node */
 	WQ_AFFN_SYSTEM,			/* one pod across the whole system */
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index cbff51397ea77..8f432ba2bba65 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -409,6 +409,7 @@ static const char * const wq_affn_names[WQ_AFFN_NR_TYPES] = {
 	[WQ_AFFN_CPU]		= "cpu",
 	[WQ_AFFN_SMT]		= "smt",
 	[WQ_AFFN_CACHE]		= "cache",
+	[WQ_AFFN_CACHE_SHARD]	= "cache_shard",
 	[WQ_AFFN_NUMA]		= "numa",
 	[WQ_AFFN_SYSTEM]	= "system",
 };
@@ -431,6 +432,9 @@ module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
+static unsigned int wq_cache_shard_size = 8;
+module_param_named(cache_shard_size, wq_cache_shard_size, uint, 0444);
+
 static bool wq_online;			/* can kworkers be created yet? */
 static bool wq_topo_initialized __read_mostly = false;
 
@@ -8107,6 +8111,136 @@ static bool __init cpus_share_numa(int cpu0, int cpu1)
 	return cpu_to_node(cpu0) == cpu_to_node(cpu1);
 }
 
+/* Per-CPU shard index within its LLC pod; populated by precompute_cache_shard_ids() */
+static int __initdata cpu_shard_id[NR_CPUS];
+
+/**
+ * llc_count_cores - count distinct cores (SMT groups) within an LLC pod
+ * @pod_cpus:  the cpumask of CPUs in the LLC pod
+ * @smt_pods:  the SMT pod type, used to identify sibling groups
+ *
+ * A core is represented by the lowest-numbered CPU in its SMT group. Returns
+ * the number of distinct cores found in @pod_cpus.
+ */
+static int __init llc_count_cores(const struct cpumask *pod_cpus,
+				  struct wq_pod_type *smt_pods)
+{
+	const struct cpumask *smt_cpus;
+	int nr_cores = 0, c;
+
+	for_each_cpu(c, pod_cpus) {
+		smt_cpus = smt_pods->pod_cpus[smt_pods->cpu_pod[c]];
+		if (cpumask_first(smt_cpus) == c)
+			nr_cores++;
+	}
+
+	return nr_cores;
+}
+
+/**
+ * llc_core_to_shard - map a core position to a shard index
+ * @core_pos:       zero-based position of the core within its LLC pod
+ * @cores_per_shard: base number of cores per shard (floor division)
+ * @remainder:      number of shards that get one extra core
+ *
+ * Cores are distributed as evenly as possible: the first @remainder shards
+ * have (@cores_per_shard + 1) cores (aka large shards), the rest have
+ * @cores_per_shard cores.
+ *
+ *  In summary, the initial `remainder` shards are large, the rest
+ *  are standard shards
+ *
+ * Returns the shard index for the given core position.
+ */
+static int __init llc_core_to_shard(int core_pos, int cores_per_shard,
+				    int remainder)
+{
+	int ret;
+
+	/*
+	 * These cores falls within the large shards.
+	 * Each large shard has (cores_per_shard + 1) cores
+	 */
+	if (core_pos < remainder * (cores_per_shard + 1))
+		return core_pos / (cores_per_shard + 1);
+
+	/* These are standard shards */
+	ret = (core_pos - remainder * (cores_per_shard + 1)) / cores_per_shard;
+
+	/*
+	 * Regular shards start after index 'remainder'
+	 */
+	return ret + remainder;
+}
+
+/**
+ * llc_assign_shard_ids - record the shard index for each CPU in an LLC pod
+ * @pod_cpus:  the cpumask of CPUs in the LLC pod
+ * @smt_pods:  the SMT pod type, used to identify sibling groups
+ * @nr_cores:  number of distinct cores in @pod_cpus (from llc_count_cores())
+ *
+ * Chooses the number of shards that keeps average shard size closest to
+ * wq_cache_shard_size, then walks @pod_cpus advancing the shard index at
+ * each new core (SMT group leader) boundary. Results are written to
+ * cpu_shard_id[].
+ */
+static void __init llc_assign_shard_ids(const struct cpumask *pod_cpus,
+					struct wq_pod_type *smt_pods, int nr_cores)
+{
+	int nr_shards, cores_per_shard, remainder;
+	const struct cpumask *sibling_cpus;
+	int core_pos, shard_id, c;
+
+	/*
+	 * This is the total number of shards we are going to have for this
+	 * cache pod
+	 */
+	nr_shards = max(1, DIV_ROUND_CLOSEST(nr_cores, wq_cache_shard_size));
+	cores_per_shard = nr_cores / nr_shards;
+	remainder = nr_cores % nr_shards;
+
+	core_pos = -1;
+	shard_id = 0;
+	for_each_cpu(c, pod_cpus) {
+		sibling_cpus = smt_pods->pod_cpus[smt_pods->cpu_pod[c]];
+		if (cpumask_first(sibling_cpus) == c)
+			shard_id = llc_core_to_shard(++core_pos, cores_per_shard,
+						     remainder);
+		cpu_shard_id[c] = shard_id;
+	}
+}
+
+/**
+ * precompute_cache_shard_ids - assign each CPU its shard index within its LLC
+ *
+ * Iterates over all LLC pods. For each pod, counts distinct cores then assigns
+ * shard indices to all CPUs in the pod. Must be called after WQ_AFFN_CACHE and
+ * WQ_AFFN_SMT have been initialized.
+ */
+static void __init precompute_cache_shard_ids(void)
+{
+	struct wq_pod_type *llc_pods = &wq_pod_types[WQ_AFFN_CACHE];
+	struct wq_pod_type *smt_pods = &wq_pod_types[WQ_AFFN_SMT];
+	int pod;
+
+	for (pod = 0; pod < llc_pods->nr_pods; pod++) {
+		const struct cpumask *cpus_sharing_llc = llc_pods->pod_cpus[pod];
+		int nr_cores;
+
+		/* Number of cores in this given LLC */
+		nr_cores = llc_count_cores(cpus_sharing_llc, smt_pods);
+		llc_assign_shard_ids(cpus_sharing_llc, smt_pods, nr_cores);
+	}
+}
+
+static bool __init cpus_share_cache_shard(int cpu0, int cpu1)
+{
+	if (!cpus_share_cache(cpu0, cpu1))
+		return false;
+
+	return cpu_shard_id[cpu0] == cpu_shard_id[cpu1];
+}
+
 /**
  * workqueue_init_topology - initialize CPU pods for unbound workqueues
  *
@@ -8119,9 +8253,16 @@ void __init workqueue_init_topology(void)
 	struct workqueue_struct *wq;
 	int cpu;
 
+	if (!wq_cache_shard_size) {
+		pr_warn("workqueue: cache_shard_size must be > 0, setting to 1\n");
+		wq_cache_shard_size = 1;
+	}
+
 	init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
 	init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
 	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
+	precompute_cache_shard_ids();
+	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE_SHARD], cpus_share_cache_shard);
 	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
 
 	wq_topo_initialized = true;
Re: [PATCH v2 2/5] workqueue: add WQ_AFFN_CACHE_SHARD affinity scope
Posted by Tejun Heo 1 week, 3 days ago
On Thu, Mar 26, 2026 at 09:20:15AM -0700, Breno Leitao wrote:
> Assuming wq_cache_shard_size = 8;, we would have the following number of pool
> per number of CPU (not vCPU):
> 
>   - 1–11 CPUs → DIV_ROUND_CLOSEST(n, 8) ≤ 1 → 1 pool containing all CPUs.
>   - 12 CPUs → DIV_ROUND_CLOSEST(12, 8) = 2 → 2 pools of 6 cores each. This is the first split.
>   - 12–19 → 2 pools
>   - 20–27 → 3 pools
>   - 28–35 → 4 pools
>   - 36–43 → 5 pools
>   - 44–51 → 6 pools
>   - 52–59 → 7 pools
>   - 60–67 → 8 pools
>   - 68–75 → 9 pools (e.g. 72-CPU NVIDIA Grace → 9×8)
>   - 76–83 → 10 pools
>   - 84–91 → 11 pools
>   - 92–99 → 12 pools
>   - 100 → 13 pools (9×8 + 4×7)
> 
> Is this what you meant?

Yes.

> +static int __init llc_core_to_shard(int core_pos, int cores_per_shard,
> +				    int remainder)
> +{
> +	int ret;
> +
> +	/*
> +	 * These cores falls within the large shards.
> +	 * Each large shard has (cores_per_shard + 1) cores
> +	 */
> +	if (core_pos < remainder * (cores_per_shard + 1))
> +		return core_pos / (cores_per_shard + 1);
> +
> +	/* These are standard shards */
> +	ret = (core_pos - remainder * (cores_per_shard + 1)) / cores_per_shard;

This is too smart. Any chance you can dumb it down? If you have to go
through intermediate data structures, that's fine too.

Thanks.

-- 
tejun
Re: [PATCH v2 2/5] workqueue: add WQ_AFFN_CACHE_SHARD affinity scope
Posted by Breno Leitao 1 week, 2 days ago
On Thu, Mar 26, 2026 at 09:41:14AM -1000, Tejun Heo wrote:
> On Thu, Mar 26, 2026 at 09:20:15AM -0700, Breno Leitao wrote:
> > +static int __init llc_core_to_shard(int core_pos, int cores_per_shard,
> > +				    int remainder)
> > +{
> > +	int ret;
> > +
> > +	/*
> > +	 * These cores falls within the large shards.
> > +	 * Each large shard has (cores_per_shard + 1) cores
> > +	 */
> > +	if (core_pos < remainder * (cores_per_shard + 1))
> > +		return core_pos / (cores_per_shard + 1);
> > +
> > +	/* These are standard shards */
> > +	ret = (core_pos - remainder * (cores_per_shard + 1)) / cores_per_shard;
> 
> This is too smart. Any chance you can dumb it down? If you have to go
> through intermediate data structures, that's fine too.

Thanks, Let me create a layout to represent the shard, and give some
names to shard, then it will make the code easier to digest.

TL;DR: We have "large shards" and "regular shards", where a large shard
holds one more core than a regular shard (if the division is not exact).

	/* Layout of shards within one LLC pod */
	struct llc_shard_layout {
		int nr_large_shards;	/* number of large shards (cores_per_shard + 1) */
		int cores_per_shard;	/* base number of cores per default shard */
		int nr_shards;		/* total number of shards */
		/* nr_default shards = (nr_shards - nr_large_shards) */
	};

Then, when populating the it using:


	static struct llc_shard_layout __init llc_calc_shard_layout(int nr_cores)
	{
		struct llc_shard_layout layout;

		layout.nr_shards = max(1, DIV_ROUND_CLOSEST(nr_cores, wq_cache_shard_size));
		layout.cores_per_shard = nr_cores / layout.nr_shards;
		layout.nr_large_shards = nr_cores % layout.nr_shards; (which was the remainder in the last patch)

		return layout;
	}


This is the full patch I am working on:



commit ea801773c3b80f50d81c52f4e174276013f1e562
Author: Breno Leitao <leitao@debian.org>
Date:   Mon Mar 9 08:39:52 2026 -0700

    workqueue: add WQ_AFFN_CACHE_SHARD affinity scope
    
    On systems where many CPUs share one LLC, unbound workqueues using
    WQ_AFFN_CACHE collapse to a single worker pool, causing heavy spinlock
    contention on pool->lock. For example, Chuck Lever measured 39% of
    cycles lost to native_queued_spin_lock_slowpath on a 12-core shared-L3
    NFS-over-RDMA system.
    
    The existing affinity hierarchy (cpu, smt, cache, numa, system) offers
    no intermediate option between per-LLC and per-SMT-core granularity.
    
    Add WQ_AFFN_CACHE_SHARD, which subdivides each LLC into groups of at
    most wq_cache_shard_size cores (default 8, tunable via boot parameter).
    Shards are always split on core (SMT group) boundaries so that
    Hyper-Threading siblings are never placed in different pods. Cores are
    distributed across shards as evenly as possible -- for example, 36 cores
    in a single LLC with max shard size 8 produces 5 shards of 8+7+7+7+7
    cores.
    
    The implementation follows the same comparator pattern as other affinity
    scopes: cpu_cache_shard_id() computes a per-CPU shard index on the fly
    from the already-initialized WQ_AFFN_CACHE and WQ_AFFN_SMT topology,
    and cpus_share_cache_shard() is passed to init_pod_type().
    
    Benchmark on NVIDIA Grace (72 CPUs, single LLC, 50k items/thread), show
    cache_shard delivers ~5x the throughput and ~6.5x lower p50 latency
    compared to cache scope on this 72-core single-LLC system.
    
    Suggested-by: Tejun Heo <tj@kernel.org>
    Signed-off-by: Breno Leitao <leitao@debian.org>

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 17543aec2a6e1..50bdb7e30d35f 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -133,6 +133,7 @@ enum wq_affn_scope {
 	WQ_AFFN_CPU,			/* one pod per CPU */
 	WQ_AFFN_SMT,			/* one pod per SMT */
 	WQ_AFFN_CACHE,			/* one pod per LLC */
+	WQ_AFFN_CACHE_SHARD,		/* synthetic sub-LLC shards */
 	WQ_AFFN_NUMA,			/* one pod per NUMA node */
 	WQ_AFFN_SYSTEM,			/* one pod across the whole system */
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index cbff51397ea77..22dcd977bbf87 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -130,6 +130,14 @@ enum wq_internal_consts {
 	WORKER_ID_LEN		= 10 + WQ_NAME_LEN, /* "kworker/R-" + WQ_NAME_LEN */
 };
 
+/* Layout of shards within one LLC pod */
+struct llc_shard_layout {
+	int nr_large_shards;	/* number of large shards (cores_per_shard + 1) */
+	int cores_per_shard;	/* base number of cores per default shard */
+	int nr_shards;		/* total number of shards */
+	/* nr_default shards = (nr_shards - nr_large_shards) */
+};
+
 /*
  * We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and
  * MAX_SOFTIRQ_RESTART in kernel/softirq.c. These are macros because
@@ -409,6 +417,7 @@ static const char * const wq_affn_names[WQ_AFFN_NR_TYPES] = {
 	[WQ_AFFN_CPU]		= "cpu",
 	[WQ_AFFN_SMT]		= "smt",
 	[WQ_AFFN_CACHE]		= "cache",
+	[WQ_AFFN_CACHE_SHARD]	= "cache_shard",
 	[WQ_AFFN_NUMA]		= "numa",
 	[WQ_AFFN_SYSTEM]	= "system",
 };
@@ -431,6 +440,9 @@ module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
+static unsigned int wq_cache_shard_size = 8;
+module_param_named(cache_shard_size, wq_cache_shard_size, uint, 0444);
+
 static bool wq_online;			/* can kworkers be created yet? */
 static bool wq_topo_initialized __read_mostly = false;
 
@@ -8107,6 +8119,150 @@ static bool __init cpus_share_numa(int cpu0, int cpu1)
 	return cpu_to_node(cpu0) == cpu_to_node(cpu1);
 }
 
+/* Per-CPU shard index within its LLC pod; populated by precompute_cache_shard_ids() */
+static int __initdata cpu_shard_id[NR_CPUS];
+
+/**
+ * llc_count_cores - count distinct cores (SMT groups) within an LLC pod
+ * @pod_cpus:  the cpumask of CPUs in the LLC pod
+ * @smt_pods:  the SMT pod type, used to identify sibling groups
+ *
+ * A core is represented by the lowest-numbered CPU in its SMT group. Returns
+ * the number of distinct cores found in @pod_cpus.
+ */
+static int __init llc_count_cores(const struct cpumask *pod_cpus,
+				  struct wq_pod_type *smt_pods)
+{
+	const struct cpumask *smt_cpus;
+	int nr_cores = 0, c;
+
+	for_each_cpu(c, pod_cpus) {
+		smt_cpus = smt_pods->pod_cpus[smt_pods->cpu_pod[c]];
+		if (cpumask_first(smt_cpus) == c)
+			nr_cores++;
+	}
+
+	return nr_cores;
+}
+
+/*
+ * llc_shard_size - number of cores in a given shard
+ *
+ * Cores are spread as evenly as possible. The first @nr_large_shards shards are
+ * "large shards" with (cores_per_shard + 1) cores; the rest are "default
+ * shards" with cores_per_shard cores.
+ */
+static int __init llc_shard_size(int shard_id, int cores_per_shard, int nr_large_shards)
+{
+	/* The first @nr_large_shards shards are large shards */
+	if (shard_id < nr_large_shards)
+		return cores_per_shard + 1;
+
+	/* The remaining shards are default shards */
+	return cores_per_shard;
+}
+
+/*
+ * llc_calc_shard_layout - compute the shard layout for an LLC pod
+ * @nr_cores:  number of distinct cores in the LLC pod
+ *
+ * Chooses the number of shards that keeps average shard size closest to
+ * wq_cache_shard_size. Returns a struct describing the total number of shards,
+ * the base size of each, and how many are large shards.
+ */
+static struct llc_shard_layout __init llc_calc_shard_layout(int nr_cores)
+{
+	struct llc_shard_layout layout;
+
+	layout.nr_shards = max(1, DIV_ROUND_CLOSEST(nr_cores, wq_cache_shard_size));
+	layout.cores_per_shard = nr_cores / layout.nr_shards;
+	layout.nr_large_shards = nr_cores % layout.nr_shards;
+
+	return layout;
+}
+
+static bool __init llc_shard_is_full(int cores_in_shard, int shard_id,
+				     const struct llc_shard_layout *layout)
+{
+	return cores_in_shard == llc_shard_size(shard_id, layout->cores_per_shard,
+						layout->nr_large_shards);
+}
+
+/**
+ * llc_assign_shard_ids - record the shard index for each CPU in an LLC pod
+ * @pod_cpus:  the cpumask of CPUs in the LLC pod
+ * @smt_pods:  the SMT pod type, used to identify sibling groups
+ * @nr_cores:  number of distinct cores in @pod_cpus (from llc_count_cores())
+ *
+ * Walks @pod_cpus in order. At each SMT group leader, advances to the next
+ * shard once the current shard is full. Results are written to cpu_shard_id[].
+ */
+static void __init llc_assign_shard_ids(const struct cpumask *pod_cpus,
+					struct wq_pod_type *smt_pods, int nr_cores)
+{
+	struct llc_shard_layout layout = llc_calc_shard_layout(nr_cores);
+	const struct cpumask *sibling_cpus;
+	/* Count the number of cores in the current shard_id */
+	int cores_in_shard = 0;
+	/* This is a cursor for the shards. Goes from zero to nr_shards - 1 */
+	int shard_id = 0;
+	int c;
+
+	/* Iterate at every CPU for a given LLC pod, and assign it a shard */
+	for_each_cpu(c, pod_cpus) {
+		sibling_cpus = smt_pods->pod_cpus[smt_pods->cpu_pod[c]];
+		if (cpumask_first(sibling_cpus) == c) {
+			/* This is the CPU leader for the siblings */
+			if (llc_shard_is_full(cores_in_shard, shard_id, &layout)) {
+				shard_id++;
+				cores_in_shard = 0;
+			}
+			cores_in_shard++;
+			cpu_shard_id[c] = shard_id;
+		} else {
+			/*
+			 * The siblings' shard MUST be the same as the leader.
+			 * never split threads in the same core.
+			 */
+			cpu_shard_id[c] = cpu_shard_id[cpumask_first(sibling_cpus)];
+		}
+	}
+
+	WARN_ON_ONCE(shard_id != (layout.nr_shards - 1));
+}
+
+/**
+ * precompute_cache_shard_ids - assign each CPU its shard index within its LLC
+ *
+ * Iterates over all LLC pods. For each pod, counts distinct cores then assigns
+ * shard indices to all CPUs in the pod. Must be called after WQ_AFFN_CACHE and
+ * WQ_AFFN_SMT have been initialized.
+ */
+static void __init precompute_cache_shard_ids(void)
+{
+	struct wq_pod_type *llc_pods = &wq_pod_types[WQ_AFFN_CACHE];
+	struct wq_pod_type *smt_pods = &wq_pod_types[WQ_AFFN_SMT];
+	const struct cpumask *cpus_sharing_llc;
+	int nr_cores;
+	int pod;
+
+	for (pod = 0; pod < llc_pods->nr_pods; pod++) {
+		cpus_sharing_llc = llc_pods->pod_cpus[pod];
+
+		/* Number of cores in this given LLC */
+		nr_cores = llc_count_cores(cpus_sharing_llc, smt_pods);
+		llc_assign_shard_ids(cpus_sharing_llc, smt_pods, nr_cores);
+	}
+}
+
+static bool __init cpus_share_cache_shard(int cpu0, int cpu1)
+{
+	if (!cpus_share_cache(cpu0, cpu1))
+		return false;
+
+	return cpu_shard_id[cpu0] == cpu_shard_id[cpu1];
+}
+
 /**
  * workqueue_init_topology - initialize CPU pods for unbound workqueues
  *
@@ -8119,9 +8275,21 @@ void __init workqueue_init_topology(void)
 	struct workqueue_struct *wq;
 	int cpu;
 
+	if (!wq_cache_shard_size) {
+		pr_warn("workqueue: cache_shard_size must be > 0, setting to 1\n");
+		wq_cache_shard_size = 1;
+	}
+
 	init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
 	init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
 	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
+	precompute_cache_shard_ids();
+	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE_SHARD], cpus_share_cache_shard);
+
+	for (cpu = 0; cpu < wq_pod_types[WQ_AFFN_CACHE_SHARD].nr_pods; cpu++)
+		pr_info("workqueue: cache_shard %d: cpus %*pbl\n", cpu,
+			cpumask_pr_args(wq_pod_types[WQ_AFFN_CACHE_SHARD].pod_cpus[cpu]));
+
 	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
 
 	wq_topo_initialized = true;