Currently, kmem_cache_destroy() calls kvfree_rcu_barrier(), which flushes
RCU sheaves across all slab caches. This is unnecessary when destroying a
single cache; only the RCU sheaves belonging to the cache being destroyed
need to be flushed.
As suggested by Vlastimil Babka, introduce a weaker form of
kvfree_rcu_barrier() that operates on a specific slab cache and call it
on cache destruction.
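
For context, here is a minimal sketch (hypothetical "foo" cache and field
names, not part of this patch) of the usage pattern the barrier exists for:
objects freed with kfree_rcu() may still sit in per-CPU RCU sheaves when
their cache is torn down, so kmem_cache_destroy() must wait for them, and
with this patch it only waits for the cache being destroyed rather than
for every cache in the system.

#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>

struct foo {
	int payload;
	struct rcu_head rcu;	/* needed for kfree_rcu() */
};

static struct kmem_cache *foo_cache;

static int foo_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(void)
{
	return kmem_cache_alloc(foo_cache, GFP_KERNEL);
}

static void foo_release(struct foo *f)
{
	/* deferred free; the object may land in a per-CPU rcu_free sheaf */
	kfree_rcu(f, rcu);
}

static void foo_exit(void)
{
	/*
	 * With this patch applied, this internally calls
	 * kvfree_rcu_barrier_on_cache(foo_cache), flushing only foo_cache's
	 * RCU sheaves instead of the sheaves of every cache.
	 */
	kmem_cache_destroy(foo_cache);
}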
The performance benefit was evaluated on a 12-core, 24-thread AMD Ryzen
5900X machine (1 socket) by loading the slub_kunit module.
Before:
Total calls: 19
Average latency (us): 8529
Total time (us): 162069
After:
Total calls: 19
Average latency (us): 3804
Total time (us): 72287
Link: https://lore.kernel.org/linux-mm/0406562e-2066-4cf8-9902-b2b0616dd742@kernel.org
Link: https://lore.kernel.org/linux-mm/e988eff6-1287-425e-a06c-805af5bbf262@nvidia.com
Link: https://lore.kernel.org/linux-mm/1bda09da-93be-4737-aef0-d47f8c5c9301@suse.cz
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
Not sure if the regression is worse on the reporters' machines due to
higher core count (or because some cores were busy doing other things,
dunno).
Hopefully this will reduce the time to complete tests,
and Suren could add his patch on top of this ;)
include/linux/slab.h | 5 ++++
mm/slab.h | 1 +
mm/slab_common.c | 52 +++++++++++++++++++++++++++++------------
mm/slub.c | 55 ++++++++++++++++++++++++--------------------
4 files changed, 73 insertions(+), 40 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index cf443f064a66..937c93d44e8c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1149,6 +1149,10 @@ static inline void kvfree_rcu_barrier(void)
{
rcu_barrier();
}
+static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+ rcu_barrier();
+}
static inline void kfree_rcu_scheduler_running(void) { }
#else
@@ -1156,6 +1160,7 @@ void kvfree_rcu_barrier(void);
void kfree_rcu_scheduler_running(void);
#endif
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
/**
* kmalloc_size_roundup - Report allocation bucket size for the given size
diff --git a/mm/slab.h b/mm/slab.h
index f730e012553c..e767aa7e91b0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -422,6 +422,7 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
void flush_all_rcu_sheaves(void);
+void flush_rcu_sheaves_on_cache(struct kmem_cache *s);
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
SLAB_CACHE_DMA32 | SLAB_PANIC | \
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 84dfff4f7b1f..dd8a49d6f9cc 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -492,7 +492,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
return;
/* in-flight kfree_rcu()'s may include objects from our cache */
- kvfree_rcu_barrier();
+ kvfree_rcu_barrier_on_cache(s);
if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
(s->flags & SLAB_TYPESAFE_BY_RCU)) {
@@ -2038,25 +2038,13 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
-/**
- * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
- *
- * Note that a single argument of kvfree_rcu() call has a slow path that
- * triggers synchronize_rcu() following by freeing a pointer. It is done
- * before the return from the function. Therefore for any single-argument
- * call that will result in a kfree() to a cache that is to be destroyed
- * during module exit, it is developer's responsibility to ensure that all
- * such calls have returned before the call to kmem_cache_destroy().
- */
-void kvfree_rcu_barrier(void)
+static inline void __kvfree_rcu_barrier(void)
{
struct kfree_rcu_cpu_work *krwp;
struct kfree_rcu_cpu *krcp;
bool queued;
int i, cpu;
- flush_all_rcu_sheaves();
-
/*
* Firstly we detach objects and queue them over an RCU-batch
* for all CPUs. Finally queued works are flushed for each CPU.
@@ -2118,8 +2106,43 @@ void kvfree_rcu_barrier(void)
}
}
}
+
+/**
+ * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
+ *
+ * Note that a single argument of kvfree_rcu() call has a slow path that
+ * triggers synchronize_rcu() following by freeing a pointer. It is done
+ * before the return from the function. Therefore for any single-argument
+ * call that will result in a kfree() to a cache that is to be destroyed
+ * during module exit, it is developer's responsibility to ensure that all
+ * such calls have returned before the call to kmem_cache_destroy().
+ */
+void kvfree_rcu_barrier(void)
+{
+ flush_all_rcu_sheaves();
+ __kvfree_rcu_barrier();
+}
EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
+/**
+ * kvfree_rcu_barrier_on_cache - Wait for in-flight kvfree_rcu() calls on a
+ * specific slab cache.
+ * @s: slab cache to wait for
+ *
+ * See the description of kvfree_rcu_barrier() for details.
+ */
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+ if (s->cpu_sheaves)
+ flush_rcu_sheaves_on_cache(s);
+ /*
+ * TODO: Introduce a version of __kvfree_rcu_barrier() that works
+ * on a specific slab cache.
+ */
+ __kvfree_rcu_barrier();
+}
+EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache);
+
static unsigned long
kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
@@ -2215,4 +2238,3 @@ void __init kvfree_rcu_init(void)
}
#endif /* CONFIG_KVFREE_RCU_BATCHED */
-
diff --git a/mm/slub.c b/mm/slub.c
index 785e25a14999..7cec2220712b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4118,42 +4118,47 @@ static void flush_rcu_sheaf(struct work_struct *w)
/* needed for kvfree_rcu_barrier() */
-void flush_all_rcu_sheaves(void)
+void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
{
struct slub_flush_work *sfw;
- struct kmem_cache *s;
unsigned int cpu;
- cpus_read_lock();
- mutex_lock(&slab_mutex);
+ mutex_lock(&flush_lock);
- list_for_each_entry(s, &slab_caches, list) {
- if (!s->cpu_sheaves)
- continue;
+ for_each_online_cpu(cpu) {
+ sfw = &per_cpu(slub_flush, cpu);
- mutex_lock(&flush_lock);
+ /*
+ * we don't check if rcu_free sheaf exists - racing
+ * __kfree_rcu_sheaf() might have just removed it.
+ * by executing flush_rcu_sheaf() on the cpu we make
+ * sure the __kfree_rcu_sheaf() finished its call_rcu()
+ */
- for_each_online_cpu(cpu) {
- sfw = &per_cpu(slub_flush, cpu);
+ INIT_WORK(&sfw->work, flush_rcu_sheaf);
+ sfw->s = s;
+ queue_work_on(cpu, flushwq, &sfw->work);
+ }
- /*
- * we don't check if rcu_free sheaf exists - racing
- * __kfree_rcu_sheaf() might have just removed it.
- * by executing flush_rcu_sheaf() on the cpu we make
- * sure the __kfree_rcu_sheaf() finished its call_rcu()
- */
+ for_each_online_cpu(cpu) {
+ sfw = &per_cpu(slub_flush, cpu);
+ flush_work(&sfw->work);
+ }
- INIT_WORK(&sfw->work, flush_rcu_sheaf);
- sfw->s = s;
- queue_work_on(cpu, flushwq, &sfw->work);
- }
+ mutex_unlock(&flush_lock);
+}
- for_each_online_cpu(cpu) {
- sfw = &per_cpu(slub_flush, cpu);
- flush_work(&sfw->work);
- }
+void flush_all_rcu_sheaves(void)
+{
+ struct kmem_cache *s;
+
+ cpus_read_lock();
+ mutex_lock(&slab_mutex);
- mutex_unlock(&flush_lock);
+ list_for_each_entry(s, &slab_caches, list) {
+ if (!s->cpu_sheaves)
+ continue;
+ flush_rcu_sheaves_on_cache(s);
}
mutex_unlock(&slab_mutex);
--
2.43.0
On 28/11/2025 12.37, Harry Yoo wrote:
> Currently, kmem_cache_destroy() calls kvfree_rcu_barrier(), which flushes
> RCU sheaves across all slab caches. This is unnecessary when destroying a
> single cache; only the RCU sheaves belonging to the cache being destroyed
> need to be flushed.
>
> As suggested by Vlastimil Babka, introduce a weaker form of
> kvfree_rcu_barrier() that operates on a specific slab cache and call it
> on cache destruction.
>
> Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
> ---

Thanks Harry for the patch. A quick test on a different machine from the
one I originally used to report this shows a decrease from 214s to 100s.

LGTM,

Tested-by: Daniel Gomez <da.gomez@samsung.com>

> Not sure if the regression is worse on the reporters' machines due to
> higher core count (or because some cores were busy doing other things,
> dunno).

FWIW, CI modules run on an 8-core VM. Depending on the host CPU, this made
the absolute numbers different, but the performance degradation was
equivalent.

> Hopefully this will reduce the time to complete tests,
> and Suren could add his patch on top of this ;)
On Fri, Nov 28, 2025 at 08:37:40PM +0900, Harry Yoo wrote:
> Currently, kmem_cache_destroy() calls kvfree_rcu_barrier(), which flushes
> RCU sheaves across all slab caches. This is unnecessary when destroying a
> single cache; only the RCU sheaves belonging to the cache being destroyed
> need to be flushed.
>
> As suggested by Vlastimil Babka, introduce a weaker form of
> kvfree_rcu_barrier() that operates on a specific slab cache and call it
> on cache destruction.
>
> The performance benefit was evaluated on a 12-core, 24-thread AMD Ryzen
> 5900X machine (1 socket) by loading the slub_kunit module.
>
> Before:
> Total calls: 19
> Average latency (us): 8529
> Total time (us): 162069
>
> After:
> Total calls: 19
> Average latency (us): 3804
> Total time (us): 72287
Ooh, I just realized that I messed up the config and
have only two cores enabled. Will update the numbers after enabling 22 more :)
> Link: https://lore.kernel.org/linux-mm/0406562e-2066-4cf8-9902-b2b0616dd742@kernel.org
> Link: https://lore.kernel.org/linux-mm/e988eff6-1287-425e-a06c-805af5bbf262@nvidia.com
> Link: https://lore.kernel.org/linux-mm/1bda09da-93be-4737-aef0-d47f8c5c9301@suse.cz
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
> ---
>
> Not sure if the regression is worse on the reporters' machines due to
> higher core count (or because some cores were busy doing other things,
> dunno).
>
> Hopefully this will reduce the time to complete tests,
> and Suren could add his patch on top of this ;)
>
> include/linux/slab.h | 5 ++++
> mm/slab.h | 1 +
> mm/slab_common.c | 52 +++++++++++++++++++++++++++++------------
> mm/slub.c | 55 ++++++++++++++++++++++++--------------------
> 4 files changed, 73 insertions(+), 40 deletions(-)
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index cf443f064a66..937c93d44e8c 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -1149,6 +1149,10 @@ static inline void kvfree_rcu_barrier(void)
> {
> rcu_barrier();
> }
> +static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
> +{
> + rcu_barrier();
> +}
>
> static inline void kfree_rcu_scheduler_running(void) { }
> #else
> @@ -1156,6 +1160,7 @@ void kvfree_rcu_barrier(void);
>
> void kfree_rcu_scheduler_running(void);
> #endif
> +void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
>
> /**
> * kmalloc_size_roundup - Report allocation bucket size for the given size
> diff --git a/mm/slab.h b/mm/slab.h
> index f730e012553c..e767aa7e91b0 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -422,6 +422,7 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
>
> bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
> void flush_all_rcu_sheaves(void);
> +void flush_rcu_sheaves_on_cache(struct kmem_cache *s);
>
> #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
> SLAB_CACHE_DMA32 | SLAB_PANIC | \
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 84dfff4f7b1f..dd8a49d6f9cc 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -492,7 +492,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
> return;
>
> /* in-flight kfree_rcu()'s may include objects from our cache */
> - kvfree_rcu_barrier();
> + kvfree_rcu_barrier_on_cache(s);
>
> if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
> (s->flags & SLAB_TYPESAFE_BY_RCU)) {
> @@ -2038,25 +2038,13 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
> }
> EXPORT_SYMBOL_GPL(kvfree_call_rcu);
>
> -/**
> - * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
> - *
> - * Note that a single argument of kvfree_rcu() call has a slow path that
> - * triggers synchronize_rcu() following by freeing a pointer. It is done
> - * before the return from the function. Therefore for any single-argument
> - * call that will result in a kfree() to a cache that is to be destroyed
> - * during module exit, it is developer's responsibility to ensure that all
> - * such calls have returned before the call to kmem_cache_destroy().
> - */
> -void kvfree_rcu_barrier(void)
> +static inline void __kvfree_rcu_barrier(void)
> {
> struct kfree_rcu_cpu_work *krwp;
> struct kfree_rcu_cpu *krcp;
> bool queued;
> int i, cpu;
>
> - flush_all_rcu_sheaves();
> -
> /*
> * Firstly we detach objects and queue them over an RCU-batch
> * for all CPUs. Finally queued works are flushed for each CPU.
> @@ -2118,8 +2106,43 @@ void kvfree_rcu_barrier(void)
> }
> }
> }
> +
> +/**
> + * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
> + *
> + * Note that a single argument of kvfree_rcu() call has a slow path that
> + * triggers synchronize_rcu() following by freeing a pointer. It is done
> + * before the return from the function. Therefore for any single-argument
> + * call that will result in a kfree() to a cache that is to be destroyed
> + * during module exit, it is developer's responsibility to ensure that all
> + * such calls have returned before the call to kmem_cache_destroy().
> + */
> +void kvfree_rcu_barrier(void)
> +{
> + flush_all_rcu_sheaves();
> + __kvfree_rcu_barrier();
> +}
> EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
>
> +/**
> + * kvfree_rcu_barrier_on_cache - Wait for in-flight kvfree_rcu() calls on a
> + * specific slab cache.
> + * @s: slab cache to wait for
> + *
> + * See the description of kvfree_rcu_barrier() for details.
> + */
> +void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
> +{
> + if (s->cpu_sheaves)
> + flush_rcu_sheaves_on_cache(s);
> + /*
> + * TODO: Introduce a version of __kvfree_rcu_barrier() that works
> + * on a specific slab cache.
> + */
> + __kvfree_rcu_barrier();
> +}
> +EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache);
> +
> static unsigned long
> kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
> {
> @@ -2215,4 +2238,3 @@ void __init kvfree_rcu_init(void)
> }
>
> #endif /* CONFIG_KVFREE_RCU_BATCHED */
> -
> diff --git a/mm/slub.c b/mm/slub.c
> index 785e25a14999..7cec2220712b 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4118,42 +4118,47 @@ static void flush_rcu_sheaf(struct work_struct *w)
>
>
> /* needed for kvfree_rcu_barrier() */
> -void flush_all_rcu_sheaves(void)
> +void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
> {
> struct slub_flush_work *sfw;
> - struct kmem_cache *s;
> unsigned int cpu;
>
> - cpus_read_lock();
> - mutex_lock(&slab_mutex);
> + mutex_lock(&flush_lock);
>
> - list_for_each_entry(s, &slab_caches, list) {
> - if (!s->cpu_sheaves)
> - continue;
> + for_each_online_cpu(cpu) {
> + sfw = &per_cpu(slub_flush, cpu);
>
> - mutex_lock(&flush_lock);
> + /*
> + * we don't check if rcu_free sheaf exists - racing
> + * __kfree_rcu_sheaf() might have just removed it.
> + * by executing flush_rcu_sheaf() on the cpu we make
> + * sure the __kfree_rcu_sheaf() finished its call_rcu()
> + */
>
> - for_each_online_cpu(cpu) {
> - sfw = &per_cpu(slub_flush, cpu);
> + INIT_WORK(&sfw->work, flush_rcu_sheaf);
> + sfw->s = s;
> + queue_work_on(cpu, flushwq, &sfw->work);
> + }
>
> - /*
> - * we don't check if rcu_free sheaf exists - racing
> - * __kfree_rcu_sheaf() might have just removed it.
> - * by executing flush_rcu_sheaf() on the cpu we make
> - * sure the __kfree_rcu_sheaf() finished its call_rcu()
> - */
> + for_each_online_cpu(cpu) {
> + sfw = &per_cpu(slub_flush, cpu);
> + flush_work(&sfw->work);
> + }
>
> - INIT_WORK(&sfw->work, flush_rcu_sheaf);
> - sfw->s = s;
> - queue_work_on(cpu, flushwq, &sfw->work);
> - }
> + mutex_unlock(&flush_lock);
> +}
>
> - for_each_online_cpu(cpu) {
> - sfw = &per_cpu(slub_flush, cpu);
> - flush_work(&sfw->work);
> - }
> +void flush_all_rcu_sheaves(void)
> +{
> + struct kmem_cache *s;
> +
> + cpus_read_lock();
> + mutex_lock(&slab_mutex);
>
> - mutex_unlock(&flush_lock);
> + list_for_each_entry(s, &slab_caches, list) {
> + if (!s->cpu_sheaves)
> + continue;
> + flush_rcu_sheaves_on_cache(s);
> }
>
> mutex_unlock(&slab_mutex);
> --
> 2.43.0
>
--
Cheers,
Harry / Hyeonggon