Before we enable percpu sheaves for kmalloc caches, we need to make sure
kmalloc_nolock() and kfree_nolock() will continue working properly and
not spin when not allowed to.
Percpu sheaves themselves use local_trylock() so they are already
compatible. We just need to be careful with the barn->lock spin_lock.
Pass a new allow_spin parameter where necessary to use
spin_trylock_irqsave().
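
The resulting pattern in the barn helpers is (a simplified sketch of the
shape; the exact hunks are in the diff below):

	if (likely(allow_spin))
		spin_lock_irqsave(&barn->lock, flags);
	else if (!spin_trylock_irqsave(&barn->lock, flags))
		return NULL; /* or ERR_PTR(-EBUSY), per the caller's contract */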
In kmalloc_nolock_noprof() we can now attempt alloc_from_pcs() safely;
for now it will always fail, until we enable sheaves for kmalloc caches
next. Similarly, in kfree_nolock() we can attempt free_to_pcs().
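
Callers that have a gfp context derive the new parameter from the flags,
as in the hunks below:

	full = barn_replace_empty_sheaf(barn, pcs->main,
					gfpflags_allow_spinning(gfp));

Callers that may always spin simply pass true.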
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
mm/slub.c | 82 ++++++++++++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 60 insertions(+), 22 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 41e1bf35707c..4ca6bd944854 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2889,7 +2889,8 @@ static void pcs_destroy(struct kmem_cache *s)
s->cpu_sheaves = NULL;
}
-static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn)
+static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn,
+ bool allow_spin)
{
struct slab_sheaf *empty = NULL;
unsigned long flags;
@@ -2897,7 +2898,10 @@ static struct slab_sheaf *barn_get_empty_sheaf(struct node_barn *barn)
if (!data_race(barn->nr_empty))
return NULL;
- spin_lock_irqsave(&barn->lock, flags);
+ if (likely(allow_spin))
+ spin_lock_irqsave(&barn->lock, flags);
+ else if (!spin_trylock_irqsave(&barn->lock, flags))
+ return NULL;
if (likely(barn->nr_empty)) {
empty = list_first_entry(&barn->sheaves_empty,
@@ -2974,7 +2978,8 @@ static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn)
* change.
*/
static struct slab_sheaf *
-barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty)
+barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty,
+ bool allow_spin)
{
struct slab_sheaf *full = NULL;
unsigned long flags;
@@ -2982,7 +2987,10 @@ barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty)
if (!data_race(barn->nr_full))
return NULL;
- spin_lock_irqsave(&barn->lock, flags);
+ if (likely(allow_spin))
+ spin_lock_irqsave(&barn->lock, flags);
+ else if (!spin_trylock_irqsave(&barn->lock, flags))
+ return NULL;
if (likely(barn->nr_full)) {
full = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
@@ -3003,7 +3011,8 @@ barn_replace_empty_sheaf(struct node_barn *barn, struct slab_sheaf *empty)
* barn. But if there are too many full sheaves, reject this with -E2BIG.
*/
static struct slab_sheaf *
-barn_replace_full_sheaf(struct node_barn *barn, struct slab_sheaf *full)
+barn_replace_full_sheaf(struct node_barn *barn, struct slab_sheaf *full,
+ bool allow_spin)
{
struct slab_sheaf *empty;
unsigned long flags;
@@ -3014,7 +3023,10 @@ barn_replace_full_sheaf(struct node_barn *barn, struct slab_sheaf *full)
if (!data_race(barn->nr_empty))
return ERR_PTR(-ENOMEM);
- spin_lock_irqsave(&barn->lock, flags);
+ if (likely(allow_spin))
+ spin_lock_irqsave(&barn->lock, flags);
+ else if (!spin_trylock_irqsave(&barn->lock, flags))
+ return ERR_PTR(-EBUSY);
if (likely(barn->nr_empty)) {
empty = list_first_entry(&barn->sheaves_empty, struct slab_sheaf,
@@ -5008,7 +5020,8 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
return NULL;
}
- full = barn_replace_empty_sheaf(barn, pcs->main);
+ full = barn_replace_empty_sheaf(barn, pcs->main,
+ gfpflags_allow_spinning(gfp));
if (full) {
stat(s, BARN_GET);
@@ -5025,7 +5038,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
empty = pcs->spare;
pcs->spare = NULL;
} else {
- empty = barn_get_empty_sheaf(barn);
+ empty = barn_get_empty_sheaf(barn, true);
}
}
@@ -5165,7 +5178,8 @@ void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp, int node)
}
static __fastpath_inline
-unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
+unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, gfp_t gfp, size_t size,
+ void **p)
{
struct slub_percpu_sheaves *pcs;
struct slab_sheaf *main;
@@ -5199,7 +5213,8 @@ unsigned int alloc_from_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
return allocated;
}
- full = barn_replace_empty_sheaf(barn, pcs->main);
+ full = barn_replace_empty_sheaf(barn, pcs->main,
+ gfpflags_allow_spinning(gfp));
if (full) {
stat(s, BARN_GET);
@@ -5700,7 +5715,7 @@ void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
gfp_t alloc_gfp = __GFP_NOWARN | __GFP_NOMEMALLOC | gfp_flags;
struct kmem_cache *s;
bool can_retry = true;
- void *ret = ERR_PTR(-EBUSY);
+ void *ret;
VM_WARN_ON_ONCE(gfp_flags & ~(__GFP_ACCOUNT | __GFP_ZERO |
__GFP_NO_OBJ_EXT));
@@ -5731,6 +5746,12 @@ void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
*/
return NULL;
+ ret = alloc_from_pcs(s, alloc_gfp, node);
+ if (ret)
+ goto success;
+
+ ret = ERR_PTR(-EBUSY);
+
/*
* Do not call slab_alloc_node(), since trylock mode isn't
* compatible with slab_pre_alloc_hook/should_failslab and
@@ -5767,6 +5788,7 @@ void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
ret = NULL;
}
+success:
maybe_wipe_obj_freeptr(s, ret);
slab_post_alloc_hook(s, NULL, alloc_gfp, 1, &ret,
slab_want_init_on_alloc(alloc_gfp, s), size);
@@ -6087,7 +6109,8 @@ static void __pcs_install_empty_sheaf(struct kmem_cache *s,
* unlocked.
*/
static struct slub_percpu_sheaves *
-__pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs)
+__pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
+ bool allow_spin)
{
struct slab_sheaf *empty;
struct node_barn *barn;
@@ -6111,7 +6134,7 @@ __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs)
put_fail = false;
if (!pcs->spare) {
- empty = barn_get_empty_sheaf(barn);
+ empty = barn_get_empty_sheaf(barn, allow_spin);
if (empty) {
pcs->spare = pcs->main;
pcs->main = empty;
@@ -6125,7 +6148,7 @@ __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs)
return pcs;
}
- empty = barn_replace_full_sheaf(barn, pcs->main);
+ empty = barn_replace_full_sheaf(barn, pcs->main, allow_spin);
if (!IS_ERR(empty)) {
stat(s, BARN_PUT);
@@ -6133,7 +6156,8 @@ __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs)
return pcs;
}
- if (PTR_ERR(empty) == -E2BIG) {
+ /* sheaf_flush_unused() doesn't support !allow_spin */
+ if (PTR_ERR(empty) == -E2BIG && allow_spin) {
/* Since we got here, spare exists and is full */
struct slab_sheaf *to_flush = pcs->spare;
@@ -6158,6 +6182,14 @@ __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs)
alloc_empty:
local_unlock(&s->cpu_sheaves->lock);
+ /*
+ * alloc_empty_sheaf() doesn't support !allow_spin and it's
+ * easier to fall back to freeing directly without sheaves
+ * than add the support (and to sheaf_flush_unused() above)
+ */
+ if (!allow_spin)
+ return NULL;
+
empty = alloc_empty_sheaf(s, GFP_NOWAIT);
if (empty)
goto got_empty;
@@ -6200,7 +6232,7 @@ __pcs_replace_full_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs)
* The object is expected to have passed slab_free_hook() already.
*/
static __fastpath_inline
-bool free_to_pcs(struct kmem_cache *s, void *object)
+bool free_to_pcs(struct kmem_cache *s, void *object, bool allow_spin)
{
struct slub_percpu_sheaves *pcs;
@@ -6211,7 +6243,7 @@ bool free_to_pcs(struct kmem_cache *s, void *object)
if (unlikely(pcs->main->size == s->sheaf_capacity)) {
- pcs = __pcs_replace_full_main(s, pcs);
+ pcs = __pcs_replace_full_main(s, pcs, allow_spin);
if (unlikely(!pcs))
return false;
}
@@ -6333,7 +6365,7 @@ bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
goto fail;
}
- empty = barn_get_empty_sheaf(barn);
+ empty = barn_get_empty_sheaf(barn, true);
if (empty) {
pcs->rcu_free = empty;
@@ -6453,7 +6485,7 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
goto no_empty;
if (!pcs->spare) {
- empty = barn_get_empty_sheaf(barn);
+ empty = barn_get_empty_sheaf(barn, true);
if (!empty)
goto no_empty;
@@ -6467,7 +6499,7 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
goto do_free;
}
- empty = barn_replace_full_sheaf(barn, pcs->main);
+ empty = barn_replace_full_sheaf(barn, pcs->main, true);
if (IS_ERR(empty)) {
stat(s, BARN_PUT_FAIL);
goto no_empty;
@@ -6719,7 +6751,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id())
&& likely(!slab_test_pfmemalloc(slab))) {
- if (likely(free_to_pcs(s, object)))
+ if (likely(free_to_pcs(s, object, true)))
return;
}
@@ -6980,6 +7012,12 @@ void kfree_nolock(const void *object)
* since kasan quarantine takes locks and not supported from NMI.
*/
kasan_slab_free(s, x, false, false, /* skip quarantine */true);
+
+ if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id())) {
+ if (likely(free_to_pcs(s, x, false)))
+ return;
+ }
+
do_slab_free(s, slab, x, x, 0, _RET_IP_);
}
EXPORT_SYMBOL_GPL(kfree_nolock);
@@ -7532,7 +7570,7 @@ int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
size--;
}
- i = alloc_from_pcs_bulk(s, size, p);
+ i = alloc_from_pcs_bulk(s, flags, size, p);
if (i < size) {
/*
--
2.52.0
* Vlastimil Babka <vbabka@suse.cz> [260123 01:54]:
> Before we enable percpu sheaves for kmalloc caches, we need to make sure
> kmalloc_nolock() and kfree_nolock() will continue working properly and
> not spin when not allowed to.
>
> Percpu sheaves themselves use local_trylock() so they are already
> compatible. We just need to be careful with the barn->lock spin_lock.
> Pass a new allow_spin parameter where necessary to use
> spin_trylock_irqsave().
>
> In kmalloc_nolock_noprof() we can now attempt alloc_from_pcs() safely;
> for now it will always fail, until we enable sheaves for kmalloc caches
> next. Similarly, in kfree_nolock() we can attempt free_to_pcs().
>
> Reviewed-by: Suren Baghdasaryan <surenb@google.com>
> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> Reviewed-by: Hao Li <hao.li@linux.dev>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
I'd rather have a helper that _mayspin() or something. That way, when
we know we need to or don't want to, we avoid extra instructions. We
can also avoid passing true/false and complicating the interface when
it's not necessary.
The way it is written works as well, though.
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
On 1/27/26 18:36, Liam R. Howlett wrote:
> * Vlastimil Babka <vbabka@suse.cz> [260123 01:54]:
>> Before we enable percpu sheaves for kmalloc caches, we need to make sure
>> kmalloc_nolock() and kfree_nolock() will continue working properly and
>> not spin when not allowed to.
>>
>> Percpu sheaves themselves use local_trylock() so they are already
>> compatible. We just need to be careful with the barn->lock spin_lock.
>> Pass a new allow_spin parameter where necessary to use
>> spin_trylock_irqsave().
>>
>> In kmalloc_nolock_noprof() we can now attempt alloc_from_pcs() safely;
>> for now it will always fail, until we enable sheaves for kmalloc caches
>> next. Similarly, in kfree_nolock() we can attempt free_to_pcs().
>>
>> Reviewed-by: Suren Baghdasaryan <surenb@google.com>
>> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
>> Reviewed-by: Hao Li <hao.li@linux.dev>
>> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
>
> I'd rather have a helper that _mayspin() or something. That way, when
> we know we need to or don't want to, we avoid extra instructions. We
> can also avoid passing true/false and complicating the interface when
> it's not necessary.

Can you elaborate a bit more on how the helper would look and be used? I
don't follow immediately. We do have the helper for checking gfp flags
to extract mayspin, but that wouldn't avoid extra instructions, so you
must be thinking of something else.

> The way it is written works as well, though.
>
> Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>

Thanks!
On Thu, Jan 22, 2026 at 10:53 PM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> Before we enable percpu sheaves for kmalloc caches, we need to make sure
> kmalloc_nolock() and kfree_nolock() will continue working properly and
> not spin when not allowed to.
>
> Percpu sheaves themselves use local_trylock() so they are already
> compatible. We just need to be careful with the barn->lock spin_lock.
> Pass a new allow_spin parameter where necessary to use
> spin_trylock_irqsave().
>
> In kmalloc_nolock_noprof() we can now attempt alloc_from_pcs() safely;
> for now it will always fail, until we enable sheaves for kmalloc caches
> next. Similarly, in kfree_nolock() we can attempt free_to_pcs().
>
> Reviewed-by: Suren Baghdasaryan <surenb@google.com>
> Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
> Reviewed-by: Hao Li <hao.li@linux.dev>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

Acked-by: Alexei Starovoitov <ast@kernel.org>