Currently, zsmalloc creates separate handle and zspage kmem
caches for each pool, which may be suboptimal from the memory
usage point of view (extra internal fragmentation per pool).
Systems that create multiple zsmalloc pools may benefit from
sharing common zsmalloc caches.

Make the handle and zspage kmem caches global. The memory
savings depend on the particular setup and data patterns and
can be observed via slabinfo.
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
v1->v2:
- factored out zs_destroy_caches/zs_init_caches (Yosry)
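
For illustration (object counts are workload-dependent, and SLUB
may merge these caches into a compatible one, in which case they
appear under an alias), the two global caches that replace the
per-pool zs_handle-%s/zspage-%s ones can be checked with:

  # grep -E '^(zs_handle|zspage) ' /proc/slabinfo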
mm/zsmalloc.c | 106 ++++++++++++++++++++++++--------------------------
1 file changed, 50 insertions(+), 56 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5a68349403d5..8df45aa1b5c8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -198,12 +198,13 @@ struct link_free {
};
};
+static struct kmem_cache *handle_cachep;
+static struct kmem_cache *zspage_cachep;
+
struct zs_pool {
const char *name;
struct size_class *size_class[ZS_SIZE_CLASSES];
- struct kmem_cache *handle_cachep;
- struct kmem_cache *zspage_cachep;
atomic_long_t pages_allocated;
@@ -376,60 +377,28 @@ static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
#endif
-static int create_cache(struct zs_pool *pool)
+static unsigned long cache_alloc_handle(gfp_t gfp)
{
- char *name;
-
- name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name);
- if (!name)
- return -ENOMEM;
- pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE,
- 0, 0, NULL);
- kfree(name);
- if (!pool->handle_cachep)
- return -EINVAL;
-
- name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name);
- if (!name)
- return -ENOMEM;
- pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage),
- 0, 0, NULL);
- kfree(name);
- if (!pool->zspage_cachep) {
- kmem_cache_destroy(pool->handle_cachep);
- pool->handle_cachep = NULL;
- return -EINVAL;
- }
+ gfp = gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE);
- return 0;
-}
-
-static void destroy_cache(struct zs_pool *pool)
-{
- kmem_cache_destroy(pool->handle_cachep);
- kmem_cache_destroy(pool->zspage_cachep);
+ return (unsigned long)kmem_cache_alloc(handle_cachep, gfp);
}
-static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
+static void cache_free_handle(unsigned long handle)
{
- return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
- gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+ kmem_cache_free(handle_cachep, (void *)handle);
}
-static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
+static struct zspage *cache_alloc_zspage(gfp_t gfp)
{
- kmem_cache_free(pool->handle_cachep, (void *)handle);
-}
+ gfp = gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE);
-static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
-{
- return kmem_cache_zalloc(pool->zspage_cachep,
- flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+ return kmem_cache_zalloc(zspage_cachep, gfp);
}
-static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
+static void cache_free_zspage(struct zspage *zspage)
{
- kmem_cache_free(pool->zspage_cachep, zspage);
+ kmem_cache_free(zspage_cachep, zspage);
}
/* class->lock(which owns the handle) synchronizes races */
@@ -858,7 +827,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
zpdesc = next;
} while (zpdesc != NULL);
- cache_free_zspage(pool, zspage);
+ cache_free_zspage(zspage);
class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
@@ -971,7 +940,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
{
int i;
struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE];
- struct zspage *zspage = cache_alloc_zspage(pool, gfp);
+ struct zspage *zspage = cache_alloc_zspage(gfp);
if (!zspage)
return NULL;
@@ -993,7 +962,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
zpdesc_dec_zone_page_state(zpdescs[i]);
free_zpdesc(zpdescs[i]);
}
- cache_free_zspage(pool, zspage);
+ cache_free_zspage(zspage);
return NULL;
}
__zpdesc_set_zsmalloc(zpdesc);
@@ -1344,7 +1313,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp,
if (unlikely(size > ZS_MAX_ALLOC_SIZE))
return (unsigned long)ERR_PTR(-ENOSPC);
- handle = cache_alloc_handle(pool, gfp);
+ handle = cache_alloc_handle(gfp);
if (!handle)
return (unsigned long)ERR_PTR(-ENOMEM);
@@ -1368,7 +1337,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp,
zspage = alloc_zspage(pool, class, gfp, nid);
if (!zspage) {
- cache_free_handle(pool, handle);
+ cache_free_handle(handle);
return (unsigned long)ERR_PTR(-ENOMEM);
}
@@ -1448,7 +1417,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
free_zspage(pool, class, zspage);
spin_unlock(&class->lock);
- cache_free_handle(pool, handle);
+ cache_free_handle(handle);
}
EXPORT_SYMBOL_GPL(zs_free);
@@ -2110,9 +2079,6 @@ struct zs_pool *zs_create_pool(const char *name)
if (!pool->name)
goto err;
- if (create_cache(pool))
- goto err;
-
/*
* Iterate reversely, because, size of size_class that we want to use
* for merging should be larger or equal to current size.
@@ -2234,20 +2200,47 @@ void zs_destroy_pool(struct zs_pool *pool)
kfree(class);
}
- destroy_cache(pool);
kfree(pool->name);
kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
+static void zs_destroy_caches(void)
+{
+ kmem_cache_destroy(handle_cachep);
+ handle_cachep = NULL;
+ kmem_cache_destroy(zspage_cachep);
+ zspage_cachep = NULL;
+}
+
+static int __init zs_init_caches(void)
+{
+ handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
+ 0, 0, NULL);
+ zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
+ 0, 0, NULL);
+
+ if (!handle_cachep || !zspage_cachep) {
+ zs_destroy_caches();
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static int __init zs_init(void)
{
- int rc __maybe_unused;
+ int rc;
+
+ rc = zs_init_caches();
+ if (rc)
+ return rc;
#ifdef CONFIG_COMPACTION
rc = set_movable_ops(&zsmalloc_mops, PGTY_zsmalloc);
- if (rc)
+ if (rc) {
+ zs_destroy_caches();
return rc;
+ }
#endif
zs_stat_init();
return 0;
@@ -2259,6 +2252,7 @@ static void __exit zs_exit(void)
set_movable_ops(NULL, PGTY_zsmalloc);
#endif
zs_stat_exit();
+ zs_destroy_caches();
}
module_init(zs_init);
--
2.52.0.457.g6b5491de43-goog
On Sat, Jan 17, 2026 at 11:54:05AM +0900, Sergey Senozhatsky wrote:
> Currently, zsmalloc creates separate handle and zspage kmem
> caches for each pool, which may be suboptimal from the memory
> usage point of view (extra internal fragmentation per pool).
> Systems that create multiple zsmalloc pools may benefit from
> sharing common zsmalloc caches.
>
> Make the handle and zspage kmem caches global. The memory
> savings depend on the particular setup and data patterns and
> can be observed via slabinfo.
>
> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>

Reviewed-by: Yosry Ahmed <yosry.ahmed@linux.dev>
On Fri, Jan 16, 2026 at 6:54 PM Sergey Senozhatsky
<senozhatsky@chromium.org> wrote:
>
> Currently, zsmalloc creates separate handle and zspage kmem
> caches for each pool, which may be suboptimal from the memory
> usage point of view (extra internal fragmentation per pool).
> Systems that create multiple zsmalloc pools may benefit from
> sharing common zsmalloc caches.
>
> Make the handle and zspage kmem caches global. The memory
> savings depend on the particular setup and data patterns and
> can be observed via slabinfo.
>
> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>

LGTM!
Reviewed-by: Nhat Pham <nphamcs@gmail.com>