[PATCH 04/11] mm/zsmalloc: Introduce objcgs pointer in struct zspage

Joshua Hahn posted 11 patches 3 weeks, 5 days ago
[PATCH 04/11] mm/zsmalloc: Introduce objcgs pointer in struct zspage
Posted by Joshua Hahn 3 weeks, 5 days ago
Introduce an array of struct obj_cgroup pointers to zspage to keep track
of compressed objects' memcg ownership, if the zs_pool has been made to
be memcg-aware at creation time.

Move the error path for alloc_zspage to a jump label to simplify the
growing error handling path for a failed zpdesc allocation.

Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
---
 mm/zsmalloc.c | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 3f0f42b78314..dcf99516227c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -39,6 +39,7 @@
 #include <linux/zsmalloc.h>
 #include <linux/fs.h>
 #include <linux/workqueue.h>
+#include <linux/memcontrol.h>
 #include "zpdesc.h"
 
 #define ZSPAGE_MAGIC	0x58
@@ -273,6 +274,7 @@ struct zspage {
 	struct zpdesc *first_zpdesc;
 	struct list_head list; /* fullness list */
 	struct zs_pool *pool;
+	struct obj_cgroup **objcgs;
 	struct zspage_lock zsl;
 };
 
@@ -825,6 +827,8 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 		zpdesc = next;
 	} while (zpdesc != NULL);
 
+	if (pool->memcg_aware)
+		kfree(zspage->objcgs);
 	cache_free_zspage(zspage);
 
 	class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
@@ -946,6 +950,16 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	if (!IS_ENABLED(CONFIG_COMPACTION))
 		gfp &= ~__GFP_MOVABLE;
 
+	if (pool->memcg_aware) {
+		zspage->objcgs = kcalloc(class->objs_per_zspage,
+					 sizeof(struct obj_cgroup *),
+					 gfp & ~__GFP_HIGHMEM);
+		if (!zspage->objcgs) {
+			cache_free_zspage(zspage);
+			return NULL;
+		}
+	}
+
 	zspage->magic = ZSPAGE_MAGIC;
 	zspage->pool = pool;
 	zspage->class = class->index;
@@ -955,14 +969,8 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 		struct zpdesc *zpdesc;
 
 		zpdesc = alloc_zpdesc(gfp, nid);
-		if (!zpdesc) {
-			while (--i >= 0) {
-				zpdesc_dec_zone_page_state(zpdescs[i]);
-				free_zpdesc(zpdescs[i]);
-			}
-			cache_free_zspage(zspage);
-			return NULL;
-		}
+		if (!zpdesc)
+			goto err;
 		__zpdesc_set_zsmalloc(zpdesc);
 
 		zpdesc_inc_zone_page_state(zpdesc);
@@ -973,6 +981,16 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	init_zspage(class, zspage);
 
 	return zspage;
+
+err:
+	while (--i >= 0) {
+		zpdesc_dec_zone_page_state(zpdescs[i]);
+		free_zpdesc(zpdescs[i]);
+	}
+	if (pool->memcg_aware)
+		kfree(zspage->objcgs);
+	cache_free_zspage(zspage);
+	return NULL;
 }
 
 static struct zspage *find_get_zspage(struct size_class *class)
-- 
2.52.0
Re: [PATCH 04/11] mm/zsmalloc: Introduce objcgs pointer in struct zspage
Posted by Nhat Pham 3 weeks, 5 days ago
On Wed, Mar 11, 2026 at 12:52 PM Joshua Hahn <joshua.hahnjy@gmail.com> wrote:
>
> Introduce an array of struct obj_cgroup pointers to zspage to keep track
> of compressed objects' memcg ownership, if the zs_pool has been made to
> be memcg-aware at creation time.
>
> Move the error path for alloc_zspage to a jump label to simplify the
> growing error handling path for a failed zpdesc allocation.
>
> Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
> Suggested-by: Harry Yoo <harry.yoo@oracle.com>
> Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
> ---
>  mm/zsmalloc.c | 34 ++++++++++++++++++++++++++--------
>  1 file changed, 26 insertions(+), 8 deletions(-)
>
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 3f0f42b78314..dcf99516227c 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -39,6 +39,7 @@
>  #include <linux/zsmalloc.h>
>  #include <linux/fs.h>
>  #include <linux/workqueue.h>
> +#include <linux/memcontrol.h>
>  #include "zpdesc.h"
>
>  #define ZSPAGE_MAGIC   0x58
> @@ -273,6 +274,7 @@ struct zspage {
>         struct zpdesc *first_zpdesc;
>         struct list_head list; /* fullness list */
>         struct zs_pool *pool;
> +       struct obj_cgroup **objcgs;
>         struct zspage_lock zsl;
>  };
>
> @@ -825,6 +827,8 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
>                 zpdesc = next;
>         } while (zpdesc != NULL);
>
> +       if (pool->memcg_aware)
> +               kfree(zspage->objcgs);
>         cache_free_zspage(zspage);
>
>         class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
> @@ -946,6 +950,16 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
>         if (!IS_ENABLED(CONFIG_COMPACTION))
>                 gfp &= ~__GFP_MOVABLE;
>
> +       if (pool->memcg_aware) {
> +               zspage->objcgs = kcalloc(class->objs_per_zspage,
> +                                        sizeof(struct obj_cgroup *),
> +                                        gfp & ~__GFP_HIGHMEM);

I remembered asking this, so my apologies if I missed/forgot your
response - but would vmalloc work here? I.e., kvcalloc to fall back to
vmalloc, etc.?

> +               if (!zspage->objcgs) {
> +                       cache_free_zspage(zspage);
> +                       return NULL;
> +               }
> +       }
> +
>         zspage->magic = ZSPAGE_MAGIC;
>         zspage->pool = pool;
>         zspage->class = class->index;
> @@ -955,14 +969,8 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
>                 struct zpdesc *zpdesc;
>
>                 zpdesc = alloc_zpdesc(gfp, nid);
> -               if (!zpdesc) {
> -                       while (--i >= 0) {
> -                               zpdesc_dec_zone_page_state(zpdescs[i]);
> -                               free_zpdesc(zpdescs[i]);
> -                       }
> -                       cache_free_zspage(zspage);
> -                       return NULL;
> -               }
> +               if (!zpdesc)
> +                       goto err;
>                 __zpdesc_set_zsmalloc(zpdesc);
>
>                 zpdesc_inc_zone_page_state(zpdesc);
> @@ -973,6 +981,16 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
>         init_zspage(class, zspage);
>
>         return zspage;
> +
> +err:
> +       while (--i >= 0) {
> +               zpdesc_dec_zone_page_state(zpdescs[i]);
> +               free_zpdesc(zpdescs[i]);
> +       }
> +       if (pool->memcg_aware)
> +               kfree(zspage->objcgs);
> +       cache_free_zspage(zspage);
> +       return NULL;
>  }
>
>  static struct zspage *find_get_zspage(struct size_class *class)
> --
> 2.52.0
>
Re: [PATCH 04/11] mm/zsmalloc: Introduce objcgs pointer in struct zspage
Posted by Joshua Hahn 3 weeks, 5 days ago
On Wed, 11 Mar 2026 13:17:22 -0700 Nhat Pham <nphamcs@gmail.com> wrote:

> On Wed, Mar 11, 2026 at 12:52 PM Joshua Hahn <joshua.hahnjy@gmail.com> wrote:
> >
> > Introduce an array of struct obj_cgroup pointers to zspage to keep track
> > of compressed objects' memcg ownership, if the zs_pool has been made to
> > be memcg-aware at creation time.
> >
> > Move the error path for alloc_zspage to a jump label to simplify the
> > growing error handling path for a failed zpdesc allocation.
> >
> > Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
> > Suggested-by: Harry Yoo <harry.yoo@oracle.com>
> > Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
> > ---
> >  mm/zsmalloc.c | 34 ++++++++++++++++++++++++++--------
> >  1 file changed, 26 insertions(+), 8 deletions(-)
> >
> > diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> > index 3f0f42b78314..dcf99516227c 100644
> > --- a/mm/zsmalloc.c
> > +++ b/mm/zsmalloc.c
> > @@ -39,6 +39,7 @@
> >  #include <linux/zsmalloc.h>
> >  #include <linux/fs.h>
> >  #include <linux/workqueue.h>
> > +#include <linux/memcontrol.h>
> >  #include "zpdesc.h"
> >
> >  #define ZSPAGE_MAGIC   0x58
> > @@ -273,6 +274,7 @@ struct zspage {
> >         struct zpdesc *first_zpdesc;
> >         struct list_head list; /* fullness list */
> >         struct zs_pool *pool;
> > +       struct obj_cgroup **objcgs;
> >         struct zspage_lock zsl;
> >  };
> >
> > @@ -825,6 +827,8 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
> >                 zpdesc = next;
> >         } while (zpdesc != NULL);
> >
> > +       if (pool->memcg_aware)
> > +               kfree(zspage->objcgs);
> >         cache_free_zspage(zspage);
> >
> >         class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
> > @@ -946,6 +950,16 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
> >         if (!IS_ENABLED(CONFIG_COMPACTION))
> >                 gfp &= ~__GFP_MOVABLE;
> >
> > +       if (pool->memcg_aware) {
> > +               zspage->objcgs = kcalloc(class->objs_per_zspage,
> > +                                        sizeof(struct obj_cgroup *),
> > +                                        gfp & ~__GFP_HIGHMEM);
> 
> I remembered asking this, so my apologies if I missed/forgot your
> response - but would vmalloc work here? I.e., kvcalloc to fall back to
> vmalloc, etc.?

Hello Nhat : -)

Thank you for reviewing, and for your acks on the other parts!

You're right, I missed changing that on my end after v1. No reason
vmalloc shouldn't work here; let me make that change in v3.

Thanks, I hope you have a great day!
Joshua