Zswap compresses and uncompresses in PAGE_SIZE units, which simplifies
the accounting for how much memory it has compressed. However, when a
compressed object is stored at the boundary of two zspages, accounting
at a PAGE_SIZE granularity makes it difficult to fractionally charge
each backing zspage with the ratio of memory it backs for the
compressed object.
To make sub-PAGE_SIZE granularity charging possible for MEMCG_ZSWAPPED,
track the value in bytes and adjust its accounting accordingly.
No functional changes intended.
Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
---
include/linux/memcontrol.h | 2 +-
mm/memcontrol.c | 5 +++--
mm/zsmalloc.c | 4 ++--
mm/zswap.c | 8 +++++---
4 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 701d9ab6fef1..ce2e598b5963 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -38,7 +38,7 @@ enum memcg_stat_item {
MEMCG_VMALLOC,
MEMCG_KMEM,
MEMCG_ZSWAP_B,
- MEMCG_ZSWAPPED,
+ MEMCG_ZSWAPPED_B,
MEMCG_NR_STAT,
};
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 68139be66a4f..1cb02d2febe8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -342,7 +342,7 @@ static const unsigned int memcg_stat_items[] = {
MEMCG_VMALLOC,
MEMCG_KMEM,
MEMCG_ZSWAP_B,
- MEMCG_ZSWAPPED,
+ MEMCG_ZSWAPPED_B,
};
#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
@@ -1364,7 +1364,7 @@ static const struct memory_stat memory_stats[] = {
{ "shmem", NR_SHMEM },
#ifdef CONFIG_ZSWAP
{ "zswap", MEMCG_ZSWAP_B },
- { "zswapped", MEMCG_ZSWAPPED },
+ { "zswapped", MEMCG_ZSWAPPED_B },
#endif
{ "file_mapped", NR_FILE_MAPPED },
{ "file_dirty", NR_FILE_DIRTY },
@@ -1412,6 +1412,7 @@ static int memcg_page_state_unit(int item)
switch (item) {
case MEMCG_PERCPU_B:
case MEMCG_ZSWAP_B:
+ case MEMCG_ZSWAPPED_B:
case NR_SLAB_RECLAIMABLE_B:
case NR_SLAB_UNRECLAIMABLE_B:
return 1;
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 291194572a09..24665d7cd4a9 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1047,7 +1047,7 @@ static void zs_charge_objcg(struct zs_pool *pool, struct obj_cgroup *objcg,
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
mod_memcg_state(memcg, pool->compressed_stat, size);
- mod_memcg_state(memcg, pool->uncompressed_stat, 1);
+ mod_memcg_state(memcg, pool->uncompressed_stat, PAGE_SIZE);
rcu_read_unlock();
}
@@ -1066,7 +1066,7 @@ static void zs_uncharge_objcg(struct zs_pool *pool, struct obj_cgroup *objcg,
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
mod_memcg_state(memcg, pool->compressed_stat, -size);
- mod_memcg_state(memcg, pool->uncompressed_stat, -1);
+ mod_memcg_state(memcg, pool->uncompressed_stat, -(int)PAGE_SIZE);
rcu_read_unlock();
}
#else
diff --git a/mm/zswap.c b/mm/zswap.c
index bca29a6e18f3..d81e2db4490b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -257,7 +257,7 @@ static struct zswap_pool *zswap_pool_create(char *compressor)
/* unique name for each pool specifically required by zsmalloc */
snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
pool->zs_pool = zs_create_pool(name, true, MEMCG_ZSWAP_B,
- MEMCG_ZSWAPPED);
+ MEMCG_ZSWAPPED_B);
if (!pool->zs_pool)
goto error;
@@ -1214,8 +1214,10 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
*/
if (!mem_cgroup_disabled()) {
mem_cgroup_flush_stats(memcg);
- nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
- nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+ nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B);
+ nr_backing >>= PAGE_SHIFT;
+ nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED_B);
+ nr_stored >>= PAGE_SHIFT;
} else {
nr_backing = zswap_total_pages();
nr_stored = atomic_long_read(&zswap_stored_pages);
--
2.52.0
On Wed, Mar 11, 2026 at 12:52 PM Joshua Hahn <joshua.hahnjy@gmail.com> wrote:
>
> Zswap compresses and uncompresses in PAGE_SIZE units, which simplifies
> the accounting for how much memory it has compressed. However, when a
> compressed object is stored at the boundary of two zspages, accounting
> at a PAGE_SIZE granularity makes it difficult to fractionally charge
> each backing zspage with the ratio of memory it backs for the
> compressed object.
>
> To make sub-PAGE_SIZE granularity charging possible for MEMCG_ZSWAPPED,
> track the value in bytes and adjust its accounting accordingly.
>
> No functional changes intended.
>
> Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
LGTM.
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
> ---
> include/linux/memcontrol.h | 2 +-
> mm/memcontrol.c | 5 +++--
> mm/zsmalloc.c | 4 ++--
> mm/zswap.c | 8 +++++---
> 4 files changed, 11 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 701d9ab6fef1..ce2e598b5963 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -38,7 +38,7 @@ enum memcg_stat_item {
> MEMCG_VMALLOC,
> MEMCG_KMEM,
> MEMCG_ZSWAP_B,
> - MEMCG_ZSWAPPED,
> + MEMCG_ZSWAPPED_B,
> MEMCG_NR_STAT,
> };
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 68139be66a4f..1cb02d2febe8 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -342,7 +342,7 @@ static const unsigned int memcg_stat_items[] = {
> MEMCG_VMALLOC,
> MEMCG_KMEM,
> MEMCG_ZSWAP_B,
> - MEMCG_ZSWAPPED,
> + MEMCG_ZSWAPPED_B,
> };
>
> #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
> @@ -1364,7 +1364,7 @@ static const struct memory_stat memory_stats[] = {
> { "shmem", NR_SHMEM },
> #ifdef CONFIG_ZSWAP
> { "zswap", MEMCG_ZSWAP_B },
> - { "zswapped", MEMCG_ZSWAPPED },
> + { "zswapped", MEMCG_ZSWAPPED_B },
> #endif
> { "file_mapped", NR_FILE_MAPPED },
> { "file_dirty", NR_FILE_DIRTY },
> @@ -1412,6 +1412,7 @@ static int memcg_page_state_unit(int item)
> switch (item) {
> case MEMCG_PERCPU_B:
> case MEMCG_ZSWAP_B:
> + case MEMCG_ZSWAPPED_B:
> case NR_SLAB_RECLAIMABLE_B:
> case NR_SLAB_UNRECLAIMABLE_B:
> return 1;
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 291194572a09..24665d7cd4a9 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -1047,7 +1047,7 @@ static void zs_charge_objcg(struct zs_pool *pool, struct obj_cgroup *objcg,
> rcu_read_lock();
> memcg = obj_cgroup_memcg(objcg);
> mod_memcg_state(memcg, pool->compressed_stat, size);
> - mod_memcg_state(memcg, pool->uncompressed_stat, 1);
> + mod_memcg_state(memcg, pool->uncompressed_stat, PAGE_SIZE);
> rcu_read_unlock();
> }
>
> @@ -1066,7 +1066,7 @@ static void zs_uncharge_objcg(struct zs_pool *pool, struct obj_cgroup *objcg,
> rcu_read_lock();
> memcg = obj_cgroup_memcg(objcg);
> mod_memcg_state(memcg, pool->compressed_stat, -size);
> - mod_memcg_state(memcg, pool->uncompressed_stat, -1);
> + mod_memcg_state(memcg, pool->uncompressed_stat, -(int)PAGE_SIZE);
nit: seems a bit awkward lol?
On Wed, 11 Mar 2026 13:33:34 -0700 Nhat Pham <nphamcs@gmail.com> wrote:
> On Wed, Mar 11, 2026 at 12:52 PM Joshua Hahn <joshua.hahnjy@gmail.com> wrote:
> >
> > Zswap compresses and uncompresses in PAGE_SIZE units, which simplifies
> > the accounting for how much memory it has compressed. However, when a
> > compressed object is stored at the boundary of two zspages, accounting
> > at a PAGE_SIZE granularity makes it difficult to fractionally charge
> > each backing zspage with the ratio of memory it backs for the
> > compressed object.
> >
> > To make sub-PAGE_SIZE granularity charging possible for MEMCG_ZSWAPPED,
> > track the value in bytes and adjust its accounting accordingly.
> >
> > No functional changes intended.
> >
> > Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
>
> LGTM.
> Reviewed-by: Nhat Pham <nphamcs@gmail.com>
[...snip...]
> > @@ -1066,7 +1066,7 @@ static void zs_uncharge_objcg(struct zs_pool *pool, struct obj_cgroup *objcg,
> > rcu_read_lock();
> > memcg = obj_cgroup_memcg(objcg);
> > mod_memcg_state(memcg, pool->compressed_stat, -size);
> > - mod_memcg_state(memcg, pool->uncompressed_stat, -1);
> > + mod_memcg_state(memcg, pool->uncompressed_stat, -(int)PAGE_SIZE);
>
> nit: seems a bit awkward lol?
Hello Nhat,
I totally just saw the Reviewed-by and moved on and didn't see this nit
here :p sorry!!
But yeah, I agree that it looks very awkward. AFAICT I don't think there's
a signed version of PAGE_SIZE or a negative PAGE_SIZE definition, so
unfortunately this cast is needed : -(
mm/zsmalloc.c: In function ‘zs_uncharge_objcg’:
mm/zsmalloc.c:1068:66: warning: overflow in conversion from ‘long unsigned int’ to ‘int’ changes value from ‘18446744073709547520’ to ‘-4096’ [-Woverflow]
1068 | mod_memcg_state(memcg, pool->memcg_params->uncompressed, -PAGE_SIZE);
| ^~~~~~~~~~
I will note that this is a temporary cast, we immediately remove this line
in the next patch. I did this because I wanted to show a natural transition
from MEMCG_ZSWAPPED --> MEMCG_ZSWAPPED_B --> NR_ZSWAPPED_B and thought it
would be easier to review, but this does leave some intermediary changes in
this patch that are removed right away. If you would prefer that I squash
this commit and the next into a single patch so that there is less
intermediate code, I would be happy to do that instead!
I hope you have a great day!
Joshua
© 2016 - 2026 Red Hat, Inc.