All zswap entries take a reference on the zswap_pool in zswap_store()
and drop it when they are freed. Changing this kref to a percpu_ref is
better for scalability.

Although percpu_ref uses a bit more memory, that should be fine for our
use case, since we almost always have only one zswap_pool in use. The
performance gain is on the zswap_store/load hot path.
Testing: kernel build (32 threads) in tmpfs with memory.max=2GB, zswap
shrinker and writeback enabled with one 50GB swapfile, on a 128-CPU
x86-64 machine. Below is the average of 5 runs:
        mm-unstable    zswap-global-lru
real    63.20          63.12
user    1061.75        1062.95
sys     268.74         264.44
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
mm/zswap.c | 36 +++++++++++++++++++++++++++---------
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index d275eb523fc4..961349162997 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -173,7 +173,7 @@ struct crypto_acomp_ctx {
 struct zswap_pool {
 	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
 	struct crypto_acomp_ctx __percpu *acomp_ctx;
-	struct kref kref;
+	struct percpu_ref ref;
 	struct list_head list;
 	struct work_struct release_work;
 	struct hlist_node node;
@@ -305,6 +305,7 @@ static void zswap_update_total_size(void)
 /*********************************
 * pool functions
 **********************************/
+static void __zswap_pool_empty(struct percpu_ref *ref);
 
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
@@ -358,13 +359,18 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 	/* being the current pool takes 1 ref; this func expects the
 	 * caller to always add the new pool as the current pool
 	 */
-	kref_init(&pool->kref);
+	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
+			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+	if (ret)
+		goto ref_fail;
 	INIT_LIST_HEAD(&pool->list);
 
 	zswap_pool_debug("created", pool);
 
 	return pool;
 
+ref_fail:
+	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 error:
 	if (pool->acomp_ctx)
 		free_percpu(pool->acomp_ctx);
@@ -437,8 +443,9 @@ static void __zswap_pool_release(struct work_struct *work)
 
 	synchronize_rcu();
 
-	/* nobody should have been able to get a kref... */
-	WARN_ON(kref_get_unless_zero(&pool->kref));
+	/* nobody should have been able to get a ref... */
+	WARN_ON(!percpu_ref_is_zero(&pool->ref));
+	percpu_ref_exit(&pool->ref);
 
 	/* pool is now off zswap_pools list and has no references. */
 	zswap_pool_destroy(pool);
@@ -446,11 +453,11 @@ static void __zswap_pool_release(struct work_struct *work)
 
 static struct zswap_pool *zswap_pool_current(void);
 
-static void __zswap_pool_empty(struct kref *kref)
+static void __zswap_pool_empty(struct percpu_ref *ref)
 {
 	struct zswap_pool *pool;
 
-	pool = container_of(kref, typeof(*pool), kref);
+	pool = container_of(ref, typeof(*pool), ref);
 
 	spin_lock(&zswap_pools_lock);
 
@@ -469,12 +476,12 @@ static int __must_check zswap_pool_get(struct zswap_pool *pool)
 	if (!pool)
 		return 0;
 
-	return kref_get_unless_zero(&pool->kref);
+	return percpu_ref_tryget(&pool->ref);
 }
 
 static void zswap_pool_put(struct zswap_pool *pool)
 {
-	kref_put(&pool->kref, __zswap_pool_empty);
+	percpu_ref_put(&pool->ref);
 }
 
 static struct zswap_pool *__zswap_pool_current(void)
@@ -604,6 +611,17 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 
 	if (!pool)
 		pool = zswap_pool_create(type, compressor);
+	else {
+		/*
+		 * Restore the initial ref dropped by percpu_ref_kill()
+		 * when the pool was decommissioned and switch it again
+		 * to percpu mode.
+		 */
+		percpu_ref_resurrect(&pool->ref);
+
+		/* Drop the ref from zswap_pool_find_get(). */
+		zswap_pool_put(pool);
+	}
 
 	if (pool)
 		ret = param_set_charp(s, kp);
@@ -642,7 +660,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 	 * or the new pool we failed to add
 	 */
 	if (put_pool)
-		zswap_pool_put(put_pool);
+		percpu_ref_kill(&put_pool->ref);
 
 	return ret;
 }
--
b4 0.10.1
On Fri, Feb 16, 2024 at 12:55 AM Chengming Zhou
<zhouchengming@bytedance.com> wrote:
>
> All zswap entries take a reference on the zswap_pool in zswap_store()
> and drop it when they are freed. Changing this kref to a percpu_ref is
> better for scalability.
> [...]
Idea is straightforward + code looks solid to me FWIW, so:
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
> [...]
>
> -	/* nobody should have been able to get a kref... */
> -	WARN_ON(kref_get_unless_zero(&pool->kref));
> +	/* nobody should have been able to get a ref... */
> +	WARN_ON(!percpu_ref_is_zero(&pool->ref));
Ah nice - this is actually even clearer :) For some reason I missed
it, my apologies.
> [...]
Now the release of the zswap pool is controlled by percpu_ref; its
release callback (__zswap_pool_empty()) will be called when the
percpu_ref hits 0. But this release callback may be invoked from RCU
callback context by percpu_ref_kill(), i.e. from softirq context, so we
need to use spin_lock/unlock_bh() to avoid a potential deadlock.
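A sketch of the deadlock lockdep would flag (an illustration under the
assumption that the release callback runs from softirq context, not a
trace from the actual report):

	/* Task context, e.g. __zswap_param_set(), with a plain lock: */
	spin_lock(&zswap_pools_lock);
	/*
	 * A softirq can preempt here and run the RCU callback, which
	 * calls __zswap_pool_empty() -> spin_lock(&zswap_pools_lock):
	 * this CPU now spins on a lock it already holds -> deadlock.
	 */
	spin_unlock(&zswap_pools_lock);

	/* Fix: disable bottom halves while holding the lock, so the
	 * softirq (and thus the release callback) cannot interleave.
	 */
	spin_lock_bh(&zswap_pools_lock);
	/* ... */
	spin_unlock_bh(&zswap_pools_lock);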
This problem was introduced by commit f3da427e82c4 ("mm/zswap: change
zswap_pool kref to percpu_ref"), which is in the mm-unstable branch now.
It can be reproduced by testing a kernel build in tmpfs with zswap and
CONFIG_LOCKDEP enabled while changing the zswap compressor setting
dynamically.
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
---
v2:
- Change to use spin_lock/unlock_bh(), per Matthew.
---
mm/zswap.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 011e068eb355..da90933c6d20 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -459,7 +459,7 @@ static void __zswap_pool_empty(struct percpu_ref *ref)
 
 	pool = container_of(ref, typeof(*pool), ref);
 
-	spin_lock(&zswap_pools_lock);
+	spin_lock_bh(&zswap_pools_lock);
 
 	WARN_ON(pool == zswap_pool_current());
 
@@ -468,7 +468,7 @@ static void __zswap_pool_empty(struct percpu_ref *ref)
 	INIT_WORK(&pool->release_work, __zswap_pool_release);
 	schedule_work(&pool->release_work);
 
-	spin_unlock(&zswap_pools_lock);
+	spin_unlock_bh(&zswap_pools_lock);
 }
 
 static int __must_check zswap_pool_get(struct zswap_pool *pool)
@@ -598,7 +598,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 		return -EINVAL;
 	}
 
-	spin_lock(&zswap_pools_lock);
+	spin_lock_bh(&zswap_pools_lock);
 
 	pool = zswap_pool_find_get(type, compressor);
 	if (pool) {
@@ -607,7 +607,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 		list_del_rcu(&pool->list);
 	}
 
-	spin_unlock(&zswap_pools_lock);
+	spin_unlock_bh(&zswap_pools_lock);
 
 	if (!pool)
 		pool = zswap_pool_create(type, compressor);
@@ -628,7 +628,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 	else
 		ret = -EINVAL;
 
-	spin_lock(&zswap_pools_lock);
+	spin_lock_bh(&zswap_pools_lock);
 
 	if (!ret) {
 		put_pool = zswap_pool_current();
@@ -643,7 +643,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 		put_pool = pool;
 	}
 
-	spin_unlock(&zswap_pools_lock);
+	spin_unlock_bh(&zswap_pools_lock);
 
 	if (!zswap_has_pool && !pool) {
 		/* if initial pool creation failed, and this pool creation also
--
2.40.1
Now the release of the zswap pool is controlled by percpu_ref; its
release callback (__zswap_pool_empty()) will be called when the
percpu_ref hits 0. But this release callback may be invoked from RCU
callback context by percpu_ref_kill(), which may be interrupt context.

So we need to use spin_lock_irqsave() and spin_unlock_irqrestore() in
the release callback __zswap_pool_empty(). In the other, task-context
call sites, spin_lock_irq() and spin_unlock_irq() are enough to avoid
the potential deadlock.
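Sketched, the asymmetry looks like this (illustration only, assuming the
callback can run with interrupts already disabled, while
__zswap_param_set() always runs in task context):

	/* Release callback: may nest inside an existing IRQ-disabled
	 * section, so save and restore the previous interrupt state.
	 */
	unsigned long flags;

	spin_lock_irqsave(&zswap_pools_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&zswap_pools_lock, flags);

	/* Task context: interrupts are known to be enabled here, so the
	 * unconditional disable/enable pair is sufficient.
	 */
	spin_lock_irq(&zswap_pools_lock);
	/* ... */
	spin_unlock_irq(&zswap_pools_lock);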
This problem was introduced by commit f3da427e82c4 ("mm/zswap: change
zswap_pool kref to percpu_ref"), which is in the mm-unstable branch now.
It can be reproduced by testing a kernel build in tmpfs with zswap and
CONFIG_LOCKDEP enabled while changing the zswap compressor setting
dynamically.
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
---
mm/zswap.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 011e068eb355..894bd184f78e 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -456,10 +456,11 @@ static struct zswap_pool *zswap_pool_current(void);
 static void __zswap_pool_empty(struct percpu_ref *ref)
 {
 	struct zswap_pool *pool;
+	unsigned long flags;
 
 	pool = container_of(ref, typeof(*pool), ref);
 
-	spin_lock(&zswap_pools_lock);
+	spin_lock_irqsave(&zswap_pools_lock, flags);
 
 	WARN_ON(pool == zswap_pool_current());
 
@@ -468,7 +469,7 @@ static void __zswap_pool_empty(struct percpu_ref *ref)
 	INIT_WORK(&pool->release_work, __zswap_pool_release);
 	schedule_work(&pool->release_work);
 
-	spin_unlock(&zswap_pools_lock);
+	spin_unlock_irqrestore(&zswap_pools_lock, flags);
 }
 
 static int __must_check zswap_pool_get(struct zswap_pool *pool)
@@ -598,7 +599,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 		return -EINVAL;
 	}
 
-	spin_lock(&zswap_pools_lock);
+	spin_lock_irq(&zswap_pools_lock);
 
 	pool = zswap_pool_find_get(type, compressor);
 	if (pool) {
@@ -607,7 +608,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 		list_del_rcu(&pool->list);
 	}
 
-	spin_unlock(&zswap_pools_lock);
+	spin_unlock_irq(&zswap_pools_lock);
 
 	if (!pool)
 		pool = zswap_pool_create(type, compressor);
@@ -628,7 +629,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 	else
 		ret = -EINVAL;
 
-	spin_lock(&zswap_pools_lock);
+	spin_lock_irq(&zswap_pools_lock);
 
 	if (!ret) {
 		put_pool = zswap_pool_current();
@@ -643,7 +644,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 		put_pool = pool;
 	}
 
-	spin_unlock(&zswap_pools_lock);
+	spin_unlock_irq(&zswap_pools_lock);
 
 	if (!zswap_has_pool && !pool) {
 		/* if initial pool creation failed, and this pool creation also
--
2.40.1
On Wed, Feb 28, 2024 at 03:18:32PM +0000, Chengming Zhou wrote:
> Now the release of the zswap pool is controlled by percpu_ref; its
> release callback (__zswap_pool_empty()) will be called when the
> percpu_ref hits 0. But this release callback may be invoked from RCU
> callback context by percpu_ref_kill(), which may be interrupt context.
>
> So we need to use spin_lock_irqsave() and spin_unlock_irqrestore() in
> the release callback __zswap_pool_empty(). In the other, task-context
> call sites, spin_lock_irq() and spin_unlock_irq() are enough to avoid
> the potential deadlock.

RCU callback context is BH, not IRQ, so it's enough to use
spin_lock_bh(), no?
On 2024/2/28 23:24, Matthew Wilcox wrote:
> On Wed, Feb 28, 2024 at 03:18:32PM +0000, Chengming Zhou wrote:
>> [...]
>>
>> So we need to use spin_lock_irqsave() and spin_unlock_irqrestore() in
>> the release callback __zswap_pool_empty(). In the other, task-context
>> call sites, spin_lock_irq() and spin_unlock_irq() are enough to avoid
>> the potential deadlock.
>
> RCU callback context is BH, not IRQ, so it's enough to use
> spin_lock_bh(), no?

You're right, it's softirq context, so spin_lock_bh() is enough. Thanks!