From nobody Mon Feb 9 17:14:33 2026
From: Chengming Zhou
Date: Wed, 14 Feb 2024 08:54:37 +0000
Subject: [PATCH v2 1/2] mm/zswap: global lru and shrinker shared by all zswap_pools
Message-Id: <20240210-zswap-global-lru-v2-1-fbee3b11a62e@bytedance.com>
References: <20240210-zswap-global-lru-v2-0-fbee3b11a62e@bytedance.com>
In-Reply-To: <20240210-zswap-global-lru-v2-0-fbee3b11a62e@bytedance.com>
To: Yosry Ahmed, Johannes Weiner, Andrew Morton, Nhat Pham
Cc: linux-mm@kvack.org, Chengming Zhou, linux-kernel@vger.kernel.org

Dynamic zswap_pool creation may leave multiple zswap_pools on the
list, of which only the first is currently used for stores. Yet each
zswap_pool has its own lru and shrinker, which is unnecessary and
causes two problems:

1. Under memory pressure, the shrinkers of all zswap_pools try to
   shrink their own lrus, with no ordering between them.

2. When the zswap limit is hit, only the last zswap_pool's shrink_work
   tries to shrink its lru, which is inefficient.

So switch to a single global lru and shrinker shared by all
zswap_pools; this is both simpler and more efficient.
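To make the new layout concrete, here is a condensed sketch of the
consolidated global state and the memcg-offline rule (this is only a
summary of the diff below with error handling omitted; the field names
match the patch):

    /* One set of reclaim state, instead of one copy per zswap_pool. */
    static struct {
            struct list_lru list_lru;       /* global LRU of stored entries */
            atomic_t nr_stored;             /* entries on the global LRU */
            struct shrinker *shrinker;      /* registered once in zswap_setup() */
            struct work_struct shrink_work; /* queued when the zswap limit hits */
            struct mem_cgroup *next_shrink; /* round-robin memcg cursor */
            spinlock_t shrink_lock;         /* protects next_shrink */
    } zswap;

    /* When a memcg goes offline, the cursor must not keep pointing at
     * it; zswap_memcg_offline_cleanup() advances it under shrink_lock: */
    spin_lock(&zswap.shrink_lock);
    if (zswap.next_shrink == memcg)
            zswap.next_shrink = mem_cgroup_iter(NULL, zswap.next_shrink, NULL);
    spin_unlock(&zswap.shrink_lock);

Since the lru, shrinker and shrink_work no longer live and die with a
pool, they can be set up once in zswap_setup() and are never torn down
on pool switching, which is what the zswap_setup() hunk below does.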
Signed-off-by: Chengming Zhou
Acked-by: Yosry Ahmed
---
 mm/zswap.c | 170 +++++++++++++++++++++++-----------------------------------
 1 file changed, 65 insertions(+), 105 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index 62fe307521c9..dbff67d7e1c7 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -176,14 +176,18 @@ struct zswap_pool {
 	struct kref kref;
 	struct list_head list;
 	struct work_struct release_work;
-	struct work_struct shrink_work;
 	struct hlist_node node;
 	char tfm_name[CRYPTO_MAX_ALG_NAME];
+};
+
+static struct {
 	struct list_lru list_lru;
-	struct mem_cgroup *next_shrink;
-	struct shrinker *shrinker;
 	atomic_t nr_stored;
-};
+	struct shrinker *shrinker;
+	struct work_struct shrink_work;
+	struct mem_cgroup *next_shrink;
+	spinlock_t shrink_lock;
+} zswap;
 
 /*
  * struct zswap_entry
@@ -301,9 +305,6 @@ static void zswap_update_total_size(void)
 * pool functions
 **********************************/
 
-static void zswap_alloc_shrinker(struct zswap_pool *pool);
-static void shrink_worker(struct work_struct *w);
-
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
 	int i;
@@ -353,30 +354,16 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 	if (ret)
 		goto error;
 
-	zswap_alloc_shrinker(pool);
-	if (!pool->shrinker)
-		goto error;
-
-	pr_debug("using %s compressor\n", pool->tfm_name);
-
 	/* being the current pool takes 1 ref; this func expects the
 	 * caller to always add the new pool as the current pool
 	 */
 	kref_init(&pool->kref);
 	INIT_LIST_HEAD(&pool->list);
-	if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
-		goto lru_fail;
-	shrinker_register(pool->shrinker);
-	INIT_WORK(&pool->shrink_work, shrink_worker);
-	atomic_set(&pool->nr_stored, 0);
 
 	zswap_pool_debug("created", pool);
 
 	return pool;
 
-lru_fail:
-	list_lru_destroy(&pool->list_lru);
-	shrinker_free(pool->shrinker);
 error:
 	if (pool->acomp_ctx)
 		free_percpu(pool->acomp_ctx);
@@ -434,15 +421,8 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
 
 	zswap_pool_debug("destroying", pool);
 
-	shrinker_free(pool->shrinker);
 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 	free_percpu(pool->acomp_ctx);
-	list_lru_destroy(&pool->list_lru);
-
-	spin_lock(&zswap_pools_lock);
-	mem_cgroup_iter_break(NULL, pool->next_shrink);
-	pool->next_shrink = NULL;
-	spin_unlock(&zswap_pools_lock);
 
 	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
 		zpool_destroy_pool(pool->zpools[i]);
@@ -529,24 +509,6 @@ static struct zswap_pool *zswap_pool_current_get(void)
 	return pool;
 }
 
-static struct zswap_pool *zswap_pool_last_get(void)
-{
-	struct zswap_pool *pool, *last = NULL;
-
-	rcu_read_lock();
-
-	list_for_each_entry_rcu(pool, &zswap_pools, list)
-		last = pool;
-	WARN_ONCE(!last && zswap_has_pool,
-		  "%s: no page storage pool!\n", __func__);
-	if (!zswap_pool_get(last))
-		last = NULL;
-
-	rcu_read_unlock();
-
-	return last;
-}
-
 /* type and compressor must be null-terminated */
 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 {
@@ -816,15 +778,11 @@ void zswap_folio_swapin(struct folio *folio)
 
 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
 {
-	struct zswap_pool *pool;
-
-	/* lock out zswap pools list modification */
-	spin_lock(&zswap_pools_lock);
-	list_for_each_entry(pool, &zswap_pools, list) {
-		if (pool->next_shrink == memcg)
-			pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
-	}
-	spin_unlock(&zswap_pools_lock);
+	/* lock out zswap shrinker walking memcg tree */
+	spin_lock(&zswap.shrink_lock);
+	if (zswap.next_shrink == memcg)
+		zswap.next_shrink = mem_cgroup_iter(NULL, zswap.next_shrink, NULL);
+	spin_unlock(&zswap.shrink_lock);
 }
 
 /*********************************
@@ -923,9 +881,9 @@ static void zswap_entry_free(struct zswap_entry *entry)
 	if (!entry->length)
 		atomic_dec(&zswap_same_filled_pages);
 	else {
-		zswap_lru_del(&entry->pool->list_lru, entry);
+		zswap_lru_del(&zswap.list_lru, entry);
 		zpool_free(zswap_find_zpool(entry), entry->handle);
-		atomic_dec(&entry->pool->nr_stored);
+		atomic_dec(&zswap.nr_stored);
 		zswap_pool_put(entry->pool);
 	}
 	if (entry->objcg) {
@@ -1288,7 +1246,6 @@ static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
 {
 	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
 	unsigned long shrink_ret, nr_protected, lru_size;
-	struct zswap_pool *pool = shrinker->private_data;
 	bool encountered_page_in_swapcache = false;
 
 	if (!zswap_shrinker_enabled ||
@@ -1299,7 +1256,7 @@ static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
 
 	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-	lru_size = list_lru_shrink_count(&pool->list_lru, sc);
+	lru_size = list_lru_shrink_count(&zswap.list_lru, sc);
 
 	/*
 	 * Abort if we are shrinking into the protected region.
@@ -1316,7 +1273,7 @@ static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
 		return SHRINK_STOP;
 	}
 
-	shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
+	shrink_ret = list_lru_shrink_walk(&zswap.list_lru, sc, &shrink_memcg_cb,
 		&encountered_page_in_swapcache);
 
 	if (encountered_page_in_swapcache)
@@ -1328,7 +1285,6 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 		struct shrink_control *sc)
 {
-	struct zswap_pool *pool = shrinker->private_data;
 	struct mem_cgroup *memcg = sc->memcg;
 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
 	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
@@ -1342,8 +1298,8 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 	nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
 #else
 	/* use pool stats instead of memcg stats */
-	nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
-	nr_stored = atomic_read(&pool->nr_stored);
+	nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
+	nr_stored = atomic_read(&zswap.nr_stored);
 #endif
 
 	if (!nr_stored)
@@ -1351,7 +1307,7 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 
 	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-	nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
+	nr_freeable = list_lru_shrink_count(&zswap.list_lru, sc);
 	/*
 	 * Subtract the lru size by an estimate of the number of pages
 	 * that should be protected.
@@ -1367,23 +1323,24 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 	return mult_frac(nr_freeable, nr_backing, nr_stored);
 }
 
-static void zswap_alloc_shrinker(struct zswap_pool *pool)
+static struct shrinker *zswap_alloc_shrinker(void)
 {
-	pool->shrinker =
+	struct shrinker *shrinker;
+
+	shrinker =
 		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
-	if (!pool->shrinker)
-		return;
+	if (!shrinker)
+		return NULL;
 
-	pool->shrinker->private_data = pool;
-	pool->shrinker->scan_objects = zswap_shrinker_scan;
-	pool->shrinker->count_objects = zswap_shrinker_count;
-	pool->shrinker->batch = 0;
-	pool->shrinker->seeks = DEFAULT_SEEKS;
+	shrinker->scan_objects = zswap_shrinker_scan;
+	shrinker->count_objects = zswap_shrinker_count;
+	shrinker->batch = 0;
+	shrinker->seeks = DEFAULT_SEEKS;
+	return shrinker;
 }
 
 static int shrink_memcg(struct mem_cgroup *memcg)
 {
-	struct zswap_pool *pool;
 	int nid, shrunk = 0;
 
 	if (!mem_cgroup_zswap_writeback_enabled(memcg))
@@ -1396,32 +1353,25 @@ static int shrink_memcg(struct mem_cgroup *memcg)
 	if (memcg && !mem_cgroup_online(memcg))
 		return -ENOENT;
 
-	pool = zswap_pool_current_get();
-	if (!pool)
-		return -EINVAL;
-
 	for_each_node_state(nid, N_NORMAL_MEMORY) {
 		unsigned long nr_to_walk = 1;
 
-		shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
+		shrunk += list_lru_walk_one(&zswap.list_lru, nid, memcg,
 					    &shrink_memcg_cb, NULL, &nr_to_walk);
 	}
-	zswap_pool_put(pool);
 	return shrunk ? 0 : -EAGAIN;
 }
 
 static void shrink_worker(struct work_struct *w)
 {
-	struct zswap_pool *pool = container_of(w, typeof(*pool),
-						shrink_work);
 	struct mem_cgroup *memcg;
 	int ret, failures = 0;
 
 	/* global reclaim will select cgroup in a round-robin fashion. */
 	do {
-		spin_lock(&zswap_pools_lock);
-		pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
-		memcg = pool->next_shrink;
+		spin_lock(&zswap.shrink_lock);
+		zswap.next_shrink = mem_cgroup_iter(NULL, zswap.next_shrink, NULL);
+		memcg = zswap.next_shrink;
 
 		/*
 		 * We need to retry if we have gone through a full round trip, or if we
@@ -1435,7 +1385,7 @@ static void shrink_worker(struct work_struct *w)
 		 * memcg is not killed when we are reclaiming.
 		 */
 		if (!memcg) {
-			spin_unlock(&zswap_pools_lock);
+			spin_unlock(&zswap.shrink_lock);
 			if (++failures == MAX_RECLAIM_RETRIES)
 				break;
 
@@ -1445,15 +1395,15 @@ static void shrink_worker(struct work_struct *w)
 		if (!mem_cgroup_tryget_online(memcg)) {
 			/* drop the reference from mem_cgroup_iter() */
 			mem_cgroup_iter_break(NULL, memcg);
-			pool->next_shrink = NULL;
-			spin_unlock(&zswap_pools_lock);
+			zswap.next_shrink = NULL;
+			spin_unlock(&zswap.shrink_lock);
 
 			if (++failures == MAX_RECLAIM_RETRIES)
 				break;
 
 			goto resched;
 		}
-		spin_unlock(&zswap_pools_lock);
+		spin_unlock(&zswap.shrink_lock);
 
 		ret = shrink_memcg(memcg);
 		/* drop the extra reference */
@@ -1467,7 +1417,6 @@ static void shrink_worker(struct work_struct *w)
 resched:
 		cond_resched();
 	} while (!zswap_can_accept());
-	zswap_pool_put(pool);
 }
 
 static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
@@ -1508,7 +1457,6 @@ bool zswap_store(struct folio *folio)
 	struct zswap_entry *entry, *dupentry;
 	struct obj_cgroup *objcg = NULL;
 	struct mem_cgroup *memcg = NULL;
-	struct zswap_pool *shrink_pool;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
@@ -1576,7 +1524,7 @@ bool zswap_store(struct folio *folio)
 
 	if (objcg) {
 		memcg = get_mem_cgroup_from_objcg(objcg);
-		if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
+		if (memcg_list_lru_alloc(memcg, &zswap.list_lru, GFP_KERNEL)) {
 			mem_cgroup_put(memcg);
 			goto put_pool;
 		}
@@ -1607,8 +1555,8 @@ bool zswap_store(struct folio *folio)
 	}
 	if (entry->length) {
 		INIT_LIST_HEAD(&entry->lru);
-		zswap_lru_add(&entry->pool->list_lru, entry);
-		atomic_inc(&entry->pool->nr_stored);
+		zswap_lru_add(&zswap.list_lru, entry);
+		atomic_inc(&zswap.nr_stored);
 	}
 	spin_unlock(&tree->lock);
 
@@ -1640,9 +1588,7 @@ bool zswap_store(struct folio *folio)
 	return false;
 
 shrink:
-	shrink_pool = zswap_pool_last_get();
-	if (shrink_pool && !queue_work(shrink_wq, &shrink_pool->shrink_work))
-		zswap_pool_put(shrink_pool);
+	queue_work(shrink_wq, &zswap.shrink_work);
 	goto reject;
 }
 
@@ -1804,6 +1750,22 @@ static int zswap_setup(void)
 	if (ret)
 		goto hp_fail;
 
+	shrink_wq = alloc_workqueue("zswap-shrink",
+			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
+	if (!shrink_wq)
+		goto shrink_wq_fail;
+
+	zswap.shrinker = zswap_alloc_shrinker();
+	if (!zswap.shrinker)
+		goto shrinker_fail;
+	if (list_lru_init_memcg(&zswap.list_lru, zswap.shrinker))
+		goto lru_fail;
+	shrinker_register(zswap.shrinker);
+
+	INIT_WORK(&zswap.shrink_work, shrink_worker);
+	atomic_set(&zswap.nr_stored, 0);
+	spin_lock_init(&zswap.shrink_lock);
+
 	pool = __zswap_pool_create_fallback();
 	if (pool) {
 		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
@@ -1815,19 +1777,17 @@ static int zswap_setup(void)
 		zswap_enabled = false;
 	}
 
-	shrink_wq = alloc_workqueue("zswap-shrink",
-			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
-	if (!shrink_wq)
-		goto fallback_fail;
-
 	if (zswap_debugfs_init())
 		pr_warn("debugfs initialization failed\n");
 	zswap_init_state = ZSWAP_INIT_SUCCEED;
 	return 0;
 
-fallback_fail:
-	if (pool)
-		zswap_pool_destroy(pool);
+lru_fail:
+	shrinker_free(zswap.shrinker);
+shrinker_fail:
+	destroy_workqueue(shrink_wq);
+shrink_wq_fail:
+	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
 hp_fail:
 	kmem_cache_destroy(zswap_entry_cache);
 cache_fail:
-- 
b4 0.10.1
From nobody Mon Feb 9 17:14:33 2026
From: Chengming Zhou
Date: Wed, 14 Feb 2024 08:54:38 +0000
Subject: [PATCH v2 2/2] mm/zswap: change zswap_pool kref to percpu_ref
Message-Id: <20240210-zswap-global-lru-v2-2-fbee3b11a62e@bytedance.com>
References: <20240210-zswap-global-lru-v2-0-fbee3b11a62e@bytedance.com>
In-Reply-To: <20240210-zswap-global-lru-v2-0-fbee3b11a62e@bytedance.com>
To: Yosry Ahmed, Johannes Weiner, Andrew Morton, Nhat Pham
Cc: linux-mm@kvack.org, Chengming Zhou, linux-kernel@vger.kernel.org

Every zswap entry takes a reference on its zswap_pool in zswap_store()
and drops it when the entry is freed, so the refcount sits on the
zswap_store/load hotpath. Changing it from a kref to a percpu_ref
scales much better. percpu_ref does use a bit more memory, but that
should be fine for our use case, since there is almost always only one
zswap_pool in use.

Testing: kernel build (32 threads) in tmpfs with memory.max=2GB, zswap
shrinker and writeback enabled with one 50GB swapfile, on a 128-CPU
x86-64 machine; the numbers below are the average of 5 runs.

        mm-unstable  zswap-global-lru
real    63.20        63.12
user    1061.75      1062.95
sys     268.74       264.44
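Since the mode switches are the subtle part of percpu_ref, here is the
pool lifecycle as this patch uses it, condensed from the diff below
(error paths omitted):

    /* pool created: starts with one ref, counting in fast percpu mode */
    percpu_ref_init(&pool->ref, __zswap_pool_empty,
                    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);

    /* hotpath (zswap_pool_get/put): per-CPU increments and decrements,
     * no shared-cacheline bouncing like atomic kref ops */
    if (percpu_ref_tryget(&pool->ref)) {
            /* ... store/load using this pool ... */
            percpu_ref_put(&pool->ref);
    }

    /* pool replaced via module params: kill drops the initial ref and
     * switches to atomic mode, so __zswap_pool_empty() can run once
     * the last outstanding ref is put */
    percpu_ref_kill(&put_pool->ref);

    /* old pool selected again before release: back to percpu mode,
     * legal because of PERCPU_REF_ALLOW_REINIT */
    percpu_ref_resurrect(&pool->ref);

Note that percpu_ref_tryget() keeps the kref_get_unless_zero()
semantics: it fails only once the count has actually reached zero,
which here can only happen after percpu_ref_kill().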
Signed-off-by: Chengming Zhou
---
 mm/zswap.c | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index dbff67d7e1c7..f6470d30d337 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -173,7 +173,7 @@ struct crypto_acomp_ctx {
 struct zswap_pool {
 	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
 	struct crypto_acomp_ctx __percpu *acomp_ctx;
-	struct kref kref;
+	struct percpu_ref ref;
 	struct list_head list;
 	struct work_struct release_work;
 	struct hlist_node node;
@@ -304,6 +304,7 @@ static void zswap_update_total_size(void)
 /*********************************
 * pool functions
 **********************************/
+static void __zswap_pool_empty(struct percpu_ref *ref);
 
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
@@ -357,13 +358,18 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 	/* being the current pool takes 1 ref; this func expects the
 	 * caller to always add the new pool as the current pool
 	 */
-	kref_init(&pool->kref);
+	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
+			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+	if (ret)
+		goto ref_fail;
 	INIT_LIST_HEAD(&pool->list);
 
 	zswap_pool_debug("created", pool);
 
 	return pool;
 
+ref_fail:
+	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 error:
 	if (pool->acomp_ctx)
 		free_percpu(pool->acomp_ctx);
@@ -436,8 +442,9 @@ static void __zswap_pool_release(struct work_struct *work)
 
 	synchronize_rcu();
 
-	/* nobody should have been able to get a kref... */
-	WARN_ON(kref_get_unless_zero(&pool->kref));
+	/* nobody should have been able to get a ref... */
+	WARN_ON(percpu_ref_tryget(&pool->ref));
+	percpu_ref_exit(&pool->ref);
 
 	/* pool is now off zswap_pools list and has no references. */
 	zswap_pool_destroy(pool);
@@ -445,11 +452,11 @@ static void __zswap_pool_release(struct work_struct *work)
 
 static struct zswap_pool *zswap_pool_current(void);
 
-static void __zswap_pool_empty(struct kref *kref)
+static void __zswap_pool_empty(struct percpu_ref *ref)
 {
 	struct zswap_pool *pool;
 
-	pool = container_of(kref, typeof(*pool), kref);
+	pool = container_of(ref, typeof(*pool), ref);
 
 	spin_lock(&zswap_pools_lock);
 
@@ -468,12 +475,12 @@ static int __must_check zswap_pool_get(struct zswap_pool *pool)
 	if (!pool)
 		return 0;
 
-	return kref_get_unless_zero(&pool->kref);
+	return percpu_ref_tryget(&pool->ref);
 }
 
 static void zswap_pool_put(struct zswap_pool *pool)
 {
-	kref_put(&pool->kref, __zswap_pool_empty);
+	percpu_ref_put(&pool->ref);
 }
 
 static struct zswap_pool *__zswap_pool_current(void)
@@ -603,6 +610,12 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 
 	if (!pool)
 		pool = zswap_pool_create(type, compressor);
+	else {
+		/* Resurrect percpu_ref to percpu mode. */
+		percpu_ref_resurrect(&pool->ref);
+		/* Drop the ref from zswap_pool_find_get(). */
+		zswap_pool_put(pool);
+	}
 
 	if (pool)
 		ret = param_set_charp(s, kp);
@@ -641,7 +654,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 	 * or the new pool we failed to add
 	 */
 	if (put_pool)
-		zswap_pool_put(put_pool);
+		percpu_ref_kill(&put_pool->ref);
 
 	return ret;
 }
-- 
b4 0.10.1