From nobody Sun Feb 8 07:21:32 2026 Received: from out-187.mta0.migadu.com (out-187.mta0.migadu.com [91.218.175.187]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 0630236137 for ; Mon, 19 Feb 2024 13:34:25 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=91.218.175.187 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1708349668; cv=none; b=CZQ/boAiDV0a/4lcatRjjUQ2THkQYdJy+zPjx/7RgguDHZ+KXCbWFQj5ONBdTuw6LZIe2WoIop1igU45ffVqJkOY+lEVWMAbiaTnKS2oepSSrWhNgR+M4alFM6u3dxhevDsyEETviK7T4GHCWnS4kveD3rgEwefj+g91gnFpa8E= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1708349668; c=relaxed/simple; bh=UHSVodPnpBiivncRsTV1Arftl0WePz5UFf9WpjK+7+4=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=d1rKpxE8LaOlSizWoe0QwLSgG3Iro+tYiebXrw6IQWrckhx/hSSrvIE57mH3ag1ZDWoOzCNFEAStz81X/KaflRvXpWe70Qaxb9ysXAILFskau76EJEm4sWTTXD/GB+ny/kzWScDpC3pfHe0d9kK0USH2rGOEXyQuHS3PkM7lVlc= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=fail (p=quarantine dis=none) header.from=bytedance.com; spf=pass smtp.mailfrom=linux.dev; arc=none smtp.client-ip=91.218.175.187 Authentication-Results: smtp.subspace.kernel.org; dmarc=fail (p=quarantine dis=none) header.from=bytedance.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.dev X-Report-Abuse: Please report any abuse attempt to abuse@migadu.com and include these headers. 
From: Chengming Zhou Date: Mon, 19 Feb 2024 13:33:51 +0000 Subject: [PATCH 1/3] mm/zsmalloc: fix migrate_write_lock() when !CONFIG_COMPACTION Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20240219-b4-szmalloc-migrate-v1-1-34cd49c6545b@bytedance.com> References: <20240219-b4-szmalloc-migrate-v1-0-34cd49c6545b@bytedance.com> In-Reply-To: <20240219-b4-szmalloc-migrate-v1-0-34cd49c6545b@bytedance.com> To: nphamcs@gmail.com, yosryahmed@google.com, Sergey Senozhatsky , Minchan Kim , Andrew Morton , hannes@cmpxchg.org Cc: linux-mm@kvack.org, Chengming Zhou , linux-kernel@vger.kernel.org X-Migadu-Flow: FLOW_OUT migrate_write_lock() is an empty function when !CONFIG_COMPACTION, in which case zs_compact() can be triggered from shrinker reclaim context. (Maybe it's better to rename it to zs_shrink()?) And zspage map object users rely on this migrate_read_lock() so the object won't be migrated elsewhere. Fix it by always implementing the migrate_write_lock() related functions. 
Signed-off-by: Chengming Zhou --- mm/zsmalloc.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index c937635e0ad1..64d5533fa5d8 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -278,18 +278,15 @@ static bool ZsHugePage(struct zspage *zspage) static void migrate_lock_init(struct zspage *zspage); static void migrate_read_lock(struct zspage *zspage); static void migrate_read_unlock(struct zspage *zspage); - -#ifdef CONFIG_COMPACTION static void migrate_write_lock(struct zspage *zspage); static void migrate_write_lock_nested(struct zspage *zspage); static void migrate_write_unlock(struct zspage *zspage); + +#ifdef CONFIG_COMPACTION static void kick_deferred_free(struct zs_pool *pool); static void init_deferred_free(struct zs_pool *pool); static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage); #else -static void migrate_write_lock(struct zspage *zspage) {} -static void migrate_write_lock_nested(struct zspage *zspage) {} -static void migrate_write_unlock(struct zspage *zspage) {} static void kick_deferred_free(struct zs_pool *pool) {} static void init_deferred_free(struct zs_pool *pool) {} static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) = {} @@ -1725,7 +1722,6 @@ static void migrate_read_unlock(struct zspage *zspage= ) __releases(&zspage->lock) read_unlock(&zspage->lock); } =20 -#ifdef CONFIG_COMPACTION static void migrate_write_lock(struct zspage *zspage) { write_lock(&zspage->lock); @@ -1741,6 +1737,7 @@ static void migrate_write_unlock(struct zspage *zspag= e) write_unlock(&zspage->lock); } =20 +#ifdef CONFIG_COMPACTION /* Number of isolated subpage for *page migration* in this zspage */ static void inc_zspage_isolation(struct zspage *zspage) { --=20 b4 0.10.1 From nobody Sun Feb 8 07:21:32 2026 Received: from out-183.mta0.migadu.com (out-183.mta0.migadu.com [91.218.175.183]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client 
certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id BE4E5364C4 for ; Mon, 19 Feb 2024 13:34:27 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=91.218.175.183 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1708349669; cv=none; b=ZPjvRwHdfdIJtMmMPpTrVGKviN7hLlAov9lPQ/xVM3aVVzTRwi+TCIjodqCpo3UpVQ5yPozEpon/LJ3UXB5v/cEz2vdr7fDgEA6mScMb1Zz5lO8M9WPtjxsKbdTZFkWc53jTHvGpyydJPoBWw3JAANxJPD/LrZaOrl4xNuCdfvo= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1708349669; c=relaxed/simple; bh=OUia51YESybtl45fp4ZaNAosIUgwtCmJ4/X7GVIhXhM=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=YvbcsMOtma6mi/a59jQbg2TXpKXJ35OIWsP7lj5udb1YoTfVvOUQdTQ/h2P3ZJcKV6c8dP9kW3iXeBiTZ1EDwDayX4yhcPlfwl2BOqMx4Jgqk/ZxWY4+Gl9Vb4d3Ps1uKPg6y7YOKExU18wrKvYNq/H/BB7fjZwSbqA0mYnMvAY= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=fail (p=quarantine dis=none) header.from=bytedance.com; spf=pass smtp.mailfrom=linux.dev; arc=none smtp.client-ip=91.218.175.183 Authentication-Results: smtp.subspace.kernel.org; dmarc=fail (p=quarantine dis=none) header.from=bytedance.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.dev X-Report-Abuse: Please report any abuse attempt to abuse@migadu.com and include these headers. 
From: Chengming Zhou Date: Mon, 19 Feb 2024 13:33:52 +0000 Subject: [PATCH 2/3] mm/zsmalloc: remove migrate_write_lock_nested() Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20240219-b4-szmalloc-migrate-v1-2-34cd49c6545b@bytedance.com> References: <20240219-b4-szmalloc-migrate-v1-0-34cd49c6545b@bytedance.com> In-Reply-To: <20240219-b4-szmalloc-migrate-v1-0-34cd49c6545b@bytedance.com> To: nphamcs@gmail.com, yosryahmed@google.com, Sergey Senozhatsky , Minchan Kim , Andrew Morton , hannes@cmpxchg.org Cc: linux-mm@kvack.org, Chengming Zhou , linux-kernel@vger.kernel.org X-Migadu-Flow: FLOW_OUT The migrate write lock is to protect the race between zspage migration and zspage objects' map users. We only need to lock out the map users of src zspage, not dst zspage, which can safely be mapped by users concurrently, since we only need to do obj_malloc() from dst zspage. So we can remove the migrate_write_lock_nested() use case. While we are here, clean up __zs_compact() by moving putback_zspage() outside of migrate_write_unlock(); since we hold the pool lock, no malloc or free users can come in. 
Signed-off-by: Chengming Zhou --- mm/zsmalloc.c | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 64d5533fa5d8..f2ae7d4c6f21 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -279,7 +279,6 @@ static void migrate_lock_init(struct zspage *zspage); static void migrate_read_lock(struct zspage *zspage); static void migrate_read_unlock(struct zspage *zspage); static void migrate_write_lock(struct zspage *zspage); -static void migrate_write_lock_nested(struct zspage *zspage); static void migrate_write_unlock(struct zspage *zspage); =20 #ifdef CONFIG_COMPACTION @@ -1727,11 +1726,6 @@ static void migrate_write_lock(struct zspage *zspage) write_lock(&zspage->lock); } =20 -static void migrate_write_lock_nested(struct zspage *zspage) -{ - write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING); -} - static void migrate_write_unlock(struct zspage *zspage) { write_unlock(&zspage->lock); @@ -2003,19 +1997,17 @@ static unsigned long __zs_compact(struct zs_pool *p= ool, dst_zspage =3D isolate_dst_zspage(class); if (!dst_zspage) break; - migrate_write_lock(dst_zspage); } =20 src_zspage =3D isolate_src_zspage(class); if (!src_zspage) break; =20 - migrate_write_lock_nested(src_zspage); - + migrate_write_lock(src_zspage); migrate_zspage(pool, src_zspage, dst_zspage); - fg =3D putback_zspage(class, src_zspage); migrate_write_unlock(src_zspage); =20 + fg =3D putback_zspage(class, src_zspage); if (fg =3D=3D ZS_INUSE_RATIO_0) { free_zspage(pool, class, src_zspage); pages_freed +=3D class->pages_per_zspage; @@ -2025,7 +2017,6 @@ static unsigned long __zs_compact(struct zs_pool *poo= l, if (get_fullness_group(class, dst_zspage) =3D=3D ZS_INUSE_RATIO_100 || spin_is_contended(&pool->lock)) { putback_zspage(class, dst_zspage); - migrate_write_unlock(dst_zspage); dst_zspage =3D NULL; =20 spin_unlock(&pool->lock); @@ -2034,15 +2025,12 @@ static unsigned long __zs_compact(struct zs_pool *p= ool, } } =20 - if (src_zspage) { 
+ if (src_zspage) putback_zspage(class, src_zspage); - migrate_write_unlock(src_zspage); - } =20 - if (dst_zspage) { + if (dst_zspage) putback_zspage(class, dst_zspage); - migrate_write_unlock(dst_zspage); - } + spin_unlock(&pool->lock); =20 return pages_freed; --=20 b4 0.10.1 From nobody Sun Feb 8 07:21:32 2026 Received: from out-184.mta0.migadu.com (out-184.mta0.migadu.com [91.218.175.184]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id ECB2037153 for ; Mon, 19 Feb 2024 13:34:29 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=91.218.175.184 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1708349671; cv=none; b=CbUfHhnnbF5+sVYDisWkM2ptAUBXqQZDeWTSqIER0aF2WSnJGvakR8JvBaHL9TyABiP0VwdFzT6+bohaOVfn8C64/2d2Vd3YNMMgl8p9bsdf1FEkjVwtyRGJ6g4AdMVXy1NUzTOcnVuIQ/tA1glfTYmU4OEvU06RAE0sm2610/c= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1708349671; c=relaxed/simple; bh=4NV6DsrM5S+/h7ppU/k8FOJs/bnqzH3nRRreQTC9eDs=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=jADPpBR9m7IInlRia2KYhh2g+kbq8EziZ24rbubQluLhsQiVB9JCLuN/qYHiL/+Bh+rR5J1CGbvvnSgIMbLWIqvNIdwHqj5j77nCg+2VA1PYz+88ejHyR4+47nkP1cScOx+heuB7UEE3U7fcso3Tf+hiHD39UZQv7RdldJquCKo= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=fail (p=quarantine dis=none) header.from=bytedance.com; spf=pass smtp.mailfrom=linux.dev; arc=none smtp.client-ip=91.218.175.184 Authentication-Results: smtp.subspace.kernel.org; dmarc=fail (p=quarantine dis=none) header.from=bytedance.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.dev X-Report-Abuse: Please report any abuse attempt to abuse@migadu.com and include these headers. 
From: Chengming Zhou Date: Mon, 19 Feb 2024 13:33:53 +0000 Subject: [PATCH 3/3] mm/zsmalloc: remove unused zspage->isolated Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20240219-b4-szmalloc-migrate-v1-3-34cd49c6545b@bytedance.com> References: <20240219-b4-szmalloc-migrate-v1-0-34cd49c6545b@bytedance.com> In-Reply-To: <20240219-b4-szmalloc-migrate-v1-0-34cd49c6545b@bytedance.com> To: nphamcs@gmail.com, yosryahmed@google.com, Sergey Senozhatsky , Minchan Kim , Andrew Morton , hannes@cmpxchg.org Cc: linux-mm@kvack.org, Chengming Zhou , linux-kernel@vger.kernel.org X-Migadu-Flow: FLOW_OUT The zspage->isolated is not used anywhere, so we don't need to maintain it; updating it requires holding the heavy pool lock. So just remove it. Signed-off-by: Chengming Zhou --- mm/zsmalloc.c | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index f2ae7d4c6f21..a48f4651d143 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -116,7 +116,6 @@ #define HUGE_BITS 1 #define FULLNESS_BITS 4 #define CLASS_BITS 8 -#define ISOLATED_BITS 5 #define MAGIC_VAL_BITS 8 =20 #define MAX(a, b) ((a) >=3D (b) ? 
(a) : (b)) @@ -246,7 +245,6 @@ struct zspage { unsigned int huge:HUGE_BITS; unsigned int fullness:FULLNESS_BITS; unsigned int class:CLASS_BITS + 1; - unsigned int isolated:ISOLATED_BITS; unsigned int magic:MAGIC_VAL_BITS; }; unsigned int inuse; @@ -1732,17 +1730,6 @@ static void migrate_write_unlock(struct zspage *zspa= ge) } =20 #ifdef CONFIG_COMPACTION -/* Number of isolated subpage for *page migration* in this zspage */ -static void inc_zspage_isolation(struct zspage *zspage) -{ - zspage->isolated++; -} - -static void dec_zspage_isolation(struct zspage *zspage) -{ - VM_BUG_ON(zspage->isolated =3D=3D 0); - zspage->isolated--; -} =20 static const struct movable_operations zsmalloc_mops; =20 @@ -1771,21 +1758,12 @@ static void replace_sub_page(struct size_class *cla= ss, struct zspage *zspage, =20 static bool zs_page_isolate(struct page *page, isolate_mode_t mode) { - struct zs_pool *pool; - struct zspage *zspage; - /* * Page is locked so zspage couldn't be destroyed. For detail, look at * lock_zspage in free_zspage. */ VM_BUG_ON_PAGE(PageIsolated(page), page); =20 - zspage =3D get_zspage(page); - pool =3D zspage->pool; - spin_lock(&pool->lock); - inc_zspage_isolation(zspage); - spin_unlock(&pool->lock); - return true; } =20 @@ -1850,7 +1828,6 @@ static int zs_page_migrate(struct page *newpage, stru= ct page *page, kunmap_atomic(s_addr); =20 replace_sub_page(class, zspage, newpage, page); - dec_zspage_isolation(zspage); /* * Since we complete the data copy and set up new zspage structure, * it's okay to release the pool's lock. 
@@ -1872,16 +1849,7 @@ static int zs_page_migrate(struct page *newpage, str= uct page *page, =20 static void zs_page_putback(struct page *page) { - struct zs_pool *pool; - struct zspage *zspage; - VM_BUG_ON_PAGE(!PageIsolated(page), page); - - zspage =3D get_zspage(page); - pool =3D zspage->pool; - spin_lock(&pool->lock); - dec_zspage_isolation(zspage); - spin_unlock(&pool->lock); } =20 static const struct movable_operations zsmalloc_mops =3D { --=20 b4 0.10.1