From: Gao Xiang <hsiangkao@linux.alibaba.com>
To: linux-erofs@lists.ozlabs.org
Cc: LKML <linux-kernel@vger.kernel.org>, dhavale@google.com,
	Gao Xiang <hsiangkao@linux.alibaba.com>
Subject: [PATCH 2/5] erofs: record `pclustersize` in bytes instead of pages
Date: Wed, 6 Dec 2023 17:10:54 +0800
Message-Id: <20231206091057.87027-3-hsiangkao@linux.alibaba.com>
X-Mailer: git-send-email 2.39.3
In-Reply-To: <20231206091057.87027-1-hsiangkao@linux.alibaba.com>
References: <20231206091057.87027-1-hsiangkao@linux.alibaba.com>

Currently, compressed sizes are recorded in pages using `pclusterpages`;
however, for tailpacking pclusters, `tailpacking_size` is used instead.
This approach doesn't work when dealing with sub-page blocks.  To address
this, let's switch them to the unified `pclustersize` in bytes.
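As a quick sanity check of the unit change (a minimal userspace sketch,
not kernel code: the DEMO_PAGE_* macros below are local stand-ins for the
kernel's PAGE_* macros, and 4KiB pages are assumed), a byte-granular
`pclustersize` recovers the old page counts in both cases -- a sub-page
tailpacking pcluster still maps to a single page, and a regular pcluster
of N pages' worth of bytes maps back to N:

#include <assert.h>
#include <stdio.h>

/* illustration-only stand-ins for the kernel macros; 4KiB pages assumed */
#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_ALIGN(x) \
	(((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

/* mirrors the reworked z_erofs_pclusterpages() conversion in this patch */
static unsigned int demo_pclusterpages(unsigned int pclustersize)
{
	return DEMO_PAGE_ALIGN(pclustersize) >> DEMO_PAGE_SHIFT;
}

int main(void)
{
	/* tailpacking pcluster: any sub-page size still occupies one page,
	 * matching the old hardcoded `return 1` for inline pclusters */
	assert(demo_pclusterpages(697) == 1);

	/* regular pcluster: four pages' worth of bytes maps back to four
	 * pages, matching the old `pclusterpages` field */
	assert(demo_pclusterpages(4 * DEMO_PAGE_SIZE) == 4);

	puts("pclustersize in bytes reproduces both old page counts");
	return 0;
}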
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
---
 fs/erofs/zdata.c | 64 ++++++++++++++++++++----------------------
 1 file changed, 26 insertions(+), 38 deletions(-)

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 421c0a88a0ca..d02989466711 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -56,6 +56,9 @@ struct z_erofs_pcluster {
 	/* L: total number of bvecs */
 	unsigned int vcnt;
 
+	/* I: pcluster size (compressed size) in bytes */
+	unsigned int pclustersize;
+
 	/* I: page offset of start position of decompression */
 	unsigned short pageofs_out;
 
@@ -70,14 +73,6 @@ struct z_erofs_pcluster {
 		struct rcu_head rcu;
 	};
 
-	union {
-		/* I: physical cluster size in pages */
-		unsigned short pclusterpages;
-
-		/* I: tailpacking inline compressed size */
-		unsigned short tailpacking_size;
-	};
-
 	/* I: compression algorithm format */
 	unsigned char algorithmformat;
 
@@ -115,9 +110,7 @@ static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
 
 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
 {
-	if (z_erofs_is_inline_pcluster(pcl))
-		return 1;
-	return pcl->pclusterpages;
+	return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
 }
 
 /*
@@ -298,12 +291,12 @@ static int z_erofs_create_pcluster_pool(void)
 	return 0;
 }
 
-static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
+static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
 {
-	int i;
+	unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct z_erofs_pcluster_slab *pcs = pcluster_pool;
 
-	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
-		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
+	for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
 		struct z_erofs_pcluster *pcl;
 
 		if (nrpages > pcs->maxpages)
@@ -312,7 +305,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
 		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
 		if (!pcl)
 			return ERR_PTR(-ENOMEM);
-		pcl->pclusterpages = nrpages;
+		pcl->pclustersize = size;
 		return pcl;
 	}
 	return ERR_PTR(-EINVAL);
@@ -559,6 +552,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 {
 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
 	struct z_erofs_pcluster *pcl = fe->pcl;
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool shouldalloc = z_erofs_should_alloc_cache(fe);
 	bool standalone = true;
 	/*
@@ -572,10 +566,9 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
 		return;
 
-	for (i = 0; i < pcl->pclusterpages; ++i) {
-		struct page *page;
+	for (i = 0; i < pclusterpages; ++i) {
+		struct page *page, *newpage;
 		void *t;	/* mark pages just found for debugging */
-		struct page *newpage = NULL;
 
 		/* the compressed page was loaded before */
 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
@@ -585,6 +578,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 
 		if (page) {
 			t = (void *)((unsigned long)page | 1);
+			newpage = NULL;
 		} else {
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
@@ -592,9 +586,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 				continue;
 
 			/*
-			 * try to use cached I/O if page allocation
-			 * succeeds or fallback to in-place I/O instead
-			 * to avoid any direct reclaim.
+			 * Try cached I/O if allocation succeeds or fallback to
+			 * in-place I/O instead to avoid any direct reclaim.
 			 */
 			newpage = erofs_allocpage(&fe->pagepool, gfp);
 			if (!newpage)
@@ -626,6 +619,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 {
 	struct z_erofs_pcluster *const pcl =
 		container_of(grp, struct z_erofs_pcluster, obj);
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	int i;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
@@ -633,7 +627,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 	 * refcount of workgroup is now freezed as 0,
 	 * therefore no need to worry about available decompression users.
 	 */
-	for (i = 0; i < pcl->pclusterpages; ++i) {
+	for (i = 0; i < pclusterpages; ++i) {
 		struct page *page = pcl->compressed_bvecs[i].page;
 
 		if (!page)
@@ -657,6 +651,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 {
 	struct z_erofs_pcluster *pcl = folio_get_private(folio);
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool ret;
 	int i;
 
@@ -669,7 +664,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 		goto out;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
-	for (i = 0; i < pcl->pclusterpages; ++i) {
+	for (i = 0; i < pclusterpages; ++i) {
 		if (pcl->compressed_bvecs[i].page == &folio->page) {
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
 			ret = true;
@@ -778,20 +773,20 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 {
 	struct erofs_map_blocks *map = &fe->map;
+	struct super_block *sb = fe->inode->i_sb;
 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
 	struct z_erofs_pcluster *pcl;
 	struct erofs_workgroup *grp;
 	int err;
 
 	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
-	    (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
+	    (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
 		DBG_BUGON(1);
 		return -EFSCORRUPTED;
 	}
 
 	/* no available pcluster, let's allocate one */
-	pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
-			map->m_plen >> PAGE_SHIFT);
+	pcl = z_erofs_alloc_pcluster(map->m_plen);
 	if (IS_ERR(pcl))
 		return PTR_ERR(pcl);
 
@@ -816,9 +811,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	if (ztailpacking) {
 		pcl->obj.index = 0;	/* which indicates ztailpacking */
 		pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
-		pcl->tailpacking_size = map->m_plen;
 	} else {
-		pcl->obj.index = map->m_pa >> PAGE_SHIFT;
+		pcl->obj.index = erofs_blknr(sb, map->m_pa);
 
 		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
 		if (IS_ERR(grp)) {
@@ -1244,8 +1238,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	const struct z_erofs_decompressor *decompressor =
 				&erofs_decompressors[pcl->algorithmformat];
-	unsigned int i, inputsize;
-	int err2;
+	int i, err2;
 	struct page *page;
 	bool overlapped;
 
@@ -1282,18 +1275,13 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	if (err)
 		goto out;
 
-	if (z_erofs_is_inline_pcluster(pcl))
-		inputsize = pcl->tailpacking_size;
-	else
-		inputsize = pclusterpages * PAGE_SIZE;
-
 	err = decompressor->decompress(&(struct z_erofs_decompress_req) {
 					.sb = be->sb,
 					.in = be->compressed_pages,
 					.out = be->decompressed_pages,
 					.pageofs_in = pcl->pageofs_in,
 					.pageofs_out = pcl->pageofs_out,
-					.inputsize = inputsize,
+					.inputsize = pcl->pclustersize,
 					.outputsize = pcl->length,
 					.alg = pcl->algorithmformat,
 					.inplace_io = overlapped,
@@ -1668,7 +1656,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 		(void)erofs_map_dev(sb, &mdev);
 
 		cur = mdev.m_pa;
-		end = cur + pcl->pclusterpages << PAGE_SHIFT;
+		end = cur + pcl->pclustersize;
 		do {
 			z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
 			if (!bvec.bv_page)
-- 
2.39.3
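A side note on the reworked validity check in z_erofs_register_pcluster():
once blocks can be smaller than a page, a non-inline pcluster may
legitimately start inside the first page, so `!(map->m_pa >> PAGE_SHIFT)`
would misfire while `!erofs_blknr(sb, map->m_pa)` still rejects only block
0.  A standalone sketch of that distinction (demo helpers only, assuming
4KiB pages and hypothetical 512-byte blocks; the real erofs_blknr() takes
the superblock and shifts by its block size bits):

#include <stdio.h>

#define DEMO_PAGE_SHIFT	12	/* assumed 4KiB pages, illustration only */

/* demo stand-in for erofs_blknr(sb, addr): block number of a byte address */
static unsigned long long demo_blknr(unsigned long long addr,
				     unsigned int blkszbits)
{
	return addr >> blkszbits;
}

int main(void)
{
	unsigned long long m_pa = 1536;	/* pcluster start in bytes */
	unsigned int blkszbits = 9;	/* hypothetical 512-byte blocks */

	/* page-granular test: 1536 >> 12 == 0, wrongly flagged as block 0 */
	printf("page index:   %llu\n", m_pa >> DEMO_PAGE_SHIFT);

	/* block-granular test: 1536 >> 9 == 3, a valid non-zero start */
	printf("block number: %llu\n", demo_blknr(m_pa, blkszbits));
	return 0;
}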