From: Baokun Li
Subject: [PATCH v2 16/16] ext4: ensure global ordered traversal across all free groups xarrays
Date: Mon, 23 Jun 2025 15:33:04 +0800
Message-ID: <20250623073304.3275702-17-libaokun1@huawei.com>
In-Reply-To: <20250623073304.3275702-1-libaokun1@huawei.com>
References: <20250623073304.3275702-1-libaokun1@huawei.com>

Although we now perform an ordered traversal within each xarray
(scanning the groups to the right of the start group first, then
wrapping around to those on its left), that ordering is local to a
single xarray. Because there are multiple such xarrays, we cannot
guarantee a linear-like traversal in which every group to the right
of the start is visited before any group to the left.

Therefore, change the traversal to first iterate through the
right-hand groups of all xarrays, and only then the left-hand groups
of all xarrays. This achieves a linear-like scan order and mitigates
contention between the block allocation and block freeing paths.
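To make the new scan order concrete, here is a minimal userspace sketch
(illustrative only, not part of the patch): plain C arrays stand in for
the per-order xarrays, and the hypothetical scan_one_list() helper plays
the role of ext4_mb_scan_groups_xa_range(). It follows the same
wrap_around pattern as the patched callers: scan [start, ngroups) in
every list, then wrap once and scan [0, start).

#include <stdio.h>

#define NR_ORDERS 3	/* stands in for the per-order xarrays */
#define NGROUPS   8	/* total number of block groups in this demo */
#define LIST_LEN  3	/* groups filed under each order in this demo */

/* Visit every group of one list that falls inside [start, end). */
static void scan_one_list(const int *list, int start, int end)
{
	for (int i = 0; i < LIST_LEN; i++)
		if (list[i] >= start && list[i] < end)
			printf("  visit group %d\n", list[i]);
}

int main(void)
{
	/* each row lists the groups currently filed under that order */
	const int lists[NR_ORDERS][LIST_LEN] = {
		{ 1, 4, 7 },
		{ 0, 3, 6 },
		{ 2, 5, 7 },
	};
	int start = 5;		/* the goal group where scanning begins */
	int end = NGROUPS;

	/*
	 * Pass 1 scans [start, NGROUPS) in every list; the wrap-around
	 * then scans [0, start) in every list, so all right-hand groups
	 * are exhausted across all lists before any left-hand group.
	 */
wrap_around:
	for (int order = 0; order < NR_ORDERS; order++)
		scan_one_list(lists[order], start, end);
	if (start) {
		end = start;
		start = 0;
		goto wrap_around;
	}
	return 0;
}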
Performance test data follows:

CPU: Kunpeng 920   |           P80           |           P1            |
Memory: 512GB      |-------------------------|-------------------------|
Disk: 960GB SSD    |  base |     patched     |  base |     patched     |
-------------------|-------|-----------------|-------|-----------------|
mb_optimize_scan=0 | 20976 | 20619 (-1.7%)   | 319396| 299238 (-6.3%)  |
mb_optimize_scan=1 | 14580 | 20119 (+37.9%)  | 319237| 315268 (-1.2%)  |

CPU: AMD 9654 * 2  |           P96           |           P1            |
Memory: 1536GB     |-------------------------|-------------------------|
Disk: 960GB SSD    |  base |     patched     |  base |     patched     |
-------------------|-------|-----------------|-------|-----------------|
mb_optimize_scan=0 | 51713 | 51983 (+0.5%)   | 206655| 207033 (0.18%)  |
mb_optimize_scan=1 | 35527 | 48486 (+36.4%)  | 212574| 202415 (+4.7%)  |

Signed-off-by: Baokun Li
---
 fs/ext4/mballoc.c | 69 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 47 insertions(+), 22 deletions(-)

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index d8372a649a0c..d26a0e8e3f7e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -877,22 +877,20 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
 			grp->bb_group, new, ret);
 }
 
-static int ext4_mb_scan_groups_xarray(struct ext4_allocation_context *ac,
-				      struct xarray *xa, ext4_group_t start)
+static int ext4_mb_scan_groups_xa_range(struct ext4_allocation_context *ac,
+					struct xarray *xa,
+					ext4_group_t start, ext4_group_t end)
 {
 	struct super_block *sb = ac->ac_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	enum criteria cr = ac->ac_criteria;
 	ext4_group_t ngroups = ext4_get_groups_count(sb);
 	unsigned long group = start;
-	ext4_group_t end;
 	struct ext4_group_info *grp;
 
-	if (WARN_ON_ONCE(start >= ngroups))
+	if (WARN_ON_ONCE(end >= ngroups || start > end))
 		return 0;
-	end = ngroups - 1;
 
-wrap_around:
 	xa_for_each_range(xa, group, grp, start, end) {
 		int err;
 
@@ -906,28 +904,23 @@ static int ext4_mb_scan_groups_xarray(struct ext4_allocation_context *ac,
 		cond_resched();
 	}
 
-	if (start) {
-		end = start - 1;
-		start = 0;
-		goto wrap_around;
-	}
-
 	return 0;
 }
 
 /*
  * Find a suitable group of given order from the largest free orders xarray.
  */
-static int
-ext4_mb_scan_groups_largest_free_order(struct ext4_allocation_context *ac,
-				       int order, ext4_group_t start)
+static inline int
+ext4_mb_scan_groups_largest_free_order_range(struct ext4_allocation_context *ac,
+					     int order, ext4_group_t start,
+					     ext4_group_t end)
 {
 	struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_largest_free_orders[order];
 
 	if (xa_empty(xa))
 		return 0;
 
-	return ext4_mb_scan_groups_xarray(ac, xa, start);
+	return ext4_mb_scan_groups_xa_range(ac, xa, start, end - 1);
 }
 
 /*
@@ -940,13 +933,23 @@ static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac,
 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 	int i;
 	int ret = 0;
+	ext4_group_t start, end;
 
 	ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
+	start = group;
+	end = ext4_get_groups_count(ac->ac_sb);
+wrap_around:
 	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
-		ret = ext4_mb_scan_groups_largest_free_order(ac, i, group);
+		ret = ext4_mb_scan_groups_largest_free_order_range(ac, i,
+								   start, end);
 		if (ret || ac->ac_status != AC_STATUS_CONTINUE)
 			goto out;
 	}
+	if (start) {
+		end = start;
+		start = 0;
+		goto wrap_around;
+	}
 
 	if (sbi->s_mb_stats)
 		atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
@@ -961,15 +964,17 @@ static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac,
 /*
  * Find a suitable group of given order from the average fragments xarray.
  */
-static int ext4_mb_scan_groups_avg_frag_order(struct ext4_allocation_context *ac,
-					      int order, ext4_group_t start)
+static int
+ext4_mb_scan_groups_avg_frag_order_range(struct ext4_allocation_context *ac,
+					 int order, ext4_group_t start,
+					 ext4_group_t end)
 {
 	struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_avg_fragment_size[order];
 
 	if (xa_empty(xa))
 		return 0;
 
-	return ext4_mb_scan_groups_xarray(ac, xa, start);
+	return ext4_mb_scan_groups_xa_range(ac, xa, start, end - 1);
 }
 
 /*
@@ -981,14 +986,24 @@ static int ext4_mb_scan_groups_goal_fast(struct ext4_allocation_context *ac,
 {
 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 	int i, ret = 0;
+	ext4_group_t start, end;
 
 	ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
+	start = group;
+	end = ext4_get_groups_count(ac->ac_sb);
+wrap_around:
 	i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
 	for (; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
-		ret = ext4_mb_scan_groups_avg_frag_order(ac, i, group);
+		ret = ext4_mb_scan_groups_avg_frag_order_range(ac, i,
+							       start, end);
 		if (ret || ac->ac_status != AC_STATUS_CONTINUE)
 			goto out;
 	}
+	if (start) {
+		end = start;
+		start = 0;
+		goto wrap_around;
+	}
 
 	if (sbi->s_mb_stats)
 		atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
@@ -1025,6 +1040,7 @@ static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 	int i, order, min_order;
 	unsigned long num_stripe_clusters = 0;
+	ext4_group_t start, end;
 
 	/*
 	 * mb_avg_fragment_size_order() returns order in a way that makes
@@ -1057,6 +1073,9 @@ static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
 		min_order = fls(ac->ac_o_ex.fe_len);
 
 	ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
+	start = group;
+	end = ext4_get_groups_count(ac->ac_sb);
+wrap_around:
 	for (i = order; i >= min_order; i--) {
 		int frag_order;
 		/*
@@ -1079,10 +1098,16 @@ static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
 		frag_order = mb_avg_fragment_size_order(ac->ac_sb,
 							ac->ac_g_ex.fe_len);
 
-		ret = ext4_mb_scan_groups_avg_frag_order(ac, frag_order, group);
+		ret = ext4_mb_scan_groups_avg_frag_order_range(ac, frag_order,
+							       start, end);
 		if (ret || ac->ac_status != AC_STATUS_CONTINUE)
 			goto out;
 	}
+	if (start) {
+		end = start;
+		start = 0;
+		goto wrap_around;
+	}
 
 	/* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
 	ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
-- 
2.46.1