alloc_swap_scan_list() scans either the whole list or only the first
cluster.

This reduces the repeated pattern of isolating a cluster and then scanning
that cluster. As a result, cluster_alloc_swap_entry() is shorter and
shallower.

No functional change.

Signed-off-by: Chris Li <chrisl@kernel.org>
---
mm/swapfile.c | 86 ++++++++++++++++++++++++++++++++---------------------------
1 file changed, 47 insertions(+), 39 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4a0cf4fb348d..a7ffabbe65ef 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -820,6 +820,29 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
return found;
}

+static unsigned int alloc_swap_scan_list(struct swap_info_struct *si,
+ struct list_head *list,
+ unsigned int order,
+ unsigned char usage,
+ bool scan_all)
+{
+ unsigned int found = SWAP_ENTRY_INVALID;
+
+ do {
+ struct swap_cluster_info *ci = isolate_lock_cluster(si, list);
+ unsigned long offset;
+
+ if (!ci)
+ break;
+ offset = cluster_offset(si, ci);
+ found = alloc_swap_scan_cluster(si, ci, offset, order, usage);
+ if (found)
+ break;
+ } while (scan_all);
+
+ return found;
+}
+
static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
{
long to_scan = 1;
@@ -913,32 +936,24 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
* to spread out the writes.
*/
if (si->flags & SWP_PAGE_DISCARD) {
- ci = isolate_lock_cluster(si, &si->free_clusters);
- if (ci) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
+ false);
+ if (found)
+ goto done;
}

if (order < PMD_ORDER) {
- while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->nonfull_clusters[order],
+ order, usage, true);
+ if (found)
+ goto done;
}

if (!(si->flags & SWP_PAGE_DISCARD)) {
- ci = isolate_lock_cluster(si, &si->free_clusters);
- if (ci) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
+ false);
+ if (found)
+ goto done;
}

/* Try reclaim full clusters if free and nonfull lists are drained */
@@ -952,13 +967,10 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
* failure is not critical. Scanning one cluster still
* keeps the list rotated and reclaimed (for HAS_CACHE).
*/
- ci = isolate_lock_cluster(si, &si->frag_clusters[order]);
- if (ci) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->frag_clusters[order], order,
+ usage, false);
+ if (found)
+ goto done;
}

/*
@@ -977,19 +989,15 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
* Clusters here have at least one usable slots and can't fail order 0
* allocation, but reclaim may drop si->lock and race with another user.
*/
- while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- 0, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->frag_clusters[o],
+ 0, usage, true);
+ if (found)
+ goto done;

- while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- 0, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->nonfull_clusters[o],
+ 0, usage, true);
+ if (found)
+ goto done;
}
done:
if (!(si->flags & SWP_SOLIDSTATE))

--
2.43.0
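
For readers tracing the control flow, below is a minimal user-space model
of the scan_all semantics the new helper introduces. Everything here is an
illustrative stand-in (the struct, isolate(), and scan() are not the
kernel's swap structures or helpers); only the do/while shape mirrors
alloc_swap_scan_list() in the patch above.

#include <stdbool.h>
#include <stdio.h>

#define ENTRY_INVALID 0			/* stands in for SWAP_ENTRY_INVALID */

struct cluster {
	unsigned int slot;		/* 0 means no usable slot */
	struct cluster *next;
};

/* Stand-in for isolate_lock_cluster(): pop the head cluster, if any. */
static struct cluster *isolate(struct cluster **list)
{
	struct cluster *ci = *list;

	if (ci)
		*list = ci->next;
	return ci;
}

/* Stand-in for alloc_swap_scan_cluster(): report the cluster's slot. */
static unsigned int scan(struct cluster *ci)
{
	return ci->slot;
}

/*
 * Same shape as alloc_swap_scan_list(): one isolate-and-scan attempt
 * when scan_all is false, keep draining the list when it is true.
 */
static unsigned int scan_list(struct cluster **list, bool scan_all)
{
	unsigned int found = ENTRY_INVALID;

	do {
		struct cluster *ci = isolate(list);

		if (!ci)
			break;		/* list drained, give up */
		found = scan(ci);
		if (found)
			break;		/* got a usable slot */
	} while (scan_all);

	return found;
}

int main(void)
{
	struct cluster c2 = { .slot = 42, .next = NULL };
	struct cluster c1 = { .slot = 0, .next = &c2 };	/* "full" cluster */
	struct cluster *list = &c1;

	/* false: probe only the head cluster; c1 is full, so this fails. */
	printf("first cluster only: %u\n", scan_list(&list, false));
	/* true: keep isolating until a slot turns up; c2 yields 42. */
	printf("whole list:         %u\n", scan_list(&list, true));
	return 0;
}

This matches how the patch uses the flag: the free list and the same-order
fragment list are probed with scan_all == false (a single isolate-and-scan
attempt), while the same-order nonfull list and the order-0 fallback over
lower-order frag/nonfull lists pass true to drain the list until an entry
is found.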
On Tue, Aug 12, 2025 at 12:10 AM Chris Li <chrisl@kernel.org> wrote:
>
> alloc_swap_scan_list() scans either the whole list or only the first
> cluster.
>
> This reduces the repeated pattern of isolating a cluster and then
> scanning that cluster. As a result, cluster_alloc_swap_entry() is
> shorter and shallower.
>
> No functional change.
>
> Signed-off-by: Chris Li <chrisl@kernel.org>

Code seems more readable now, IMO. Thanks, Chris!

Acked-by: Nhat Pham <nphamcs@gmail.com>
On Tue, Aug 12, 2025 at 5:13 PM Chris Li <chrisl@kernel.org> wrote:
>
> alloc_swap_scan_list() scans either the whole list or only the first
> cluster.
>
> This reduces the repeated pattern of isolating a cluster and then
> scanning that cluster. As a result, cluster_alloc_swap_entry() is
> shorter and shallower.
>
> No functional change.
>
> Signed-off-by: Chris Li <chrisl@kernel.org>
> ---
> [...]

I've been testing on top of a locally updated version of V2 for about
two days, and it's identical to this one. This looks great, thanks!

Reviewed-by: Kairui Song <kasong@tencent.com>
On Tue, Aug 12, 2025 at 9:57 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> I've been testing on top of a locally updated version of V2 for about
> two days, and it's identical to this one. This looks great, thanks!

Thanks for the review. I have been running the swap stress tests on
kernel compile as well. No issue so far.

Chris