The new alloc_swap_scan_list() will scan either the whole list or just
the first cluster.
This removes the repeated pattern of isolating a cluster and then
scanning it. As a result, cluster_alloc_swap_entry() is shorter and has
shallower nesting.
No functional change.
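
For illustration, the repeated call-site pattern and its replacement
look like this (simplified sketch based on the hunks below; "list"
stands in for whichever cluster list a given call site passes):

	/* Before: each call site isolates and scans clusters by hand. */
	while ((ci = isolate_lock_cluster(si, list))) {
		found = alloc_swap_scan_cluster(si, ci,
						cluster_offset(si, ci),
						order, usage);
		if (found)
			goto done;
	}

	/* After: one helper; scan_all picks whole list vs. first cluster. */
	found = alloc_swap_scan_list(si, list, order, usage, true);
	if (found)
		goto done;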
Signed-off-by: Chris Li <chrisl@kernel.org>
---
This patch applies on top of Kairui's swap cluster scan improvement series:
https://lore.kernel.org/linux-mm/20250806161748.76651-1-ryncsn@gmail.com/
---
mm/swapfile.c | 86 ++++++++++++++++++++++++++++++++---------------------------
1 file changed, 47 insertions(+), 39 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4a0cf4fb348d..fcb1e57d8108 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -820,6 +820,29 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
return found;
}
+static unsigned int alloc_swap_scan_list(struct swap_info_struct *si,
+ struct list_head *list,
+ unsigned int order,
+ unsigned char usage,
+ bool scan_all)
+{
+ int found = SWAP_ENTRY_INVALID;
+
+ do {
+ struct swap_cluster_info *ci = isolate_lock_cluster(si, list);
+ unsigned long offset;
+
+ if (!ci)
+ break;
+ offset = cluster_offset(si, ci);
+ found = alloc_swap_scan_cluster(si, ci, offset, order, usage);
+ if (found)
+ return found;
+ } while (scan_all);
+
+ return found;
+}
+
static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
{
long to_scan = 1;
@@ -913,32 +936,24 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
* to spread out the writes.
*/
if (si->flags & SWP_PAGE_DISCARD) {
- ci = isolate_lock_cluster(si, &si->free_clusters);
- if (ci) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
+ false);
+ if (found)
+ goto done;
}
if (order < PMD_ORDER) {
- while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->nonfull_clusters[order],
+ order, usage, true);
+ if (found)
+ goto done;
}
if (!(si->flags & SWP_PAGE_DISCARD)) {
- ci = isolate_lock_cluster(si, &si->free_clusters);
- if (ci) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
+ false);
+ if (found)
+ goto done;
}
/* Try reclaim full clusters if free and nonfull lists are drained */
@@ -952,13 +967,10 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
* failure is not critical. Scanning one cluster still
* keeps the list rotated and reclaimed (for HAS_CACHE).
*/
- ci = isolate_lock_cluster(si, &si->frag_clusters[order]);
- if (ci) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->frag_clusters[order], order,
+ usage, false);
+ if (found)
+ goto done;
}
/*
@@ -977,19 +989,15 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
* Clusters here have at least one usable slots and can't fail order 0
* allocation, but reclaim may drop si->lock and race with another user.
*/
- while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- 0, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->frag_clusters[o],
+ 0, usage, true);
+ if (found)
+ goto done;
- while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- 0, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->nonfull_clusters[o],
+ 0, usage, true);
+ if (found)
+ goto done;
}
done:
if (!(si->flags & SWP_SOLIDSTATE))
---
base-commit: f89484324d5876ee10765fa61da0332899fa1a6a
change-id: 20250806-swap-scan-list-2b89e3424b0a
Best regards,
--
Chris Li <chrisl@kernel.org>
On Wed, 06 Aug 2025 10:34:45 -0700 Chris Li <chrisl@kernel.org> wrote:

> The new alloc_swap_scan_list() will scan either the whole list or just
> the first cluster.
>
> This removes the repeated pattern of isolating a cluster and then
> scanning it. As a result, cluster_alloc_swap_entry() is shorter and has
> shallower nesting.
>
> No functional change.
>
> ...
>
> ---
> mm/swapfile.c | 86 ++++++++++++++++++++++++++++++++---------------------------
> 1 file changed, 47 insertions(+), 39 deletions(-)

A nice little patch.

> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 4a0cf4fb348d..fcb1e57d8108 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -820,6 +820,29 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
> return found;
> }
>
> +static unsigned int alloc_swap_scan_list(struct swap_info_struct *si,
> + struct list_head *list,
> + unsigned int order,
> + unsigned char usage,
> + bool scan_all)
> +{
> + int found = SWAP_ENTRY_INVALID;

but this function returns an unsigned int

> +
> + do {
> + struct swap_cluster_info *ci = isolate_lock_cluster(si, list);
> + unsigned long offset;
> +
> + if (!ci)
> + break;
> + offset = cluster_offset(si, ci);
> + found = alloc_swap_scan_cluster(si, ci, offset, order, usage);

so does that one

> + if (found)
> + return found;

`break' here. To avoid multiple return points and for consistency.

> + } while (scan_all);
> +
> + return found;
> +}
> +
> static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
> {
> long to_scan = 1;
Hi Andrew,

Thanks for the feedback.

On Wed, Aug 6, 2025 at 3:31 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Wed, 06 Aug 2025 10:34:45 -0700 Chris Li <chrisl@kernel.org> wrote:
>
> > The new alloc_swap_scan_list() will scan either the whole list or just
> > the first cluster.
> >
> > This removes the repeated pattern of isolating a cluster and then
> > scanning it. As a result, cluster_alloc_swap_entry() is shorter and has
> > shallower nesting.
> >
> > No functional change.
> > ...
> >
> > ---
> > mm/swapfile.c | 86 ++++++++++++++++++++++++++++++++---------------------------
> > 1 file changed, 47 insertions(+), 39 deletions(-)
>
> A nice little patch.
>
> > diff --git a/mm/swapfile.c b/mm/swapfile.c
> > index 4a0cf4fb348d..fcb1e57d8108 100644
> > --- a/mm/swapfile.c
> > +++ b/mm/swapfile.c
> > @@ -820,6 +820,29 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
> > return found;
> > }
> >
> > +static unsigned int alloc_swap_scan_list(struct swap_info_struct *si,
> > + struct list_head *list,
> > + unsigned int order,
> > + unsigned char usage,
> > + bool scan_all)
> > +{
> > + int found = SWAP_ENTRY_INVALID;
>
> but this function returns an unsigned int

Will fix it in the next version.

> > +
> > + do {
> > + struct swap_cluster_info *ci = isolate_lock_cluster(si, list);
> > + unsigned long offset;
> > +
> > + if (!ci)
> > + break;
> > + offset = cluster_offset(si, ci);
> > + found = alloc_swap_scan_cluster(si, ci, offset, order, usage);
>
> so does that one
>
> > + if (found)
> > + return found;
>
> `break' here. To avoid multiple return points and for consistency.

Will do.

Chris
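
For reference, a sketch of alloc_swap_scan_list() with both review
points folded in (unsigned return type, single exit via break) could
look like the following. This is only an illustration of the agreed
changes, not the actual v2 posting:

	static unsigned int alloc_swap_scan_list(struct swap_info_struct *si,
						 struct list_head *list,
						 unsigned int order,
						 unsigned char usage,
						 bool scan_all)
	{
		/* unsigned int, matching the return type (first review point) */
		unsigned int found = SWAP_ENTRY_INVALID;

		do {
			struct swap_cluster_info *ci = isolate_lock_cluster(si, list);
			unsigned long offset;

			if (!ci)
				break;
			offset = cluster_offset(si, ci);
			found = alloc_swap_scan_cluster(si, ci, offset, order, usage);
			if (found)
				break;	/* single return point (second review point) */
		} while (scan_all);

		return found;
	}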