alloc_swap_scan_list() will scan either the whole list or just the first
cluster, depending on its scan_all argument.

This reduces the repeated pattern of isolating a cluster and then scanning
that cluster. As a result, cluster_alloc_swap_entry() is shorter and
shallower.
No functional change.
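
To illustrate, each call site previously open-coded the isolate-then-scan
pattern; with the helper it collapses to a single call. The fragments below
are lifted from the diff that follows, using the free_clusters call site as
the example.

Before, at each call site:

	ci = isolate_lock_cluster(si, &si->free_clusters);
	if (ci) {
		found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
						order, usage);
		if (found)
			goto done;
	}

After:

	found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
				     false);
	if (found)
		goto done;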
Signed-off-by: Chris Li <chrisl@kernel.org>
---
This patch goes on top of Kairui's swap cluster scan improvement series:
https://lore.kernel.org/linux-mm/20250806161748.76651-1-ryncsn@gmail.com/
---
Changes in v2:
- Adjust the change based on Andrew's feedback about the int type and
  breaking out of the loop.
- Link to v1: https://lore.kernel.org/r/20250806-swap-scan-list-v1-1-a5fe2d9340a2@kernel.org
---
mm/swapfile.c | 86 ++++++++++++++++++++++++++++++++---------------------------
1 file changed, 47 insertions(+), 39 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4a0cf4fb348d..f26678d68874 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -820,6 +820,29 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
return found;
}
+static unsigned int alloc_swap_scan_list(struct swap_info_struct *si,
+ struct list_head *list,
+ unsigned int order,
+ unsigned char usage,
+ bool scan_all)
+{
+ unsigned int found = SWAP_ENTRY_INVALID;
+
+ do {
+ struct swap_cluster_info *ci = isolate_lock_cluster(si, list);
+ unsigned long offset;
+
+ if (!ci)
+ break;
+ offset = cluster_offset(si, ci);
+ found = alloc_swap_scan_cluster(si, ci, offset, order, usage);
+ if (found)
+ break;
+ } while (scan_all);
+
+ return found;
+}
+
static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
{
long to_scan = 1;
@@ -913,32 +936,24 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
* to spread out the writes.
*/
if (si->flags & SWP_PAGE_DISCARD) {
- ci = isolate_lock_cluster(si, &si->free_clusters);
- if (ci) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
+ false);
+ if (found)
+ goto done;
}
if (order < PMD_ORDER) {
- while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->nonfull_clusters[order],
+ order, usage, 0);
+ if (found)
+ goto done;
}
if (!(si->flags & SWP_PAGE_DISCARD)) {
- ci = isolate_lock_cluster(si, &si->free_clusters);
- if (ci) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
+ false);
+ if (found)
+ goto done;
}
/* Try reclaim full clusters if free and nonfull lists are drained */
@@ -952,13 +967,10 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
* failure is not critical. Scanning one cluster still
* keeps the list rotated and reclaimed (for HAS_CACHE).
*/
- ci = isolate_lock_cluster(si, &si->frag_clusters[order]);
- if (ci) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- order, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->frag_clusters[order], order,
+ usage, true);
+ if (found)
+ goto done;
}
/*
@@ -977,19 +989,15 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
* Clusters here have at least one usable slots and can't fail order 0
* allocation, but reclaim may drop si->lock and race with another user.
*/
- while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- 0, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->frag_clusters[o],
+ 0, usage, true);
+ if (found)
+ goto done;
- while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) {
- found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
- 0, usage);
- if (found)
- goto done;
- }
+ found = alloc_swap_scan_list(si, &si->nonfull_clusters[o],
+ 0, usage, true);
+ if (found)
+ goto done;
}
done:
if (!(si->flags & SWP_SOLIDSTATE))
---
base-commit: f89484324d5876ee10765fa61da0332899fa1a6a
change-id: 20250806-swap-scan-list-2b89e3424b0a
Best regards,
--
Chris Li <chrisl@kernel.org>
On Fri, Aug 8, 2025 at 3:48 PM Chris Li <chrisl@kernel.org> wrote:
>
> This the alloc_swap_scan_list() will scan the whole list or the first
> cluster.

Hi Chris,

This sentence reads strange to me, but English is not my native
language so I'm not very sure about it.

[...]

> if (order < PMD_ORDER) {
> - while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) {
> - found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
> - order, usage);
> - if (found)
> - goto done;
> - }
> + found = alloc_swap_scan_list(si, &si->nonfull_clusters[order],
> + order, usage, 0);

All other alloc_swap_scan_list calls use `false`/`true` but this one
uses `0` for `scan_all`.
And it should be `true`, right?

[...]

> - ci = isolate_lock_cluster(si, &si->frag_clusters[order]);
> - if (ci) {
> - found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
> - order, usage);
> - if (found)
> - goto done;
> - }
> + found = alloc_swap_scan_list(si, &si->frag_clusters[order], order,
> + usage, true);

And it should be `false` here.

[...]

This makes the code much cleaner, thanks!
On Fri, Aug 8, 2025 at 12:52 PM Kairui Song <ryncsn@gmail.com> wrote:
>
> On Fri, Aug 8, 2025 at 3:48 PM Chris Li <chrisl@kernel.org> wrote:
> >
> > This the alloc_swap_scan_list() will scan the whole list or the first
> > cluster.
>
> Hi Chris,
>
> This sentence reads strange to me, but English is not my native
> language so I'm not very sure about it.

Ack, my bad, let me rephrase it.

[...]

> > + found = alloc_swap_scan_list(si, &si->nonfull_clusters[order],
> > + order, usage, 0);
>
> All other alloc_swap_scan_list calls use `false`/`true` but this one
> uses `0` for `scan_all`.
> And it should be `true`, right?

Yes, good catch. Will send out a V3.

[...]

> > + found = alloc_swap_scan_list(si, &si->frag_clusters[order], order,
> > + usage, true);
>
> And it should be `false` here.

Ack.

Chris
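
Putting Kairui's two observations together, the corrected call sites in the
upcoming v3 would presumably look something like this (a sketch based on the
review above, not the posted v3): the nonfull list keeps being scanned in
full, while the first pass over the fragment list stays limited to a single
cluster.

	if (order < PMD_ORDER) {
		found = alloc_swap_scan_list(si, &si->nonfull_clusters[order],
					     order, usage, true);
		if (found)
			goto done;
	}

	/* ... */

	/* first pass over frag_clusters: scan only one cluster, as before */
	found = alloc_swap_scan_list(si, &si->frag_clusters[order], order,
				     usage, false);
	if (found)
		goto done;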
On Sat, 9 Aug 2025 03:51:48 +0800 Kairui Song <ryncsn@gmail.com> wrote:

> On Fri, Aug 8, 2025 at 3:48 PM Chris Li <chrisl@kernel.org> wrote:
> >
> > This the alloc_swap_scan_list() will scan the whole list or the first
> > cluster.
>
> Hi Chris,
>
> This sentence reads strange to me, but English is not my native
> language so I'm not very sure about it.

Yes, I rewrote this to

: alloc_swap_scan_list() will scan the whole list or the first cluster.
:
: This reduces the repeat patterns of isolating a cluster then scanning that
: cluster. As a result, cluster_alloc_swap_entry() is shorter and
: shallower.
:
: No functional change.

Which is hopefully accurate.

LLMs do an awesome job of doing this, although one should carefully
review the output. Here's gemini.google.com:

: alloc_swap_scan_list() now scans either the entire list or just the
: first cluster. This change eliminates the repetitive pattern of
: isolating and then scanning a cluster. As a result, the function
: cluster_alloc_swap_entry() is now shorter and less complex. There are
: no functional changes to the code's behavior.
On Mon, Aug 11, 2025 at 9:24 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Sat, 9 Aug 2025 03:51:48 +0800 Kairui Song <ryncsn@gmail.com> wrote:
>
> > On Fri, Aug 8, 2025 at 3:48 PM Chris Li <chrisl@kernel.org> wrote:
> > >
> > > This the alloc_swap_scan_list() will scan the whole list or the first
> > > cluster.
> >
> > Hi Chris,
> >
> > This sentence reads strange to me, but English is not my native
> > language so I'm not very sure about it.

Ack. My bad.

> Yes, I rewrote this to
>
> : alloc_swap_scan_list() will scan the whole list or the first cluster.
> :
> : This reduces the repeat patterns of isolating a cluster then scanning that
> : cluster. As a result, cluster_alloc_swap_entry() is shorter and
> : shallower.
> :
> : No functional change.
>
> Which is hopefully accurate.
>
> LLMs do an awesome job of doing this, although one should carefully
> review the output. Here's gemini.google.com:
>
> : alloc_swap_scan_list() now scans either the entire list or just the
> : first cluster. This change eliminates the repetitive pattern of
> : isolating and then scanning a cluster. As a result, the function
> : cluster_alloc_swap_entry() is now shorter and less complex. There are
> : no functional changes to the code's behavior.

Thanks I will do a V3 and use something like that. I spot some other
minor cleanup required. Might just add it to the clean up series.

Chris