From: Kairui Song <kasong@tencent.com>
There are currently three swap cache users that are trying to replace an
existing folio with a new one: huge memory splitting, migration, and
shmem replacement. What they are doing is quite similar.
Introduce a common helper for this. In later commits, they can be easily
switched to use the swap table by updating this helper.
The newly added helper also makes the swap cache API better defined and
makes debugging easier.
Signed-off-by: Kairui Song <kasong@tencent.com>
---
 mm/huge_memory.c |  5 ++---
 mm/migrate.c     | 11 +++--------
 mm/shmem.c       | 10 ++--------
 mm/swap.h        |  3 +++
 mm/swap_state.c  | 32 ++++++++++++++++++++++++++++++++
 5 files changed, 42 insertions(+), 19 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 26cedfcd7418..a4d192c8d794 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3798,9 +3798,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
* NOTE: shmem in swap cache is not supported yet.
*/
if (swap_cache) {
- __xa_store(&swap_cache->i_pages,
- swap_cache_index(new_folio->swap),
- new_folio, 0);
+ __swap_cache_replace_folio(swap_cache, new_folio->swap,
+ folio, new_folio);
continue;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 8e435a078fc3..7e1d01aa8c85 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -566,7 +566,6 @@ static int __folio_migrate_mapping(struct address_space *mapping,
struct zone *oldzone, *newzone;
int dirty;
long nr = folio_nr_pages(folio);
- long entries, i;
if (!mapping) {
/* Take off deferred split queue while frozen and memcg set */
@@ -615,9 +614,6 @@ static int __folio_migrate_mapping(struct address_space *mapping,
if (folio_test_swapcache(folio)) {
folio_set_swapcache(newfolio);
newfolio->private = folio_get_private(folio);
- entries = nr;
- } else {
- entries = 1;
}
/* Move dirty while folio refs frozen and newfolio not yet exposed */
@@ -627,11 +623,10 @@ static int __folio_migrate_mapping(struct address_space *mapping,
folio_set_dirty(newfolio);
}
- /* Swap cache still stores N entries instead of a high-order entry */
- for (i = 0; i < entries; i++) {
+ if (folio_test_swapcache(folio))
+ __swap_cache_replace_folio(mapping, folio->swap, folio, newfolio);
+ else
xas_store(&xas, newfolio);
- xas_next(&xas);
- }
/*
* Drop cache reference from old folio by unfreezing
diff --git a/mm/shmem.c b/mm/shmem.c
index cc6a0007c7a6..823ceae9dff8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2123,10 +2123,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
struct folio *new, *old = *foliop;
swp_entry_t entry = old->swap;
struct address_space *swap_mapping = swap_address_space(entry);
- pgoff_t swap_index = swap_cache_index(entry);
- XA_STATE(xas, &swap_mapping->i_pages, swap_index);
int nr_pages = folio_nr_pages(old);
- int error = 0, i;
+ int error = 0;
/*
* We have arrived here because our zones are constrained, so don't
@@ -2155,12 +2153,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
new->swap = entry;
folio_set_swapcache(new);
- /* Swap cache still stores N entries instead of a high-order entry */
xa_lock_irq(&swap_mapping->i_pages);
- for (i = 0; i < nr_pages; i++) {
- WARN_ON_ONCE(xas_store(&xas, new));
- xas_next(&xas);
- }
+ __swap_cache_replace_folio(swap_mapping, entry, old, new);
xa_unlock_irq(&swap_mapping->i_pages);
mem_cgroup_replace_folio(old, new);
diff --git a/mm/swap.h b/mm/swap.h
index 8b38577a4e04..a139c9131244 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -182,6 +182,9 @@ int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
void swap_cache_del_folio(struct folio *folio);
void __swap_cache_del_folio(struct folio *folio,
swp_entry_t entry, void *shadow);
+void __swap_cache_replace_folio(struct address_space *address_space,
+ swp_entry_t entry,
+ struct folio *old, struct folio *new);
void swap_cache_clear_shadow(int type, unsigned long begin,
unsigned long end);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f3a32a06a950..38f5f4cf565d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -234,6 +234,38 @@ void swap_cache_del_folio(struct folio *folio)
folio_ref_sub(folio, folio_nr_pages(folio));
}
+/**
+ * __swap_cache_replace_folio - Replace a folio in the swap cache.
+ * @mapping: Swap mapping address space.
+ * @entry: The first swap entry that the new folio corresponds to.
+ * @old: The old folio to be replaced.
+ * @new: The new folio.
+ *
+ * Replace an existing folio in the swap cache with a new folio.
+ *
+ * Context: Caller must ensure both folios are locked, and lock the
+ * swap address_space that holds the entries to be replaced.
+ */
+void __swap_cache_replace_folio(struct address_space *mapping,
+ swp_entry_t entry,
+ struct folio *old, struct folio *new)
+{
+ unsigned long nr_pages = folio_nr_pages(new);
+ unsigned long offset = swap_cache_index(entry);
+ unsigned long end = offset + nr_pages;
+ XA_STATE(xas, &mapping->i_pages, offset);
+
+ VM_WARN_ON_ONCE(entry.val != new->swap.val);
+ VM_WARN_ON_ONCE(!folio_test_locked(old) || !folio_test_locked(new));
+ VM_WARN_ON_ONCE(!folio_test_swapcache(old) || !folio_test_swapcache(new));
+
+ /* Swap cache still stores N entries instead of a high-order entry */
+ do {
+ WARN_ON_ONCE(xas_store(&xas, new) != old);
+ xas_next(&xas);
+ } while (++offset < end);
+}
+
/**
* swap_cache_clear_shadow - Clears a set of shadows in the swap cache.
* @type: Indicates the swap device.
--
2.51.0
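
Condensing the above, the caller contract of the new helper amounts to the
following minimal sketch (mirroring the shmem hunk; `mapping`, `entry`,
`old`, and `new` are assumed set up as in shmem_replace_folio(), and both
folios are assumed locked already):

/* Minimal caller sketch mirroring shmem_replace_folio() above:
 * hold the swap cache xarray lock across the replacement. */
xa_lock_irq(&mapping->i_pages);
__swap_cache_replace_folio(mapping, entry, old, new);
xa_unlock_irq(&mapping->i_pages);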
> +/**
> + * __swap_cache_replace_folio - Replace a folio in the swap cache.
> + * @mapping: Swap mapping address space.
> + * @entry: The first swap entry that the new folio corresponds to.
> + * @old: The old folio to be replaced.
> + * @new: The new folio.
> + *
> + * Replace an existing folio in the swap cache with a new folio.
> + *
> + * Context: Caller must ensure both folios are locked, and lock the
> + * swap address_space that holds the entries to be replaced.
> + */
> +void __swap_cache_replace_folio(struct address_space *mapping,
> +				swp_entry_t entry,
> +				struct folio *old, struct folio *new)

Can't we just use "new->swap.val" directly and avoid passing in the
entry, documenting that new->swap.val must be setup properly in advance?

Similarly, can't we obtain "mapping" from new?

--
Cheers

David / dhildenb
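
A sketch of the variant David is asking about (hypothetical; not part of
the posted patch): both the entry and the mapping are derived from the
new folio, which the caller must have initialized via new->swap
beforehand. The body is lifted from the helper above:

/* Hypothetical signature per the suggestion above: derive entry and
 * mapping from @new; the caller must set new->swap in advance. */
void __swap_cache_replace_folio(struct folio *old, struct folio *new)
{
	swp_entry_t entry = new->swap;
	struct address_space *mapping = swap_address_space(entry);
	unsigned long offset = swap_cache_index(entry);
	unsigned long end = offset + folio_nr_pages(new);
	XA_STATE(xas, &mapping->i_pages, offset);

	/* Swap cache still stores N order-0 entries; replace them all. */
	do {
		WARN_ON_ONCE(xas_store(&xas, new) != old);
		xas_next(&xas);
	} while (++offset < end);
}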
On Mon, Sep 8, 2025 at 8:35 PM David Hildenbrand <david@redhat.com> wrote:
>
> Can't we just use "new->swap.val" directly and avoid passing in the
> entry, documenting that new->swap.val must be setup properly in advance?

Thanks for the suggestion.

I was thinking about the opposite. I think maybe it's better that the
caller never sets the new folio's entry value, so folio->swap is always
modified in mm/swap_state.c, and let __swap_cache_replace_folio set
new->swap, to make it easier to track the folio->swap usage.

This can be done easily for the migration and shmem parts; the huge
split code will need a bit more cleanup.

It's a trivial change, I think. And letting __swap_cache_replace_folio
set up new's swap and flags may deduplicate some code. So I thought
maybe this could be better cleaned up later, and for now I just added a
debug check here that `entry == new->swap`.

And the debug check does imply that we can just drop the entry param
in this patch; there will be no feature change.

> Similarly, can't we obtain "mapping" from new?

This is doable. But this patch is only an intermediate patch; the next
commit will pass in the ci instead. Of course the `ci` can be retrieved
from `entry` directly too, but it's the caller's responsibility to lock
the `ci`, so passing in a locked ci explicitly might be more intuitive?
It also might save a tiny bit of CPU time from recalculating and
loading the `ci`.
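
A minimal sketch of the opposite direction Kairui describes here,
assuming the helper takes over initializing the new folio's swap state
(hypothetical; the huge-split case, which needs per-new-folio entries,
is glossed over):

/* Hypothetical variant of the idea above (not in the posted patch):
 * the helper owns the new folio's swap state, so callers never write
 * folio->swap outside mm/swap_state.c. The huge-split case would need
 * a distinct entry per new folio and is not handled here. */
void __swap_cache_replace_folio(struct address_space *mapping,
				struct folio *old, struct folio *new)
{
	swp_entry_t entry = old->swap;
	unsigned long offset = swap_cache_index(entry);
	unsigned long end = offset + folio_nr_pages(new);
	XA_STATE(xas, &mapping->i_pages, offset);

	new->swap = entry;
	folio_set_swapcache(new);

	do {
		WARN_ON_ONCE(xas_store(&xas, new) != old);
		xas_next(&xas);
	} while (++offset < end);
}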
On 08.09.25 16:20, Kairui Song wrote:
> I was thinking about the opposite. I think maybe it's better that the
> caller never sets the new folio's entry value, so folio->swap is always
> modified in mm/swap_state.c, and let __swap_cache_replace_folio set
> new->swap, to make it easier to track the folio->swap usage.
>
> This can be done easily for the migration and shmem parts; the huge
> split code will need a bit more cleanup.

Right, but it's probably worth it.

> It's a trivial change, I think. [...] So I thought maybe this could be
> better cleaned up later, and for now I just added a debug check here
> that `entry == new->swap`.
>
> And the debug check does imply that we can just drop the entry param
> in this patch; there will be no feature change.

Well, the current API as you introduce it here is confusing, as it's not
clear who is supposed to initialize what.

So better to do it cleanly right from the start.

>> Similarly, can't we obtain "mapping" from new?
>
> This is doable. But this patch is only an intermediate patch; the next
> commit will pass in the ci instead. [...]

Well, no other swap_cache_* function consumes an address space, right?

--
Cheers

David / dhildenb
On Mon, Sep 8, 2025 at 10:39 PM David Hildenbrand <david@redhat.com> wrote:
>
> On 08.09.25 16:20, Kairui Song wrote:
> >> Similarly, can't we obtain "mapping" from new?
> >
> > This is doable. But this patch is only an intermediate patch; the next
> > commit will pass in the ci instead. [...]
>
> Well, no other swap_cache_* function consumes an address space, right?

Right. I can drop it in this patch.
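
The follow-up shape implied here would presumably look something like
the following (an assumption about the next revision, not code from
this series):

/* Assumed shape of the follow-up: the caller passes the locked
 * swap cluster instead of an address_space, and the first entry is
 * derived from the folios themselves. */
void __swap_cache_replace_folio(struct swap_cluster_info *ci,
				struct folio *old, struct folio *new);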
On 2025/9/6 03:13, Kairui Song wrote:
> From: Kairui Song <kasong@tencent.com>
>
> There are currently three swap cache users that are trying to replace an
> existing folio with a new one: huge memory splitting, migration, and
> shmem replacement. What they are doing is quite similar.
>
> Introduce a common helper for this. In later commits, they can be easily
> switched to use the swap table by updating this helper.
>
> [...]
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 26cedfcd7418..a4d192c8d794 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -3798,9 +3798,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
>  	 * NOTE: shmem in swap cache is not supported yet.
>  	 */
>  	if (swap_cache) {
> -		__xa_store(&swap_cache->i_pages,
> -				swap_cache_index(new_folio->swap),
> -				new_folio, 0);
> +		__swap_cache_replace_folio(swap_cache, new_folio->swap,
> +					   folio, new_folio);
>  		continue;
>  	}

IIUC, it doesn't seem like a simple function replacement here. It
appears that the original code has a bug: if the 'new_folio' is a large
folio after split, we need to iterate over each swap entry of the large
swapcache folio and then store the new 'new_folio'.
On Mon, Sep 8, 2025 at 11:52 AM Baolin Wang
<baolin.wang@linux.alibaba.com> wrote:
>
> IIUC, it doesn't seem like a simple function replacement here. It
> appears that the original code has a bug: if the 'new_folio' is a large
> folio after split, we need to iterate over each swap entry of the large
> swapcache folio and then store the new 'new_folio'.

That should be OK. We have a check in uniform_split_supported and
non_uniform_split_supported that a swap cache folio can only be split
into order 0, and it seems there is no support for splitting a pure
swap cache folio now.

Maybe we can try to enable and make use of higher-order split for the
swap cache after this series. I just had a try using some hackish code
to split random folios in the swap cache to larger orders, and it seems
fine after this series.
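
The guard Kairui refers to lives in mm/huge_memory.c; its approximate
shape is sketched below (paraphrased, not an exact quote of the tree):

/* Approximate shape of the check in uniform_split_supported() and
 * non_uniform_split_supported(): a folio in the swap cache may only
 * be split all the way down to order 0. */
if (folio_test_swapcache(folio) && new_order)
	return false;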
On 2025/9/8 18:44, Kairui Song wrote:
> On Mon, Sep 8, 2025 at 11:52 AM Baolin Wang
> <baolin.wang@linux.alibaba.com> wrote:
>> IIUC, it doesn't seem like a simple function replacement here. It
>> appears that the original code has a bug: if the 'new_folio' is a large
>> folio after split, we need to iterate over each swap entry of the large
>> swapcache folio and then store the new 'new_folio'.
>
> That should be OK. We have a check in uniform_split_supported and
> non_uniform_split_supported that a swap cache folio can only be split
> into order 0, and it seems there is no support for splitting a pure
> swap cache folio now.

Ah, yes. Better to mention that in the commit message; otherwise it
will make people (at least me) doubt whether this is a non-functional
change.

With David's comments addressed, feel free to add:

Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>

> Maybe we can try to enable and make use of higher-order split for the
> swap cache after this series. I just had a try using some hackish code
> to split random folios in the swap cache to larger orders, and it seems
> fine after this series.
Acked-by: Chris Li <chrisl@kernel.org>

Chris

On Fri, Sep 5, 2025 at 12:15 PM Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> There are currently three swap cache users that are trying to replace an
> existing folio with a new one: huge memory splitting, migration, and
> shmem replacement. What they are doing is quite similar.
>
> Introduce a common helper for this. In later commits, they can be easily
> switched to use the swap table by updating this helper.
>
> [...]