Teach folio_put_swap() to handle a batch of consecutive pages. Note that
folio_put_swap() can already handle a subset of this: nr_pages == 1 and
nr_pages == folio_nr_pages(folio). Generalize this to any nr_pages.

Currently we have the not-so-nice logic of passing subpage == NULL when we
mean to operate on the entire folio, and subpage != NULL when we want to
operate on only that subpage. Remove this indirection: always pass a
non-NULL subpage, along with the number of pages required.
Signed-off-by: Dev Jain <dev.jain@arm.com>
---
mm/memory.c | 6 +++---
mm/rmap.c | 4 ++--
mm/shmem.c | 6 +++---
mm/swap.h | 5 +++--
mm/swapfile.c | 13 +++++--------
5 files changed, 16 insertions(+), 18 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 768646c0b3b6a..8249a9b7083ab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5002,7 +5002,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (unlikely(folio != swapcache)) {
folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
- folio_put_swap(swapcache, NULL);
+ folio_put_swap(swapcache, folio_page(swapcache, 0), folio_nr_pages(swapcache));
} else if (!folio_test_anon(folio)) {
/*
* We currently only expect !anon folios that are fully
@@ -5011,12 +5011,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
VM_WARN_ON_ONCE_FOLIO(folio_nr_pages(folio) != nr_pages, folio);
VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
- folio_put_swap(folio, NULL);
+ folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
} else {
VM_WARN_ON_ONCE(nr_pages != 1 && nr_pages != folio_nr_pages(folio));
folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
rmap_flags);
- folio_put_swap(folio, nr_pages == 1 ? page : NULL);
+ folio_put_swap(folio, page, nr_pages);
}
VM_BUG_ON(!folio_test_anon(folio) ||
diff --git a/mm/rmap.c b/mm/rmap.c
index f6d5b187cf09b..42f6b00cced01 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2293,7 +2293,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* so we'll not check/care.
*/
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
- folio_put_swap(folio, subpage);
+ folio_put_swap(folio, subpage, 1);
set_pte_at(mm, address, pvmw.pte, pteval);
goto walk_abort;
}
@@ -2301,7 +2301,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
/* See folio_try_share_anon_rmap(): clear PTE first. */
if (anon_exclusive &&
folio_try_share_anon_rmap_pte(folio, subpage)) {
- folio_put_swap(folio, subpage);
+ folio_put_swap(folio, subpage, 1);
set_pte_at(mm, address, pvmw.pte, pteval);
goto walk_abort;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 86ee34c9b40b3..d9d216ea28ecb 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1716,7 +1716,7 @@ int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
/* Swap entry might be erased by racing shmem_free_swap() */
if (!error) {
shmem_recalc_inode(inode, 0, -nr_pages);
- folio_put_swap(folio, NULL);
+ folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
}
/*
@@ -2196,7 +2196,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
nr_pages = folio_nr_pages(folio);
folio_wait_writeback(folio);
- folio_put_swap(folio, NULL);
+ folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
swap_cache_del_folio(folio);
/*
* Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
@@ -2426,7 +2426,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
if (sgp == SGP_WRITE)
folio_mark_accessed(folio);
- folio_put_swap(folio, NULL);
+ folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
swap_cache_del_folio(folio);
folio_mark_dirty(folio);
put_swap_device(si);
diff --git a/mm/swap.h b/mm/swap.h
index d9cb58ebbddd1..73fd9faa67608 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -207,7 +207,7 @@ extern int swap_retry_table_alloc(swp_entry_t entry, gfp_t gfp);
*/
int folio_alloc_swap(struct folio *folio);
int folio_dup_swap(struct folio *folio, struct page *subpage, unsigned int nr_pages);
-void folio_put_swap(struct folio *folio, struct page *subpage);
+void folio_put_swap(struct folio *folio, struct page *subpage, unsigned int nr_pages);
/* For internal use */
extern void __swap_cluster_free_entries(struct swap_info_struct *si,
@@ -396,7 +396,8 @@ static inline int folio_dup_swap(struct folio *folio, struct page *page,
return -EINVAL;
}
-static inline void folio_put_swap(struct folio *folio, struct page *page)
+static inline void folio_put_swap(struct folio *folio, struct page *page,
+ unsigned int nr_pages)
{
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index eaf61ae6c3817..c66aa6d15d479 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1770,25 +1770,22 @@ int folio_dup_swap(struct folio *folio, struct page *subpage,
/**
* folio_put_swap() - Decrease swap count of swap entries of a folio.
* @folio: folio with swap entries bounded, must be in swap cache and locked.
- * @subpage: if not NULL, only decrease the swap count of this subpage.
+ * @subpage: decrease the swap count of nr_pages entries starting at this subpage.
*
* This won't free the swap slots even if swap count drops to zero, they are
* still pinned by the swap cache. User may call folio_free_swap to free them.
* Context: Caller must ensure the folio is locked and in the swap cache.
*/
-void folio_put_swap(struct folio *folio, struct page *subpage)
+void folio_put_swap(struct folio *folio, struct page *subpage,
+ unsigned int nr_pages)
{
swp_entry_t entry = folio->swap;
- unsigned long nr_pages = folio_nr_pages(folio);
struct swap_info_struct *si = __swap_entry_to_info(entry);
VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
VM_WARN_ON_FOLIO(!folio_test_swapcache(folio), folio);
- if (subpage) {
- entry.val += folio_page_idx(folio, subpage);
- nr_pages = 1;
- }
+ entry.val += folio_page_idx(folio, subpage);
swap_put_entries_cluster(si, swp_offset(entry), nr_pages, false);
}
@@ -2334,7 +2331,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
new_pte = pte_mkuffd_wp(new_pte);
setpte:
set_pte_at(vma->vm_mm, addr, pte, new_pte);
- folio_put_swap(swapcache, folio_file_page(swapcache, swp_offset(entry)));
+ folio_put_swap(swapcache, folio_file_page(swapcache, swp_offset(entry)), 1);
out:
if (pte)
pte_unmap_unlock(pte, ptl);
--
2.34.1
Hi Dev,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]

url:    https://github.com/intel-lab-lkp/linux/commits/Dev-Jain/mm-rmap-make-nr_pages-signed-in-try_to_unmap_one/20260310-153604
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20260310073013.4069309-8-dev.jain%40arm.com
patch subject: [PATCH 7/9] mm/swapfile: Make folio_put_swap batchable
config: m68k-allyesconfig (https://download.01.org/0day-ci/archive/20260318/202603180953.FNgOslIG-lkp@intel.com/config)
compiler: m68k-linux-gcc (GCC) 15.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260318/202603180953.FNgOslIG-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603180953.FNgOslIG-lkp@intel.com/

All warnings (new ones prefixed by >>):

   Warning: mm/swapfile.c:1757 function parameter 'nr_pages' not described in 'folio_dup_swap'
>> Warning: mm/swapfile.c:1780 function parameter 'nr_pages' not described in 'folio_put_swap'
   Warning: mm/swapfile.c:1757 function parameter 'nr_pages' not described in 'folio_dup_swap'
>> Warning: mm/swapfile.c:1780 function parameter 'nr_pages' not described in 'folio_put_swap'

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
On Tue, Mar 10, 2026 at 01:00:11PM +0530, Dev Jain wrote:
> Teach folio_put_swap() to handle a batch of consecutive pages. Note that
> folio_put_swap() can already handle a subset of this: nr_pages == 1 and
> nr_pages == folio_nr_pages(folio). Generalize this to any nr_pages.
>
> Currently we have the not-so-nice logic of passing subpage == NULL when we
> mean to operate on the entire folio, and subpage != NULL when we want to
> operate on only that subpage. Remove this indirection: always pass a
> non-NULL subpage, along with the number of pages required.
>
> Signed-off-by: Dev Jain <dev.jain@arm.com>
> ---
> mm/memory.c | 6 +++---
> mm/rmap.c | 4 ++--
> mm/shmem.c | 6 +++---
> mm/swap.h | 5 +++--
> mm/swapfile.c | 13 +++++--------
> 5 files changed, 16 insertions(+), 18 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 768646c0b3b6a..8249a9b7083ab 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -5002,7 +5002,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> if (unlikely(folio != swapcache)) {
> folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
> folio_add_lru_vma(folio, vma);
> - folio_put_swap(swapcache, NULL);
> + folio_put_swap(swapcache, folio_page(swapcache, 0), folio_nr_pages(swapcache));
Lord above HELPER. HELPER :) please.
I think in general, let's have the same refactoring in folio_put_swap() as I
suggested for folio_dup_swap().
I'm not sure where this hellish subpage interface came from, I mean there must
be a good reason but it seems kinda horrible.
> } else if (!folio_test_anon(folio)) {
> /*
> * We currently only expect !anon folios that are fully
> @@ -5011,12 +5011,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> VM_WARN_ON_ONCE_FOLIO(folio_nr_pages(folio) != nr_pages, folio);
> VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
> folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
> - folio_put_swap(folio, NULL);
> + folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
> } else {
> VM_WARN_ON_ONCE(nr_pages != 1 && nr_pages != folio_nr_pages(folio));
> folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
> rmap_flags);
> - folio_put_swap(folio, nr_pages == 1 ? page : NULL);
> + folio_put_swap(folio, page, nr_pages);
I'm confused as to why some callers use folio_nr_pages() and others nr_pages...
> }
>
> VM_BUG_ON(!folio_test_anon(folio) ||
> diff --git a/mm/rmap.c b/mm/rmap.c
> index f6d5b187cf09b..42f6b00cced01 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -2293,7 +2293,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> * so we'll not check/care.
> */
> if (arch_unmap_one(mm, vma, address, pteval) < 0) {
> - folio_put_swap(folio, subpage);
> + folio_put_swap(folio, subpage, 1);
Again, as with the folio_dup_swap() refactoring I suggested in the previous
patch, something like folio_dup_swap_subpage() would be good here, right?
Like folio_put_swap_subpage(folio, subpage)...
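i.e. something like (untested sketch; the name is just made up to mirror the
dup side):

static inline void folio_put_swap_subpage(struct folio *folio,
		struct page *subpage)
{
	/* Drop the swap count of the single entry backing @subpage. */
	folio_put_swap(folio, subpage, 1);
}

Then both of these call sites become folio_put_swap_subpage(folio, subpage).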
> set_pte_at(mm, address, pvmw.pte, pteval);
> goto walk_abort;
> }
> @@ -2301,7 +2301,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> /* See folio_try_share_anon_rmap(): clear PTE first. */
> if (anon_exclusive &&
> folio_try_share_anon_rmap_pte(folio, subpage)) {
> - folio_put_swap(folio, subpage);
> + folio_put_swap(folio, subpage, 1);
> set_pte_at(mm, address, pvmw.pte, pteval);
> goto walk_abort;
> }
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 86ee34c9b40b3..d9d216ea28ecb 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1716,7 +1716,7 @@ int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
> /* Swap entry might be erased by racing shmem_free_swap() */
> if (!error) {
> shmem_recalc_inode(inode, 0, -nr_pages);
> - folio_put_swap(folio, NULL);
> + folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
> }
>
> /*
> @@ -2196,7 +2196,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
>
> nr_pages = folio_nr_pages(folio);
> folio_wait_writeback(folio);
> - folio_put_swap(folio, NULL);
> + folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
> swap_cache_del_folio(folio);
> /*
> * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
> @@ -2426,7 +2426,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
> if (sgp == SGP_WRITE)
> folio_mark_accessed(folio);
>
> - folio_put_swap(folio, NULL);
> + folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
Again, for emphasis, please do not repeatedly open-code the exact same thing
all over the place; 'don't repeat yourself' is a really good principle in
programming in general. Now if one of these callers gets updated and the
others aren't, we're in a mess.
Abstract this please :)
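e.g. something like (untested; naming entirely up to you):

static inline void folio_put_swap_all(struct folio *folio)
{
	/* Drop the swap count of every entry backing this folio. */
	folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
}

and then every whole-folio call site here collapses to
folio_put_swap_all(folio), so they can't drift apart.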
> swap_cache_del_folio(folio);
> folio_mark_dirty(folio);
> put_swap_device(si);
> diff --git a/mm/swap.h b/mm/swap.h
> index d9cb58ebbddd1..73fd9faa67608 100644
> --- a/mm/swap.h
> +++ b/mm/swap.h
> @@ -207,7 +207,7 @@ extern int swap_retry_table_alloc(swp_entry_t entry, gfp_t gfp);
> */
> int folio_alloc_swap(struct folio *folio);
> int folio_dup_swap(struct folio *folio, struct page *subpage, unsigned int nr_pages);
> -void folio_put_swap(struct folio *folio, struct page *subpage);
> +void folio_put_swap(struct folio *folio, struct page *subpage, unsigned int nr_pages);
>
> /* For internal use */
> extern void __swap_cluster_free_entries(struct swap_info_struct *si,
> @@ -396,7 +396,8 @@ static inline int folio_dup_swap(struct folio *folio, struct page *page,
> return -EINVAL;
> }
>
> -static inline void folio_put_swap(struct folio *folio, struct page *page)
> +static inline void folio_put_swap(struct folio *folio, struct page *page,
> + unsigned int nr_pages)
> {
> }
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index eaf61ae6c3817..c66aa6d15d479 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1770,25 +1770,22 @@ int folio_dup_swap(struct folio *folio, struct page *subpage,
> /**
> * folio_put_swap() - Decrease swap count of swap entries of a folio.
> * @folio: folio with swap entries bounded, must be in swap cache and locked.
> - * @subpage: if not NULL, only decrease the swap count of this subpage.
> + * @subpage: decrease the swap count of nr_pages entries starting at this subpage.
Again no nr_pages entry.
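Something like (wording just a suggestion):

 * @subpage: first subpage whose swap entry count is decreased.
 * @nr_pages: number of consecutive entries, starting at @subpage, to put.

would document both parameters and keep kernel-doc (and the test robot) quiet.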
> *
> * This won't free the swap slots even if swap count drops to zero, they are
> * still pinned by the swap cache. User may call folio_free_swap to free them.
> * Context: Caller must ensure the folio is locked and in the swap cache.
> */
> -void folio_put_swap(struct folio *folio, struct page *subpage)
> +void folio_put_swap(struct folio *folio, struct page *subpage,
> + unsigned int nr_pages)
> {
> swp_entry_t entry = folio->swap;
> - unsigned long nr_pages = folio_nr_pages(folio);
> struct swap_info_struct *si = __swap_entry_to_info(entry);
>
> VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
> VM_WARN_ON_FOLIO(!folio_test_swapcache(folio), folio);
>
> - if (subpage) {
> - entry.val += folio_page_idx(folio, subpage);
> - nr_pages = 1;
> - }
> + entry.val += folio_page_idx(folio, subpage);
>
> swap_put_entries_cluster(si, swp_offset(entry), nr_pages, false);
And yeah exact same comments re: refactoring as per folio_dup_swap().
> }
> @@ -2334,7 +2331,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
> new_pte = pte_mkuffd_wp(new_pte);
> setpte:
> set_pte_at(vma->vm_mm, addr, pte, new_pte);
> - folio_put_swap(swapcache, folio_file_page(swapcache, swp_offset(entry)));
> + folio_put_swap(swapcache, folio_file_page(swapcache, swp_offset(entry)), 1);
> out:
> if (pte)
> pte_unmap_unlock(pte, ptl);
> --
> 2.34.1
>
Thanks, Lorenzo
On Tue, Mar 10, 2026 at 3:47 PM Dev Jain <dev.jain@arm.com> wrote:
>
> [...]
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 86ee34c9b40b3..d9d216ea28ecb 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1716,7 +1716,7 @@ int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
> /* Swap entry might be erased by racing shmem_free_swap() */
> if (!error) {
> shmem_recalc_inode(inode, 0, -nr_pages);
> - folio_put_swap(folio, NULL);
> + folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
I just realized that we already have a nr_pages variable available
here, maybe you can just use that?
Feel free to ignore this if it might touch more code.
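i.e. simply:

	folio_put_swap(folio, folio_page(folio, 0), nr_pages);

since the shmem_recalc_inode() call just above already uses nr_pages.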
> }
>
> /*
> @@ -2196,7 +2196,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
>
> nr_pages = folio_nr_pages(folio);
> folio_wait_writeback(folio);
> - folio_put_swap(folio, NULL);
> + folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
> swap_cache_del_folio(folio);
> /*
> * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
> @@ -2426,7 +2426,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
> if (sgp == SGP_WRITE)
> folio_mark_accessed(folio);
>
> - folio_put_swap(folio, NULL);
> + folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
Same here, nr_pages seems good enough?
On 10/03/26 1:59 pm, Kairui Song wrote:
> On Tue, Mar 10, 2026 at 3:47 PM Dev Jain <dev.jain@arm.com> wrote:
>>
>> [...]
>> diff --git a/mm/shmem.c b/mm/shmem.c
>> index 86ee34c9b40b3..d9d216ea28ecb 100644
>> --- a/mm/shmem.c
>> +++ b/mm/shmem.c
>> @@ -1716,7 +1716,7 @@ int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
>> /* Swap entry might be erased by racing shmem_free_swap() */
>> if (!error) {
>> shmem_recalc_inode(inode, 0, -nr_pages);
>> - folio_put_swap(folio, NULL);
>> + folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
>
> I just realized that we already have a nr_pages variable available
> here, maybe you can just use that?
>
> Feel free to ignore this if it might touch more code.
>
>> }
>>
>> /*
>> @@ -2196,7 +2196,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
>>
>> nr_pages = folio_nr_pages(folio);
>> folio_wait_writeback(folio);
>> - folio_put_swap(folio, NULL);
>> + folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
>> swap_cache_del_folio(folio);
>> /*
>> * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
>> @@ -2426,7 +2426,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
>> if (sgp == SGP_WRITE)
>> folio_mark_accessed(folio);
>>
>> - folio_put_swap(folio, NULL);
>> + folio_put_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
>
> Same here, nr_pages seems good enough?
Yup, thanks for your observation.