[PATCH v3 4/6] mm: add folio_is_lazyfree helper

Posted by Vernon Yang 1 month ago
Add a folio_is_lazyfree() helper to identify lazy-free folios and
improve code readability.

Signed-off-by: Vernon Yang <yanglincheng@kylinos.cn>
---
 include/linux/mm_inline.h | 5 +++++
 mm/rmap.c                 | 4 ++--
 mm/vmscan.c               | 5 ++---
 3 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index fa2d6ba811b5..65a4ae52d915 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -35,6 +35,11 @@ static inline int page_is_file_lru(struct page *page)
 	return folio_is_file_lru(page_folio(page));
 }
 
+static inline int folio_is_lazyfree(const struct folio *folio)
+{
+	return folio_test_anon(folio) && !folio_test_swapbacked(folio);
+}
+
 static __always_inline void __update_lru_size(struct lruvec *lruvec,
 				enum lru_list lru, enum zone_type zid,
 				long nr_pages)
diff --git a/mm/rmap.c b/mm/rmap.c
index f955f02d570e..7241a3fa8574 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1838,7 +1838,7 @@ static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
 	max_nr = (end_addr - addr) >> PAGE_SHIFT;
 
 	/* We only support lazyfree batching for now ... */
-	if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
+	if (!folio_is_lazyfree(folio))
 		return 1;
 	if (pte_unused(pte))
 		return 1;
@@ -1934,7 +1934,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		}
 
 		if (!pvmw.pte) {
-			if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
+			if (folio_is_lazyfree(folio)) {
 				if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
 					goto walk_done;
 				/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 670fe9fae5ba..f357f74b5a35 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -963,8 +963,7 @@ static void folio_check_dirty_writeback(struct folio *folio,
 	 * They could be mistakenly treated as file lru. So further anon
 	 * test is needed.
 	 */
-	if (!folio_is_file_lru(folio) ||
-	    (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
+	if (!folio_is_file_lru(folio) || folio_is_lazyfree(folio)) {
 		*dirty = false;
 		*writeback = false;
 		return;
@@ -1501,7 +1500,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 			}
 		}
 
-		if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
+		if (folio_is_lazyfree(folio)) {
 			/* follow __remove_mapping for reference */
 			if (!folio_ref_freeze(folio, 1))
 				goto keep_locked;
-- 
2.51.0
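
For context: a lazy-free folio is an anonymous folio whose PG_swapbacked
flag has been cleared on the madvise(MADV_FREE) path, so reclaim may
discard it without swap I/O as long as it stays clean. A minimal sketch
of how a caller might use the new helper; example_scan() is a
hypothetical function shown only for illustration, not part of this
patch:

#include <linux/mm_inline.h>

/* Hypothetical caller, shown only to illustrate the helper. */
static void example_scan(struct folio *folio)
{
	/*
	 * folio_is_lazyfree() is shorthand for
	 * folio_test_anon(folio) && !folio_test_swapbacked(folio),
	 * i.e. the state left behind by madvise(MADV_FREE).
	 */
	if (folio_is_lazyfree(folio)) {
		/*
		 * A clean lazy-free folio can be freed without being
		 * written to swap; once redirtied, it must be treated
		 * as normal swap-backed anonymous memory again.
		 */
	}
}
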
Re: [PATCH v3 4/6] mm: add folio_is_lazyfree helper
Posted by Lance Yang 1 month ago

On 2026/1/4 13:41, Vernon Yang wrote:
> Add a folio_is_lazyfree() helper to identify lazy-free folios and
> improve code readability.
> 
> Signed-off-by: Vernon Yang <yanglincheng@kylinos.cn>
> [...]
> diff --git a/mm/rmap.c b/mm/rmap.c
> index f955f02d570e..7241a3fa8574 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1838,7 +1838,7 @@ static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
>   	max_nr = (end_addr - addr) >> PAGE_SHIFT;
>   
>   	/* We only support lazyfree batching for now ... */
> -	if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
> +	if (!folio_is_lazyfree(folio))

Please rebase against mm-new. Commit [1] already supports batching of
file folios in folio_unmap_pte_batch().

+	/* We only support lazyfree or file folios batching for now ... */
+	if (folio_test_anon(folio) && folio_test_swapbacked(folio))

[1] 
https://lore.kernel.org/all/142919ac14d3cf70cba370808d85debe089df7b4.1766631066.git.baolin.wang@linux.alibaba.com/
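
As a sketch of the interaction (not part of either patch): with the
mm-new change above, this site bails out only for swap-backed anonymous
folios, so folio_is_lazyfree() is no longer a direct drop-in there. One
possible rebased form, untested:

	/* We only support lazyfree or file folios batching for now ... */
	if (folio_test_anon(folio) && folio_test_swapbacked(folio))
		return 1;

Equivalently, using the new helper (an anonymous folio that is not
lazy-free is exactly an anon && swapbacked one):

	if (folio_test_anon(folio) && !folio_is_lazyfree(folio))
		return 1;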

Thanks,
Lance

Re: [PATCH v3 4/6] mm: add folio_is_lazyfree helper
Posted by Vernon Yang 1 month ago
On Sun, Jan 4, 2026 at 7:42 PM Lance Yang <lance.yang@linux.dev> wrote:
>
> On 2026/1/4 13:41, Vernon Yang wrote:
> > [...]
> > diff --git a/mm/rmap.c b/mm/rmap.c
> > index f955f02d570e..7241a3fa8574 100644
> > --- a/mm/rmap.c
> > +++ b/mm/rmap.c
> > @@ -1838,7 +1838,7 @@ static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
> >       max_nr = (end_addr - addr) >> PAGE_SHIFT;
> >
> >       /* We only support lazyfree batching for now ... */
> > -     if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
> > +     if (!folio_is_lazyfree(folio))
>
> Please rebase against mm-new. Commit [1] already supports batching of
> file folios in folio_unmap_pte_batch().

Ok, thanks! I will rebase in the next version.
