From: Kairui Song <kasong@tencent.com>

Introduce swapin_entry, which merges swapin_readahead and swapin_direct,
making it the main entry for swapping in pages, with a unified swapin
policy.

Convert swapoff to use this new helper as well; swapping off a 10G ZRAM
(lzo-rle) is now faster since readahead is skipped.

Before:
time swapoff /dev/zram0
real 0m12.337s
user 0m0.001s
sys 0m12.329s

After:
time swapoff /dev/zram0
real 0m9.728s
user 0m0.001s
sys 0m9.719s

Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/memory.c | 21 +++++++--------------
mm/swap.h | 16 ++++------------
mm/swap_state.c | 49 +++++++++++++++++++++++++++++++++----------------
mm/swapfile.c | 7 ++-----
4 files changed, 46 insertions(+), 47 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 0165c8cad489..b56254a875f8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3801,6 +3801,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
rmap_t rmap_flags = RMAP_NONE;
bool exclusive = false;
swp_entry_t entry;
+ bool swapcached;
pte_t pte;
vm_fault_t ret = 0;
@@ -3864,21 +3865,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
swapcache = folio;
if (!folio) {
- if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
- __swap_count(entry) == 1) {
- /* skip swapcache and readahead */
- folio = swapin_direct(entry, GFP_HIGHUSER_MOVABLE, vmf);
- if (folio)
- page = &folio->page;
+ folio = swapin_entry(entry, GFP_HIGHUSER_MOVABLE,
+ vmf, &swapcached);
+ if (folio) {
+ page = folio_file_page(folio, swp_offset(entry));
+ if (swapcached)
+ swapcache = folio;
} else {
- page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
- vmf);
- if (page)
- folio = page_folio(page);
- swapcache = folio;
- }
-
- if (!folio) {
/*
* Back out if somebody else faulted in this pte
* while we released the pte lock.
diff --git a/mm/swap.h b/mm/swap.h
index 83eab7b67e77..502a2801f817 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -54,10 +54,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
bool skip_if_exists);
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
struct mempolicy *mpol, pgoff_t ilx);
-struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
-struct folio *swapin_direct(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
+struct folio *swapin_entry(swp_entry_t entry, gfp_t flag,
+ struct vm_fault *vmf, bool *swapcached);
static inline unsigned int folio_swap_flags(struct folio *folio)
{
@@ -88,14 +86,8 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
return NULL;
}
-struct folio *swapin_direct(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf)
-{
- return NULL;
-}
-
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
- struct vm_fault *vmf)
+static inline struct folio *swapin_entry(swp_entry_t swp, gfp_t gfp_mask,
+ struct vm_fault *vmf, bool *swapcached)
{
return NULL;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d39c5369da21..66ff187aa5d3 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -316,6 +316,11 @@ void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
release_pages(pages, nr);
}
+static inline bool swap_use_no_readahead(struct swap_info_struct *si, swp_entry_t entry)
+{
+ return data_race(si->flags & SWP_SYNCHRONOUS_IO) && __swap_count(entry) == 1;
+}
+
static inline bool swap_use_vma_readahead(void)
{
return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
@@ -870,8 +875,8 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
* Returns the struct folio for entry and addr after the swap entry is read
* in.
*/
-struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_fault *vmf)
+static struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct folio *folio;
@@ -908,33 +913,45 @@ struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
}
/**
- * swapin_readahead - swap in pages in hope we need them soon
+ * swapin_entry - swap in a page from swap entry
* @entry: swap entry of this memory
* @gfp_mask: memory allocation flags
* @vmf: fault information
+ * @swapcached: pointer to a bool used as indicator if the
+ * page is swapped in through swapcache.
*
* Returns the struct page for entry and addr, after queueing swapin.
*
- * It's a main entry function for swap readahead. By the configuration,
+ * It's a main entry function for swap in. By the configuration,
* it will read ahead blocks by cluster-based(ie, physical disk based)
- * or vma-based(ie, virtual address based on faulty address) readahead.
+ * or vma-based(ie, virtual address based on faulty address) readahead,
+ * or skip the readahead (ie, ramdisk based swap device).
*/
-struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_fault *vmf)
+struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_fault *vmf, bool *swapcached)
{
struct mempolicy *mpol;
- pgoff_t ilx;
struct folio *folio;
+ pgoff_t ilx;
+ bool cached;
- mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
- folio = swap_use_vma_readahead() ?
- swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
- swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
- mpol_cond_put(mpol);
+ if (swap_use_no_readahead(swp_swap_info(entry), entry)) {
+ folio = swapin_direct(entry, gfp_mask, vmf);
+ cached = false;
+ } else {
+ mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
+ if (swap_use_vma_readahead())
+ folio = swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf);
+ else
+ folio = swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
+ mpol_cond_put(mpol);
+ cached = true;
+ }
- if (!folio)
- return NULL;
- return folio_file_page(folio, swp_offset(entry));
+ if (swapcached)
+ *swapcached = cached;
+
+ return folio;
}
#ifdef CONFIG_SYSFS
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f7271504aa0a..ce4e6c10dce7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1866,7 +1866,6 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
folio = swap_cache_get_folio(entry, vma, addr);
if (!folio) {
- struct page *page;
struct vm_fault vmf = {
.vma = vma,
.address = addr,
@@ -1874,10 +1873,8 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
.pmd = pmd,
};
- page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
- &vmf);
- if (page)
- folio = page_folio(page);
+ folio = swapin_entry(entry, GFP_HIGHUSER_MOVABLE,
+ &vmf, NULL);
}
if (!folio) {
/*
--
2.43.0

Kairui Song <ryncsn@gmail.com> writes:
> From: Kairui Song <kasong@tencent.com>
>
> Introduce swapin_entry, which merges swapin_readahead and swapin_direct,
> making it the main entry for swapping in pages, with a unified swapin
> policy.
>
> Convert swapoff to use this new helper as well; swapping off a 10G ZRAM
> (lzo-rle) is now faster since readahead is skipped.
>
> Before:
> time swapoff /dev/zram0
> real 0m12.337s
> user 0m0.001s
> sys 0m12.329s
>
> After:
> time swapoff /dev/zram0
> real 0m9.728s
> user 0m0.001s
> sys 0m9.719s
>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
> mm/memory.c | 21 +++++++--------------
> mm/swap.h | 16 ++++------------
> mm/swap_state.c | 49 +++++++++++++++++++++++++++++++++----------------
> mm/swapfile.c | 7 ++-----
> 4 files changed, 46 insertions(+), 47 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 0165c8cad489..b56254a875f8 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3801,6 +3801,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> rmap_t rmap_flags = RMAP_NONE;
> bool exclusive = false;
> swp_entry_t entry;
> + bool swapcached;
> pte_t pte;
> vm_fault_t ret = 0;
>
> @@ -3864,21 +3865,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> swapcache = folio;
>
> if (!folio) {
> - if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
> - __swap_count(entry) == 1) {
> - /* skip swapcache and readahead */
> - folio = swapin_direct(entry, GFP_HIGHUSER_MOVABLE, vmf);
> - if (folio)
> - page = &folio->page;
> + folio = swapin_entry(entry, GFP_HIGHUSER_MOVABLE,
> + vmf, &swapcached);
> + if (folio) {
> + page = folio_file_page(folio, swp_offset(entry));
> + if (swapcached)
> + swapcache = folio;
> } else {
> - page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
> - vmf);
> - if (page)
> - folio = page_folio(page);
> - swapcache = folio;
> - }
> -
> - if (!folio) {
> /*
> * Back out if somebody else faulted in this pte
> * while we released the pte lock.
> diff --git a/mm/swap.h b/mm/swap.h
> index 83eab7b67e77..502a2801f817 100644
> --- a/mm/swap.h
> +++ b/mm/swap.h
> @@ -54,10 +54,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
> bool skip_if_exists);
> struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
> struct mempolicy *mpol, pgoff_t ilx);
> -struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
> - struct vm_fault *vmf);
> -struct folio *swapin_direct(swp_entry_t entry, gfp_t flag,
> - struct vm_fault *vmf);
> +struct folio *swapin_entry(swp_entry_t entry, gfp_t flag,
> + struct vm_fault *vmf, bool *swapcached);
>
> static inline unsigned int folio_swap_flags(struct folio *folio)
> {
> @@ -88,14 +86,8 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
> return NULL;
> }
>
> -struct folio *swapin_direct(swp_entry_t entry, gfp_t flag,
> - struct vm_fault *vmf)
> -{
> - return NULL;
> -}
> -
> -static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
> - struct vm_fault *vmf)
> +static inline struct folio *swapin_entry(swp_entry_t swp, gfp_t gfp_mask,
> + struct vm_fault *vmf, bool *swapcached)
> {
> return NULL;
> }
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index d39c5369da21..66ff187aa5d3 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -316,6 +316,11 @@ void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
> release_pages(pages, nr);
> }
>
> +static inline bool swap_use_no_readahead(struct swap_info_struct *si, swp_entry_t entry)
> +{
> + return data_race(si->flags & SWP_SYNCHRONOUS_IO) && __swap_count(entry) == 1;
> +}
> +

It appears that there's only one caller of the function, in the same
file. Why add a helper function?

> static inline bool swap_use_vma_readahead(void)
> {
> return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
> @@ -870,8 +875,8 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
> * Returns the struct folio for entry and addr after the swap entry is read
> * in.
> */
> -struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
> - struct vm_fault *vmf)
> +static struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
> + struct vm_fault *vmf)
> {
> struct vm_area_struct *vma = vmf->vma;
> struct folio *folio;
> @@ -908,33 +913,45 @@ struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
> }
>
> /**
> - * swapin_readahead - swap in pages in hope we need them soon
> + * swapin_entry - swap in a page from swap entry
> * @entry: swap entry of this memory
> * @gfp_mask: memory allocation flags
> * @vmf: fault information
> + * @swapcached: pointer to a bool used as indicator if the
> + * page is swapped in through swapcache.
> *
> * Returns the struct page for entry and addr, after queueing swapin.
> *
> - * It's a main entry function for swap readahead. By the configuration,
> + * It's a main entry function for swap in. By the configuration,
> * it will read ahead blocks by cluster-based(ie, physical disk based)
> - * or vma-based(ie, virtual address based on faulty address) readahead.
> + * or vma-based(ie, virtual address based on faulty address) readahead,
> + * or skip the readahead (ie, ramdisk based swap device).
> */
> -struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
> - struct vm_fault *vmf)
> +struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
> + struct vm_fault *vmf, bool *swapcached)

It may be better to use

struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
			   struct vm_fault *vmf, struct folio **swapcache)

In this way, we can reduce the number of source lines in the caller.
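
For example, the do_swap_page() hunk above could then become something
like this (untested sketch, just to show the idea; swapin_entry would
set *swapcache itself when the folio goes through the swap cache):

	folio = swapin_entry(entry, GFP_HIGHUSER_MOVABLE,
			     vmf, &swapcache);
	if (folio) {
		page = folio_file_page(folio, swp_offset(entry));
	} else {
		/* back out as before */
	}

That would drop the "swapcached" local variable and the extra check in
the caller.
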
> {
> struct mempolicy *mpol;
> - pgoff_t ilx;
> struct folio *folio;
> + pgoff_t ilx;
> + bool cached;
>
> - mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
> - folio = swap_use_vma_readahead() ?
> - swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
> - swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
> - mpol_cond_put(mpol);
> + if (swap_use_no_readahead(swp_swap_info(entry), entry)) {
> + folio = swapin_direct(entry, gfp_mask, vmf);
> + cached = false;
> + } else {
> + mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
> + if (swap_use_vma_readahead())
> + folio = swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf);
> + else
> + folio = swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
> + mpol_cond_put(mpol);
> + cached = true;
> + }
>
> - if (!folio)
> - return NULL;
> - return folio_file_page(folio, swp_offset(entry));
> + if (swapcached)
> + *swapcached = cached;
> +
> + return folio;
> }
>
> #ifdef CONFIG_SYSFS
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index f7271504aa0a..ce4e6c10dce7 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1866,7 +1866,6 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
>
> folio = swap_cache_get_folio(entry, vma, addr);
> if (!folio) {
> - struct page *page;
> struct vm_fault vmf = {
> .vma = vma,
> .address = addr,
> @@ -1874,10 +1873,8 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
> .pmd = pmd,
> };
>
> - page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
> - &vmf);
> - if (page)
> - folio = page_folio(page);
> + folio = swapin_entry(entry, GFP_HIGHUSER_MOVABLE,
> + &vmf, NULL);
> }
> if (!folio) {
> /*
--
Best Regards,
Huang, Ying

On Fri, Jan 5, 2024 at 3:30 PM Huang, Ying <ying.huang@intel.com> wrote:
>
> Kairui Song <ryncsn@gmail.com> writes:
>
> > From: Kairui Song <kasong@tencent.com>
> >
> > Introduce swapin_entry, which merges swapin_readahead and swapin_direct,
> > making it the main entry for swapping in pages, with a unified swapin
> > policy.
> >
> > Convert swapoff to use this new helper as well; swapping off a 10G ZRAM
> > (lzo-rle) is now faster since readahead is skipped.
> >
> > Before:
> > time swapoff /dev/zram0
> > real 0m12.337s
> > user 0m0.001s
> > sys 0m12.329s
> >
> > After:
> > time swapoff /dev/zram0
> > real 0m9.728s
> > user 0m0.001s
> > sys 0m9.719s
> >
> > Signed-off-by: Kairui Song <kasong@tencent.com>
> > ---
> > mm/memory.c | 21 +++++++--------------
> > mm/swap.h | 16 ++++------------
> > mm/swap_state.c | 49 +++++++++++++++++++++++++++++++++----------------
> > mm/swapfile.c | 7 ++-----
> > 4 files changed, 46 insertions(+), 47 deletions(-)
> >
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 0165c8cad489..b56254a875f8 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -3801,6 +3801,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > rmap_t rmap_flags = RMAP_NONE;
> > bool exclusive = false;
> > swp_entry_t entry;
> > + bool swapcached;
> > pte_t pte;
> > vm_fault_t ret = 0;
> >
> > @@ -3864,21 +3865,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > swapcache = folio;
> >
> > if (!folio) {
> > - if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
> > - __swap_count(entry) == 1) {
> > - /* skip swapcache and readahead */
> > - folio = swapin_direct(entry, GFP_HIGHUSER_MOVABLE, vmf);
> > - if (folio)
> > - page = &folio->page;
> > + folio = swapin_entry(entry, GFP_HIGHUSER_MOVABLE,
> > + vmf, &swapcached);
> > + if (folio) {
> > + page = folio_file_page(folio, swp_offset(entry));
> > + if (swapcached)
> > + swapcache = folio;
> > } else {
> > - page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
> > - vmf);
> > - if (page)
> > - folio = page_folio(page);
> > - swapcache = folio;
> > - }
> > -
> > - if (!folio) {
> > /*
> > * Back out if somebody else faulted in this pte
> > * while we released the pte lock.
> > diff --git a/mm/swap.h b/mm/swap.h
> > index 83eab7b67e77..502a2801f817 100644
> > --- a/mm/swap.h
> > +++ b/mm/swap.h
> > @@ -54,10 +54,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
> > bool skip_if_exists);
> > struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
> > struct mempolicy *mpol, pgoff_t ilx);
> > -struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
> > - struct vm_fault *vmf);
> > -struct folio *swapin_direct(swp_entry_t entry, gfp_t flag,
> > - struct vm_fault *vmf);
> > +struct folio *swapin_entry(swp_entry_t entry, gfp_t flag,
> > + struct vm_fault *vmf, bool *swapcached);
> >
> > static inline unsigned int folio_swap_flags(struct folio *folio)
> > {
> > @@ -88,14 +86,8 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
> > return NULL;
> > }
> >
> > -struct folio *swapin_direct(swp_entry_t entry, gfp_t flag,
> > - struct vm_fault *vmf)
> > -{
> > - return NULL;
> > -}
> > -
> > -static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
> > - struct vm_fault *vmf)
> > +static inline struct folio *swapin_entry(swp_entry_t swp, gfp_t gfp_mask,
> > + struct vm_fault *vmf, bool *swapcached)
> > {
> > return NULL;
> > }
> > diff --git a/mm/swap_state.c b/mm/swap_state.c
> > index d39c5369da21..66ff187aa5d3 100644
> > --- a/mm/swap_state.c
> > +++ b/mm/swap_state.c
> > @@ -316,6 +316,11 @@ void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
> > release_pages(pages, nr);
> > }
> >
> > +static inline bool swap_use_no_readahead(struct swap_info_struct *si, swp_entry_t entry)
> > +{
> > + return data_race(si->flags & SWP_SYNCHRONOUS_IO) && __swap_count(entry) == 1;
> > +}
> > +

Hi Ying,

Thanks for the review.

>
> It appears that there's only one caller of the function, in the same
> file. Why add a helper function?

A later patch will extend this checker function. I can defer this
change so it won't cause confusion for reviewers.

>
> > static inline bool swap_use_vma_readahead(void)
> > {
> > return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
> > @@ -870,8 +875,8 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
> > * Returns the struct folio for entry and addr after the swap entry is read
> > * in.
> > */
> > -struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
> > - struct vm_fault *vmf)
> > +static struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
> > + struct vm_fault *vmf)
> > {
> > struct vm_area_struct *vma = vmf->vma;
> > struct folio *folio;
> > @@ -908,33 +913,45 @@ struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
> > }
> >
> > /**
> > - * swapin_readahead - swap in pages in hope we need them soon
> > + * swapin_entry - swap in a page from swap entry
> > * @entry: swap entry of this memory
> > * @gfp_mask: memory allocation flags
> > * @vmf: fault information
> > + * @swapcached: pointer to a bool used as indicator if the
> > + * page is swapped in through swapcache.
> > *
> > * Returns the struct page for entry and addr, after queueing swapin.
> > *
> > - * It's a main entry function for swap readahead. By the configuration,
> > + * It's a main entry function for swap in. By the configuration,
> > * it will read ahead blocks by cluster-based(ie, physical disk based)
> > - * or vma-based(ie, virtual address based on faulty address) readahead.
> > + * or vma-based(ie, virtual address based on faulty address) readahead,
> > + * or skip the readahead (ie, ramdisk based swap device).
> > */
> > -struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
> > - struct vm_fault *vmf)
> > +struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
> > + struct vm_fault *vmf, bool *swapcached)
>
> It may be better to use
>
> struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
> 			   struct vm_fault *vmf, struct folio **swapcache)
>
> In this way, we can reduce the number of source lines in the caller.

A following commit will rewrite this part to return an enum instead of
a bool, so this is just an intermediate change. And do_swap_page is the
only caller that can benefit from this; it's not helpful for
swapoff/shmem. I think we can just keep it this way here.
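
Just to illustrate what I mean (the names below are placeholders, not
the final code):

	/* how the folio was swapped in, only the fault path cares */
	enum swapin_type {
		SWAPIN_DIRECT,	/* skipped swap cache and readahead */
		SWAPIN_CACHED,	/* went through the swap cache */
	};

	struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_fault *vmf,
				   enum swapin_type *type);

Callers that don't care (e.g. swapoff) can still pass NULL for @type.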