From: Kairui Song <kasong@tencent.com>

Move all THP swapin related checks under CONFIG_TRANSPARENT_HUGEPAGE,
so they will be trimmed off by the compiler if not needed.

And add a WARN if shmem sees an order > 0 entry when
CONFIG_TRANSPARENT_HUGEPAGE is disabled; that should never happen unless
things went very wrong.

There should be no observable feature change except the newly added WARN.

Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/shmem.c | 42 ++++++++++++++++++++----------------------
1 file changed, 20 insertions(+), 22 deletions(-)
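
Not part of the change itself; below is a minimal userspace sketch of the
IS_ENABLED() constant-folding this patch relies on. DEMO_IS_ENABLED,
CONFIG_DEMO_THP and demo_swapin_check are illustrative stand-ins, not kernel
identifiers: the real IS_ENABLED() likewise resolves to a compile-time 0 or 1,
so the disabled branch is still parsed and type-checked but generates no code.

#include <stdio.h>

#define CONFIG_DEMO_THP 0          /* pretend THP is compiled out */
#define DEMO_IS_ENABLED(cfg) (cfg) /* stand-in for the kernel macro */

static int demo_swapin_check(int order)
{
	if (!DEMO_IS_ENABLED(CONFIG_DEMO_THP)) {
		/* Mirrors WARN_ON_ONCE(order): a large order here is a bug. */
		if (order) {
			fprintf(stderr, "unexpected order %d\n", order);
			return -1;
		}
	} else if (order) {
		/*
		 * THP-only checks would run here; the compiler drops this
		 * whole branch because the condition above is a constant.
		 */
		printf("would attempt order-%d swapin\n", order);
	}
	return 0;
}

int main(void)
{
	return demo_swapin_check(0) ? 1 : 0;
}
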
diff --git a/mm/shmem.c b/mm/shmem.c
index 033dc7a3435d..f85a985167c5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1980,26 +1980,39 @@ static struct folio *shmem_swap_alloc_folio(struct inode *inode,
swp_entry_t entry, int order, gfp_t gfp)
{
struct shmem_inode_info *info = SHMEM_I(inode);
+ int nr_pages = 1 << order;
struct folio *new;
void *shadow;
- int nr_pages;

/*
* We have arrived here because our zones are constrained, so don't
* limit chance of success with further cpuset and node constraints.
*/
gfp &= ~GFP_CONSTRAINT_MASK;
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && order > 0) {
- gfp_t huge_gfp = vma_thp_gfp_mask(vma);
-
- gfp = limit_gfp_mask(huge_gfp, gfp);
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ if (WARN_ON_ONCE(order))
+ return ERR_PTR(-EINVAL);
+ } else if (order) {
+ /*
+ * If uffd is active for the vma, we need per-page fault
+ * fidelity to maintain the uffd semantics, then fallback
+ * to swapin order-0 folio, as well as for zswap case.
+ * Any existing sub folio in the swap cache also blocks
+ * mTHP swapin.
+ */
+ if ((vma && unlikely(userfaultfd_armed(vma))) ||
+ !zswap_never_enabled() ||
+ non_swapcache_batch(entry, nr_pages) != nr_pages) {
+ return ERR_PTR(-EINVAL);
+ } else {
+ gfp = limit_gfp_mask(vma_thp_gfp_mask(vma), gfp);
+ }
}

new = shmem_alloc_folio(gfp, order, info, index);
if (!new)
return ERR_PTR(-ENOMEM);

- nr_pages = folio_nr_pages(new);
if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL,
gfp, entry)) {
folio_put(new);
@@ -2283,9 +2296,6 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
/* Look it up and read it in.. */
folio = swap_cache_get_folio(swap, NULL, 0);
if (!folio) {
- int nr_pages = 1 << order;
- bool fallback_order0 = false;
-
/* Or update major stats only when swapin succeeds?? */
if (fault_type) {
*fault_type |= VM_FAULT_MAJOR;
@@ -2293,20 +2303,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
count_memcg_event_mm(fault_mm, PGMAJFAULT);
}

- /*
- * If uffd is active for the vma, we need per-page fault
- * fidelity to maintain the uffd semantics, then fallback
- * to swapin order-0 folio, as well as for zswap case.
- * Any existing sub folio in the swap cache also blocks
- * mTHP swapin.
- */
- if (order > 0 && ((vma && unlikely(userfaultfd_armed(vma))) ||
- !zswap_never_enabled() ||
- non_swapcache_batch(swap, nr_pages) != nr_pages))
- fallback_order0 = true;
-
/* Skip swapcache for synchronous device. */
- if (!fallback_order0 && data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
+ if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
folio = shmem_swap_alloc_folio(inode, vma, index, swap, order, gfp);
if (!IS_ERR(folio)) {
skip_swapcache = true;
--
2.50.0
On 2025/6/27 14:20, Kairui Song wrote:
> From: Kairui Song <kasong@tencent.com>
>
> Move all THP swapin related checks under CONFIG_TRANSPARENT_HUGEPAGE,
> so they will be trimmed off by the compiler if not needed.
>
> And add a WARN if shmem sees an order > 0 entry when
> CONFIG_TRANSPARENT_HUGEPAGE is disabled; that should never happen unless
> things went very wrong.
>
> There should be no observable feature change except the newly added WARN.
>
> Signed-off-by: Kairui Song <kasong@tencent.com>

LGTM. Thanks.

Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>