Change shmem_huge_global_enabled() to return the suitable huge order
bitmap, and return 0 if huge pages are not allowed. This is a
preparation for supporting allocation of various huge orders for tmpfs
in the following patches.

No functional changes.
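
As an illustration of the new convention (a sketch, not part of this
patch): a caller that only cares about PMD-sized huge pages can test
the returned bitmap instead of a boolean, mirroring the shmem_getattr()
change below:

	unsigned int orders;

	/* 0 means huge pages are not allowed; otherwise a bitmap of orders */
	orders = shmem_huge_global_enabled(inode, index, write_end,
					   false, vm_flags);
	if (orders & BIT(HPAGE_PMD_ORDER))
		stat->blksize = HPAGE_PMD_SIZE;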
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
mm/shmem.c | 45 ++++++++++++++++++++++++---------------------
1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 579e58cb3262..361da46c4bd5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -549,37 +549,37 @@ static bool shmem_confirm_swap(struct address_space *mapping,
static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
- loff_t write_end, bool shmem_huge_force,
- unsigned long vm_flags)
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ unsigned long vm_flags)
{
loff_t i_size;
if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
- return false;
+ return 0;
if (!S_ISREG(inode->i_mode))
- return false;
+ return 0;
if (shmem_huge == SHMEM_HUGE_DENY)
- return false;
+ return 0;
if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
- return true;
+ return BIT(HPAGE_PMD_ORDER);
switch (SHMEM_SB(inode->i_sb)->huge) {
case SHMEM_HUGE_ALWAYS:
- return true;
+ return BIT(HPAGE_PMD_ORDER);
case SHMEM_HUGE_WITHIN_SIZE:
index = round_up(index + 1, HPAGE_PMD_NR);
i_size = max(write_end, i_size_read(inode));
i_size = round_up(i_size, PAGE_SIZE);
if (i_size >> PAGE_SHIFT >= index)
- return true;
+ return BIT(HPAGE_PMD_ORDER);
fallthrough;
case SHMEM_HUGE_ADVISE:
if (vm_flags & VM_HUGEPAGE)
- return true;
+ return BIT(HPAGE_PMD_ORDER);
fallthrough;
default:
- return false;
+ return 0;
}
}
@@ -774,11 +774,11 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
return 0;
}
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
- loff_t write_end, bool shmem_huge_force,
- unsigned long vm_flags)
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ unsigned long vm_flags)
{
- return false;
+ return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -1173,8 +1173,11 @@ static int shmem_getattr(struct mnt_idmap *idmap,
generic_fillattr(idmap, request_mask, inode, stat);
inode_unlock_shared(inode);
- if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (shmem_huge_global_enabled(inode, 0, 0, false, 0) ==
+ BIT(HPAGE_PMD_ORDER))
stat->blksize = HPAGE_PMD_SIZE;
+#endif
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
@@ -1682,21 +1685,21 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
unsigned long mask = READ_ONCE(huge_shmem_orders_always);
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
unsigned long vm_flags = vma ? vma->vm_flags : 0;
- bool global_huge;
+ unsigned int global_orders;
loff_t i_size;
int order;
if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
return 0;
- global_huge = shmem_huge_global_enabled(inode, index, write_end,
- shmem_huge_force, vm_flags);
+ global_orders = shmem_huge_global_enabled(inode, index, write_end,
+ shmem_huge_force, vm_flags);
if (!vma || !vma_is_anon_shmem(vma)) {
/*
* For tmpfs, we now only support PMD sized THP if huge page
* is enabled, otherwise fallback to order 0.
*/
- return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
+ return global_orders;
}
/*
@@ -1729,7 +1732,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
if (vm_flags & VM_HUGEPAGE)
mask |= READ_ONCE(huge_shmem_orders_madvise);
- if (global_huge)
+ if (global_orders > 0)
mask |= READ_ONCE(huge_shmem_orders_inherit);
return THP_ORDERS_ALL_FILE_DEFAULT & mask;
--
2.39.3
Hi Baolin,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on next-20241108]
[cannot apply to linus/master v6.12-rc6]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Baolin-Wang/mm-factor-out-the-order-calculation-into-a-new-helper/20241108-121545
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/a0d41cdc3491878260277e8c18a3e71deb2bc1fb.1731038280.git.baolin.wang%40linux.alibaba.com
patch subject: [PATCH 2/4] mm: shmem: change shmem_huge_global_enabled() to return huge order bitmap
config: arc-allnoconfig (https://download.01.org/0day-ci/archive/20241108/202411082236.7mwWSsNe-lkp@intel.com/config)
compiler: arc-elf-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241108/202411082236.7mwWSsNe-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new
version of the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202411082236.7mwWSsNe-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> mm/shmem.c:777:21: warning: 'shmem_huge_global_enabled' defined but not used [-Wunused-function]
     777 | static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
         |                     ^~~~~~~~~~~~~~~~~~~~~~~~~

vim +/shmem_huge_global_enabled +777 mm/shmem.c

   776	
 > 777	static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
   778			loff_t write_end, bool shmem_huge_force,
   779			unsigned long vm_flags)
   780	{
   781		return 0;
   782	}
   783	#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
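
Note: the warning shows up on !CONFIG_TRANSPARENT_HUGEPAGE builds (such
as arc-allnoconfig) because the patch wraps the shmem_getattr() call in
#ifdef CONFIG_TRANSPARENT_HUGEPAGE, and the other caller,
shmem_allowable_huge_orders(), is itself compiled only with THP enabled,
leaving the stub without users. One possible way to silence it (a
sketch, not necessarily the fix chosen for the next version) is the
usual kernel idiom of making config-dependent stubs static inline,
which the compiler does not warn about when unused:

	static inline unsigned int shmem_huge_global_enabled(struct inode *inode,
			pgoff_t index, loff_t write_end, bool shmem_huge_force,
			unsigned long vm_flags)
	{
		return 0;
	}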