In order to add mTHP support to khugepaged, we will often be checking if a
given order is (or is not) a PMD order. Some places in the kernel already
use this check, so let's create a simple helper function to keep the code
clean and readable.
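
For reference, HPAGE_PMD_ORDER is simply HPAGE_PMD_SHIFT - PAGE_SHIFT, the
order of a folio that spans one full PMD. A rough illustration of what the
helper answers (illustration only, not part of this patch; values assume
x86-64 with 4K base pages, where a PMD maps 2M, so the order works out to
21 - 12 = 9):

	/* Illustration only, not part of this patch. */
	is_pmd_order(9);	/* true:  512 x 4K pages == one 2M PMD  */
	is_pmd_order(4);	/* false: order-4 (64K) is an mTHP size */
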
Acked-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Lance Yang <lance.yang@linux.dev>
Reviewed-by: Barry Song <baohua@kernel.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Pedro Falcato <pfalcato@suse.de>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Suggested-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Nico Pache <npache@redhat.com>
---
include/linux/huge_mm.h | 5 +++++
mm/huge_memory.c        | 2 +-
mm/khugepaged.c         | 6 +++---
mm/memory.c             | 2 +-
mm/mempolicy.c          | 2 +-
mm/page_alloc.c         | 4 ++--
mm/shmem.c              | 3 +--
7 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a4d9f964dfde..bd7f0e1d8094 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -771,6 +771,11 @@ static inline bool pmd_is_huge(pmd_t pmd)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline bool is_pmd_order(unsigned int order)
+{
+ return order == HPAGE_PMD_ORDER;
+}
+
static inline int split_folio_to_list_to_order(struct folio *folio,
struct list_head *list, int new_order)
{
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8003d3a49822..a688d5ff806e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4100,7 +4100,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
i_mmap_unlock_read(mapping);
out:
xas_destroy(&xas);
- if (old_order == HPAGE_PMD_ORDER)
+ if (is_pmd_order(old_order))
count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
return ret;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index c85d7381adb5..2ef4b972470b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1533,7 +1533,7 @@ static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsign
if (IS_ERR(folio))
return SCAN_PAGE_NULL;
- if (folio_order(folio) != HPAGE_PMD_ORDER) {
+ if (!is_pmd_order(folio_order(folio))) {
result = SCAN_PAGE_COMPOUND;
goto drop_folio;
}
@@ -2016,7 +2016,7 @@ static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
* we locked the first folio, then a THP might be there already.
* This will be discovered on the first iteration.
*/
- if (folio_order(folio) == HPAGE_PMD_ORDER &&
+ if (is_pmd_order(folio_order(folio)) &&
folio->index == start) {
/* Maybe PMD-mapped */
result = SCAN_PTE_MAPPED_HUGEPAGE;
@@ -2346,7 +2346,7 @@ static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm,
continue;
}
- if (folio_order(folio) == HPAGE_PMD_ORDER &&
+ if (is_pmd_order(folio_order(folio)) &&
folio->index == start) {
/* Maybe PMD-mapped */
result = SCAN_PTE_MAPPED_HUGEPAGE;
diff --git a/mm/memory.c b/mm/memory.c
index a1a364e1fdcd..cb76fa182eab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5427,7 +5427,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *pa
if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
return ret;
- if (folio_order(folio) != HPAGE_PMD_ORDER)
+ if (!is_pmd_order(folio_order(folio)))
return ret;
page = &folio->page;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0e5175f1c767..e5528c35bbb8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2449,7 +2449,7 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
/* filter "hugepage" allocation, unless from alloc_pages() */
- order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) {
+ is_pmd_order(order) && ilx != NO_INTERLEAVE_INDEX) {
/*
* For hugepage allocation and non-interleave policy which
* allows the current node (or other explicitly preferred
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d88c8c67ac0b..96ffb47bcfee 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -687,7 +687,7 @@ static inline unsigned int order_to_pindex(int migratetype, int order)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool movable;
if (order > PAGE_ALLOC_COSTLY_ORDER) {
- VM_BUG_ON(order != HPAGE_PMD_ORDER);
+ VM_BUG_ON(!is_pmd_order(order));
movable = migratetype == MIGRATE_MOVABLE;
@@ -719,7 +719,7 @@ static inline bool pcp_allowed_order(unsigned int order)
if (order <= PAGE_ALLOC_COSTLY_ORDER)
return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (order == HPAGE_PMD_ORDER)
+ if (is_pmd_order(order))
return true;
#endif
return false;
diff --git a/mm/shmem.c b/mm/shmem.c
index cfed6c3ff853..ba74803c7518 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -5558,8 +5558,7 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
spin_unlock(&huge_shmem_orders_lock);
} else if (sysfs_streq(buf, "inherit")) {
/* Do not override huge allocation policy with non-PMD sized mTHP */
- if (shmem_huge == SHMEM_HUGE_FORCE &&
- order != HPAGE_PMD_ORDER)
+ if (shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
return -EINVAL;
spin_lock(&huge_shmem_orders_lock);
--
2.53.0
On 26/02/26 6:59 am, Nico Pache wrote:
> In order to add mTHP support to khugepaged, we will often be checking if a
> given order is (or is not) a PMD order. Some places in the kernel already
> use this check, so let's create a simple helper function to keep the code
> clean and readable.
>
> Acked-by: David Hildenbrand (Arm) <david@kernel.org>
> Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
> Reviewed-by: Lance Yang <lance.yang@linux.dev>
> Reviewed-by: Barry Song <baohua@kernel.org>
> Reviewed-by: Zi Yan <ziy@nvidia.com>
> Reviewed-by: Pedro Falcato <pfalcato@suse.de>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> Suggested-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> Signed-off-by: Nico Pache <npache@redhat.com>
> ---
Reviewed-by: Dev Jain <dev.jain@arm.com>
On 2/26/26 9:29 AM, Nico Pache wrote:
> In order to add mTHP support to khugepaged, we will often be checking if a
> given order is (or is not) a PMD order. Some places in the kernel already
> use this check, so let's create a simple helper function to keep the code
> clean and readable.
>
> Acked-by: David Hildenbrand (Arm) <david@kernel.org>
> Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
> Reviewed-by: Lance Yang <lance.yang@linux.dev>
> Reviewed-by: Barry Song <baohua@kernel.org>
> Reviewed-by: Zi Yan <ziy@nvidia.com>
> Reviewed-by: Pedro Falcato <pfalcato@suse.de>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> Suggested-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> Signed-off-by: Nico Pache <npache@redhat.com>
> ---

Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>