From: Dev Jain <dev.jain@arm.com>
Pass the folio order to alloc_charge_folio() and update the new mTHP
collapse_alloc/collapse_alloc_failed statistics accordingly.
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Co-developed-by: Nico Pache <npache@redhat.com>
Signed-off-by: Nico Pache <npache@redhat.com>
Signed-off-by: Dev Jain <dev.jain@arm.com>
---
Documentation/admin-guide/mm/transhuge.rst | 8 ++++++++
include/linux/huge_mm.h | 2 ++
mm/huge_memory.c | 4 ++++
mm/khugepaged.c | 17 +++++++++++------
4 files changed, 25 insertions(+), 6 deletions(-)
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index dff8d5985f0f..2c523dce6bc7 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -583,6 +583,14 @@ anon_fault_fallback_charge
instead falls back to using huge pages with lower orders or
small pages even though the allocation was successful.
+collapse_alloc
+ is incremented every time a huge page is successfully allocated for a
+ khugepaged collapse.
+
+collapse_alloc_failed
+ is incremented every time a huge page allocation fails during a
+ khugepaged collapse.
+
zswpout
is incremented every time a huge page is swapped out to zswap in one
piece without splitting.
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7748489fde1b..4042078e8cc9 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -125,6 +125,8 @@ enum mthp_stat_item {
MTHP_STAT_ANON_FAULT_ALLOC,
MTHP_STAT_ANON_FAULT_FALLBACK,
MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+ MTHP_STAT_COLLAPSE_ALLOC,
+ MTHP_STAT_COLLAPSE_ALLOC_FAILED,
MTHP_STAT_ZSWPOUT,
MTHP_STAT_SWPIN,
MTHP_STAT_SWPIN_FALLBACK,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bd7a623d7ef8..e2ed9493df77 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -614,6 +614,8 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+DEFINE_MTHP_STAT_ATTR(collapse_alloc, MTHP_STAT_COLLAPSE_ALLOC);
+DEFINE_MTHP_STAT_ATTR(collapse_alloc_failed, MTHP_STAT_COLLAPSE_ALLOC_FAILED);
DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK);
@@ -679,6 +681,8 @@ static struct attribute *any_stats_attrs[] = {
#endif
&split_attr.attr,
&split_failed_attr.attr,
+ &collapse_alloc_attr.attr,
+ &collapse_alloc_failed_attr.attr,
NULL,
};
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index fa0642e66790..cc9a35185604 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1068,21 +1068,26 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
}
static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
- struct collapse_control *cc)
+ struct collapse_control *cc, u8 order)
{
gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
GFP_TRANSHUGE);
int node = collapse_find_target_node(cc);
struct folio *folio;
- folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
+ folio = __folio_alloc(gfp, order, node, &cc->alloc_nmask);
if (!folio) {
*foliop = NULL;
- count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+ if (order == HPAGE_PMD_ORDER)
+ count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+ count_mthp_stat(order, MTHP_STAT_COLLAPSE_ALLOC_FAILED);
return SCAN_ALLOC_HUGE_PAGE_FAIL;
}
- count_vm_event(THP_COLLAPSE_ALLOC);
+ if (order == HPAGE_PMD_ORDER)
+ count_vm_event(THP_COLLAPSE_ALLOC);
+ count_mthp_stat(order, MTHP_STAT_COLLAPSE_ALLOC);
+
if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
folio_put(folio);
*foliop = NULL;
@@ -1119,7 +1124,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
*/
mmap_read_unlock(mm);
- result = alloc_charge_folio(&folio, mm, cc);
+ result = alloc_charge_folio(&folio, mm, cc, HPAGE_PMD_ORDER);
if (result != SCAN_SUCCEED)
goto out_nolock;
@@ -1843,7 +1848,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
- result = alloc_charge_folio(&new_folio, mm, cc);
+ result = alloc_charge_folio(&new_folio, mm, cc, HPAGE_PMD_ORDER);
if (result != SCAN_SUCCEED)
goto out;
--
2.50.0
On 14.07.25 02:31, Nico Pache wrote:
> From: Dev Jain <dev.jain@arm.com>
>
> Pass order to alloc_charge_folio() and update mTHP statistics.
>
> Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> Co-developed-by: Nico Pache <npache@redhat.com>
> Signed-off-by: Nico Pache <npache@redhat.com>
> Signed-off-by: Dev Jain <dev.jain@arm.com>
> ---
>  static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
> -			      struct collapse_control *cc)
> +			      struct collapse_control *cc, u8 order)

u8, really? :)

Just use an "unsigned int" like folio_order() would or what
__folio_alloc() consumes.


Apart from that

Acked-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb
On Wed, Jul 16, 2025 at 7:46 AM David Hildenbrand <david@redhat.com> wrote:
>
> On 14.07.25 02:31, Nico Pache wrote:
> >  static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
> > -			      struct collapse_control *cc)
> > +			      struct collapse_control *cc, u8 order)
>
> u8, really? :)

At the time I knew I was going to use u8's at the bitmap level, so I
thought I should have them here too. But you are right; I went through
and cleaned up all the u8 usage, with the exception of the actual
bitmap storage.

>
> Just use an "unsigned int" like folio_order() would or what
> __folio_alloc() consumes.
>
> Apart from that
>
> Acked-by: David Hildenbrand <david@redhat.com>

Thank you!

>
> --
> Cheers,
>
> David / dhildenb
>
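
For context (not part of the patch or this thread): once applied, the new
counters appear alongside the existing per-size mTHP statistics under
/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats/. Below is a
minimal userspace sketch that reads one of them; the hugepages-2048kB
directory is an assumption matching PMD-sized THPs on x86-64 with 4K base
pages, adjust for other configurations.

/* Hypothetical reader for the new collapse_alloc counter (not from the patch). */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/stats/collapse_alloc";
	unsigned long long count;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Each stats file holds a single decimal counter. */
	if (fscanf(f, "%llu", &count) == 1)
		printf("collapse_alloc: %llu\n", count);
	fclose(f);
	return 0;
}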