Extend the mTHP (multi-size THP) statistics infrastructure to support
PUD-sized transparent huge pages.

The mTHP framework tracks statistics for each supported THP size through
per-order counters exposed via sysfs. To add PUD THP support, PUD_ORDER
must be included in the set of tracked orders.

With this change, PUD THP events (allocations, faults, splits, swaps)
are tracked and exposed through the existing sysfs interface at
/sys/kernel/mm/transparent_hugepage/hugepages-1048576kB/stats/. This
provides visibility into PUD THP behavior for debugging and performance
analysis.
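
For example (an illustrative sketch only: the PUD call sites themselves are
added by later patches, so the call shown here is assumed rather than taken
from this series), a PUD fault path would bump its per-size counter through
the existing helper:

	/*
	 * Illustrative only: account a PUD-sized anonymous fault
	 * allocation. The counter then surfaces as
	 * hugepages-1048576kB/stats/anon_fault_alloc.
	 */
	count_mthp_stat(PUD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
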
Signed-off-by: Usama Arif <usamaarif642@gmail.com>
---
include/linux/huge_mm.h | 42 +++++++++++++++++++++++++++++++++++++----
mm/huge_memory.c | 3 ++-
2 files changed, 40 insertions(+), 5 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e672e45bb9cc7..5509ba8555b6e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -76,7 +76,13 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
* and including PMD_ORDER, except order-0 (which is not "huge") and order-1
* (which is a limitation of the THP implementation).
*/
-#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+#define THP_ORDERS_ALL_ANON_PUD BIT(PUD_ORDER)
+#else
+#define THP_ORDERS_ALL_ANON_PUD 0
+#endif
+#define THP_ORDERS_ALL_ANON (((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1))) | \
+ THP_ORDERS_ALL_ANON_PUD)

/*
* Mask of all large folio orders supported for file THP. Folios in a DAX
@@ -146,18 +152,46 @@ enum mthp_stat_item {
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+#define MTHP_STAT_COUNT (PMD_ORDER + 2)
+#define MTHP_STAT_PUD_INDEX (PMD_ORDER + 1) /* PUD uses last index */
+#else
+#define MTHP_STAT_COUNT (PMD_ORDER + 1)
+#endif
+
struct mthp_stat {
- unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+ unsigned long stats[MTHP_STAT_COUNT][__MTHP_STAT_COUNT];
};

DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

+static inline int mthp_stat_order_to_index(int order)
+{
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ if (order == PUD_ORDER)
+ return MTHP_STAT_PUD_INDEX;
+#endif
+ return order;
+}
+
static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
- if (order <= 0 || order > PMD_ORDER)
+ int index;
+
+ if (order <= 0)
+ return;
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ if (order != PUD_ORDER && order > PMD_ORDER)
return;
+#else
+ if (order > PMD_ORDER)
+ return;
+#endif

- this_cpu_add(mthp_stats.stats[order][item], delta);
+ index = mthp_stat_order_to_index(order);
+ this_cpu_add(mthp_stats.stats[index][item], delta);
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3128b3beedb0a..d033624d7e1f2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -598,11 +598,12 @@ static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
{
unsigned long sum = 0;
int cpu;
+ int index = mthp_stat_order_to_index(order);

for_each_possible_cpu(cpu) {
struct mthp_stat *this = &per_cpu(mthp_stats, cpu);

- sum += this->stats[order][item];
+ sum += this->stats[index][item];
}

return sum;
--
2.47.3

On Sun, Feb 01, 2026 at 04:50:19PM -0800, Usama Arif wrote:
> Extend the mTHP (multi-size THP) statistics infrastructure to support
> PUD-sized transparent huge pages.
>
> The mTHP framework tracks statistics for each supported THP size through
> per-order counters exposed via sysfs. To add PUD THP support, PUD_ORDER
> must be included in the set of tracked orders.
>
> With this change, PUD THP events (allocations, faults, splits, swaps)
> are tracked and exposed through the existing sysfs interface at
> /sys/kernel/mm/transparent_hugepage/hugepages-1048576kB/stats/. This
> provides visibility into PUD THP behavior for debugging and performance
> analysis.
>
> Signed-off-by: Usama Arif <usamaarif642@gmail.com>

Yeah we really need to be basing this on mm-unstable once Nico's series has
landed.

I think it's quite important as well for you to check that khugepaged mTHP works
with all of this.

> ---
> include/linux/huge_mm.h | 42 +++++++++++++++++++++++++++++++++++++----
> mm/huge_memory.c | 3 ++-
> 2 files changed, 40 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index e672e45bb9cc7..5509ba8555b6e 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -76,7 +76,13 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
> * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
> * (which is a limitation of the THP implementation).
> */
> -#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
> +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
> +#define THP_ORDERS_ALL_ANON_PUD BIT(PUD_ORDER)
> +#else
> +#define THP_ORDERS_ALL_ANON_PUD 0
> +#endif
> +#define THP_ORDERS_ALL_ANON (((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1))) | \
> + THP_ORDERS_ALL_ANON_PUD)

Err what is this change doing in a 'stats' change? This quietly updates
__thp_vma_allowable_orders() to also support PUD order for anon memory... Can we
put this in the right place?
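
To spell out the side effect, __thp_vma_allowable_orders() picks the supported
mask per VMA type, roughly like this (a paraphrase, not the exact code):

	/* Anonymous VMAs are gated by THP_ORDERS_ALL_ANON... */
	if (vma_is_anonymous(vma))
		supported_orders = THP_ORDERS_ALL_ANON;
	...
	orders &= supported_orders;

so widening the mask quietly makes PUD_ORDER eligible for anon memory.
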
>
> /*
> * Mask of all large folio orders supported for file THP. Folios in a DAX
> @@ -146,18 +152,46 @@ enum mthp_stat_item {
> };
>
> #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
> +
> +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD

By the way I'm not a fan of us treating an 'arch has' as a 'will use'.

> +#define MTHP_STAT_COUNT (PMD_ORDER + 2)

Yeah I hate this. This is just 'one more thing to remember'.

> +#define MTHP_STAT_PUD_INDEX (PMD_ORDER + 1) /* PUD uses last index */
> +#else
> +#define MTHP_STAT_COUNT (PMD_ORDER + 1)
> +#endif
> +
> struct mthp_stat {
> - unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
> + unsigned long stats[MTHP_STAT_COUNT][__MTHP_STAT_COUNT];
> };
>
> DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
>
> +static inline int mthp_stat_order_to_index(int order)
> +{
> +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
> + if (order == PUD_ORDER)
> + return MTHP_STAT_PUD_INDEX;

This seems like a hack again.

> +#endif
> + return order;
> +}
> +
> static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
> {
> - if (order <= 0 || order > PMD_ORDER)
> + int index;
> +
> + if (order <= 0)
> + return;
> +
> +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
> + if (order != PUD_ORDER && order > PMD_ORDER)
> return;
> +#else
> + if (order > PMD_ORDER)
> + return;
> +#endif

Or we could actually define a max order... except now the hack contorts this
code.

Is it really that bad to just take up memory for the orders between PMD_ORDER and
PUD_ORDER? ~72 bytes * cores and we avoid having to do this silly dance.
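
For illustration, a minimal sketch of that alternative (MTHP_STAT_MAX_ORDER is
a name I'm making up here, not something that exists):

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
#define MTHP_STAT_MAX_ORDER	PUD_ORDER
#else
#define MTHP_STAT_MAX_ORDER	PMD_ORDER
#endif

struct mthp_stat {
	unsigned long stats[MTHP_STAT_MAX_ORDER + 1][__MTHP_STAT_COUNT];
};

static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
	/* One bounds check, no order-to-index remapping. */
	if (order <= 0 || order > MTHP_STAT_MAX_ORDER)
		return;

	this_cpu_add(mthp_stats.stats[order][item], delta);
}
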
>
> - this_cpu_add(mthp_stats.stats[order][item], delta);
> + index = mthp_stat_order_to_index(order);
> + this_cpu_add(mthp_stats.stats[index][item], delta);
> }
>
> static inline void count_mthp_stat(int order, enum mthp_stat_item item)
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 3128b3beedb0a..d033624d7e1f2 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -598,11 +598,12 @@ static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
> {
> unsigned long sum = 0;
> int cpu;
> + int index = mthp_stat_order_to_index(order);
>
> for_each_possible_cpu(cpu) {
> struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
>
> - sum += this->stats[order][item];
> + sum += this->stats[index][item];
> }
>
> return sum;
> --
> 2.47.3
>

On 02/02/2026 03:56, Lorenzo Stoakes wrote:
> On Sun, Feb 01, 2026 at 04:50:19PM -0800, Usama Arif wrote:
>> Extend the mTHP (multi-size THP) statistics infrastructure to support
>> PUD-sized transparent huge pages.
>>
>> The mTHP framework tracks statistics for each supported THP size through
>> per-order counters exposed via sysfs. To add PUD THP support, PUD_ORDER
>> must be included in the set of tracked orders.
>>
>> With this change, PUD THP events (allocations, faults, splits, swaps)
>> are tracked and exposed through the existing sysfs interface at
>> /sys/kernel/mm/transparent_hugepage/hugepages-1048576kB/stats/. This
>> provides visibility into PUD THP behavior for debugging and performance
>> analysis.
>>
>> Signed-off-by: Usama Arif <usamaarif642@gmail.com>
>
> Yeah we really need to be basing this on mm-unstable once Nico's series has
> landed.
>
> I think it's quite important as well for you to check that khugepaged mTHP works
> with all of this.
>
>> ---
>> include/linux/huge_mm.h | 42 +++++++++++++++++++++++++++++++++++++----
>> mm/huge_memory.c | 3 ++-
>> 2 files changed, 40 insertions(+), 5 deletions(-)
>>
>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>> index e672e45bb9cc7..5509ba8555b6e 100644
>> --- a/include/linux/huge_mm.h
>> +++ b/include/linux/huge_mm.h
>> @@ -76,7 +76,13 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
>> * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
>> * (which is a limitation of the THP implementation).
>> */
>> -#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
>> +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
>> +#define THP_ORDERS_ALL_ANON_PUD BIT(PUD_ORDER)
>> +#else
>> +#define THP_ORDERS_ALL_ANON_PUD 0
>> +#endif
>> +#define THP_ORDERS_ALL_ANON (((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1))) | \
>> + THP_ORDERS_ALL_ANON_PUD)
>
> Err what is this change doing in a 'stats' change? This quietly updates
> __thp_vma_allowable_orders() to also support PUD order for anon memory... Can we
> put this in the right place?
>
Yeah, I didn't put it in the right place. Thanks!

>>
>> /*
>> * Mask of all large folio orders supported for file THP. Folios in a DAX
>> @@ -146,18 +152,46 @@ enum mthp_stat_item {
>> };
>>
>> #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
>> +
>> +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
>
> By the way I'm not a fan of us treating an 'arch has' as a 'will use'.
>
>> +#define MTHP_STAT_COUNT (PMD_ORDER + 2)
>
> Yeah I hate this. This is just 'one more thing to remember'.
>
>> +#define MTHP_STAT_PUD_INDEX (PMD_ORDER + 1) /* PUD uses last index */
>> +#else
>> +#define MTHP_STAT_COUNT (PMD_ORDER + 1)
>> +#endif
>> +
>> struct mthp_stat {
>> - unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
>> + unsigned long stats[MTHP_STAT_COUNT][__MTHP_STAT_COUNT];
>> };
>>
>> DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
>>
>> +static inline int mthp_stat_order_to_index(int order)
>> +{
>> +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
>> + if (order == PUD_ORDER)
>> + return MTHP_STAT_PUD_INDEX;
>
> This seems like a hack again.
>
>> +#endif
>> + return order;
>> +}
>> +
>> static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
>> {
>> - if (order <= 0 || order > PMD_ORDER)
>> + int index;
>> +
>> + if (order <= 0)
>> + return;
>> +
>> +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
>> + if (order != PUD_ORDER && order > PMD_ORDER)
>> return;
>> +#else
>> + if (order > PMD_ORDER)
>> + return;
>> +#endif
>
> Or we could actually define a max order... except now the hack contorts this
> code.
>
> Is it really that bad to just take up memory for the orders between PMD_ORDER and
> PUD_ORDER? ~72 bytes * cores and we avoid having to do this silly dance.

Up until a few hours before I sent the series, I was doing exactly what you are
suggesting, i.e. allocating all orders up to PUD order. It's not a lot of memory
wastage, but it is there, and I saw this patch as an easy way to avoid it. For a
server with 512 cores this is 36KB (512 * ~72 bytes), which isn't much given that
such a server will also have several hundred GBs or TBs of memory.

I know it's not elegant, but I do prefer the approach in this patch. If there is
a very strong preference for allocating all orders up to PUD, since it would make
the code more elegant, then I can switch to it.

>
>>
>> - this_cpu_add(mthp_stats.stats[order][item], delta);
>> + index = mthp_stat_order_to_index(order);
>> + this_cpu_add(mthp_stats.stats[index][item], delta);
>> }
>>
>> static inline void count_mthp_stat(int order, enum mthp_stat_item item)
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index 3128b3beedb0a..d033624d7e1f2 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -598,11 +598,12 @@ static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
>> {
>> unsigned long sum = 0;
>> int cpu;
>> + int index = mthp_stat_order_to_index(order);
>>
>> for_each_possible_cpu(cpu) {
>> struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
>>
>> - sum += this->stats[order][item];
>> + sum += this->stats[index][item];
>> }
>>
>> return sum;
>> --
>> 2.47.3
>>