[PATCH 04/11] mm: Rework compound_head() for power-of-2 sizeof(struct page)
Posted by Kiryl Shutsemau 1 week, 3 days ago
For tail pages, the kernel uses the 'compound_info' field to get to the
head page. The bit 0 of the field indicates whether the page is a
tail page, and if set, the remaining bits represent a pointer to the
head page.

For cases when size of struct page is power-of-2, change the encoding of
compound_info to store a mask that can be applied to the virtual address
of the tail page in order to access the head page. It is possible
because sturct page of the head page is naturally aligned with regards
to order of the page.

The significant impact of this modification is that all tail pages of
the same order will now have identical 'compound_info', regardless of
the compound page they are associated with. This paves the way for
eliminating fake heads.
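
For illustration, with a hypothetical 64-byte struct page and an order-9
compound page (a minimal sketch, not literal code from the patch; 'head' and
'tail' stand for the head and one tail struct page of the same folio):

	/* Old encoding: bit 0 marks a tail page, the rest is a pointer. */
	unsigned long old_info = (unsigned long)head + 1;
	struct page *h1 = (struct page *)(old_info - 1);

	/*
	 * New encoding: bit 0 still marks a tail page, the rest is a mask.
	 * shift = order + order_base_2(sizeof(struct page)) = 9 + 6 = 15,
	 * so the mask clears the low 15 bits of the tail's address. That
	 * lands on the head because the head's struct page is aligned to
	 * (1 << order) * sizeof(struct page) == 32K in the vmemmap.
	 */
	unsigned long new_info = GENMASK(BITS_PER_LONG - 1, 15) | 1;
	struct page *h2 = (struct page *)((unsigned long)tail & new_info);

Note that new_info is the same for every tail page of every order-9 folio.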

Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
---
 include/linux/page-flags.h | 61 +++++++++++++++++++++++++++++++++-----
 mm/util.c                  | 15 +++++++---
 2 files changed, 64 insertions(+), 12 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 11d9499e5ced..eef02fbbb40f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -210,6 +210,13 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
 	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
 		return page;
 
+	/*
+	 * Fake heads only exists if size of struct page is power-of-2.
+	 * See hugetlb_vmemmap_optimizable_size().
+	 */
+	if (!is_power_of_2(sizeof(struct page)))
+		return page;
+
 	/*
 	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
 	 * struct page. The alignment check aims to avoid access the fields (
@@ -223,10 +230,13 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
 		 * because the @page is a compound page composed with at least
 		 * two contiguous pages.
 		 */
-		unsigned long head = READ_ONCE(page[1].compound_info);
+		unsigned long info = READ_ONCE(page[1].compound_info);
 
-		if (likely(head & 1))
-			return (const struct page *)(head - 1);
+		if (likely(info & 1)) {
+			unsigned long p = (unsigned long)page;
+
+			return (const struct page *)(p & info);
+		}
 	}
 	return page;
 }
@@ -281,11 +291,27 @@ static __always_inline int page_is_fake_head(const struct page *page)
 
 static __always_inline unsigned long _compound_head(const struct page *page)
 {
-	unsigned long head = READ_ONCE(page->compound_info);
+	unsigned long info = READ_ONCE(page->compound_info);
 
-	if (unlikely(head & 1))
-		return head - 1;
-	return (unsigned long)page_fixed_fake_head(page);
+	/* Bit 0 encodes PageTail() */
+	if (!(info & 1))
+		return (unsigned long)page_fixed_fake_head(page);
+
+	/*
+	 * If the size of struct page is not power-of-2, the rest if
+	 * compound_info is the pointer to the head page.
+	 */
+	if (!is_power_of_2(sizeof(struct page)))
+		return info - 1;
+
+	/*
+	 * If the size of struct page is power-of-2 it is set the rest of
+	 * the info encodes the mask that converts the address of the tail
+	 * page to the head page.
+	 *
+	 * No need to clear bit 0 in the mask as 'page' always has it clear.
+	 */
+	return (unsigned long)page & info;
 }
 
 #define compound_head(page)	((typeof(page))_compound_head(page))
@@ -294,7 +320,26 @@ static __always_inline void set_compound_head(struct page *page,
 					      struct page *head,
 					      unsigned int order)
 {
-	WRITE_ONCE(page->compound_info, (unsigned long)head + 1);
+	unsigned int shift;
+	unsigned long mask;
+
+	if (!is_power_of_2(sizeof(struct page))) {
+		WRITE_ONCE(page->compound_info, (unsigned long)head | 1);
+		return;
+	}
+
+	/*
+	 * If the size of struct page is power-of-2, bits [shift:0] of the
+	 * virtual address of compound head are zero.
+	 *
+	 * Calculate mask that can be applied the virtual address of the
+	 * tail page to get address of the head page.
+	 */
+	shift = order + order_base_2(sizeof(struct page));
+	mask = GENMASK(BITS_PER_LONG - 1, shift);
+
+	/* Bit 0 encodes PageTail() */
+	WRITE_ONCE(page->compound_info, mask | 1);
 }
 
 static __always_inline void clear_compound_head(struct page *page)
diff --git a/mm/util.c b/mm/util.c
index cbf93cf3223a..6723d2bb7f1e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1234,7 +1234,7 @@ static void set_ps_flags(struct page_snapshot *ps, const struct folio *folio,
  */
 void snapshot_page(struct page_snapshot *ps, const struct page *page)
 {
-	unsigned long head, nr_pages = 1;
+	unsigned long info, nr_pages = 1;
 	struct folio *foliop;
 	int loops = 5;
 
@@ -1244,8 +1244,8 @@ void snapshot_page(struct page_snapshot *ps, const struct page *page)
 again:
 	memset(&ps->folio_snapshot, 0, sizeof(struct folio));
 	memcpy(&ps->page_snapshot, page, sizeof(*page));
-	head = ps->page_snapshot.compound_info;
-	if ((head & 1) == 0) {
+	info = ps->page_snapshot.compound_info;
+	if ((info & 1) == 0) {
 		ps->idx = 0;
 		foliop = (struct folio *)&ps->page_snapshot;
 		if (!folio_test_large(foliop)) {
@@ -1256,7 +1256,14 @@ void snapshot_page(struct page_snapshot *ps, const struct page *page)
 		}
 		foliop = (struct folio *)page;
 	} else {
-		foliop = (struct folio *)(head - 1);
+		unsigned long p = (unsigned long)page;
+
+		/* See compound_head() */
+		if (is_power_of_2(sizeof(struct page)))
+			foliop = (struct folio *)(p & info);
+		else
+			foliop = (struct folio *)(info - 1);
+
 		ps->idx = folio_page_idx(foliop, page);
 	}
 
-- 
2.51.2
Re: [PATCH 04/11] mm: Rework compound_head() for power-of-2 sizeof(struct page)
Posted by Usama Arif 1 week, 3 days ago

On 05/12/2025 19:43, Kiryl Shutsemau wrote:
> For tail pages, the kernel uses the 'compound_info' field to get to the
> head page. The bit 0 of the field indicates whether the page is a
> tail page, and if set, the remaining bits represent a pointer to the
> head page.
> 
> For cases when size of struct page is power-of-2, change the encoding of
> compound_info to store a mask that can be applied to the virtual address
> of the tail page in order to access the head page. It is possible
> because sturct page of the head page is naturally aligned with regards

nit: s/sturct/struct/

> to order of the page.

Might be good to state here that no change is expected if sizeof(struct page)
is not a power of 2.

> 
> The significant impact of this modification is that all tail pages of
> the same order will now have identical 'compound_info', regardless of
> the compound page they are associated with. This paves the way for
> eliminating fake heads.
> 
> Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
> ---
>  include/linux/page-flags.h | 61 +++++++++++++++++++++++++++++++++-----
>  mm/util.c                  | 15 +++++++---
>  2 files changed, 64 insertions(+), 12 deletions(-)
> 
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index 11d9499e5ced..eef02fbbb40f 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -210,6 +210,13 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
>  	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
>  		return page;
>  
> +	/*
> +	 * Fake heads only exists if size of struct page is power-of-2.
> +	 * See hugetlb_vmemmap_optimizable_size().
> +	 */
> +	if (!is_power_of_2(sizeof(struct page)))
> +		return page;
> +


Hmm, my understanding from reviewing the series up to this patch is that everything works
the same as the old code when sizeof(struct page) is not a power of 2. Returning page here
means you don't fix the page head when sizeof(struct page) is not a power of 2?

>  	/*
>  	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
>  	 * struct page. The alignment check aims to avoid access the fields (
> @@ -223,10 +230,13 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
>  		 * because the @page is a compound page composed with at least
>  		 * two contiguous pages.
>  		 */
> -		unsigned long head = READ_ONCE(page[1].compound_info);
> +		unsigned long info = READ_ONCE(page[1].compound_info);
>  
> -		if (likely(head & 1))
> -			return (const struct page *)(head - 1);
> +		if (likely(info & 1)) {
> +			unsigned long p = (unsigned long)page;
> +
> +			return (const struct page *)(p & info);

Would it be worth writing a comment over here similar to what you have in set_compound_head
to explain why this works? i.e. compound_info contains the mask derived from folio order that
can be applied to the virtual address to get the head page.

Also, it takes a few minutes to wrap your head around the fact that this works because the struct
page of the head page is aligned with respect to the order. Maybe it would be good to add that as
a comment somewhere? I don't see it documented in this patch; if it's in a future patch, please
ignore this comment.

> +		}
>  	}
>  	return page;
>  }
> @@ -281,11 +291,27 @@ static __always_inline int page_is_fake_head(const struct page *page)
>  
>  static __always_inline unsigned long _compound_head(const struct page *page)
>  {
> -	unsigned long head = READ_ONCE(page->compound_info);
> +	unsigned long info = READ_ONCE(page->compound_info);
>  
> -	if (unlikely(head & 1))
> -		return head - 1;
> -	return (unsigned long)page_fixed_fake_head(page);
> +	/* Bit 0 encodes PageTail() */
> +	if (!(info & 1))
> +		return (unsigned long)page_fixed_fake_head(page);
> +
> +	/*
> +	 * If the size of struct page is not power-of-2, the rest if

nit: s/if/of

> +	 * compound_info is the pointer to the head page.
> +	 */
> +	if (!is_power_of_2(sizeof(struct page)))
> +		return info - 1;
> +
> +	/*
> +	 * If the size of struct page is power-of-2 it is set the rest of

nit: remove "it is set"

> +	 * the info encodes the mask that converts the address of the tail
> +	 * page to the head page.
> +	 *
> +	 * No need to clear bit 0 in the mask as 'page' always has it clear.
> +	 */
> +	return (unsigned long)page & info;
>  }
>  
>  #define compound_head(page)	((typeof(page))_compound_head(page))
> @@ -294,7 +320,26 @@ static __always_inline void set_compound_head(struct page *page,
>  					      struct page *head,
>  					      unsigned int order)
>  {
> -	WRITE_ONCE(page->compound_info, (unsigned long)head + 1);
> +	unsigned int shift;
> +	unsigned long mask;
> +
> +	if (!is_power_of_2(sizeof(struct page))) {
> +		WRITE_ONCE(page->compound_info, (unsigned long)head | 1);
> +		return;
> +	}
> +
> +	/*
> +	 * If the size of struct page is power-of-2, bits [shift:0] of the
> +	 * virtual address of compound head are zero.
> +	 *
> +	 * Calculate mask that can be applied the virtual address of the

nit: applied to the ..

> +	 * tail page to get address of the head page.
> +	 */
> +	shift = order + order_base_2(sizeof(struct page));
> +	mask = GENMASK(BITS_PER_LONG - 1, shift);
> +
> +	/* Bit 0 encodes PageTail() */
> +	WRITE_ONCE(page->compound_info, mask | 1);
>  }
>  
>  static __always_inline void clear_compound_head(struct page *page)
> diff --git a/mm/util.c b/mm/util.c
> index cbf93cf3223a..6723d2bb7f1e 100644
> --- a/mm/util.c
> +++ b/mm/util.c
> @@ -1234,7 +1234,7 @@ static void set_ps_flags(struct page_snapshot *ps, const struct folio *folio,
>   */
>  void snapshot_page(struct page_snapshot *ps, const struct page *page)
>  {
> -	unsigned long head, nr_pages = 1;
> +	unsigned long info, nr_pages = 1;
>  	struct folio *foliop;
>  	int loops = 5;
>  
> @@ -1244,8 +1244,8 @@ void snapshot_page(struct page_snapshot *ps, const struct page *page)
>  again:
>  	memset(&ps->folio_snapshot, 0, sizeof(struct folio));
>  	memcpy(&ps->page_snapshot, page, sizeof(*page));
> -	head = ps->page_snapshot.compound_info;
> -	if ((head & 1) == 0) {
> +	info = ps->page_snapshot.compound_info;
> +	if ((info & 1) == 0) {
>  		ps->idx = 0;
>  		foliop = (struct folio *)&ps->page_snapshot;
>  		if (!folio_test_large(foliop)) {
> @@ -1256,7 +1256,14 @@ void snapshot_page(struct page_snapshot *ps, const struct page *page)
>  		}
>  		foliop = (struct folio *)page;
>  	} else {
> -		foliop = (struct folio *)(head - 1);
> +		unsigned long p = (unsigned long)page;
> +
> +		/* See compound_head() */
> +		if (is_power_of_2(sizeof(struct page)))
> +			foliop = (struct folio *)(p & info);
> +		else
> +			foliop = (struct folio *)(info - 1);
> +

Would it be better to do the below, as you then don't need to declare p if sizeof(struct page) is
not a power of 2?

if (!is_power_of_2(sizeof(struct page)))
	foliop = (struct folio *)(info - 1);
else {
	unsigned long p = (unsigned long)page;
	foliop = (struct folio *)(p & info);
}
	
>  		ps->idx = folio_page_idx(foliop, page);
>  	}
>
Re: [PATCH 04/11] mm: Rework compound_head() for power-of-2 sizeof(struct page)
Posted by Kiryl Shutsemau 1 week, 2 days ago
On Sat, Dec 06, 2025 at 12:25:12AM +0000, Usama Arif wrote:
> 
> 
> On 05/12/2025 19:43, Kiryl Shutsemau wrote:
> > For tail pages, the kernel uses the 'compound_info' field to get to the
> > head page. The bit 0 of the field indicates whether the page is a
> > tail page, and if set, the remaining bits represent a pointer to the
> > head page.
> > 
> > For cases when size of struct page is power-of-2, change the encoding of
> > compound_info to store a mask that can be applied to the virtual address
> > of the tail page in order to access the head page. It is possible
> > because sturct page of the head page is naturally aligned with regards
> 
> nit: s/sturct/struct/

Ack.

> > to order of the page.
> 
> Might be good to state here that no change is expected if sizeof(struct page)
> is not a power of 2.

Okay.

> > 
> > The significant impact of this modification is that all tail pages of
> > the same order will now have identical 'compound_info', regardless of
> > the compound page they are associated with. This paves the way for
> > eliminating fake heads.
> > 
> > Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
> > ---
> >  include/linux/page-flags.h | 61 +++++++++++++++++++++++++++++++++-----
> >  mm/util.c                  | 15 +++++++---
> >  2 files changed, 64 insertions(+), 12 deletions(-)
> > 
> > diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> > index 11d9499e5ced..eef02fbbb40f 100644
> > --- a/include/linux/page-flags.h
> > +++ b/include/linux/page-flags.h
> > @@ -210,6 +210,13 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
> >  	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
> >  		return page;
> >  
> > +	/*
> > +	 * Fake heads only exists if size of struct page is power-of-2.
> > +	 * See hugetlb_vmemmap_optimizable_size().
> > +	 */
> > +	if (!is_power_of_2(sizeof(struct page)))
> > +		return page;
> > +
> 
> 
> Hmm, my understanding from reviewing the series up to this patch is that everything works
> the same as the old code when sizeof(struct page) is not a power of 2. Returning page here
> means you don't fix the page head when sizeof(struct page) is not a power of 2?

There's no change for non-power-of-2 sizeof(struct page) as there are no
fake heads, because there's no HVO for such cases.

See hugetlb_vmemmap_optimizable_size() as I mentioned in the comment.
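Roughly (from memory, the exact check may differ), it bails out early for
this case:

	if (!is_power_of_2(sizeof(struct page)))
		return 0;	/* no HVO, hence no fake heads */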

> 
> >  	/*
> >  	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
> >  	 * struct page. The alignment check aims to avoid access the fields (
> > @@ -223,10 +230,13 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
> >  		 * because the @page is a compound page composed with at least
> >  		 * two contiguous pages.
> >  		 */
> > -		unsigned long head = READ_ONCE(page[1].compound_info);
> > +		unsigned long info = READ_ONCE(page[1].compound_info);
> >  
> > -		if (likely(head & 1))
> > -			return (const struct page *)(head - 1);
> > +		if (likely(info & 1)) {
> > +			unsigned long p = (unsigned long)page;
> > +
> > +			return (const struct page *)(p & info);
> 
> Would it be worth writing a comment over here similar to what you have in set_compound_head
> to explain why this works? i.e. compound_info contains the mask derived from folio order that
> can be applied to the virtual address to get the head page.

But this code is about to be deleted. Is it really worth it?

> Also, it takes a few minutes to wrap your head around the fact that this works because the struct
> page of the head page is aligned with respect to the order. Maybe it would be good to add that as
> a comment somewhere? I don't see it documented in this patch; if it's in a future patch, please
> ignore this comment.

Okay, I will try to explain it better.
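
Something along these lines: the struct page for pfn is at
vmemmap + pfn * sizeof(struct page), and the head pfn of an order-N folio is
a multiple of (1 << N). So when sizeof(struct page) is a power of 2, the
head's struct page sits at a multiple of (1 << N) * sizeof(struct page) (the
vmemmap base is aligned far beyond that), and clearing the low
N + order_base_2(sizeof(struct page)) bits of any tail's struct page address
gives the head.

A made-up example with sizeof(struct page) == 64 and order == 9:

	head pfn  = 0x12345200			/* multiple of 1 << 9 */
	head page = vmemmap + 0x12345200 * 64	/* multiple of 0x8000 */
	tail page = head page + k * 64,		0 < k < 512
	tail page & ~0x7fff == head page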

> 
> > +		}
> >  	}
> >  	return page;
> >  }
> > @@ -281,11 +291,27 @@ static __always_inline int page_is_fake_head(const struct page *page)
> >  
> >  static __always_inline unsigned long _compound_head(const struct page *page)
> >  {
> > -	unsigned long head = READ_ONCE(page->compound_info);
> > +	unsigned long info = READ_ONCE(page->compound_info);
> >  
> > -	if (unlikely(head & 1))
> > -		return head - 1;
> > -	return (unsigned long)page_fixed_fake_head(page);
> > +	/* Bit 0 encodes PageTail() */
> > +	if (!(info & 1))
> > +		return (unsigned long)page_fixed_fake_head(page);
> > +
> > +	/*
> > +	 * If the size of struct page is not power-of-2, the rest if
> 
> nit: s/if/of

Ack.

> 
> > +	 * compound_info is the pointer to the head page.
> > +	 */
> > +	if (!is_power_of_2(sizeof(struct page)))
> > +		return info - 1;
> > +
> > +	/*
> > +	 * If the size of struct page is power-of-2 it is set the rest of
> 
> nit: remove "it is set"

Ack.

> 
> > +	 * the info encodes the mask that converts the address of the tail
> > +	 * page to the head page.
> > +	 *
> > +	 * No need to clear bit 0 in the mask as 'page' always has it clear.
> > +	 */
> > +	return (unsigned long)page & info;
> >  }
> >  
> >  #define compound_head(page)	((typeof(page))_compound_head(page))
> > @@ -294,7 +320,26 @@ static __always_inline void set_compound_head(struct page *page,
> >  					      struct page *head,
> >  					      unsigned int order)
> >  {
> > -	WRITE_ONCE(page->compound_info, (unsigned long)head + 1);
> > +	unsigned int shift;
> > +	unsigned long mask;
> > +
> > +	if (!is_power_of_2(sizeof(struct page))) {
> > +		WRITE_ONCE(page->compound_info, (unsigned long)head | 1);
> > +		return;
> > +	}
> > +
> > +	/*
> > +	 * If the size of struct page is power-of-2, bits [shift:0] of the
> > +	 * virtual address of compound head are zero.
> > +	 *
> > +	 * Calculate mask that can be applied the virtual address of the
> 
> nit: applied to the ..

Ack.

> 
> > +	 * tail page to get address of the head page.
> > +	 */
> > +	shift = order + order_base_2(sizeof(struct page));
> > +	mask = GENMASK(BITS_PER_LONG - 1, shift);
> > +
> > +	/* Bit 0 encodes PageTail() */
> > +	WRITE_ONCE(page->compound_info, mask | 1);
> >  }
> >  
> >  static __always_inline void clear_compound_head(struct page *page)
> > diff --git a/mm/util.c b/mm/util.c
> > index cbf93cf3223a..6723d2bb7f1e 100644
> > --- a/mm/util.c
> > +++ b/mm/util.c
> > @@ -1234,7 +1234,7 @@ static void set_ps_flags(struct page_snapshot *ps, const struct folio *folio,
> >   */
> >  void snapshot_page(struct page_snapshot *ps, const struct page *page)
> >  {
> > -	unsigned long head, nr_pages = 1;
> > +	unsigned long info, nr_pages = 1;
> >  	struct folio *foliop;
> >  	int loops = 5;
> >  
> > @@ -1244,8 +1244,8 @@ void snapshot_page(struct page_snapshot *ps, const struct page *page)
> >  again:
> >  	memset(&ps->folio_snapshot, 0, sizeof(struct folio));
> >  	memcpy(&ps->page_snapshot, page, sizeof(*page));
> > -	head = ps->page_snapshot.compound_info;
> > -	if ((head & 1) == 0) {
> > +	info = ps->page_snapshot.compound_info;
> > +	if ((info & 1) == 0) {
> >  		ps->idx = 0;
> >  		foliop = (struct folio *)&ps->page_snapshot;
> >  		if (!folio_test_large(foliop)) {
> > @@ -1256,7 +1256,14 @@ void snapshot_page(struct page_snapshot *ps, const struct page *page)
> >  		}
> >  		foliop = (struct folio *)page;
> >  	} else {
> > -		foliop = (struct folio *)(head - 1);
> > +		unsigned long p = (unsigned long)page;
> > +
> > +		/* See compound_head() */
> > +		if (is_power_of_2(sizeof(struct page)))
> > +			foliop = (struct folio *)(p & info);
> > +		else
> > +			foliop = (struct folio *)(info - 1);
> > +
> 
> Would it be better to do the below, as you then don't need to declare p if sizeof(struct page) is
> not a power of 2?
> 
> if (!is_power_of_2(sizeof(struct page)))
> 	foliop = (struct folio *)(info - 1);
> else {
> 	unsigned long p = (unsigned long)page;
> 	foliop = (struct folio *)(p & info);
> }

Okay.

> 	
> >  		ps->idx = folio_page_idx(foliop, page);
> >  	}
> >  
> 

-- 
  Kiryl Shutsemau / Kirill A. Shutemov
Re: [PATCH 04/11] mm: Rework compound_head() for power-of-2 sizeof(struct page)
Posted by Usama Arif 1 week, 2 days ago
>> Hmm, my understanding from reviewing the series up to this patch is that everything works
>> the same as the old code when sizeof(struct page) is not a power of 2. Returning page here
>> means you don't fix the page head when sizeof(struct page) is not a power of 2?
> 
> There's no change for non-power-of-2 sizeof(struct page) as there are no
> fake heads, because there's no HVO for such cases.


Yeah, I forgot that while reviewing :). I see it's mentioned in vmemmap_dedup.rst as well.
I think it might be good to add a reminder in the commit message that HVO doesn't apply to
non-power-of-2 sizeof(struct page), but no strong preference :)

> 
> See hugetlb_vmemmap_optimizable_size() as I mentioned in the comment.
> 
>>
>>>  	/*
>>>  	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
>>>  	 * struct page. The alignment check aims to avoid access the fields (
>>> @@ -223,10 +230,13 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
>>>  		 * because the @page is a compound page composed with at least
>>>  		 * two contiguous pages.
>>>  		 */
>>> -		unsigned long head = READ_ONCE(page[1].compound_info);
>>> +		unsigned long info = READ_ONCE(page[1].compound_info);
>>>  
>>> -		if (likely(head & 1))
>>> -			return (const struct page *)(head - 1);
>>> +		if (likely(info & 1)) {
>>> +			unsigned long p = (unsigned long)page;
>>> +
>>> +			return (const struct page *)(p & info);
>>
>> Would it be worth writing a comment over here similar to what you have in set_compound_head
>> to explain why this works? i.e. compound_info contains the mask derived from folio order that
>> can be applied to the virtual address to get the head page.
> 
> But this code is about to be deleted. Is it really worth it?

Ack, hadn't gotten to that commit.