In order to separately allocate ptdescs from pages, we need all allocation
and free sites to use the appropriate functions. Convert these pte
allocation/free sites to use ptdescs.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 arch/x86/mm/pat/set_memory.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 6c6eb486f7a6..2dcb565d8f9b 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1408,7 +1408,7 @@ static bool try_to_free_pte_page(pte_t *pte)
 		if (!pte_none(pte[i]))
 			return false;
 
-	free_page((unsigned long)pte);
+	pagetable_free(virt_to_ptdesc((void *)pte));
 	return true;
 }
 
@@ -1537,9 +1537,10 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
 	 */
 }
 
-static int alloc_pte_page(pmd_t *pmd)
+static int alloc_pte_ptdesc(pmd_t *pmd)
 {
-	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+	pte_t *pte = (pte_t *) ptdesc_address(
+			pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0));
 	if (!pte)
 		return -1;
 
@@ -1600,7 +1601,7 @@ static long populate_pmd(struct cpa_data *cpa,
 		 */
 		pmd = pmd_offset(pud, start);
 		if (pmd_none(*pmd))
-			if (alloc_pte_page(pmd))
+			if (alloc_pte_ptdesc(pmd))
 				return -1;
 
 		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
@@ -1641,7 +1642,7 @@ static long populate_pmd(struct cpa_data *cpa,
 	if (start < end) {
 		pmd = pmd_offset(pud, start);
 		if (pmd_none(*pmd))
-			if (alloc_pte_page(pmd))
+			if (alloc_pte_ptdesc(pmd))
 				return -1;
 
 		populate_pte(cpa, start, end, num_pages - cur_pages,
--
2.52.0
On Wed, Jan 28, 2026 at 02:40:47PM -0800, Vishal Moola (Oracle) wrote:
> In order to separately allocate ptdescs from pages, we need all allocation
> and free sites to use the appropriate functions. Convert these pte
> allocation/free sites to use ptdescs.
>
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> ---
> arch/x86/mm/pat/set_memory.c | 11 ++++++-----
> 1 file changed, 6 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> index 6c6eb486f7a6..2dcb565d8f9b 100644
> --- a/arch/x86/mm/pat/set_memory.c
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -1408,7 +1408,7 @@ static bool try_to_free_pte_page(pte_t *pte)
>  		if (!pte_none(pte[i]))
>  			return false;
>
> -	free_page((unsigned long)pte);
> +	pagetable_free(virt_to_ptdesc((void *)pte));
>  	return true;
>  }
>
> @@ -1537,9 +1537,10 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
>  	 */
>  }
>
> -static int alloc_pte_page(pmd_t *pmd)
> +static int alloc_pte_ptdesc(pmd_t *pmd)
>  {
> -	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
> +	pte_t *pte = (pte_t *) ptdesc_address(
> +			pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0));
Sorry I missed this last time, but ptdesc_address(NULL) does not return
NULL, so the !pte check below will not catch a failed pagetable_alloc().
The allocation and conversion should be split IMHO.
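For example, something along these lines would keep the NULL check
working (completely untested, just to sketch the split):

	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
	pte_t *pte;

	/* test the ptdesc itself so a failed allocation is actually caught */
	if (!ptdesc)
		return -1;

	pte = (pte_t *)ptdesc_address(ptdesc);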
This applies to all instances in all the patches.
>  	if (!pte)
>  		return -1;
>
> @@ -1600,7 +1601,7 @@ static long populate_pmd(struct cpa_data *cpa,
>  		 */
>  		pmd = pmd_offset(pud, start);
>  		if (pmd_none(*pmd))
> -			if (alloc_pte_page(pmd))
> +			if (alloc_pte_ptdesc(pmd))
>  				return -1;
>
>  		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
> @@ -1641,7 +1642,7 @@ static long populate_pmd(struct cpa_data *cpa,
>  	if (start < end) {
>  		pmd = pmd_offset(pud, start);
>  		if (pmd_none(*pmd))
> -			if (alloc_pte_page(pmd))
> +			if (alloc_pte_ptdesc(pmd))
>  				return -1;
>
>  		populate_pte(cpa, start, end, num_pages - cur_pages,
> --
> 2.52.0
>
--
Sincerely yours,
Mike.
On Thu, Jan 29, 2026 at 10:08:33AM +0200, Mike Rapoport wrote:
> On Wed, Jan 28, 2026 at 02:40:47PM -0800, Vishal Moola (Oracle) wrote:
> > In order to separately allocate ptdescs from pages, we need all allocation
> > and free sites to use the appropriate functions. Convert these pte
> > allocation/free sites to use ptdescs.
> >
> > Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> > ---
> > arch/x86/mm/pat/set_memory.c | 11 ++++++-----
> > 1 file changed, 6 insertions(+), 5 deletions(-)
> >
> > diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> > index 6c6eb486f7a6..2dcb565d8f9b 100644
> > --- a/arch/x86/mm/pat/set_memory.c
> > +++ b/arch/x86/mm/pat/set_memory.c
> > @@ -1408,7 +1408,7 @@ static bool try_to_free_pte_page(pte_t *pte)
> >  		if (!pte_none(pte[i]))
> >  			return false;
> >
> > -	free_page((unsigned long)pte);
> > +	pagetable_free(virt_to_ptdesc((void *)pte));
> >  	return true;
> >  }
> >
> > @@ -1537,9 +1537,10 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
> >  	 */
> >  }
> >
> > -static int alloc_pte_page(pmd_t *pmd)
> > +static int alloc_pte_ptdesc(pmd_t *pmd)
> >  {
> > -	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
> > +	pte_t *pte = (pte_t *) ptdesc_address(
> > +			pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0));
>
> Sorry I missed this last time, but ptdesc_address(NULL) does not return
> NULL, so the !pte check below will not catch a failed pagetable_alloc().
> The allocation and conversion should be split IMHO.
Good catch.
> This applies to all instances in all the patches.
Thanks for reviewing! I'll send a v3 with your feedback incorporated
next week.
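For reference, I'm planning something along these lines for v3
(untested sketch, with the allocation split from the address conversion
as you suggest):

static int alloc_pte_ptdesc(pmd_t *pmd)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);

	/* bail out on the ptdesc before converting it to an address */
	if (!ptdesc)
		return -1;

	set_pmd(pmd, __pmd(__pa(ptdesc_address(ptdesc)) | _KERNPG_TABLE));
	return 0;
}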
> >  	if (!pte)
> >  		return -1;
> >
> > @@ -1600,7 +1601,7 @@ static long populate_pmd(struct cpa_data *cpa,
> >  		 */
> >  		pmd = pmd_offset(pud, start);
> >  		if (pmd_none(*pmd))
> > -			if (alloc_pte_page(pmd))
> > +			if (alloc_pte_ptdesc(pmd))
> >  				return -1;
> >
> >  		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
> > @@ -1641,7 +1642,7 @@ static long populate_pmd(struct cpa_data *cpa,
> >  	if (start < end) {
> >  		pmd = pmd_offset(pud, start);
> >  		if (pmd_none(*pmd))
> > -			if (alloc_pte_page(pmd))
> > +			if (alloc_pte_ptdesc(pmd))
> >  				return -1;
> >
> >  		populate_pte(cpa, start, end, num_pages - cur_pages,
> > --
> > 2.52.0
> >
>
> --
> Sincerely yours,
> Mike.