Let's reduce the code duplication and factor out the non-pte/pmd related
magic into vm_normal_page_pfn().
To keep it simpler, check the pfn against both zero folios. We could
optimize this, but as it's only for the !CONFIG_ARCH_HAS_PTE_SPECIAL
case, it's not a compelling micro-optimization.
With CONFIG_ARCH_HAS_PTE_SPECIAL we don't have to check anything else,
really.
It's a good question whether we can even hit the !CONFIG_ARCH_HAS_PTE_SPECIAL
scenario in the PMD case in practice, but it doesn't really matter, as
it's now all unified in vm_normal_page_pfn().
Add kerneldoc for all involved functions.
No functional change intended.
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
mm/memory.c | 183 +++++++++++++++++++++++++++++++---------------------
1 file changed, 109 insertions(+), 74 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 08d16ed7b4cc7..c43ae5e4d7644 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -590,8 +590,13 @@ static void print_bad_page_map(struct vm_area_struct *vma,
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
-/*
- * vm_normal_page -- This function gets the "struct page" associated with a pte.
+/**
+ * vm_normal_page_pfn() - Get the "struct page" associated with a PFN in a
+ * non-special page table entry.
+ * @vma: The VMA mapping the @pfn.
+ * @addr: The address where the @pfn is mapped.
+ * @pfn: The PFN.
+ * @entry: The page table entry value for error reporting purposes.
*
* "Special" mappings do not wish to be associated with a "struct page" (either
* it doesn't exist, or it exists but they don't want to touch it). In this
@@ -603,10 +608,10 @@ static void print_bad_page_map(struct vm_area_struct *vma,
* (such as GUP) can still identify these mappings and work with the
* underlying "struct page".
*
- * There are 2 broad cases. Firstly, an architecture may define a pte_special()
- * pte bit, in which case this function is trivial. Secondly, an architecture
- * may not have a spare pte bit, which requires a more complicated scheme,
- * described below.
+ * There are 2 broad cases. Firstly, an architecture may define a "special"
+ * page table entry bit (e.g., pte_special()), in which case this function is
+ * trivial. Secondly, an architecture may not have a spare page table
+ * entry bit, which requires a more complicated scheme, described below.
*
* A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
* special mapping (even if there are underlying and valid "struct pages").
@@ -639,15 +644,72 @@ static void print_bad_page_map(struct vm_area_struct *vma,
* don't have to follow the strict linearity rule of PFNMAP mappings in
* order to support COWable mappings.
*
+ * This function is not expected to be called for obviously special mappings:
+ * when the page table entry has the "special" bit set.
+ *
+ * Return: Returns the "struct page" if this is a "normal" mapping. Returns
+ * NULL if this is a "special" mapping.
+ */
+static inline struct page *vm_normal_page_pfn(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long long entry)
+{
+ /*
+ * With CONFIG_ARCH_HAS_PTE_SPECIAL, any special page table mappings
+ * (incl. shared zero folios) are marked accordingly and are handled
+ * by the caller.
+ */
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
+ if (unlikely(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))) {
+ if (vma->vm_flags & VM_MIXEDMAP) {
+ /* If it has a "struct page", it's "normal". */
+ if (!pfn_valid(pfn))
+ return NULL;
+ } else {
+ unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
+
+ /* Only CoW'ed anon folios are "normal". */
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ if (!is_cow_mapping(vma->vm_flags))
+ return NULL;
+ }
+ }
+
+ if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
+ return NULL;
+ }
+
+ /* Cheap check for corrupted page table entries. */
+ if (pfn > highest_memmap_pfn) {
+ print_bad_page_map(vma, addr, entry, NULL);
+ return NULL;
+ }
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+ * For example, VDSO mappings can cause them to exist.
+ */
+ VM_WARN_ON_ONCE(is_zero_pfn(pfn) || is_huge_zero_pfn(pfn));
+ return pfn_to_page(pfn);
+}
+
+/**
+ * vm_normal_page() - Get the "struct page" associated with a PTE
+ * @vma: The VMA mapping the @pte.
+ * @addr: The address where the @pte is mapped.
+ * @pte: The PTE.
+ *
+ * Get the "struct page" associated with a PTE. See vm_normal_page_pfn()
+ * for details.
+ *
+ * Return: Returns the "struct page" if this is a "normal" mapping. Returns
+ * NULL if this is a "special" mapping.
*/
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
- if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
- if (likely(!pte_special(pte)))
- goto check_pfn;
+ if (unlikely(pte_special(pte))) {
if (vma->vm_ops && vma->vm_ops->find_special_page)
return vma->vm_ops->find_special_page(vma, addr);
if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
@@ -658,44 +720,21 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
print_bad_page_map(vma, addr, pte_val(pte), NULL);
return NULL;
}
-
- /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
-
- if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
- if (vma->vm_flags & VM_MIXEDMAP) {
- if (!pfn_valid(pfn))
- return NULL;
- if (is_zero_pfn(pfn))
- return NULL;
- goto out;
- } else {
- unsigned long off;
- off = (addr - vma->vm_start) >> PAGE_SHIFT;
- if (pfn == vma->vm_pgoff + off)
- return NULL;
- if (!is_cow_mapping(vma->vm_flags))
- return NULL;
- }
- }
-
- if (is_zero_pfn(pfn))
- return NULL;
-
-check_pfn:
- if (unlikely(pfn > highest_memmap_pfn)) {
- print_bad_page_map(vma, addr, pte_val(pte), NULL);
- return NULL;
- }
-
- /*
- * NOTE! We still have PageReserved() pages in the page tables.
- * eg. VDSO mappings can cause them to exist.
- */
-out:
- VM_WARN_ON_ONCE(is_zero_pfn(pfn));
- return pfn_to_page(pfn);
+ return vm_normal_page_pfn(vma, addr, pfn, pte_val(pte));
}
+/**
+ * vm_normal_folio() - Get the "struct folio" associated with a PTE
+ * @vma: The VMA mapping the @pte.
+ * @addr: The address where the @pte is mapped.
+ * @pte: The PTE.
+ *
+ * Get the "struct folio" associated with a PTE. See vm_normal_page_pfn()
+ * for details.
+ *
+ * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
+ * NULL if this is a "special" mapping.
+ */
struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
pte_t pte)
{
@@ -707,6 +746,18 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
}
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
+/**
+ * vm_normal_page_pmd() - Get the "struct page" associated with a PMD
+ * @vma: The VMA mapping the @pmd.
+ * @addr: The address where the @pmd is mapped.
+ * @pmd: The PMD.
+ *
+ * Get the "struct page" associated with a PMD. See vm_normal_page_pfn()
+ * for details.
+ *
+ * Return: Returns the "struct page" if this is a "normal" mapping. Returns
+ * NULL if this is a "special" mapping.
+ */
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd)
{
@@ -721,37 +772,21 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
print_bad_page_map(vma, addr, pmd_val(pmd), NULL);
return NULL;
}
-
- if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
- if (vma->vm_flags & VM_MIXEDMAP) {
- if (!pfn_valid(pfn))
- return NULL;
- goto out;
- } else {
- unsigned long off;
- off = (addr - vma->vm_start) >> PAGE_SHIFT;
- if (pfn == vma->vm_pgoff + off)
- return NULL;
- if (!is_cow_mapping(vma->vm_flags))
- return NULL;
- }
- }
-
- if (is_huge_zero_pfn(pfn))
- return NULL;
- if (unlikely(pfn > highest_memmap_pfn)) {
- print_bad_page_map(vma, addr, pmd_val(pmd), NULL);
- return NULL;
- }
-
- /*
- * NOTE! We still have PageReserved() pages in the page tables.
- * eg. VDSO mappings can cause them to exist.
- */
-out:
- return pfn_to_page(pfn);
+ return vm_normal_page_pfn(vma, addr, pfn, pmd_val(pmd));
}
+/**
+ * vm_normal_folio_pmd() - Get the "struct folio" associated with a PMD
+ * @vma: The VMA mapping the @pmd.
+ * @addr: The address where the @pmd is mapped.
+ * @pmd: The PMD.
+ *
+ * Get the "struct folio" associated with a PMD. See vm_normal_page_pfn()
+ * for details.
+ *
+ * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
+ * NULL if this is a "special" mapping.
+ */
struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd)
{
--
2.50.1
On Thu, Jul 17, 2025 at 01:52:10PM +0200, David Hildenbrand wrote:
> Let's reduce the code duplication and factor out the non-pte/pmd related
> magic into vm_normal_page_pfn().
>
[...]
>
> -/*
> - * vm_normal_page -- This function gets the "struct page" associated with a pte.
> +/**
> + * vm_normal_page_pfn() - Get the "struct page" associated with a PFN in a
> + *	non-special page table entry.

This is a bit nebulous/confusing, I mean you'll get PTE entries with PTE
special bit that'll have a PFN but just no struct page/folio to look at, or
should not be touched.

So the _pfn() bit doesn't really properly describe what it does.

I wonder if it'd be better to just separate out the special handler, have
that return a boolean indicating special of either form, and then separate
other shared code separately from that?

> + * @vma: The VMA mapping the @pfn.
> + * @addr: The address where the @pfn is mapped.
> + * @pfn: The PFN.
> + * @entry: The page table entry value for error reporting purposes.
>   *
[...]
> - * There are 2 broad cases. Firstly, an architecture may define a pte_special()
> - * pte bit, in which case this function is trivial. Secondly, an architecture
> - * may not have a spare pte bit, which requires a more complicated scheme,
> - * described below.
> + * There are 2 broad cases. Firstly, an architecture may define a "special"
> + * page table entry bit (e.g., pte_special()), in which case this function is
> + * trivial. Secondly, an architecture may not have a spare page table
> + * entry bit, which requires a more complicated scheme, described below.

Strikes me this bit of the comment should be with vm_normal_page(). As this
implies the 2 broad cases are handled here and this isn't the case.

[...]
> + * This function is not expected to be called for obviously special mappings:
> + * when the page table entry has the "special" bit set.

Hmm this is a bit weird though, saying "obviously" special, because you're
handling "special" mappings here, but only for architectures that don't
specify the PTE special bit.

So it makes it quite nebulous what constitutes 'obviously' here, really you
mean pte_special().

[...]
> +	if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))

This handles zero/zero huge page handling for non-pte_special() case only. I
wonder if we even need to bother having these marked special generally since
you can just check the PFN every time anyway.

[...]
> +/**
> + * vm_normal_folio() - Get the "struct folio" associated with a PTE
> + * @vma: The VMA mapping the @pte.
> + * @addr: The address where the @pte is mapped.
> + * @pte: The PTE.
> + *
> + * Get the "struct folio" associated with a PTE. See vm_normal_page_pfn()
> + * for details.
> + *
> + * Return: Returns the "struct folio" if this is a "normal" mapping. Returns
> + * NULL if this is a "special" mapping.
> + */

Nice to add a comment, but again feels weird to have the whole explanation in
vm_normal_page_pfn() but then to invoke vm_normal_page()...

[...]
> -out:
> -	return pfn_to_page(pfn);
> +	return vm_normal_page_pfn(vma, addr, pfn, pmd_val(pmd));

Hmm this seems broken, because you're now making these special on arches with
pte_special() right? But then you're invoking the not-special function?

Also for non-pte_special() arches you're kind of implying they _maybe_ could
be special.
On 17.07.25 21:51, Lorenzo Stoakes wrote:
>> -/*
>> - * vm_normal_page -- This function gets the "struct page" associated with a pte.
>> +/**
>> + * vm_normal_page_pfn() - Get the "struct page" associated with a PFN in a
>> + *	non-special page table entry.
>
> This is a bit nebulous/confusing, I mean you'll get PTE entries with PTE
> special bit that'll have a PFN but just no struct page/folio to look at, or
> should not be touched.
>
> So the _pfn() bit doesn't really properly describe what it does.
>
> I wonder if it'd be better to just separate out the special handler, have
> that return a boolean indicating special of either form, and then separate
> other shared code separately from that?

Let me think about that; I played with various approaches and this was the
best I could come up with before running in circles.

[...]
>> - * There are 2 broad cases. Firstly, an architecture may define a pte_special()
>> - * pte bit, in which case this function is trivial. Secondly, an architecture
>> - * may not have a spare pte bit, which requires a more complicated scheme,
>> - * described below.
>> + * There are 2 broad cases. Firstly, an architecture may define a "special"
>> + * page table entry bit (e.g., pte_special()), in which case this function is
>> + * trivial. Secondly, an architecture may not have a spare page table
>> + * entry bit, which requires a more complicated scheme, described below.
>
> Strikes me this bit of the comment should be with vm_normal_page(). As this
> implies the 2 broad cases are handled here and this isn't the case.

Well, pragmatism. Splitting up the doc doesn't make sense. Having it at
vm_normal_page() doesn't make sense.

I'm sure the educated reader will be able to make sense of it :P

But I'm happy to hear suggestions on how to do it differently :)

[...]
>> + * This function is not expected to be called for obviously special mappings:
>> + * when the page table entry has the "special" bit set.
>
> Hmm this is a bit weird though, saying "obviously" special, because you're
> handling "special" mappings here, but only for architectures that don't
> specify the PTE special bit.
>
> So it makes it quite nebulous what constitutes 'obviously' here, really you
> mean pte_special().

Yes, I can clarify that.

[...]
>> +	if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
>
> This handles zero/zero huge page handling for non-pte_special() case
> only. I wonder if we even need to bother having these marked special
> generally since you can just check the PFN every time anyway.

Well, that makes (a) pte_special() a bit weird -- not set for some special
pages and (b) requires additional runtime checks for the case we all really
care about -- pte_special().

So I don't think we should change that.

[...]
>> +/**
>> + * vm_normal_folio() - Get the "struct folio" associated with a PTE
[...]
>> + */
>
> Nice to add a comment, but again feels weird to have the whole explanation in
> vm_normal_page_pfn() but then to invoke vm_normal_page()..

You want people to do pointer chasing to find what they are looking for? :)

-- 
Cheers,

David / dhildenb
On Thu, Jul 17, 2025 at 10:12:37PM +0200, David Hildenbrand wrote:
> > I wonder if it'd be better to just separate out the special handler, have
> > that return a boolean indicating special of either form, and then separate
> > other shared code separately from that?
>
> Let me think about that; I played with various approaches and this was the
> best I could come up with before running in circles.

Thanks

[...]
> > Strikes me this bit of the comment should be with vm_normal_page(). As this
> > implies the 2 broad cases are handled here and this isn't the case.
>
> Well, pragmatism. Splitting up the doc doesn't make sense. Having it at
> vm_normal_page() doesn't make sense.
>
> I'm sure the educated reader will be able to make sense of it :P
>
> But I'm happy to hear suggestions on how to do it differently :)

Right yeah. I feel like having separate 'special' handling for each case as
separate functions, each with their own specific explanation would work.

But I don't want to hold up the series _too_ much on this, generally I just
find the _pfn thing confusing. I mean the implementation is a total pain
anyway...

I feel like we could even have separate special handling functions like

#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
/*
 * < description of pte special special page >
 *
 * If returns true, then pagep set to NULL or, if a page can be found, that
 * page.
 */
static bool is_special_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte, struct page **pagep)
{
	unsigned long pfn = pte_pfn(pte);

	if (likely(!pte_special(pte))) {
		if (pfn <= highest_memmap_pfn)
			return false;

		goto bad;
	}

	if (vma->vm_ops && vma->vm_ops->find_special_page) {
		*pagep = vma->vm_ops->find_special_page(vma, addr);
		return true;
	} else if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) {
		goto special;
	}

	if (is_zero_pfn(pfn))
		goto special;

	/* If we reach here something's gone wrong. */
bad:
	print_bad_pte(vma, addr, pte, NULL);
special:
	*pagep = NULL;
	return true;
}
#else
/*
 * < description for not-pte special special page >
 */
static bool is_special_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte, struct page **pagep)
{
	unsigned long pfn = pte_pfn(pte);

	if (is_zero_pfn(pfn))
		goto special;

	if (vma->vm_flags & VM_MIXEDMAP) {
		if (!pfn_valid(pfn) || is_zero_pfn(pfn))
			goto special;
	} else if (vma->vm_flags & VM_PFNMAP) {
		unsigned long off;

		off = (addr - vma->vm_start) >> PAGE_SHIFT;
		if (pfn == vma->vm_pgoff + off)
			goto special;

		/* Hell's bells we allow CoW !arch_has_pte_special of PFN pages! help! */
		if (!is_cow_mapping(vma->vm_flags))
			goto special;
	}

	if (pfn > highest_memmap_pfn) {
		print_bad_pte(vma, addr, pte, NULL);
		goto special;
	}

	return false;

special:
	*pagep = NULL;
	return true;
}
#endif

And then obviously invoke as makes sense...

This is rough and untested, just to give a sense :>)

[...]
> > So it makes it quite nebulous what constitutes 'obviously' here, really you
> > mean pte_special().
>
> Yes, I can clarify that.

Thanks!

[...]
> > This handles zero/zero huge page handling for non-pte_special() case
> > only. I wonder if we even need to bother having these marked special
> > generally since you can just check the PFN every time anyway.
>
> Well, that makes (a) pte_special() a bit weird -- not set for some special
> pages and (b) requires additional runtime checks for the case we all really
> care about -- pte_special().
>
> So I don't think we should change that.

OK, best to be consistent in setting for special pages.

[...]
> > Nice to add a comment, but again feels weird to have the whole explanation in
> > vm_normal_page_pfn() but then to invoke vm_normal_page()..
>
> You want people to do pointer chasing to find what they are looking for? :)

Yes.

Only joking :P

Hopefully the ideas mentioned above clarify things... a bit maybe... :>)
On Thu, Jul 17, 2025 at 08:51:51PM +0100, Lorenzo Stoakes wrote:
> > -out:
> > -	return pfn_to_page(pfn);
> > +	return vm_normal_page_pfn(vma, addr, pfn, pmd_val(pmd));
>
> Hmm this seems broken, because you're now making these special on arches with
> pte_special() right? But then you're invoking the not-special function?
>
> Also for non-pte_special() arches you're kind of implying they _maybe_ could be
> special.

OK sorry the diff caught me out here, you explicitly handle the pmd_special()
case here, duplicatively (yuck).

Maybe you fix this up in a later patch :)

Anyway, again it'd be nice to somehow find a way to separate the special
check out from the rest.
On 17.07.25 21:55, Lorenzo Stoakes wrote:
> On Thu, Jul 17, 2025 at 08:51:51PM +0100, Lorenzo Stoakes wrote:
[...]
>> Hmm this seems broken, because you're now making these special on arches with
>> pte_special() right? But then you're invoking the not-special function?
>>
>> Also for non-pte_special() arches you're kind of implying they _maybe_ could be
>> special.
>
> OK sorry the diff caught me out here, you explicitly handle the pmd_special()
> case here, duplicatively (yuck).
>
> Maybe you fix this up in a later patch :)

I had that, but the conditions depend on the level, meaning: unnecessary
checks for pte/pmd/pud level.

I had a variant where I would pass "bool special" into vm_normal_page_pfn(),
but I didn't like it.

To optimize out, I would have to provide a "level" argument, and did not
convince myself yet that that is a good idea at this point.

-- 
Cheers,

David / dhildenb
On Thu, Jul 17, 2025 at 10:03:44PM +0200, David Hildenbrand wrote:
[...]
> I had that, but the conditions depend on the level, meaning: unnecessary
> checks for pte/pmd/pud level.
>
> I had a variant where I would pass "bool special" into vm_normal_page_pfn(),
> but I didn't like it.
>
> To optimize out, I would have to provide a "level" argument, and did not
> convince myself yet that that is a good idea at this point.

Yeah fair enough. That probably isn't worth it or might end up making things
even more ugly. We must keep things within the realms of good taste...

See other mail for a suggestion... I think this is just an awkward function
whatever way round.
On 18.07.25 14:43, Lorenzo Stoakes wrote:
[...]
>> I had a variant where I would pass "bool special" into vm_normal_page_pfn(),
>> but I didn't like it.
>>
>> To optimize out, I would have to provide a "level" argument, and did not
>> convince myself yet that that is a good idea at this point.
>
> Yeah fair enough. That probably isn't worth it or might end up making things
> even more ugly.

So, I decided to add the level arguments, but not use them to optimize the
checks, only to forward it to the new print_bad_pte().

So the new helper will be

/**
 * __vm_normal_page() - Get the "struct page" associated with a page table entry.
 * @vma: The VMA mapping the page table entry.
 * @addr: The address where the page table entry is mapped.
 * @pfn: The PFN stored in the page table entry.
 * @special: Whether the page table entry is marked "special".
 * @level: The page table level for error reporting purposes only.
 * @entry: The page table entry value for error reporting purposes only.
 ...
 */
static inline struct page *__vm_normal_page(struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, bool special,
		unsigned long long entry, enum pgtable_level level)
...

And vm_normal_page() will for example be

/**
 * vm_normal_page() - Get the "struct page" associated with a PTE
 * @vma: The VMA mapping the @pte.
 * @addr: The address where the @pte is mapped.
 * @pte: The PTE.
 *
 * Get the "struct page" associated with a PTE. See __vm_normal_page()
 * for details on "normal" and "special" mappings.
 *
 * Return: Returns the "struct page" if this is a "normal" mapping. Returns
 * NULL if this is a "special" mapping.
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte)
{
	return __vm_normal_page(vma, addr, pte_pfn(pte), pte_special(pte),
				pte_val(pte), PGTABLE_LEVEL_PTE);
}

-- 
Cheers,

David / dhildenb
On Wed, Jul 30, 2025 at 02:54:46PM +0200, David Hildenbrand wrote:
[...]
> So, I decided to add the level arguments, but not use them to optimize the
> checks, only to forward it to the new print_bad_pte().
>
> So the new helper will be
>
> /**
>  * __vm_normal_page() - Get the "struct page" associated with a page table entry.
[...]
> struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
> 		pte_t pte)
> {
> 	return __vm_normal_page(vma, addr, pte_pfn(pte), pte_special(pte),
> 				pte_val(pte), PGTABLE_LEVEL_PTE);
> }

OK that could work out well actually, cool thank you!
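To make the shape of that final design concrete outside of kernel context,
here is a minimal, compilable userspace sketch of the agreed dispatch
pattern: a shared helper takes the PFN, a precomputed "special" bit, and a
level enum that is forwarded purely for error reporting. The enum and
PGTABLE_LEVEL_* names mirror David's proposal above; everything else
(normal_page(), report_bad_entry(), the demo PFN bound) is illustrative and
not the actual mm/memory.c code.

#include <stdbool.h>
#include <stdio.h>

/* Assumed names, following the proposal above; only PTE/PMD shown. */
enum pgtable_level {
	PGTABLE_LEVEL_PTE,
	PGTABLE_LEVEL_PMD,
};

/* Stand-in for print_bad_page_map(): the level only changes what the
 * error report says, never the lookup logic itself. */
static void report_bad_entry(unsigned long long entry, enum pgtable_level level)
{
	static const char * const names[] = { "PTE", "PMD" };

	fprintf(stderr, "BUG: bad %s entry %#llx\n", names[level], entry);
}

/* Shared helper in the spirit of __vm_normal_page(): one code path for
 * all levels; "special" entries never resolve to a page. */
static const char *normal_page(unsigned long pfn, bool special,
			       unsigned long long entry,
			       enum pgtable_level level)
{
	const unsigned long highest_memmap_pfn = 0xffff; /* arbitrary demo bound */

	if (special)
		return NULL;
	if (pfn > highest_memmap_pfn) {	/* cheap corruption check */
		report_bad_entry(entry, level);
		return NULL;
	}
	return "struct page";	/* would be pfn_to_page(pfn) in the kernel */
}

int main(void)
{
	/* Per-level wrappers would just decode the entry and pass the level. */
	printf("pte -> %s\n",
	       normal_page(42, false, 42, PGTABLE_LEVEL_PTE) ? "page" : "NULL");
	printf("pmd (special) -> %s\n",
	       normal_page(42, true, 42, PGTABLE_LEVEL_PMD) ? "page" : "NULL");
	printf("corrupt pte -> %s\n",
	       normal_page(1UL << 30, false, 1ULL << 30, PGTABLE_LEVEL_PTE) ?
	       "page" : "NULL");
	return 0;
}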