Replace all READ_ONCE() instances with the standard page table accessors,
i.e. pxdp_get(), which default to READ_ONCE() when the platform does not
override them.
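
For reference, the generic fallbacks in include/linux/pgtable.h reduce to a
plain READ_ONCE() of the entry whenever an architecture does not provide its
own definition. A minimal sketch of that pattern (paraphrased from the
header, not a verbatim copy):

	#ifndef pudp_get
	static inline pud_t pudp_get(pud_t *pudp)
	{
		/* generic fallback: a plain dereference via READ_ONCE() */
		return READ_ONCE(*pudp);
	}
	#endif

	#ifndef pgdp_get
	static inline pgd_t pgdp_get(pgd_t *pgdp)
	{
		return READ_ONCE(*pgdp);
	}
	#endif

So on architectures without an override this change is intended to be a
no-op at the instruction level.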
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
mm/gup.c | 10 +++++-----
mm/hmm.c | 2 +-
mm/memory.c | 4 ++--
mm/mprotect.c | 2 +-
mm/sparse-vmemmap.c | 2 +-
mm/vmscan.c | 2 +-
6 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 0bc4d140fc07..37e2af5ed96d 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -964,7 +964,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
pudp = pud_offset(p4dp, address);
- pud = READ_ONCE(*pudp);
+ pud = pudp_get(pudp);
if (!pud_present(pud))
return no_page_table(vma, flags, address);
if (pud_leaf(pud)) {
@@ -989,7 +989,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
p4d_t *p4dp, p4d;
p4dp = p4d_offset(pgdp, address);
- p4d = READ_ONCE(*p4dp);
+ p4d = p4dp_get(p4dp);
BUILD_BUG_ON(p4d_leaf(p4d));
if (!p4d_present(p4d) || p4d_bad(p4d))
@@ -3080,7 +3080,7 @@ static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
pudp = pud_offset_lockless(p4dp, p4d, addr);
do {
- pud_t pud = READ_ONCE(*pudp);
+ pud_t pud = pudp_get(pudp);
next = pud_addr_end(addr, end);
if (unlikely(!pud_present(pud)))
@@ -3106,7 +3106,7 @@ static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
p4dp = p4d_offset_lockless(pgdp, pgd, addr);
do {
- p4d_t p4d = READ_ONCE(*p4dp);
+ p4d_t p4d = p4dp_get(p4dp);
next = p4d_addr_end(addr, end);
if (!p4d_present(p4d))
@@ -3128,7 +3128,7 @@ static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
pgdp = pgd_offset(current->mm, addr);
do {
- pgd_t pgd = READ_ONCE(*pgdp);
+ pgd_t pgd = pgdp_get(pgdp);
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
diff --git a/mm/hmm.c b/mm/hmm.c
index d545e2494994..126c3f42e525 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -431,7 +431,7 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
/* Normally we don't want to split the huge page */
walk->action = ACTION_CONTINUE;
- pud = READ_ONCE(*pudp);
+ pud = pudp_get(pudp);
if (!pud_present(pud)) {
spin_unlock(ptl);
return hmm_vma_walk_hole(start, end, -1, walk);
diff --git a/mm/memory.c b/mm/memory.c
index 0ba4f6b71847..50f841ee6e84 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6549,12 +6549,12 @@ int follow_pfnmap_start(struct follow_pfnmap_args *args)
goto out;
p4dp = p4d_offset(pgdp, address);
- p4d = READ_ONCE(*p4dp);
+ p4d = p4dp_get(p4dp);
if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
goto out;
pudp = pud_offset(p4dp, address);
- pud = READ_ONCE(*pudp);
+ pud = pudp_get(pudp);
if (pud_none(pud))
goto out;
if (pud_leaf(pud)) {
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 113b48985834..988c366137d5 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -599,7 +599,7 @@ static inline long change_pud_range(struct mmu_gather *tlb,
break;
}
- pud = READ_ONCE(*pudp);
+ pud = pudp_get(pudp);
if (pud_none(pud))
continue;
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index dbd8daccade2..37522d6cb398 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -439,7 +439,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
return -ENOMEM;
pmd = pmd_offset(pud, addr);
- if (pmd_none(READ_ONCE(*pmd))) {
+ if (pmd_none(pmdp_get(pmd))) {
void *p;
p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 674999999cd0..14c2722b955b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3772,7 +3772,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
pud = pud_offset(p4d, start & P4D_MASK);
restart:
for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
- pud_t val = READ_ONCE(pud[i]);
+ pud_t val = pudp_get(pud + i);
next = pud_addr_end(addr, end);
--
2.30.2
On Tue, Oct 07, 2025 at 07:31:00AM +0100, Anshuman Khandual wrote:
>Replace all READ_ONCE() instances with the standard page table accessors,
>i.e. pxdp_get(), which default to READ_ONCE() when the platform does not
>override them.
>
>Cc: Andrew Morton <akpm@linux-foundation.org>
>Cc: David Hildenbrand <david@redhat.com>
>Cc: linux-mm@kvack.org
>Cc: linux-kernel@vger.kernel.org
>Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>

Reviewed-by: Wei Yang <richard.weiyang@gmail.com>

--
Wei Yang
Help you, Help me
On Tue, Oct 7, 2025 at 2:31 PM Anshuman Khandual
<anshuman.khandual@arm.com> wrote:
>
> Replace all READ_ONCE() instances with the standard page table accessors,
> i.e. pxdp_get(), which default to READ_ONCE() when the platform does not
> override them.
>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: David Hildenbrand <david@redhat.com>
> Cc: linux-mm@kvack.org
> Cc: linux-kernel@vger.kernel.org
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---

LGTM. I still assume it is a no-op change :)

Reviewed-by: Lance Yang <lance.yang@linux.dev>
On 07.10.25 08:31, Anshuman Khandual wrote:
> Replace all READ_ONCE() instances with the standard page table accessors,
> i.e. pxdp_get(), which default to READ_ONCE() when the platform does not
> override them.
>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: David Hildenbrand <david@redhat.com>
> Cc: linux-mm@kvack.org
> Cc: linux-kernel@vger.kernel.org
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---

Acked-by: David Hildenbrand <david@redhat.com>

--
Cheers

David / dhildenb
On 07/10/25 12:01 pm, Anshuman Khandual wrote:
> Replace all READ_ONCE() instances with the standard page table accessors,
> i.e. pxdp_get(), which default to READ_ONCE() when the platform does not
> override them.
Does any platform currently override these with something else? The way you
wrote the description implies that.
>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: David Hildenbrand <david@redhat.com>
> Cc: linux-mm@kvack.org
> Cc: linux-kernel@vger.kernel.org
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
> mm/gup.c | 10 +++++-----
> mm/hmm.c | 2 +-
> mm/memory.c | 4 ++--
> mm/mprotect.c | 2 +-
> mm/sparse-vmemmap.c | 2 +-
> mm/vmscan.c | 2 +-
> 6 files changed, 11 insertions(+), 11 deletions(-)
>
> diff --git a/mm/gup.c b/mm/gup.c
> index 0bc4d140fc07..37e2af5ed96d 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -964,7 +964,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
> struct mm_struct *mm = vma->vm_mm;
>
> pudp = pud_offset(p4dp, address);
> - pud = READ_ONCE(*pudp);
> + pud = pudp_get(pudp);
> if (!pud_present(pud))
> return no_page_table(vma, flags, address);
> if (pud_leaf(pud)) {
> @@ -989,7 +989,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
> p4d_t *p4dp, p4d;
>
> p4dp = p4d_offset(pgdp, address);
> - p4d = READ_ONCE(*p4dp);
> + p4d = p4dp_get(p4dp);
> BUILD_BUG_ON(p4d_leaf(p4d));
>
> if (!p4d_present(p4d) || p4d_bad(p4d))
> @@ -3080,7 +3080,7 @@ static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
>
> pudp = pud_offset_lockless(p4dp, p4d, addr);
> do {
> - pud_t pud = READ_ONCE(*pudp);
> + pud_t pud = pudp_get(pudp);
>
> next = pud_addr_end(addr, end);
> if (unlikely(!pud_present(pud)))
> @@ -3106,7 +3106,7 @@ static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
>
> p4dp = p4d_offset_lockless(pgdp, pgd, addr);
> do {
> - p4d_t p4d = READ_ONCE(*p4dp);
> + p4d_t p4d = p4dp_get(p4dp);
>
> next = p4d_addr_end(addr, end);
> if (!p4d_present(p4d))
> @@ -3128,7 +3128,7 @@ static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
>
> pgdp = pgd_offset(current->mm, addr);
> do {
> - pgd_t pgd = READ_ONCE(*pgdp);
> + pgd_t pgd = pgdp_get(pgdp);
>
> next = pgd_addr_end(addr, end);
> if (pgd_none(pgd))
> diff --git a/mm/hmm.c b/mm/hmm.c
> index d545e2494994..126c3f42e525 100644
> --- a/mm/hmm.c
> +++ b/mm/hmm.c
> @@ -431,7 +431,7 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
> /* Normally we don't want to split the huge page */
> walk->action = ACTION_CONTINUE;
>
> - pud = READ_ONCE(*pudp);
> + pud = pudp_get(pudp);
> if (!pud_present(pud)) {
> spin_unlock(ptl);
> return hmm_vma_walk_hole(start, end, -1, walk);
> diff --git a/mm/memory.c b/mm/memory.c
> index 0ba4f6b71847..50f841ee6e84 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -6549,12 +6549,12 @@ int follow_pfnmap_start(struct follow_pfnmap_args *args)
> goto out;
>
> p4dp = p4d_offset(pgdp, address);
> - p4d = READ_ONCE(*p4dp);
> + p4d = p4dp_get(p4dp);
> if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
> goto out;
>
> pudp = pud_offset(p4dp, address);
> - pud = READ_ONCE(*pudp);
> + pud = pudp_get(pudp);
> if (pud_none(pud))
> goto out;
> if (pud_leaf(pud)) {
> diff --git a/mm/mprotect.c b/mm/mprotect.c
> index 113b48985834..988c366137d5 100644
> --- a/mm/mprotect.c
> +++ b/mm/mprotect.c
> @@ -599,7 +599,7 @@ static inline long change_pud_range(struct mmu_gather *tlb,
> break;
> }
>
> - pud = READ_ONCE(*pudp);
> + pud = pudp_get(pudp);
> if (pud_none(pud))
> continue;
>
> diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
> index dbd8daccade2..37522d6cb398 100644
> --- a/mm/sparse-vmemmap.c
> +++ b/mm/sparse-vmemmap.c
> @@ -439,7 +439,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
> return -ENOMEM;
>
> pmd = pmd_offset(pud, addr);
> - if (pmd_none(READ_ONCE(*pmd))) {
> + if (pmd_none(pmdp_get(pmd))) {
I believe sparse-vmemmap is only for 64-bit arches, so we are safe.
> void *p;
>
> p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 674999999cd0..14c2722b955b 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -3772,7 +3772,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
> pud = pud_offset(p4d, start & P4D_MASK);
> restart:
> for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
> - pud_t val = READ_ONCE(pud[i]);
> + pud_t val = pudp_get(pud + i);
>
> next = pud_addr_end(addr, end);
>
On 07/10/25 12:41 PM, Dev Jain wrote:
>
> On 07/10/25 12:01 pm, Anshuman Khandual wrote:
>> Replace all READ_ONCE() instances with the standard page table accessors,
>> i.e. pxdp_get(), which default to READ_ONCE() when the platform does not
>> override them.
>
> Does any platform override into something else currently? The way you write
> the description implies that.
That's how the callbacks have been designed; they can be overridden when
required.
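
For illustration, an architecture that needs more than a plain load would
supply its own definition ahead of the generic fallback. A purely
hypothetical override (arch_fixup_pud() is made up for this example and is
not a real helper):

	#define pudp_get pudp_get
	static inline pud_t pudp_get(pud_t *pudp)
	{
		pud_t pud = READ_ONCE(*pudp);

		/* hypothetical arch-specific fixup of the raw entry */
		return arch_fixup_pud(pud);
	}

arm64 already does something along these lines for ptep_get() to cope with
contiguous PTEs, which is exactly the kind of case these accessors exist
for.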
>
>>
>> Cc: Andrew Morton <akpm@linux-foundation.org>
>> Cc: David Hildenbrand <david@redhat.com>
>> Cc: linux-mm@kvack.org
>> Cc: linux-kernel@vger.kernel.org
>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>> ---
>> mm/gup.c | 10 +++++-----
>> mm/hmm.c | 2 +-
>> mm/memory.c | 4 ++--
>> mm/mprotect.c | 2 +-
>> mm/sparse-vmemmap.c | 2 +-
>> mm/vmscan.c | 2 +-
>> 6 files changed, 11 insertions(+), 11 deletions(-)
>>
>> diff --git a/mm/gup.c b/mm/gup.c
>> index 0bc4d140fc07..37e2af5ed96d 100644
>> --- a/mm/gup.c
>> +++ b/mm/gup.c
>> @@ -964,7 +964,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
>> struct mm_struct *mm = vma->vm_mm;
>> pudp = pud_offset(p4dp, address);
>> - pud = READ_ONCE(*pudp);
>> + pud = pudp_get(pudp);
>> if (!pud_present(pud))
>> return no_page_table(vma, flags, address);
>> if (pud_leaf(pud)) {
>> @@ -989,7 +989,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
>> p4d_t *p4dp, p4d;
>> p4dp = p4d_offset(pgdp, address);
>> - p4d = READ_ONCE(*p4dp);
>> + p4d = p4dp_get(p4dp);
>> BUILD_BUG_ON(p4d_leaf(p4d));
>> if (!p4d_present(p4d) || p4d_bad(p4d))
>> @@ -3080,7 +3080,7 @@ static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
>> pudp = pud_offset_lockless(p4dp, p4d, addr);
>> do {
>> - pud_t pud = READ_ONCE(*pudp);
>> + pud_t pud = pudp_get(pudp);
>> next = pud_addr_end(addr, end);
>> if (unlikely(!pud_present(pud)))
>> @@ -3106,7 +3106,7 @@ static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
>> p4dp = p4d_offset_lockless(pgdp, pgd, addr);
>> do {
>> - p4d_t p4d = READ_ONCE(*p4dp);
>> + p4d_t p4d = p4dp_get(p4dp);
>> next = p4d_addr_end(addr, end);
>> if (!p4d_present(p4d))
>> @@ -3128,7 +3128,7 @@ static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
>> pgdp = pgd_offset(current->mm, addr);
>> do {
>> - pgd_t pgd = READ_ONCE(*pgdp);
>> + pgd_t pgd = pgdp_get(pgdp);
>> next = pgd_addr_end(addr, end);
>> if (pgd_none(pgd))
>> diff --git a/mm/hmm.c b/mm/hmm.c
>> index d545e2494994..126c3f42e525 100644
>> --- a/mm/hmm.c
>> +++ b/mm/hmm.c
>> @@ -431,7 +431,7 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
>> /* Normally we don't want to split the huge page */
>> walk->action = ACTION_CONTINUE;
>> - pud = READ_ONCE(*pudp);
>> + pud = pudp_get(pudp);
>> if (!pud_present(pud)) {
>> spin_unlock(ptl);
>> return hmm_vma_walk_hole(start, end, -1, walk);
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 0ba4f6b71847..50f841ee6e84 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -6549,12 +6549,12 @@ int follow_pfnmap_start(struct follow_pfnmap_args *args)
>> goto out;
>> p4dp = p4d_offset(pgdp, address);
>> - p4d = READ_ONCE(*p4dp);
>> + p4d = p4dp_get(p4dp);
>> if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
>> goto out;
>> pudp = pud_offset(p4dp, address);
>> - pud = READ_ONCE(*pudp);
>> + pud = pudp_get(pudp);
>> if (pud_none(pud))
>> goto out;
>> if (pud_leaf(pud)) {
>> diff --git a/mm/mprotect.c b/mm/mprotect.c
>> index 113b48985834..988c366137d5 100644
>> --- a/mm/mprotect.c
>> +++ b/mm/mprotect.c
>> @@ -599,7 +599,7 @@ static inline long change_pud_range(struct mmu_gather *tlb,
>> break;
>> }
>> - pud = READ_ONCE(*pudp);
>> + pud = pudp_get(pudp);
>> if (pud_none(pud))
>> continue;
>> diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
>> index dbd8daccade2..37522d6cb398 100644
>> --- a/mm/sparse-vmemmap.c
>> +++ b/mm/sparse-vmemmap.c
>> @@ -439,7 +439,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
>> return -ENOMEM;
>> pmd = pmd_offset(pud, addr);
>> - if (pmd_none(READ_ONCE(*pmd))) {
>> + if (pmd_none(pmdp_get(pmd))) {
>
> I believe sparse-vmemmap is only for 64 bit arches so we are safe.
>
>> void *p;
>> p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index 674999999cd0..14c2722b955b 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -3772,7 +3772,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
>> pud = pud_offset(p4d, start & P4D_MASK);
>> restart:
>> for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
>> - pud_t val = READ_ONCE(pud[i]);
>> + pud_t val = pudp_get(pud + i);
>> next = pud_addr_end(addr, end);
>>
On 07/10/25 1:24 pm, Anshuman Khandual wrote:
>
> On 07/10/25 12:41 PM, Dev Jain wrote:
>> On 07/10/25 12:01 pm, Anshuman Khandual wrote:
>>> Replace all READ_ONCE() instances with the standard page table accessors,
>>> i.e. pxdp_get(), which default to READ_ONCE() when the platform does not
>>> override them.
>>
>> Does any platform currently override these with something else? The way you
>> wrote the description implies that.
> That's how the callbacks have been designed; they can be overridden when
> required.

What I mean is, if some arch is currently overriding these with something
else, this patch will be a functional change. So it would be beneficial to
say in the description that the patch is a safeguard for when a platform
chooses to override, and that no functional change is intended.

Reviewed-by: Dev Jain <dev.jain@arm.com>