Let's clean it all up further: move the address range check and the PUD
locking into insert_pud() and have it return a vm_fault_t, so that
vmf_insert_pfn_pud() and vmf_insert_folio_pud() can simply return its result.

No functional change intended.

Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
mm/huge_memory.c | 36 +++++++++++++-----------------------
1 file changed, 13 insertions(+), 23 deletions(-)
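
As a quick reviewer aid, here is a rough sketch of how insert_pud() ends up
looking with this patch applied. It is only an illustration assembled from the
hunks below, with the entry construction and the existing-mapping handling
elided into comments; the diff itself is authoritative:

static vm_fault_t insert_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, struct folio_or_pfn fop, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pud_t entry;

	/* The range check both callers used to perform now lives here. */
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	/* The PUD lock also moved in here; every exit path unlocks below. */
	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		/* Existing mapping: only refresh young/dirty bits on write. */
		goto out_unlock;
	}

	/* ... build the new entry from fop (folio or pfn), see the hunks ... */

	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);
out_unlock:
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}

With that, vmf_insert_pfn_pud() and vmf_insert_folio_pud() reduce to setting
up their folio_or_pfn and returning the result of insert_pud() directly.
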
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1178760d2eda4..849feacaf8064 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1518,25 +1518,30 @@ static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
 	return pud;
 }
 
-static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
+static vm_fault_t insert_pud(struct vm_area_struct *vma, unsigned long addr,
 		pud_t *pud, struct folio_or_pfn fop, pgprot_t prot, bool write)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
 	pud_t entry;
 
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return VM_FAULT_SIGBUS;
+
+	ptl = pud_lock(mm, pud);
 	if (!pud_none(*pud)) {
 		const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
 					  fop.pfn;
 
 		if (write) {
 			if (WARN_ON_ONCE(pud_pfn(*pud) != pfn))
-				return;
+				goto out_unlock;
 			entry = pud_mkyoung(*pud);
 			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
 			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
 				update_mmu_cache_pud(vma, addr, pud);
 		}
-		return;
+		goto out_unlock;
 	}
 
 	if (fop.is_folio) {
@@ -1555,6 +1560,9 @@ static void insert_pud(struct vm_area_struct *vma, unsigned long addr,
 	}
 	set_pud_at(mm, addr, pud, entry);
 	update_mmu_cache_pud(vma, addr, pud);
+out_unlock:
+	spin_unlock(ptl);
+	return VM_FAULT_NOPAGE;
 }
 
 /**
@@ -1576,7 +1584,6 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
 	struct folio_or_pfn fop = {
 		.pfn = pfn,
 	};
-	spinlock_t *ptl;
 
 	/*
 	 * If we had pud_special, we could avoid all these restrictions,
@@ -1588,16 +1595,9 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
 						(VM_PFNMAP|VM_MIXEDMAP));
 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
 
-	if (addr < vma->vm_start || addr >= vma->vm_end)
-		return VM_FAULT_SIGBUS;
-
 	pfnmap_setup_cachemode_pfn(pfn, &pgprot);
 
-	ptl = pud_lock(vma->vm_mm, vmf->pud);
-	insert_pud(vma, addr, vmf->pud, fop, pgprot, write);
-	spin_unlock(ptl);
-
-	return VM_FAULT_NOPAGE;
+	return insert_pud(vma, addr, vmf->pud, fop, pgprot, write);
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 
@@ -1614,25 +1614,15 @@ vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
 {
 	struct vm_area_struct *vma = vmf->vma;
 	unsigned long addr = vmf->address & PUD_MASK;
-	pud_t *pud = vmf->pud;
-	struct mm_struct *mm = vma->vm_mm;
 	struct folio_or_pfn fop = {
 		.folio = folio,
 		.is_folio = true,
 	};
-	spinlock_t *ptl;
-
-	if (addr < vma->vm_start || addr >= vma->vm_end)
-		return VM_FAULT_SIGBUS;
 
 	if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))
 		return VM_FAULT_SIGBUS;
 
-	ptl = pud_lock(mm, pud);
-	insert_pud(vma, addr, vmf->pud, fop, vma->vm_page_prot, write);
-	spin_unlock(ptl);
-
-	return VM_FAULT_NOPAGE;
+	return insert_pud(vma, addr, vmf->pud, fop, vma->vm_page_prot, write);
 }
 EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
--
2.50.1
On Thu, Jul 17, 2025 at 01:52:05PM +0200, David Hildenbrand wrote:
>Let's clean it all up further: move the address range check and the PUD
>locking into insert_pud() and have it return a vm_fault_t, so that
>vmf_insert_pfn_pud() and vmf_insert_folio_pud() can simply return its result.
>
>No functional change intended.
>
>Reviewed-by: Oscar Salvador <osalvador@suse.de>
>Reviewed-by: Alistair Popple <apopple@nvidia.com>
>Signed-off-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Wei Yang <richard.weiyang@gmail.com>

-- 
Wei Yang
Help you, Help me
On Thu, Jul 17, 2025 at 01:52:05PM +0200, David Hildenbrand wrote:
> Let's clean it all up further: move the address range check and the PUD
> locking into insert_pud() and have it return a vm_fault_t, so that
> vmf_insert_pfn_pud() and vmf_insert_folio_pud() can simply return its result.
>
> No functional change intended.
>
> Reviewed-by: Oscar Salvador <osalvador@suse.de>
> Reviewed-by: Alistair Popple <apopple@nvidia.com>
> Signed-off-by: David Hildenbrand <david@redhat.com>

LGTM:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>