Some architectures (like ARM64) support CONT-PTE/PMD size hugetlb, which
means they can provide not only PMD/PUD size hugetlb pages (2M and 1G), but
also CONT-PTE/PMD sizes (64K and 32M) when a 4K base page size is used.
So when looking up a CONT-PTE size hugetlb page with follow_page(),
follow_page_pte() uses pte_offset_map_lock() to take the pte entry lock.
However, that lock is the wrong one for a CONT-PTE size hugetlb page: the
correct lock must be obtained with huge_pte_lock(), which for this size is
mm->page_table_lock.
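
As background (not part of the patch), the difference between the two locks
can be seen from a simplified paraphrase of the generic huge_pte_lockptr()/
huge_pte_lock() helpers in include/linux/hugetlb.h; the exact form varies by
kernel version, but only PMD-size hugetlb may use the split pmd lock, while
every other size (including CONT-PTE and CONT-PMD) falls back to
mm->page_table_lock:

/*
 * Simplified paraphrase of the generic helpers (not the verbatim kernel
 * source): only PMD-size hugetlb pages use the split pmd lock; all other
 * hugetlb sizes, including CONT-PTE and CONT-PMD, are serialized by
 * mm->page_table_lock.
 */
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *)pte);
	return &mm->page_table_lock;
}

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(h, mm, pte);

	spin_lock(ptl);
	return ptl;
}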
That means the pte entry of a CONT-PTE size hugetlb page is not stable under
the lock taken in follow_page_pte(): another path can still migrate or poison
that pte entry concurrently, which can cause races even though the code
nominally holds the 'pte lock'.
For example, suppose thread A looks up a CONT-PTE size hugetlb page via the
move_pages() syscall while holding the lock, but another thread B migrates
the CONT-PTE hugetlb page at the same time. Thread A then operates on a stale
page, and if thread A also goes on to migrate the page, a data inconsistency
results.
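
For illustration only (not part of the patch), here is a minimal userspace
sketch of how thread A's lookup can be driven through move_pages(). It is a
hypothetical reproducer, assuming an arm64 kernel with 4K base pages, 64K
(CONT-PTE) hugetlb pages reserved in the pool, and libnuma available (link
with -lnuma):

/* Hypothetical reproducer sketch, not a definitive test case. */
#define _GNU_SOURCE
#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE */
#include <sys/mman.h>
#include <stdio.h>

#ifndef MAP_HUGE_64KB
#define MAP_HUGE_64KB	(16U << 26)	/* log2(64K) == 16, << MAP_HUGE_SHIFT */
#endif

int main(void)
{
	const size_t sz = 64 * 1024;
	int node = 0, status = -1;
	void *pages[1];

	/* Map one 64K CONT-PTE hugetlb page and fault it in. */
	char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_64KB,
		       -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 1;

	/*
	 * "Thread A": move_pages() looks the page up via follow_page(),
	 * the path that previously took the wrong pte lock. A concurrent
	 * migration ("thread B") would race with this lookup.
	 */
	pages[0] = p;
	if (move_pages(0, 1, pages, &node, &status, MPOL_MF_MOVE))
		perror("move_pages");
	printf("page status/node: %d\n", status);

	munmap(p, sz);
	return 0;
}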
Moreover, the same issue exists for CONT-PMD size hugetlb in
follow_huge_pmd().
To fix the above issues, rename follow_huge_pmd() to follow_huge_pmd_pte()
and make it handle both PMD and PTE level hugetlb, using huge_pte_lock() to
take the correct pte entry lock so that the entry stays stable.
Cc: <stable@vger.kernel.org>
Suggested-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
Changes from v2:
- Combine PMD and PTE level hugetlb handling into one function.
- Drop unnecessary patches.
- Update the commit message.
Mike, please fold this patch into your series. Thanks.
---
include/linux/hugetlb.h | 8 ++++----
mm/gup.c | 14 +++++++++++++-
mm/hugetlb.c | 27 +++++++++++++--------------
3 files changed, 30 insertions(+), 19 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 852f911..fe4944f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -207,8 +207,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
struct page *follow_huge_pd(struct vm_area_struct *vma,
unsigned long address, hugepd_t hpd,
int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+ int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
@@ -319,8 +319,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
return NULL;
}
-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
- unsigned long address, pmd_t *pmd, int flags)
+static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
+ unsigned long address, int flags)
{
return NULL;
}
diff --git a/mm/gup.c b/mm/gup.c
index 66d8619e..1e74fc0 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -530,6 +530,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
(FOLL_PIN | FOLL_GET)))
return ERR_PTR(-EINVAL);
+
+ /*
+ * Considering PTE level hugetlb, like continuous-PTE hugetlb on
+ * ARM64 architecture.
+ */
+ if (is_vm_hugetlb_page(vma)) {
+ page = follow_huge_pmd_pte(vma, address, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+
retry:
if (unlikely(pmd_bad(*pmd)))
return no_page_table(vma, flags);
@@ -662,7 +674,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
if (pmd_none(pmdval))
return no_page_table(vma, flags);
if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
- page = follow_huge_pmd(mm, address, pmd, flags);
+ page = follow_huge_pmd_pte(vma, address, flags);
if (page)
return page;
return no_page_table(vma, flags);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d0617d6..c613d3c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7156,12 +7156,13 @@ struct page * __weak
}
struct page * __weak
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int flags)
+follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
{
+ struct hstate *h = hstate_vma(vma);
+ struct mm_struct *mm = vma->vm_mm;
struct page *page = NULL;
spinlock_t *ptl;
- pte_t pte;
+ pte_t *ptep, pte;
/*
* FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
@@ -7171,17 +7172,15 @@ struct page * __weak
return NULL;
retry:
- ptl = pmd_lockptr(mm, pmd);
- spin_lock(ptl);
- /*
- * make sure that the address range covered by this pmd is not
- * unmapped from other threads.
- */
- if (!pmd_huge(*pmd))
- goto out;
- pte = huge_ptep_get((pte_t *)pmd);
+ ptep = huge_pte_offset(mm, address, huge_page_size(h));
+ if (!ptep)
+ return NULL;
+
+ ptl = huge_pte_lock(h, mm, ptep);
+ pte = huge_ptep_get(ptep);
if (pte_present(pte)) {
- page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ page = pte_page(pte) +
+ ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
/*
* try_grab_page() should always succeed here, because: a) we
* hold the pmd (ptl) lock, and b) we've just checked that the
@@ -7197,7 +7196,7 @@ struct page * __weak
} else {
if (is_hugetlb_entry_migration(pte)) {
spin_unlock(ptl);
- __migration_entry_wait_huge((pte_t *)pmd, ptl);
+ __migration_entry_wait_huge(ptep, ptl);
goto retry;
}
/*
--
1.8.3.1
On 09/01/22 18:41, Baolin Wang wrote:
> [... patch description and changelog quoted in full above ...]

Baolin, were you able to at least exercise the new code paths? Especially the
path for CONT_PTE. Code looks fine to me.

Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>

It is a little hackish, but this is only for backports. So, I think it is OK.
We may want to point out that code cleanup and simplification is going upstream
that will address these issues in a more elegant manner.

> Mike, please fold this patch into your series. Thanks.

If I understand Andrew, this can go in as a separate patch for backport to
address potential bugs. I will provide a cleanup/simplification that will
remove this going forward.

Andrew also asked for a Fixes tag.
Support for CONT_PMD/_PTE was added with bb9dd3df8ee9 "arm64: hugetlb: refactor
find_num_contig()". Patch series "Support for contiguous pte hugepages", v4.
However, I do not believe these code paths were executed until migration
support was added with 5480280d3f2d "arm64/mm: enable HugeTLB migration for
contiguous bit HugeTLB pages". I would go with 5480280d3f2d.
--
Mike Kravetz
On 9/2/2022 5:06 AM, Mike Kravetz wrote:
> On 09/01/22 18:41, Baolin Wang wrote:
>> [... patch description and changelog quoted in full above ...]
>
> Baolin, were you able to at least exercise the new code paths? Especially the
> path for CONT_PTE. Code looks fine to me.

Yes, I've tested CONT-PTE, CONT-PMD and PMD size hugetlb with the move_pages()
syscall; all work well and the lock taken is the expected one.

> Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
>
> [... review comments and Fixes tag discussion quoted above ...]
>
> I would go with 5480280d3f2d.

Makes sense. And I saw Andrew has helped to add a Fixes tag with your
suggestion. Thanks Mike and Andrew.
On Thu, 1 Sep 2022 18:41:31 +0800 Baolin Wang <baolin.wang@linux.alibaba.com> wrote:
> [... patch description and changelog quoted in full above ...]
>
> Cc: <stable@vger.kernel.org>

Are we able to think of a Fixes: for this?

> Mike, please fold this patch into your series. Thanks.

As this is cc:stable I'll be looking to get this into mainline during this -rc
cycle, so it shouldn't be part of a for-next-rc patch series.