In alloc_surplus_hugetlb_folio(), nr_huge_pages and surplus_huge_pages
are increased in two separate hugetlb_lock critical sections. If, in the
window between them, nr_hugepages is lowered such that
count < persistent_huge_pages(h), adjust_pool_surplus() will increase
surplus_huge_pages as well.
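
Roughly, the problematic interleaving is (illustrative, details elided):

	CPU0: alloc_surplus_hugetlb_folio()
	CPU0:   alloc_fresh_hugetlb_folio() -> __prep_account_new_huge_page()
	CPU0:     h->nr_huge_pages++          (hugetlb_lock taken and dropped)
	CPU1: echo <smaller value> > /proc/sys/vm/nr_hugepages
	CPU1:   set_max_huge_pages() sees count < persistent_huge_pages(h)
	CPU1:     adjust_pool_surplus(h, ..., 1): h->surplus_huge_pages++
	CPU0: spin_lock_irq(&hugetlb_lock)
	CPU0:   h->surplus_huge_pages++        (surplus is now over-counted)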
After adding a delay in that window, the problem can be reproduced
easily with the following steps:
1. echo 3 > /proc/sys/vm/nr_overcommit_hugepages
2. mmap two hugepages (a minimal reproducer is sketched below). Once
   nr_huge_pages=2 and surplus_huge_pages=1, go to step 3.
3. echo 0 > /proc/sys/vm/nr_hugepages
Finally, nr_huge_pages ends up less than surplus_huge_pages.
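
For step 2, a minimal reproducer could look like the sketch below
(assuming a 2MB default hugepage size):

	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define HPAGE_SIZE	(2UL << 20)	/* assumes 2MB default hugepage size */

	int main(void)
	{
		/*
		 * Two anonymous MAP_HUGETLB mappings; with no persistent
		 * hugepages and nr_overcommit_hugepages=3, the reservations
		 * are backed by surplus pages.
		 */
		char *p1 = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
		char *p2 = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

		if (p1 == MAP_FAILED || p2 == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* Write to the mappings so the pages are definitely instantiated. */
		*p1 = 1;
		*p2 = 1;

		pause();	/* keep the mappings alive while running step 3 */
		return 0;
	}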
Fixes: 0c397daea1d4 ("mm, hugetlb: further simplify hugetlb allocation API")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
mm/hugetlb.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9faa1034704ff..a900562ea7679 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2248,14 +2248,17 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
if (hstate_is_gigantic(h))
return NULL;
+ mutex_lock(&h->resize_lock);
spin_lock_irq(&hugetlb_lock);
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
goto out_unlock;
spin_unlock_irq(&hugetlb_lock);
folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
- if (!folio)
+ if (!folio) {
+ mutex_unlock(&h->resize_lock);
return NULL;
+ }
spin_lock_irq(&hugetlb_lock);
/*
@@ -2268,6 +2271,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
folio_set_hugetlb_temporary(folio);
spin_unlock_irq(&hugetlb_lock);
+ mutex_unlock(&h->resize_lock);
free_huge_folio(folio);
return NULL;
}
@@ -2277,6 +2281,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
out_unlock:
spin_unlock_irq(&hugetlb_lock);
+ mutex_unlock(&h->resize_lock);
return folio;
}
--
2.34.1
On Mon, Mar 03, 2025 at 10:41:05AM +0800, Liu Shixin wrote:
> In alloc_surplus_hugetlb_folio(), nr_huge_pages and surplus_huge_pages
> are increased in two separate hugetlb_lock critical sections. If, in
> the window between them, nr_hugepages is lowered such that
> count < persistent_huge_pages(h), adjust_pool_surplus() will increase
> surplus_huge_pages as well.
>
> After adding a delay in that window, the problem can be reproduced
> easily with the following steps:
>
> 1. echo 3 > /proc/sys/vm/nr_overcommit_hugepages
> 2. mmap two hugepages. Once nr_huge_pages=2 and surplus_huge_pages=1,
>    go to step 3.
> 3. echo 0 > /proc/sys/vm/nr_hugepages
Looks reasonable to me. However, I'm not sure whether this may cause a
regression for concurrent allocations of surplus pages.
Would it be possible to stick with hugetlb_lock? IIUC only the allocation
part of alloc_fresh_hugetlb_folio() needs the lock to be released, so we
could update the two counters together under hugetlb_lock by open-coding
alloc_fresh_hugetlb_folio() and moving __prep_account_new_huge_page() out
to after the lock is retaken.
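
As a rough, untested sketch (only_alloc_fresh_hugetlb_folio() here is a
stand-in name for whatever helper does just the allocation, and the
folio prep steps are elided):

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	/* Only the (possibly sleeping) allocation runs unlocked. */
	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
	if (!folio)
		return NULL;

	/* ... folio prep from alloc_fresh_hugetlb_folio() ... */

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		folio_set_hugetlb_temporary(folio);
		spin_unlock_irq(&hugetlb_lock);
		free_huge_folio(folio);
		return NULL;
	}

	/* nr_huge_pages and surplus_huge_pages now move together. */
	__prep_account_new_huge_page(h, folio_nid(folio));
	h->surplus_huge_pages++;
	h->surplus_huge_pages_node[folio_nid(folio)]++;

	spin_unlock_irq(&hugetlb_lock);
	return folio;

That would also keep concurrent surplus allocations from serializing on
resize_lock.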
>
> Finally, nr_huge_pages ends up less than surplus_huge_pages.
>
> Fixes: 0c397daea1d4 ("mm, hugetlb: further simplify hugetlb allocation API")
> Signed-off-by: Liu Shixin <liushixin2@huawei.com>
> ---
> mm/hugetlb.c | 7 ++++++-
> 1 file changed, 6 insertions(+), 1 deletion(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 9faa1034704ff..a900562ea7679 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -2248,14 +2248,17 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
> if (hstate_is_gigantic(h))
> return NULL;
>
> + mutex_lock(&h->resize_lock);
> spin_lock_irq(&hugetlb_lock);
> if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
> goto out_unlock;
> spin_unlock_irq(&hugetlb_lock);
>
> folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
> - if (!folio)
> + if (!folio) {
> + mutex_unlock(&h->resize_lock);
> return NULL;
> + }
>
> spin_lock_irq(&hugetlb_lock);
> /*
> @@ -2268,6 +2271,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
> if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
> folio_set_hugetlb_temporary(folio);
> spin_unlock_irq(&hugetlb_lock);
> + mutex_unlock(&h->resize_lock);
> free_huge_folio(folio);
> return NULL;
> }
> @@ -2277,6 +2281,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
>
> out_unlock:
> spin_unlock_irq(&hugetlb_lock);
> + mutex_unlock(&h->resize_lock);
>
> return folio;
> }
> --
> 2.34.1
>
>
--
Peter Xu