As documented in the comment, this underflow should not happen. The
locking has indeed changed here since the comment was written, see the
migratetype hygiene patches[0]. However, those changes made the locking
_safer_, so the underflow _really_ shouldn't happen now. So upgrade
the comment to a warning.
[0] https://lore.kernel.org/all/20240320180429.678181-7-hannes@cmpxchg.org/T/#m3da87e6cc3348a4640aa298137bc9f8f61b76c84
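For illustration only (this is not the kernel code; mock_warn_on_once() and
nr_reserved are stand-in names for a userspace sketch), the warn-then-clamp
behaviour this patch introduces looks roughly like:

#include <stdio.h>

static unsigned long nr_reserved = 1024;	/* stands in for zone->nr_reserved_highatomic */

/* Stand-in for the kernel's WARN_ON_ONCE(): report the first hit, return the condition. */
static int mock_warn_on_once(int cond)
{
	static int warned;

	if (cond && !warned) {
		warned = 1;
		fprintf(stderr, "WARNING: highatomic reserve underflow\n");
	}
	return cond;
}

static void unreserve(unsigned long size)
{
	/* Old: size = min(size, nr_reserved); clamps silently, hiding any bug. */
	/* New: clamp the same way, but complain the first time it happens. */
	if (mock_warn_on_once(size > nr_reserved))
		size = nr_reserved;
	nr_reserved -= size;
}

int main(void)
{
	unreserve(512);		/* fine: 1024 -> 512 */
	unreserve(4096);	/* would underflow: warns once, clamps to 0 */
	printf("nr_reserved = %lu\n", nr_reserved);
	return 0;
}

The point is that WARN_ON_ONCE() evaluates to its condition, so the clamp
still happens exactly as before, but the first underflow gets reported
instead of being silently papered over.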
Signed-off-by: Brendan Jackman <jackmanb@google.com>
---
mm/page_alloc.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5d8e274c8b1d500d263a17ef36fe190f60b88196..715a9cfe162090cca9eb819a34c64f9a1c6db29a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3095,6 +3095,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			if (!page)
 				continue;
 
+			size = max(pageblock_nr_pages, 1UL << order);
 			/*
 			 * It should never happen but changes to
 			 * locking could inadvertently allow a per-cpu
@@ -3102,8 +3103,8 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * while unreserving so be safe and watch for
 			 * underflows.
 			 */
-			size = max(pageblock_nr_pages, 1UL << order);
-			size = min(size, zone->nr_reserved_highatomic);
+			if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic))
+				size = zone->nr_reserved_highatomic;
 			zone->nr_reserved_highatomic -= size;
 
 			/*
---
base-commit: 0c789105c9d6c65777c995f4935f2e119d5a31a5
change-id: 20250225-warn-underflow-7ab0069182e2
Best regards,
--
Brendan Jackman <jackmanb@google.com>
On 2/25/25 19:45, Brendan Jackman wrote:
> As documented in the comment, this underflow should not happen. The
> locking has indeed changed here since the comment was written, see the
> migratetype hygiene patches[0]. However, those changes made the locking
> _safer_, so the underflow _really_ shouldn't happen now. So upgrade
> the comment to a warning.
>
> [0] https://lore.kernel.org/all/20240320180429.678181-7-hannes@cmpxchg.org/T/#m3da87e6cc3348a4640aa298137bc9f8f61b76c84
>
> Signed-off-by: Brendan Jackman <jackmanb@google.com>

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

Thanks!