Replace the explicit lock/unlock pattern in free_pcppages_bulk() with the
zone_lock_irqsave lock guard, giving automatic scope-based cleanup. The
guard's scope ends at function exit, exactly where the removed
zone_unlock_irqrestore() call was, so locking behavior is unchanged.
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
---
mm/page_alloc.c | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 28b06baa4075..2759e02340fa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1455,7 +1455,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
struct per_cpu_pages *pcp,
int pindex)
{
- unsigned long flags;
unsigned int order;
struct page *page;
@@ -1468,7 +1467,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
/* Ensure requested pindex is drained first. */
pindex = pindex - 1;
- zone_lock_irqsave(zone, flags);
+ guard(zone_lock_irqsave)(zone);
while (count > 0) {
struct list_head *list;
@@ -1500,8 +1499,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
trace_mm_page_pcpu_drain(page, order, mt);
} while (count > 0 && !list_empty(list));
}
-
- zone_unlock_irqrestore(zone, flags);
}
/* Split a multi-block free page into its individual pageblocks. */
--
2.47.3