From: lipeifeng <lipeifeng@oppo.com>
CMA pages can fall back to serving movable allocations in many scenarios.
When CMA pages are freed to the pcplist, queue them at the tail: the pcp
lists are allocated from the head and drained to buddy from the tail, so
tail insertion gives CMA pages priority to be freed back to buddy. If
there are enough free movable pages, this avoids CMA pages being handed
out as movable pages again soon, which keeps more CMA pages in buddy and
reduces page migration when cma_alloc() runs.
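
For reference (not part of the change), the pcp list discipline this
relies on, abridged from the current mm/page_alloc.c:

	/* allocation side, __rmqueue_pcplist(): pages are taken from the head */
	page = list_first_entry(list, struct page, lru);
	list_del(&page->lru);

	/* flush side, free_pcppages_bulk(): pages are drained from the tail */
	page = list_last_entry(list, struct page, lru);
	list_del(&page->lru);
	__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);

Pages queued with list_add_tail() are therefore the last to be
reallocated and among the first to be returned to buddy.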
Signed-off-by: lipeifeng <lipeifeng@oppo.com>
---
mm/page_alloc.c | 28 ++++++++++++++++++++++++----
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3589feb..69369ed 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3372,7 +3372,7 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
}
static void free_unref_page_commit(struct page *page, unsigned long pfn,
- int migratetype, unsigned int order)
+ int migratetype, unsigned int order, bool fast_free)
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
@@ -3382,7 +3382,10 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn,
__count_vm_event(PGFREE);
pcp = this_cpu_ptr(zone->per_cpu_pageset);
pindex = order_to_pindex(migratetype, order);
- list_add(&page->lru, &pcp->lists[pindex]);
+ if (fast_free)
+ list_add_tail(&page->lru, &pcp->lists[pindex]);
+ else
+ list_add(&page->lru, &pcp->lists[pindex]);
pcp->count += 1 << order;
high = nr_pcp_high(pcp, zone);
if (pcp->count >= high) {
@@ -3400,6 +3403,7 @@ void free_unref_page(struct page *page, unsigned int order)
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
int migratetype;
+ bool fast_free = false;
if (!free_unref_page_prepare(page, pfn, order))
return;
@@ -3419,9 +3423,15 @@ void free_unref_page(struct page *page, unsigned int order)
}
migratetype = MIGRATE_MOVABLE;
}
+ /*
+ * Give priority to freeing CMA pages to buddy in order to
+ * reduce page migration during cma_alloc().
+ */
+ if (migratetype == MIGRATE_CMA)
+ fast_free = true;
local_lock_irqsave(&pagesets.lock, flags);
- free_unref_page_commit(page, pfn, migratetype, order);
+ free_unref_page_commit(page, pfn, migratetype, order, fast_free);
local_unlock_irqrestore(&pagesets.lock, flags);
}
@@ -3459,6 +3469,8 @@ void free_unref_page_list(struct list_head *list)
local_lock_irqsave(&pagesets.lock, flags);
list_for_each_entry_safe(page, next, list, lru) {
+ bool fast_free = false;
+
pfn = page_private(page);
set_page_private(page, 0);
@@ -3467,11 +3479,19 @@ void free_unref_page_list(struct list_head *list)
* to the MIGRATE_MOVABLE pcp list.
*/
migratetype = get_pcppage_migratetype(page);
+
+ /*
+ * Give priority to freeing CMA pages to buddy in order to
+ * reduce page migration during cma_alloc().
+ */
+ if (migratetype == MIGRATE_CMA)
+ fast_free = true;
+
if (unlikely(migratetype >= MIGRATE_PCPTYPES))
migratetype = MIGRATE_MOVABLE;
trace_mm_page_free_batched(page);
- free_unref_page_commit(page, pfn, migratetype, 0);
+ free_unref_page_commit(page, pfn, migratetype, 0, fast_free);
/*
* Guard against excessive IRQ disabled times when we get
--
2.7.4
On Sun, Apr 24, 2022 at 3:28 PM <lipeifeng@oppo.com> wrote:
>
> From: lipeifeng <lipeifeng@oppo.com>
>
> CMA pages can fall back to serving movable allocations in many scenarios.
> When CMA pages are freed to the pcplist, queue them at the tail: the pcp
> lists are allocated from the head and drained to buddy from the tail, so
> tail insertion gives CMA pages priority to be freed back to buddy. If
> there are enough free movable pages, this avoids CMA pages being handed
> out as movable pages again soon, which keeps more CMA pages in buddy and
> reduces page migration when cma_alloc() runs.
>
> Signed-off-by: lipeifeng <lipeifeng@oppo.com>
> ---
+ Christoph, Marek, Robin, as it is CMA-related.
> mm/page_alloc.c | 28 ++++++++++++++++++++++++----
> 1 file changed, 24 insertions(+), 4 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 3589feb..69369ed 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3372,7 +3372,7 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
> }
>
> static void free_unref_page_commit(struct page *page, unsigned long pfn,
> - int migratetype, unsigned int order)
> + int migratetype, unsigned int order, bool fast_free)
> {
> struct zone *zone = page_zone(page);
> struct per_cpu_pages *pcp;
> @@ -3382,7 +3382,10 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn,
> __count_vm_event(PGFREE);
> pcp = this_cpu_ptr(zone->per_cpu_pageset);
> pindex = order_to_pindex(migratetype, order);
> - list_add(&page->lru, &pcp->lists[pindex]);
> + if (fast_free)
> + list_add_tail(&page->lru, &pcp->lists[pindex]);
> + else
> + list_add(&page->lru, &pcp->lists[pindex]);
OK, this is interesting: we used to have a separate CMA pcp list, but
now MIGRATE_CMA is an outsider, so CMA pages are placed on the
MIGRATE_MOVABLE pcp lists.
enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	...
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP))
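
Because MIGRATE_CMA sits above MIGRATE_PCPTYPES, it gets no pcp lists of
its own; the list index is derived roughly like this (simplified, the
order > PAGE_ALLOC_COSTLY_ORDER special-casing omitted):

	static inline unsigned int order_to_pindex(int migratetype, int order)
	{
		/*
		 * Callers clamp migratetype to < MIGRATE_PCPTYPES first, so
		 * MIGRATE_CMA pages end up on the MIGRATE_MOVABLE lists.
		 */
		return (MIGRATE_PCPTYPES * order) + migratetype;
	}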
> pcp->count += 1 << order;
> high = nr_pcp_high(pcp, zone);
> if (pcp->count >= high) {
> @@ -3400,6 +3403,7 @@ void free_unref_page(struct page *page, unsigned int order)
> unsigned long flags;
> unsigned long pfn = page_to_pfn(page);
> int migratetype;
> + bool fast_free = false;
>
> if (!free_unref_page_prepare(page, pfn, order))
> return;
> @@ -3419,9 +3423,15 @@ void free_unref_page(struct page *page, unsigned int order)
> }
> migratetype = MIGRATE_MOVABLE;
> }
> + /*
> + * Give priority to freeing CMA pages to buddy in order to
> + * reduce page migration during cma_alloc().
> + */
> + if (migratetype == MIGRATE_CMA)
> + fast_free = true;
>
> local_lock_irqsave(&pagesets.lock, flags);
> - free_unref_page_commit(page, pfn, migratetype, order);
> + free_unref_page_commit(page, pfn, migratetype, order, fast_free);
> local_unlock_irqrestore(&pagesets.lock, flags);
> }
>
> @@ -3459,6 +3469,8 @@ void free_unref_page_list(struct list_head *list)
>
> local_lock_irqsave(&pagesets.lock, flags);
> list_for_each_entry_safe(page, next, list, lru) {
> + bool fast_free = false;
> +
> pfn = page_private(page);
> set_page_private(page, 0);
>
> @@ -3467,11 +3479,19 @@ void free_unref_page_list(struct list_head *list)
> * to the MIGRATE_MOVABLE pcp list.
> */
> migratetype = get_pcppage_migratetype(page);
> +
> + /*
> + * Give priority to freeing CMA pages to buddy in order to
> + * reduce page migration during cma_alloc().
> + */
> + if (migratetype == MIGRATE_CMA)
> + fast_free = true;
> +
> if (unlikely(migratetype >= MIGRATE_PCPTYPES))
> migratetype = MIGRATE_MOVABLE;
>
> trace_mm_page_free_batched(page);
> - free_unref_page_commit(page, pfn, migratetype, 0);
> + free_unref_page_commit(page, pfn, migratetype, 0, fast_free);
I'd call get_pcppage_migratetype() again in free_unref_page_commit()
rather than add a parameter that has to be threaded through a couple of
functions.
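Something like this untested sketch; note the migratetype stored by
set_pcppage_migratetype() still says MIGRATE_CMA even after the local
migratetype has been clamped to MIGRATE_MOVABLE:

	static void free_unref_page_commit(struct page *page, unsigned long pfn,
					   int migratetype, unsigned int order)
	{
		...
		pindex = order_to_pindex(migratetype, order);
		/* queue CMA pages at the tail so they are flushed to buddy first */
		if (get_pcppage_migratetype(page) == MIGRATE_CMA)
			list_add_tail(&page->lru, &pcp->lists[pindex]);
		else
			list_add(&page->lru, &pcp->lists[pindex]);
		...
	}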
>
> /*
> * Guard against excessive IRQ disabled times when we get
> --
> 2.7.4
>
Thanks
Barry