Currently, in free_area_init_core(), when initializing a zone's fields, a
rough value is set for zone->managed_pages. That value is calculated as
(zone->present_pages - memmap_pages).

Meanwhile, the value is added to nr_all_pages and nr_kernel_pages, which
represent all free pages of the system (including HIGHMEM, or low memory
only, respectively). Both are later used in alloc_large_system_hash().

However, the rough calculation and setting of zone->managed_pages is
meaningless because
  a) memmap pages are allocated in units of node in sparse_init() or
     alloc_node_mem_map(pgdat); the simple (zone->present_pages -
     memmap_pages) is too rough to make sense per zone;
  b) the zone->managed_pages set here will be zeroed out and reset with
     the actual value in mem_init() via memblock_free_all(). Before that
     resetting, no buddy allocation request is issued.

Here, remove the meaningless and complicated calculation of
(zone->present_pages - memmap_pages) and initialize zone->managed_pages
as 0, which reflects its actual value since no page has been added to the
buddy system yet. It will be reset in mem_init().

Also remove the assignment of nr_all_pages and nr_kernel_pages in
free_area_init_core(). Instead, call the newly added calc_nr_kernel_pages()
to count up all free but not reserved memory in memblock and assign it to
nr_all_pages and nr_kernel_pages. The counting excludes memmap_pages and
other data used by the kernel, which is more accurate and simpler than the
old way, and also covers the arch_reserved_kernel_pages() case required
by ppc.
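
For illustration, here is a minimal sketch of how such a memblock-based
counter could look. This is an assumption-based sketch, not necessarily the
exact calc_nr_kernel_pages() added earlier in this series; it only shows the
idea of walking memblock's free (i.e. not reserved) ranges and splitting the
count at the highmem boundary:

	/*
	 * Sketch only: nr_all_pages / nr_kernel_pages are the existing
	 * globals in mm/mm_init.c; the real helper may differ in details.
	 */
	static void __init calc_nr_kernel_pages(void)
	{
		unsigned long start_pfn, end_pfn;
		phys_addr_t start_addr, end_addr;
		u64 u;
	#ifdef CONFIG_HIGHMEM
		unsigned long high_zone_low =
				arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
	#endif

		/*
		 * Walk only memory that memblock considers free, i.e. not
		 * reserved; this is what excludes memmap pages and other
		 * early kernel allocations from the count.
		 */
		for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE,
					&start_addr, &end_addr, NULL) {
			start_pfn = PFN_UP(start_addr);
			end_pfn = PFN_DOWN(end_addr);

			if (start_pfn >= end_pfn)
				continue;

			nr_all_pages += end_pfn - start_pfn;
	#ifdef CONFIG_HIGHMEM
			/* nr_kernel_pages only counts the lowmem part. */
			start_pfn = clamp(start_pfn, 0UL, high_zone_low);
			end_pfn = clamp(end_pfn, 0UL, high_zone_low);
	#endif
			nr_kernel_pages += end_pfn - start_pfn;
		}
	}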

Also clean up the outdated code comment above free_area_init_core().
free_area_init_core() is easy to understand now; no extra words are needed
to explain it.
Signed-off-by: Baoquan He <bhe@redhat.com>
---
mm/mm_init.c | 46 +++++-----------------------------------------
1 file changed, 5 insertions(+), 41 deletions(-)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index c57a7fc97a16..7f71e56e83f3 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1565,15 +1565,6 @@ void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
}
#endif
-/*
- * Set up the zone data structures:
- * - mark all pages reserved
- * - mark all memory queues empty
- * - clear the memory bitmaps
- *
- * NOTE: pgdat should get zeroed by caller.
- * NOTE: this function is only called during early init.
- */
static void __init free_area_init_core(struct pglist_data *pgdat)
{
enum zone_type j;
@@ -1584,41 +1575,13 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
- unsigned long size, freesize, memmap_pages;
-
- size = zone->spanned_pages;
- freesize = zone->present_pages;
-
- /*
- * Adjust freesize so that it accounts for how much memory
- * is used by this zone for memmap. This affects the watermark
- * and per-cpu initialisations
- */
- memmap_pages = calc_memmap_size(size, freesize);
- if (!is_highmem_idx(j)) {
- if (freesize >= memmap_pages) {
- freesize -= memmap_pages;
- if (memmap_pages)
- pr_debug(" %s zone: %lu pages used for memmap\n",
- zone_names[j], memmap_pages);
- } else
- pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n",
- zone_names[j], memmap_pages, freesize);
- }
-
- if (!is_highmem_idx(j))
- nr_kernel_pages += freesize;
- /* Charge for highmem memmap if there are enough kernel pages */
- else if (nr_kernel_pages > memmap_pages * 2)
- nr_kernel_pages -= memmap_pages;
- nr_all_pages += freesize;
+ unsigned long size = zone->spanned_pages;
/*
- * Set an approximate value for lowmem here, it will be adjusted
- * when the bootmem allocator frees pages into the buddy system.
- * And all highmem pages will be managed by the buddy system.
+ * Initialize zone->managed_pages as 0, it will be reset
+ * when memblock allocator frees pages into buddy system.
*/
- zone_init_internals(zone, j, nid, freesize);
+ zone_init_internals(zone, j, nid, 0);
if (!size)
continue;
@@ -1915,6 +1878,7 @@ void __init free_area_init(unsigned long *max_zone_pfn)
check_for_memory(pgdat);
}
+ calc_nr_kernel_pages();
memmap_init();
/* disable hash distribution for systems with a single node */
--
2.41.0
On 03/25/24 at 10:56pm, Baoquan He wrote:
> Currently, in free_area_init_core(), when initializing a zone's fields, a
> rough value is set for zone->managed_pages. That value is calculated as
> (zone->present_pages - memmap_pages).
> 
> Meanwhile, the value is added to nr_all_pages and nr_kernel_pages, which
> represent all free pages of the system (including HIGHMEM, or low memory
> only, respectively). Both are later used in alloc_large_system_hash().
> 
> However, the rough calculation and setting of zone->managed_pages is
> meaningless because
>   a) memmap pages are allocated in units of node in sparse_init() or
>      alloc_node_mem_map(pgdat); the simple (zone->present_pages -
>      memmap_pages) is too rough to make sense per zone;
>   b) the zone->managed_pages set here will be zeroed out and reset with
>      the actual value in mem_init() via memblock_free_all(). Before that
>      resetting, no buddy allocation request is issued.
> 
> Here, remove the meaningless and complicated calculation of
> (zone->present_pages - memmap_pages) and initialize zone->managed_pages
> as 0, which reflects its actual value since no page has been added to the
> buddy system yet. It will be reset in mem_init().
> 
> Also remove the assignment of nr_all_pages and nr_kernel_pages in
> free_area_init_core(). Instead, call the newly added calc_nr_kernel_pages()
> to count up all free but not reserved memory in memblock and assign it to
> nr_all_pages and nr_kernel_pages. The counting excludes memmap_pages and
> other data used by the kernel, which is more accurate and simpler than the
> old way, and also covers the arch_reserved_kernel_pages() case required
> by ppc.
> 
> Also clean up the outdated code comment above free_area_init_core().
> free_area_init_core() is easy to understand now; no extra words are needed
> to explain it.
>
> Signed-off-by: Baoquan He <bhe@redhat.com>
> ---
> mm/mm_init.c | 46 +++++-----------------------------------------
> 1 file changed, 5 insertions(+), 41 deletions(-)
>
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index c57a7fc97a16..7f71e56e83f3 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -1565,15 +1565,6 @@ void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
> }
> #endif
>
> -/*
> - * Set up the zone data structures:
> - * - mark all pages reserved
> - * - mark all memory queues empty
> - * - clear the memory bitmaps
> - *
> - * NOTE: pgdat should get zeroed by caller.
> - * NOTE: this function is only called during early init.
> - */
> static void __init free_area_init_core(struct pglist_data *pgdat)
> {
> enum zone_type j;
> @@ -1584,41 +1575,13 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
>
> for (j = 0; j < MAX_NR_ZONES; j++) {
> struct zone *zone = pgdat->node_zones + j;
> - unsigned long size, freesize, memmap_pages;
> -
> - size = zone->spanned_pages;
> - freesize = zone->present_pages;
> -
> - /*
> - * Adjust freesize so that it accounts for how much memory
> - * is used by this zone for memmap. This affects the watermark
> - * and per-cpu initialisations
> - */
> - memmap_pages = calc_memmap_size(size, freesize);
> - if (!is_highmem_idx(j)) {
> - if (freesize >= memmap_pages) {
> - freesize -= memmap_pages;
> - if (memmap_pages)
> - pr_debug(" %s zone: %lu pages used for memmap\n",
> - zone_names[j], memmap_pages);
> - } else
> - pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n",
> - zone_names[j], memmap_pages, freesize);
> - }
> -
> - if (!is_highmem_idx(j))
> - nr_kernel_pages += freesize;
> - /* Charge for highmem memmap if there are enough kernel pages */
> - else if (nr_kernel_pages > memmap_pages * 2)
> - nr_kernel_pages -= memmap_pages;
> - nr_all_pages += freesize;
> + unsigned long size = zone->spanned_pages;
>
> /*
> - * Set an approximate value for lowmem here, it will be adjusted
> - * when the bootmem allocator frees pages into the buddy system.
> - * And all highmem pages will be managed by the buddy system.
> + * Initialize zone->managed_pages as 0, it will be reset
> + * when memblock allocator frees pages into buddy system.
> */
> - zone_init_internals(zone, j, nid, freesize);
> + zone_init_internals(zone, j, nid, 0);
Here, we should initialize zone->managed_pages as zone->present_pages,
because page_group_by_mobility_disabled later needs to be set according to
zone->managed_pages. Otherwise page_group_by_mobility_disabled will always
be set to 1. I will send out v3.
From a17b0921b4bd00596330f61ee9ea4b82386a9fed Mon Sep 17 00:00:00 2001
From: Baoquan He <bhe@redhat.com>
Date: Thu, 28 Mar 2024 16:20:15 +0800
Subject: [PATCH] mm/mm_init.c: set zone's ->managed_pages as ->present_pages
for now
Content-type: text/plain
Because page_group_by_mobility_disabled needs to be set according to zone's
managed_pages later.
Signed-off-by: Baoquan He <bhe@redhat.com>
---
mm/mm_init.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index cc24e7958c0c..dd875f943cbb 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1561,7 +1561,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
* Initialize zone->managed_pages as 0, it will be reset
* when memblock allocator frees pages into buddy system.
*/
- zone_init_internals(zone, j, nid, 0);
+ zone_init_internals(zone, j, nid, zone->present_pages);
if (!size)
continue;
--
2.41.0
>
> if (!size)
> continue;
> @@ -1915,6 +1878,7 @@ void __init free_area_init(unsigned long *max_zone_pfn)
> check_for_memory(pgdat);
> }
>
> + calc_nr_kernel_pages();
> memmap_init();
>
> /* disable hash distribution for systems with a single node */
> --
> 2.41.0
>
On Thu, Mar 28, 2024 at 04:32:38PM +0800, Baoquan He wrote:
> On 03/25/24 at 10:56pm, Baoquan He wrote:
> > 
> >  		/*
> > -		 * Set an approximate value for lowmem here, it will be adjusted
> > -		 * when the bootmem allocator frees pages into the buddy system.
> > -		 * And all highmem pages will be managed by the buddy system.
> > +		 * Initialize zone->managed_pages as 0, it will be reset
> > +		 * when memblock allocator frees pages into buddy system.
> >  		 */
> > -		zone_init_internals(zone, j, nid, freesize);
> > +		zone_init_internals(zone, j, nid, 0);
> 
> Here, we should initialize zone->managed_pages as zone->present_pages,
> because page_group_by_mobility_disabled later needs to be set according to
> zone->managed_pages. Otherwise page_group_by_mobility_disabled will always
> be set to 1. I will send out v3.

With zone->managed_pages set to zone->present_pages we won't account for
the reserved memory for initialization of page_group_by_mobility_disabled.

As watermarks are still not initialized at the time build_all_zonelists()
is called, we may use nr_all_pages - nr_kernel_pages instead of
nr_free_zone_pages(), IMO.

> From a17b0921b4bd00596330f61ee9ea4b82386a9fed Mon Sep 17 00:00:00 2001
> From: Baoquan He <bhe@redhat.com>
> Date: Thu, 28 Mar 2024 16:20:15 +0800
> Subject: [PATCH] mm/mm_init.c: set zone's ->managed_pages as ->present_pages
>  for now
> Content-type: text/plain
> 
> Because page_group_by_mobility_disabled needs to be set according to zone's
> managed_pages later.
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>
> ---
>  mm/mm_init.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index cc24e7958c0c..dd875f943cbb 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -1561,7 +1561,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
>  		 * Initialize zone->managed_pages as 0, it will be reset
>  		 * when memblock allocator frees pages into buddy system.
>  		 */
> -		zone_init_internals(zone, j, nid, 0);
> +		zone_init_internals(zone, j, nid, zone->present_pages);
> 
>  		if (!size)
>  			continue;
> -- 
> 2.41.0
> 
> > 
> >  		if (!size)
> >  			continue;
> > @@ -1915,6 +1878,7 @@ void __init free_area_init(unsigned long *max_zone_pfn)
> >  		check_for_memory(pgdat);
> >  	}
> > 
> > +	calc_nr_kernel_pages();
> >  	memmap_init();
> > 
> >  	/* disable hash distribution for systems with a single node */
> > -- 
> > 2.41.0
> > 

-- 
Sincerely yours,
Mike.
On 03/28/24 at 11:53am, Mike Rapoport wrote:
> On Thu, Mar 28, 2024 at 04:32:38PM +0800, Baoquan He wrote:
> > On 03/25/24 at 10:56pm, Baoquan He wrote:
> > >
> > > /*
> > > - * Set an approximate value for lowmem here, it will be adjusted
> > > - * when the bootmem allocator frees pages into the buddy system.
> > > - * And all highmem pages will be managed by the buddy system.
> > > + * Initialize zone->managed_pages as 0 , it will be reset
> > > + * when memblock allocator frees pages into buddy system.
> > > */
> > > - zone_init_internals(zone, j, nid, freesize);
> > > + zone_init_internals(zone, j, nid, 0);
> >
> > Here, we should initialize zone->managed_pages as zone->present_pages,
> > because page_group_by_mobility_disabled later needs to be set according to
> > zone->managed_pages. Otherwise page_group_by_mobility_disabled will always
> > be set to 1. I will send out v3.
>
> With zone->managed_pages set to zone->present_pages we won't account for
> the reserved memory for initialization of page_group_by_mobility_disabled.
The old zone->managed_pages didn't account for the reserved pages either;
it was calculated as (zone->present_pages - memmap_pages). The memmap pages
are only a very small portion: e.g. on x86_64 with 4K page size, assuming
the size of struct page is 64 bytes, they are 1/64 of system memory. On
arm64 with 64K page size, they are 1/1024 of system memory.
As for the setting of page_group_by_mobility_disabled, the compared value
pageblock_nr_pages * MIGRATE_TYPES is very small. On x86_64 it's
4M*6 = 24M; on arm64 with 64K page size it's 128M*6 = 768M, which should be
the biggest among architectures.
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;
So page_group_by_mobility_disabled could be set to 1 only on a system with
very little memory, which is rarely seen. And setting zone->managed_pages
to zone->present_pages is very close to its old value,
(zone->present_pages - memmap_pages). We don't need to be very accurate
here; a rough value is enough.
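
To put rough numbers on that (illustrative values only, assumed here and
not taken from a real system):

	/* Back-of-envelope check: 1 GiB x86_64 box, 4K pages,
	 * sizeof(struct page) == 64, as in the figures above. */
	unsigned long present     = 1UL << 18;        /* 1 GiB / 4 KiB = 262144 pages */
	unsigned long memmap      = present / 64;     /* 1/64 of RAM   =   4096 pages */
	unsigned long old_managed = present - memmap; /*                 258048 pages */
	/*
	 * The threshold in the check above is pageblock_nr_pages *
	 * MIGRATE_TYPES, i.e. 24M on x86_64 = 6144 pages of 4K. Both
	 * present (262144) and the old value (258048) sit far above it,
	 * so the 1/64 difference could only flip
	 * page_group_by_mobility_disabled on machines with a few dozen
	 * MB of RAM.
	 */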
>
> As watermarks are still not initialized at the time build_all_zonelists()
> is called, we may use nr_all_pages - nr_kernel_pages instead of
> nr_free_zone_pages(), IMO.
nr_all_pages should be fine if we take this way. nr_kernel_pages is a
misleading name; it's all low memory pages excluding kernel reserved pages.
nr_all_pages is all memory pages, including highmem and excluding kernel
reserved pages.

Both are fine with me. The first way is easier, simply setting
zone->managed_pages to zone->present_pages. The second way is a little
more accurate.
>
> > From a17b0921b4bd00596330f61ee9ea4b82386a9fed Mon Sep 17 00:00:00 2001
> > From: Baoquan He <bhe@redhat.com>
> > Date: Thu, 28 Mar 2024 16:20:15 +0800
> > Subject: [PATCH] mm/mm_init.c: set zone's ->managed_pages as ->present_pages
> > for now
> > Content-type: text/plain
> >
> > Because page_group_by_mobility_disabled needs to be set according to zone's
> > managed_pages later.
> >
> > Signed-off-by: Baoquan He <bhe@redhat.com>
> > ---
> > mm/mm_init.c | 2 +-
> > 1 file changed, 1 insertion(+), 1 deletion(-)
> >
> > diff --git a/mm/mm_init.c b/mm/mm_init.c
> > index cc24e7958c0c..dd875f943cbb 100644
> > --- a/mm/mm_init.c
> > +++ b/mm/mm_init.c
> > @@ -1561,7 +1561,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
> > * Initialize zone->managed_pages as 0, it will be reset
> > * when memblock allocator frees pages into buddy system.
> > */
> > - zone_init_internals(zone, j, nid, 0);
> > + zone_init_internals(zone, j, nid, zone->present_pages);
> >
> > if (!size)
> > continue;
> > --
> > 2.41.0
> >
> >
> > >
> > > if (!size)
> > > continue;
> > > @@ -1915,6 +1878,7 @@ void __init free_area_init(unsigned long *max_zone_pfn)
> > > check_for_memory(pgdat);
> > > }
> > >
> > > + calc_nr_kernel_pages();
> > > memmap_init();
> > >
> > > /* disable hash distribution for systems with a single node */
> > > --
> > > 2.41.0
> > >
> >
>
> --
> Sincerely yours,
> Mike.
>
On Mon, Mar 25, 2024 at 10:56:44PM +0800, Baoquan He wrote:
> Currently, in free_area_init_core(), when initializing a zone's fields, a
> rough value is set for zone->managed_pages. That value is calculated as
> (zone->present_pages - memmap_pages).
> 
> Meanwhile, the value is added to nr_all_pages and nr_kernel_pages, which
> represent all free pages of the system (including HIGHMEM, or low memory
> only, respectively). Both are later used in alloc_large_system_hash().
> 
> However, the rough calculation and setting of zone->managed_pages is
> meaningless because
>   a) memmap pages are allocated in units of node in sparse_init() or
>      alloc_node_mem_map(pgdat); the simple (zone->present_pages -
>      memmap_pages) is too rough to make sense per zone;
>   b) the zone->managed_pages set here will be zeroed out and reset with
>      the actual value in mem_init() via memblock_free_all(). Before that
>      resetting, no buddy allocation request is issued.
> 
> Here, remove the meaningless and complicated calculation of
> (zone->present_pages - memmap_pages) and initialize zone->managed_pages
> as 0, which reflects its actual value since no page has been added to the
> buddy system yet. It will be reset in mem_init().
> 
> Also remove the assignment of nr_all_pages and nr_kernel_pages in
> free_area_init_core(). Instead, call the newly added calc_nr_kernel_pages()
> to count up all free but not reserved memory in memblock and assign it to
> nr_all_pages and nr_kernel_pages. The counting excludes memmap_pages and
> other data used by the kernel, which is more accurate and simpler than the
> old way, and also covers the arch_reserved_kernel_pages() case required
> by ppc.
> 
> Also clean up the outdated code comment above free_area_init_core().
> free_area_init_core() is easy to understand now; no extra words are needed
> to explain it.
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>

Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>

> ---
>  mm/mm_init.c | 46 +++++-----------------------------------------
>  1 file changed, 5 insertions(+), 41 deletions(-)
Currently, in free_area_init_core(), when initializing a zone's fields, a
rough value is set for zone->managed_pages. That value is calculated as
(zone->present_pages - memmap_pages).

Meanwhile, the value is added to nr_all_pages and nr_kernel_pages, which
represent all free pages of the system (including HIGHMEM, or low memory
only, respectively). Both are later used in alloc_large_system_hash().

However, the rough calculation and setting of zone->managed_pages is
meaningless because
  a) memmap pages are allocated in units of node in sparse_init() or
     alloc_node_mem_map(pgdat); the simple (zone->present_pages -
     memmap_pages) is too rough to make sense per zone;
  b) the zone->managed_pages set here will be zeroed out and reset with
     the actual value in mem_init() via memblock_free_all(). Before that
     resetting, no buddy allocation request is issued.

Here, remove the meaningless and complicated calculation of
(zone->present_pages - memmap_pages) and directly set zone->managed_pages
to zone->present_pages for now. It will be adjusted in mem_init().

Also remove the assignment of nr_all_pages and nr_kernel_pages in
free_area_init_core(). Instead, call the newly added calc_nr_kernel_pages()
to count up all free but not reserved memory in memblock and assign it to
nr_all_pages and nr_kernel_pages. The counting excludes memmap_pages and
other data used by the kernel, which is more accurate and simpler than the
old way, and also covers the arch_reserved_kernel_pages() case required
by ppc.

Also clean up the outdated code comment above free_area_init_core().
free_area_init_core() is easy to understand now; no extra words are needed
to explain it.
Signed-off-by: Baoquan He <bhe@redhat.com>
---
v2->v3:
- Change to initializing zone->managed_pages as zone->present_pages for
  now, because page_group_by_mobility_disabled later needs to be set
  according to zone->managed_pages. Otherwise
  page_group_by_mobility_disabled would always be set to 1.
mm/mm_init.c | 46 +++++-----------------------------------------
1 file changed, 5 insertions(+), 41 deletions(-)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index c57a7fc97a16..a4b80e8276bb 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1565,15 +1565,6 @@ void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
}
#endif
-/*
- * Set up the zone data structures:
- * - mark all pages reserved
- * - mark all memory queues empty
- * - clear the memory bitmaps
- *
- * NOTE: pgdat should get zeroed by caller.
- * NOTE: this function is only called during early init.
- */
static void __init free_area_init_core(struct pglist_data *pgdat)
{
enum zone_type j;
@@ -1584,41 +1575,13 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
- unsigned long size, freesize, memmap_pages;
-
- size = zone->spanned_pages;
- freesize = zone->present_pages;
-
- /*
- * Adjust freesize so that it accounts for how much memory
- * is used by this zone for memmap. This affects the watermark
- * and per-cpu initialisations
- */
- memmap_pages = calc_memmap_size(size, freesize);
- if (!is_highmem_idx(j)) {
- if (freesize >= memmap_pages) {
- freesize -= memmap_pages;
- if (memmap_pages)
- pr_debug(" %s zone: %lu pages used for memmap\n",
- zone_names[j], memmap_pages);
- } else
- pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n",
- zone_names[j], memmap_pages, freesize);
- }
-
- if (!is_highmem_idx(j))
- nr_kernel_pages += freesize;
- /* Charge for highmem memmap if there are enough kernel pages */
- else if (nr_kernel_pages > memmap_pages * 2)
- nr_kernel_pages -= memmap_pages;
- nr_all_pages += freesize;
+ unsigned long size = zone->spanned_pages;
/*
- * Set an approximate value for lowmem here, it will be adjusted
- * when the bootmem allocator frees pages into the buddy system.
- * And all highmem pages will be managed by the buddy system.
+ * Initialize zone->managed_pages as zone->present_pages for now,
+ * it will be adjusted when memblock allocator frees pages into buddy system.
*/
- zone_init_internals(zone, j, nid, freesize);
+ zone_init_internals(zone, j, nid, zone->present_pages);
if (!size)
continue;
@@ -1915,6 +1878,7 @@ void __init free_area_init(unsigned long *max_zone_pfn)
check_for_memory(pgdat);
}
+ calc_nr_kernel_pages();
memmap_init();
/* disable hash distribution for systems with a single node */
--
2.41.0