When move_pfn_range_to_zone() or remove_pfn_range_from_zone() is
invoked, zone->contiguous is updated by checking the new zone's pfn
range from beginning to end, regardless of the previous state of the
zone. When the zone's pfn range is large, the cost of traversing the
pfn range to update zone->contiguous can be significant.
Add fast paths to quickly detect cases where the zone is definitely
contiguous or definitely not contiguous, without scanning the new zone.
The cases are: if the new range does not overlap the previous range,
contiguous must be false; if the new range is adjacent to the previous
range, the result depends only on the zone's previous state; if the
newly added pages cannot fill the hole in the previous zone, contiguous
must be false.
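As a minimal standalone sketch of the growing-side decision chain
(illustrative userspace C with simplified types, not the kernel code;
the real implementation is in the diff below):

#include <stdbool.h>
#include <stdio.h>

enum zone_contig_state { ZONE_CONTIG_YES, ZONE_CONTIG_NO, ZONE_CONTIG_MAYBE };

struct zone_model {                       /* simplified stand-in for struct zone */
        unsigned long start_pfn, end_pfn; /* spanned range [start, end) */
        unsigned long present_pages;      /* pages actually present in the span */
        bool contiguous;
};

static enum zone_contig_state
contig_after_growing(const struct zone_model *z,
                     unsigned long start_pfn, unsigned long nr_pages)
{
        unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long spanned = z->end_pfn - z->start_pfn;

        if (spanned == 0)                 /* empty zone: new range is the span */
                return ZONE_CONTIG_YES;
        if (end_pfn < z->start_pfn || start_pfn > z->end_pfn)
                return ZONE_CONTIG_NO;    /* disjoint range creates a hole */
        if (end_pfn == z->start_pfn || start_pfn == z->end_pfn)
                return z->contiguous ? ZONE_CONTIG_YES : ZONE_CONTIG_NO; /* adjacent */
        if (nr_pages < spanned - z->present_pages)
                return ZONE_CONTIG_NO;    /* too few pages to fill the hole */
        return ZONE_CONTIG_MAYBE;         /* only this case needs a full scan */
}

int main(void)
{
        struct zone_model z = { 1000, 2000, 800, false };    /* 200-page hole */

        printf("%d\n", contig_after_growing(&z, 5000, 100)); /* 1: disjoint */
        printf("%d\n", contig_after_growing(&z, 1100, 100)); /* 1: cannot fill hole */
        printf("%d\n", contig_after_growing(&z, 1100, 300)); /* 2: scan needed */
        return 0;
}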
The following memory hotplug test cases for a VM [1], run in the
environment described in [2], show that this optimization can
significantly reduce memory hotplug time [3].
+----------------+------+---------------+--------------+----------------+
| | Size | Time (before) | Time (after) | Time Reduction |
| +------+---------------+--------------+----------------+
| Plug Memory | 256G | 10s | 2s | 80% |
| +------+---------------+--------------+----------------+
| | 512G | 33s | 6s | 81% |
+----------------+------+---------------+--------------+----------------+
+----------------+------+---------------+--------------+----------------+
| | Size | Time (before) | Time (after) | Time Reduction |
| +------+---------------+--------------+----------------+
| Unplug Memory | 256G | 10s | 2s | 80% |
| +------+---------------+--------------+----------------+
| | 512G | 34s | 6s | 82% |
+----------------+------+---------------+--------------+----------------+
[1] Qemu commands to hotplug 256G/512G memory for a VM:
object_add memory-backend-ram,id=hotmem0,size=256G/512G,share=on
device_add virtio-mem-pci,id=vmem1,memdev=hotmem0,bus=port1
qom-set vmem1 requested-size 256G/512G (Plug Memory)
qom-set vmem1 requested-size 0G (Unplug Memory)
[2] Hardware : Intel Icelake server
Guest Kernel : v6.18-rc2
Qemu : v9.0.0
Launch VM :
qemu-system-x86_64 -accel kvm -cpu host \
-drive file=./Centos10_cloud.qcow2,format=qcow2,if=virtio \
-drive file=./seed.img,format=raw,if=virtio \
-smp 3,cores=3,threads=1,sockets=1,maxcpus=3 \
-m 2G,slots=10,maxmem=2052472M \
-device pcie-root-port,id=port1,bus=pcie.0,slot=1,multifunction=on \
-device pcie-root-port,id=port2,bus=pcie.0,slot=2 \
-nographic -machine q35 \
-nic user,hostfwd=tcp::3000-:22
Guest kernel auto-onlines newly added memory blocks:
echo online > /sys/devices/system/memory/auto_online_blocks
[3] The time from typing the QEMU commands in [1] until the output of
    'grep MemTotal /proc/meminfo' on the guest reflects that all
    hotplugged memory has been recognized.
Reported-by: Nanhai Zou <nanhai.zou@intel.com>
Reported-by: Chen Zhang <zhangchen.kidd@jd.com>
Tested-by: Yuan Liu <yuan1.liu@intel.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
Reviewed-by: Yu C Chen <yu.c.chen@intel.com>
Reviewed-by: Pan Deng <pan.deng@intel.com>
Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
Reviewed-by: Yuan Liu <yuan1.liu@intel.com>
Signed-off-by: Tianyou Li <tianyou.li@intel.com>
---
mm/internal.h | 8 ++++-
mm/memory_hotplug.c | 86 +++++++++++++++++++++++++++++++++++++--------
mm/mm_init.c | 15 ++++++--
3 files changed, 92 insertions(+), 17 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index e430da900430..828aed5c2fef 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -730,7 +730,13 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}
-void set_zone_contiguous(struct zone *zone);
+enum zone_contig_state {
+ ZONE_CONTIG_YES,
+ ZONE_CONTIG_NO,
+ ZONE_CONTIG_MAYBE,
+};
+
+void set_zone_contiguous(struct zone *zone, enum zone_contig_state state);
bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
unsigned long nr_pages);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 8793a83702c5..7b8feaca0d63 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -544,6 +544,25 @@ static void update_pgdat_span(struct pglist_data *pgdat)
pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}
+static enum zone_contig_state zone_contig_state_after_shrinking(struct zone *zone,
+ unsigned long start_pfn, unsigned long nr_pages)
+{
+ const unsigned long end_pfn = start_pfn + nr_pages;
+
+ /*
+ * If we cut a hole into the zone span, then the zone is
+ * certainly not contiguous.
+ */
+ if (start_pfn > zone->zone_start_pfn && end_pfn < zone_end_pfn(zone))
+ return ZONE_CONTIG_NO;
+
+ /* Removing from the start/end of the zone will not change anything. */
+ if (start_pfn == zone->zone_start_pfn || end_pfn == zone_end_pfn(zone))
+ return zone->contiguous ? ZONE_CONTIG_YES : ZONE_CONTIG_MAYBE;
+
+ return ZONE_CONTIG_MAYBE;
+}
+
void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages)
@@ -551,6 +570,7 @@ void remove_pfn_range_from_zone(struct zone *zone,
const unsigned long end_pfn = start_pfn + nr_pages;
struct pglist_data *pgdat = zone->zone_pgdat;
unsigned long pfn, cur_nr_pages;
+ enum zone_contig_state new_contiguous_state;
/* Poison struct pages because they are now uninitialized again. */
for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
@@ -571,12 +591,14 @@ void remove_pfn_range_from_zone(struct zone *zone,
if (zone_is_zone_device(zone))
return;
+ new_contiguous_state = zone_contig_state_after_shrinking(zone, start_pfn,
+ nr_pages);
clear_zone_contiguous(zone);
shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
update_pgdat_span(pgdat);
- set_zone_contiguous(zone);
+ set_zone_contiguous(zone, new_contiguous_state);
}
/**
@@ -736,6 +758,32 @@ static inline void section_taint_zone_device(unsigned long pfn)
}
#endif
+static enum zone_contig_state zone_contig_state_after_growing(struct zone *zone,
+ unsigned long start_pfn, unsigned long nr_pages)
+{
+ const unsigned long end_pfn = start_pfn + nr_pages;
+
+ if (zone_is_empty(zone))
+ return ZONE_CONTIG_YES;
+
+ /*
+ * If the moved pfn range does not intersect with the original zone span
+ * the zone is surely not contiguous.
+ */
+ if (end_pfn < zone->zone_start_pfn || start_pfn > zone_end_pfn(zone))
+ return ZONE_CONTIG_NO;
+
+ /* Adding to the start/end of the zone will not change anything. */
+ if (end_pfn == zone->zone_start_pfn || start_pfn == zone_end_pfn(zone))
+ return zone->contiguous ? ZONE_CONTIG_YES : ZONE_CONTIG_NO;
+
+ /* If we cannot fill the hole, the zone stays not contiguous. */
+ if (nr_pages < (zone->spanned_pages - zone->present_pages))
+ return ZONE_CONTIG_NO;
+
+ return ZONE_CONTIG_MAYBE;
+}
+
/*
* Associate the pfn range with the given zone, initializing the memmaps
* and resizing the pgdat/zone data to span the added pages. After this
@@ -1165,7 +1213,6 @@ static int online_pages(unsigned long pfn, unsigned long nr_pages,
!IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
return -EINVAL;
-
/* associate pfn range with the zone */
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE,
true);
@@ -1203,13 +1250,6 @@ static int online_pages(unsigned long pfn, unsigned long nr_pages,
}
online_pages_range(pfn, nr_pages);
-
- /*
- * Now that the ranges are indicated as online, check whether the whole
- * zone is contiguous.
- */
- set_zone_contiguous(zone);
-
adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
if (node_arg.nid >= 0)
@@ -1254,16 +1294,25 @@ static int online_pages(unsigned long pfn, unsigned long nr_pages,
return ret;
}
-int online_memory_block_pages(unsigned long start_pfn,
- unsigned long nr_pages, unsigned long nr_vmemmap_pages,
- struct zone *zone, struct memory_group *group)
+int online_memory_block_pages(unsigned long start_pfn, unsigned long nr_pages,
+ unsigned long nr_vmemmap_pages, struct zone *zone,
+ struct memory_group *group)
{
+ const bool contiguous = zone->contiguous;
+ enum zone_contig_state new_contiguous_state;
int ret;
+ /*
+ * Calculate the new zone contig state before move_pfn_range_to_zone()
+ * sets the zone temporarily to non-contiguous.
+ */
+ new_contiguous_state = zone_contig_state_after_growing(zone, start_pfn,
+ nr_pages);
+
if (nr_vmemmap_pages) {
ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
if (ret)
- return ret;
+ goto restore_zone_contig;
}
ret = online_pages(start_pfn + nr_vmemmap_pages,
@@ -1271,7 +1320,7 @@ int online_memory_block_pages(unsigned long start_pfn,
if (ret) {
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
- return ret;
+ goto restore_zone_contig;
}
/*
@@ -1282,6 +1331,15 @@ int online_memory_block_pages(unsigned long start_pfn,
adjust_present_page_count(pfn_to_page(start_pfn), group,
nr_vmemmap_pages);
+ /*
+ * Now that the ranges are indicated as online, check whether the whole
+ * zone is contiguous.
+ */
+ set_zone_contiguous(zone, new_contiguous_state);
+ return 0;
+
+restore_zone_contig:
+ zone->contiguous = contiguous;
return ret;
}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index fc2a6f1e518f..5ed3fbd5c643 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2263,11 +2263,22 @@ void __init init_cma_pageblock(struct page *page)
}
#endif
-void set_zone_contiguous(struct zone *zone)
+void set_zone_contiguous(struct zone *zone, enum zone_contig_state state)
{
unsigned long block_start_pfn = zone->zone_start_pfn;
unsigned long block_end_pfn;
+ /* We expect an earlier call to clear_zone_contiguous(). */
+ VM_WARN_ON_ONCE(zone->contiguous);
+
+ if (state == ZONE_CONTIG_YES) {
+ zone->contiguous = true;
+ return;
+ }
+
+ if (state == ZONE_CONTIG_NO)
+ return;
+
block_end_pfn = pageblock_end_pfn(block_start_pfn);
for (; block_start_pfn < zone_end_pfn(zone);
block_start_pfn = block_end_pfn,
@@ -2348,7 +2359,7 @@ void __init page_alloc_init_late(void)
shuffle_free_memory(NODE_DATA(nid));
for_each_populated_zone(zone)
- set_zone_contiguous(zone);
+ set_zone_contiguous(zone, ZONE_CONTIG_MAYBE);
/* Initialize page ext after all struct pages are initialized. */
if (deferred_struct_pages)
--
2.47.1
Hi,
On Tue, Jan 20, 2026 at 10:33:46PM +0800, Tianyou Li wrote:
> ...
> +int online_memory_block_pages(unsigned long start_pfn, unsigned long nr_pages,
> + unsigned long nr_vmemmap_pages, struct zone *zone,
> + struct memory_group *group)
> {
> + const bool contiguous = zone->contiguous;
> + enum zone_contig_state new_contiguous_state;
> int ret;
>
> + /*
> + * Calculate the new zone contig state before move_pfn_range_to_zone()
> + * sets the zone temporarily to non-contiguous.
> + */
> + new_contiguous_state = zone_contig_state_after_growing(zone, start_pfn,
> + nr_pages);
> +
> if (nr_vmemmap_pages) {
> ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
> if (ret)
> - return ret;
> + goto restore_zone_contig;
But zone_contig_state_after_growing() does not change zone->contiguous. Why
do we need to save and restore it?
> }
>
> ret = online_pages(start_pfn + nr_vmemmap_pages,
> @@ -1271,7 +1320,7 @@ int online_memory_block_pages(unsigned long start_pfn,
> if (ret) {
> if (nr_vmemmap_pages)
> mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
> - return ret;
> + goto restore_zone_contig;
> }
>
> /*
> @@ -1282,6 +1331,15 @@ int online_memory_block_pages(unsigned long start_pfn,
> adjust_present_page_count(pfn_to_page(start_pfn), group,
> nr_vmemmap_pages);
>
> + /*
> + * Now that the ranges are indicated as online, check whether the whole
> + * zone is contiguous.
> + */
> + set_zone_contiguous(zone, new_contiguous_state);
> + return 0;
> +
> +restore_zone_contig:
> + zone->contiguous = contiguous;
> return ret;
> }
--
Sincerely yours,
Mike.
On 1/22/2026 7:43 PM, Mike Rapoport wrote:
> Hi,
>
> On Tue, Jan 20, 2026 at 10:33:46PM +0800, Tianyou Li wrote:
>> ...
>
>> +int online_memory_block_pages(unsigned long start_pfn, unsigned long nr_pages,
>> + unsigned long nr_vmemmap_pages, struct zone *zone,
>> + struct memory_group *group)
>> {
>> + const bool contiguous = zone->contiguous;
>> + enum zone_contig_state new_contiguous_state;
>> int ret;
>>
>> + /*
>> + * Calculate the new zone contig state before move_pfn_range_to_zone()
>> + * sets the zone temporarily to non-contiguous.
>> + */
>> + new_contiguous_state = zone_contig_state_after_growing(zone, start_pfn,
>> + nr_pages);
>> +
>> if (nr_vmemmap_pages) {
>> ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
>> if (ret)
>> - return ret;
>> + goto restore_zone_contig;
> But zone_contig_state_after_growing() does not change zone->contiguous. Why
> do we need to save and restore it?
move_pfn_range_to_zone() clears the zone contiguous state, and it is
invoked by online_pages(). If an error occurs after
move_pfn_range_to_zone() has been called, as in online_pages(), I think
we had better restore the original value in case the previous zone
contiguous state was true.
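To illustrate the concern with a generic sketch (hypothetical names,
not the kernel code): the flag is cleared as a side effect of a deeper
call, so an early error return would leave a previously-true flag stuck
at false unless it is explicitly restored:

#include <stdbool.h>
#include <stdio.h>

static bool contiguous = true;          /* stand-in for zone->contiguous */

static int deeper_call(int fail)
{
        contiguous = false;             /* cleared unconditionally, as
                                           move_pfn_range_to_zone() does */
        return fail ? -1 : 0;
}

static int online_block(int fail)
{
        const bool saved = contiguous;  /* snapshot before the side effect */

        if (deeper_call(fail)) {
                contiguous = saved;     /* error path: undo the clearing */
                return -1;
        }
        return 0;                       /* success path recomputes the flag */
}

int main(void)
{
        online_block(1);                /* force the failure path */
        printf("contiguous=%d\n", contiguous);  /* prints 1: state restored */
        return 0;
}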
>
>> }
>>
>> ret = online_pages(start_pfn + nr_vmemmap_pages,
>> @@ -1271,7 +1320,7 @@ int online_memory_block_pages(unsigned long start_pfn,
>> if (ret) {
>> if (nr_vmemmap_pages)
>> mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
>> - return ret;
>> + goto restore_zone_contig;
>> }
>>
>> /*
>> @@ -1282,6 +1331,15 @@ int online_memory_block_pages(unsigned long start_pfn,
>> adjust_present_page_count(pfn_to_page(start_pfn), group,
>> nr_vmemmap_pages);
>>
>> + /*
>> + * Now that the ranges are indicated as online, check whether the whole
>> + * zone is contiguous.
>> + */
>> + set_zone_contiguous(zone, new_contiguous_state);
>> + return 0;
>> +
>> +restore_zone_contig:
>> + zone->contiguous = contiguous;
>> return ret;
>> }
Hi,
On Sat, Jan 24, 2026 at 08:43:51PM +0800, Li, Tianyou wrote:
> On 1/22/2026 7:43 PM, Mike Rapoport wrote:
>
> > > +int online_memory_block_pages(unsigned long start_pfn, unsigned long nr_pages,
> > > + unsigned long nr_vmemmap_pages, struct zone *zone,
> > > + struct memory_group *group)
> > > {
> > > + const bool contiguous = zone->contiguous;
> > > + enum zone_contig_state new_contiguous_state;
> > > int ret;
> > > + /*
> > > + * Calculate the new zone contig state before move_pfn_range_to_zone()
> > > + * sets the zone temporarily to non-contiguous.
> > > + */
> > > + new_contiguous_state = zone_contig_state_after_growing(zone, start_pfn,
> > > + nr_pages);
> > > +
> > > if (nr_vmemmap_pages) {
> > > ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
> > > if (ret)
> > > - return ret;
> > > + goto restore_zone_contig;
> > But zone_contig_state_after_growing() does not change zone->contiguous. Why
> > do we need to save and restore it?
>
> move_pfn_range_to_zone() clears the zone contiguous state, and it is
> invoked by online_pages(). If an error occurs after
> move_pfn_range_to_zone() has been called, as in online_pages(), I think
> we had better restore the original value in case the previous zone
> contiguous state was true.
But after move_pfn_range_to_zone() the added pages are still offline, so I
think the zone remains contiguous and the call to
clear_zone_contiguous(zone) should not be there.
BTW, as we have set_zone_contiguous(ZONE_CONTIG_NO), I think we can use
it instead of clear_zone_contiguous() and remove the latter.
--
Sincerely yours,
Mike.
On 1/27/2026 3:10 PM, Mike Rapoport wrote:
> Hi,
>
> On Sat, Jan 24, 2026 at 08:43:51PM +0800, Li, Tianyou wrote:
>> On 1/22/2026 7:43 PM, Mike Rapoport wrote:
>>
>>>> +int online_memory_block_pages(unsigned long start_pfn, unsigned long nr_pages,
>>>> + unsigned long nr_vmemmap_pages, struct zone *zone,
>>>> + struct memory_group *group)
>>>> {
>>>> + const bool contiguous = zone->contiguous;
>>>> + enum zone_contig_state new_contiguous_state;
>>>> int ret;
>>>> + /*
>>>> + * Calculate the new zone contig state before move_pfn_range_to_zone()
>>>> + * sets the zone temporarily to non-contiguous.
>>>> + */
>>>> + new_contiguous_state = zone_contig_state_after_growing(zone, start_pfn,
>>>> + nr_pages);
>>>> +
>>>> if (nr_vmemmap_pages) {
>>>> ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
>>>> if (ret)
>>>> - return ret;
>>>> + goto restore_zone_contig;
>>> But zone_contig_state_after_growing() does not change zone->contiguous. Why
>>> do we need to save and restore it?
>> move_pfn_range_to_zone() clears the zone contiguous state, and it is
>> invoked by online_pages(). If an error occurs after
>> move_pfn_range_to_zone() has been called, as in online_pages(), I think
>> we had better restore the original value in case the previous zone
>> contiguous state was true.
>
> But after move_pfn_range_to_zone() the added pages are still offline, so I
> think the zone remains contiguous and the call to
> clear_zone_contiguous(zone) should not be there.
Since move_pfn_range_to_zone() may change the zone contiguous state,
clear_zone_contiguous() should be invoked. If we did not call
clear_zone_contiguous() properly, and in particular kept the zone
contiguous state as true while the zone is actually non-contiguous
after resizing it, couldn't the code paths that rely on the contiguous
state fail? I am not sure whether we should handle the
clear_zone_contiguous() inside or outside of move_pfn_range_to_zone()
in this patch series.
> BTW, as we have set_zone_contiguous(ZONE_CONTIG_NO), I think we can use
> it instead of clear_zone_contiguous() and remove the latter.
>
Yes, we can. I hesitate to do so because clear_zone_contiguous() has a
self-explanatory name, and the clear/set pair seems like a pattern that
should be preserved. I am OK with changing the code; let's hear other
comments on whether that is feasible. Thanks.
Regards,
Tianyou