When move_pfn_range_to_zone() is invoked, it updates zone->contiguous by
checking the new zone's pfn range from beginning to end, regardless of the
previous state of the old zone. When the zone's pfn range is large, the
cost of traversing the pfn range to update zone->contiguous can be
significant.
Add fast paths to determine the zone's contiguity without scanning the
whole new zone. The cases are: if the new range does not overlap the
previous range, contiguous must be false; if the new range is adjacent to
the previous range, the previous contiguity carries over because the new
range itself is contiguous; if the newly added pages cannot fill the holes
of the previous zone, contiguous must be false.
The following test cases of memory hotplug for a VM [1], tested in the
environment [2], show that this optimization can significantly reduce the
memory hotplug time [3].
+----------------+------+---------------+--------------+----------------+
| | Size | Time (before) | Time (after) | Time Reduction |
| +------+---------------+--------------+----------------+
| Memory Hotplug | 256G | 10s | 2s | 80% |
| +------+---------------+--------------+----------------+
| | 512G | 33s | 6s | 81% |
+----------------+------+---------------+--------------+----------------+
[1] Qemu commands to hotplug 512G memory for a VM:
object_add memory-backend-ram,id=hotmem0,size=512G,share=on
device_add virtio-mem-pci,id=vmem1,memdev=hotmem0,bus=port1
qom-set vmem1 requested-size 512G
[2] Hardware : Intel Icelake server
Guest Kernel : v6.18-rc2
Qemu : v9.0.0
Launch VM :
qemu-system-x86_64 -accel kvm -cpu host \
-drive file=./Centos10_cloud.qcow2,format=qcow2,if=virtio \
-drive file=./seed.img,format=raw,if=virtio \
-smp 3,cores=3,threads=1,sockets=1,maxcpus=3 \
-m 2G,slots=10,maxmem=2052472M \
-device pcie-root-port,id=port1,bus=pcie.0,slot=1,multifunction=on \
-device pcie-root-port,id=port2,bus=pcie.0,slot=2 \
-nographic -machine q35 \
-nic user,hostfwd=tcp::3000-:22
Guest kernel auto-onlines newly added memory blocks:
echo online > /sys/devices/system/memory/auto_online_blocks
[3] The time from issuing the QEMU commands in [1] until the output of
'grep MemTotal /proc/meminfo' in the guest shows that all hotplugged
memory is recognized.
Reported-by: Nanhai Zou <nanhai.zou@intel.com>
Reported-by: Chen Zhang <zhangchen.kidd@jd.com>
Tested-by: Yuan Liu <yuan1.liu@intel.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
Reviewed-by: Yu C Chen <yu.c.chen@intel.com>
Reviewed-by: Pan Deng <pan.deng@intel.com>
Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
Reviewed-by: Yuan Liu <yuan1.liu@intel.com>
Signed-off-by: Tianyou Li <tianyou.li@intel.com>
---
mm/memory_hotplug.c | 51 ++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 48 insertions(+), 3 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0be83039c3b5..aed1827a2778 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -723,6 +723,51 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
}
+static bool __meminit check_zone_contiguous_fast(struct zone *zone,
+ unsigned long start_pfn, unsigned long nr_pages)
+{
+ const unsigned long end_pfn = start_pfn + nr_pages;
+
+ /*
+ * The moved pfn range is always contiguous, so if the zone was
+ * previously empty, the whole zone becomes contiguous.
+ */
+ if (zone_is_empty(zone)) {
+ zone->contiguous = true;
+ return true;
+ }
+
+ /*
+ * If the moved pfn range does not intersect with the original zone span,
+ * the contiguous property is surely false.
+ */
+ if (end_pfn < zone->zone_start_pfn || start_pfn > zone_end_pfn(zone)) {
+ zone->contiguous = false;
+ return true;
+ }
+
+ /*
+ * If the moved pfn range is adjacent to the original zone span, the
+ * moved range is itself contiguous, so the zone's contiguity is
+ * inherited from its previous value.
+ */
+ if (end_pfn == zone->zone_start_pfn || start_pfn == zone_end_pfn(zone))
+ return true;
+
+ /*
+ * If the original zone's holes are larger than the number of moved
+ * pages, the zone is surely not contiguous.
+ */
+ if (nr_pages < (zone->spanned_pages - zone->present_pages)) {
+ zone->contiguous = false;
+ return true;
+ }
+
+ clear_zone_contiguous(zone);
+ return false;
+}
+
#ifdef CONFIG_ZONE_DEVICE
static void section_taint_zone_device(unsigned long pfn)
{
@@ -752,8 +797,7 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
{
struct pglist_data *pgdat = zone->zone_pgdat;
int nid = pgdat->node_id;
-
- clear_zone_contiguous(zone);
+ const bool fast_path = check_zone_contiguous_fast(zone, start_pfn, nr_pages);
if (zone_is_empty(zone))
init_currently_empty_zone(zone, start_pfn, nr_pages);
@@ -783,7 +827,8 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
MEMINIT_HOTPLUG, altmap, migratetype,
isolate_pageblock);
- set_zone_contiguous(zone);
+ if (!fast_path)
+ set_zone_contiguous(zone);
}
struct auto_movable_stats {
--
2.47.1
On 11/19/25 15:06, Tianyou Li wrote:
> When invoke move_pfn_range_to_zone, it will update the zone->contiguous by
> checking the new zone's pfn range from the beginning to the end, regardless
> the previous state of the old zone. When the zone's pfn range is large, the
> cost of traversing the pfn range to update the zone->contiguous could be
> significant.
>
> Add fast paths to quickly detect cases where zone is definitely not
> contiguous without scanning the new zone. The cases are: when the new range
> did not overlap with previous range, the contiguous should be false; if the
> new range adjacent with the previous range, just need to check the new
> range; if the new added pages could not fill the hole of previous zone, the
> contiguous should be false.
>
> The following test cases of memory hotplug for a VM [1], tested in the
> environment [2], show that this optimization can significantly reduce the
> memory hotplug time [3].
>
> +----------------+------+---------------+--------------+----------------+
> | | Size | Time (before) | Time (after) | Time Reduction |
> | +------+---------------+--------------+----------------+
> | Memory Hotplug | 256G | 10s | 2s | 80% |
> | +------+---------------+--------------+----------------+
> | | 512G | 33s | 6s | 81% |
> +----------------+------+---------------+--------------+----------------+
>
> [1] Qemu commands to hotplug 512G memory for a VM:
> object_add memory-backend-ram,id=hotmem0,size=512G,share=on
> device_add virtio-mem-pci,id=vmem1,memdev=hotmem0,bus=port1
> qom-set vmem1 requested-size 512G
>
> [2] Hardware : Intel Icelake server
> Guest Kernel : v6.18-rc2
> Qemu : v9.0.0
>
> Launch VM :
> qemu-system-x86_64 -accel kvm -cpu host \
> -drive file=./Centos10_cloud.qcow2,format=qcow2,if=virtio \
> -drive file=./seed.img,format=raw,if=virtio \
> -smp 3,cores=3,threads=1,sockets=1,maxcpus=3 \
> -m 2G,slots=10,maxmem=2052472M \
> -device pcie-root-port,id=port1,bus=pcie.0,slot=1,multifunction=on \
> -device pcie-root-port,id=port2,bus=pcie.0,slot=2 \
> -nographic -machine q35 \
> -nic user,hostfwd=tcp::3000-:22
>
> Guest kernel auto-onlines newly added memory blocks:
> echo online > /sys/devices/system/memory/auto_online_blocks
>
> [3] The time from typing the QEMU commands in [1] to when the output of
> 'grep MemTotal /proc/meminfo' on Guest reflects that all hotplugged
> memory is recognized.
>
> Reported-by: Nanhai Zou <nanhai.zou@intel.com>
> Reported-by: Chen Zhang <zhangchen.kidd@jd.com>
> Tested-by: Yuan Liu <yuan1.liu@intel.com>
> Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
> Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
> Reviewed-by: Yu C Chen <yu.c.chen@intel.com>
> Reviewed-by: Pan Deng <pan.deng@intel.com>
> Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
> Reviewed-by: Yuan Liu <yuan1.liu@intel.com>
> Signed-off-by: Tianyou Li <tianyou.li@intel.com>
> ---
> mm/memory_hotplug.c | 51 ++++++++++++++++++++++++++++++++++++++++++---
> 1 file changed, 48 insertions(+), 3 deletions(-)
>
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 0be83039c3b5..aed1827a2778 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -723,6 +723,51 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
>
> }
>
> +static bool __meminit check_zone_contiguous_fast(struct zone *zone,
> + unsigned long start_pfn, unsigned long nr_pages)
> +{
> + const unsigned long end_pfn = start_pfn + nr_pages;
> +
> + /*
> + * Given the moved pfn range's contiguous property is always true,
> + * under the conditional of empty zone, the contiguous property should
> + * be true.
> + */
> + if (zone_is_empty(zone)) {
> + zone->contiguous = true;
> + return true;
> + }
> +
> + /*
> + * If the moved pfn range does not intersect with the original zone span,
> + * the contiguous property is surely false.
> + */
> + if (end_pfn < zone->zone_start_pfn || start_pfn > zone_end_pfn(zone)) {
> + zone->contiguous = false;
> + return true;
> + }
> +
> + /*
> + * If the moved pfn range is adjacent to the original zone span, given
> + * the moved pfn range's contiguous property is always true, the zone's
> + * contiguous property inherited from the original value.
> + */
> + if (end_pfn == zone->zone_start_pfn || start_pfn == zone_end_pfn(zone))
> + return true;
> +
> + /*
> + * If the original zone's hole larger than the moved pages in the range,
> + * the contiguous property is surely false.
> + */
> + if (nr_pages < (zone->spanned_pages - zone->present_pages)) {
> + zone->contiguous = false;
> + return true;
> + }
> +
> + clear_zone_contiguous(zone);
> + return false;
> +}
> +
> #ifdef CONFIG_ZONE_DEVICE
> static void section_taint_zone_device(unsigned long pfn)
> {
> @@ -752,8 +797,7 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
> {
> struct pglist_data *pgdat = zone->zone_pgdat;
> int nid = pgdat->node_id;
> -
> - clear_zone_contiguous(zone);
> + const bool fast_path = check_zone_contiguous_fast(zone, start_pfn, nr_pages);
>
> if (zone_is_empty(zone))
> init_currently_empty_zone(zone, start_pfn, nr_pages);
> @@ -783,7 +827,8 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
> MEMINIT_HOTPLUG, altmap, migratetype,
> isolate_pageblock);
>
> - set_zone_contiguous(zone);
> + if (!fast_path)
> + set_zone_contiguous(zone);
> }
>
> struct auto_movable_stats {
Agreed with Mike that we should keep clearing+resetting the bit.
Also, I don't particularly enjoy the "fast_path" terminology. Probably we
want in the end something high-level like:
bool definitely_contig;

definitely_contig = clear_zone_contiguous_for_growing(zone, start_pfn, nr_pages);

...

set_zone_contiguous(zone, definitely_contig);

We could do something similar on the removal path then, where the zone
will for sure stay contiguous if we are removing the first/last part.

bool definitely_contig;

definitely_contig = clear_zone_contiguous_for_shrinking(zone, start_pfn, nr_pages);

...

set_zone_contiguous(zone, definitely_contig);

If we can come up with a better name for definitely_contig, that would be nice.
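
For illustration, a rough sketch of what this could look like; the helper
name, the two-argument set_zone_contiguous(), and the exact conditions are
assumptions drawn only from this discussion, not existing code:

/*
 * Hypothetical helper: clear the flag while the zone is being resized and
 * report whether the final result is already known, so the caller can skip
 * the full pfn-range rescan afterwards.
 */
static bool __meminit clear_zone_contiguous_for_growing(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	bool definitely_contig = false;

	/* An empty zone grown by one contiguous range ends up contiguous. */
	if (zone_is_empty(zone))
		definitely_contig = true;
	/* Growing an already contiguous zone at either edge keeps it so. */
	else if (zone->contiguous &&
		 (end_pfn == zone->zone_start_pfn ||
		  start_pfn == zone_end_pfn(zone)))
		definitely_contig = true;

	/* Keep the flag cleared until the new memory map is initialized. */
	clear_zone_contiguous(zone);
	return definitely_contig;
}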
--
Cheers
David
Thanks for your review David.
On 11/28/2025 8:01 PM, David Hildenbrand (Red Hat) wrote:
> On 11/19/25 15:06, Tianyou Li wrote:
>> When invoke move_pfn_range_to_zone, it will update the
>> zone->contiguous by
>> checking the new zone's pfn range from the beginning to the end,
>> regardless
>> the previous state of the old zone. When the zone's pfn range is
>> large, the
>> cost of traversing the pfn range to update the zone->contiguous could be
>> significant.
>>
>> Add fast paths to quickly detect cases where zone is definitely not
>> contiguous without scanning the new zone. The cases are: when the new
>> range
>> did not overlap with previous range, the contiguous should be false;
>> if the
>> new range adjacent with the previous range, just need to check the new
>> range; if the new added pages could not fill the hole of previous
>> zone, the
>> contiguous should be false.
>>
>> The following test cases of memory hotplug for a VM [1], tested in the
>> environment [2], show that this optimization can significantly reduce
>> the
>> memory hotplug time [3].
>>
>> +----------------+------+---------------+--------------+----------------+
>>
>> | | Size | Time (before) | Time (after) | Time
>> Reduction |
>> | +------+---------------+--------------+----------------+
>> | Memory Hotplug | 256G | 10s | 2s | 80% |
>> | +------+---------------+--------------+----------------+
>> | | 512G | 33s | 6s | 81% |
>> +----------------+------+---------------+--------------+----------------+
>>
>>
>> [1] Qemu commands to hotplug 512G memory for a VM:
>> object_add memory-backend-ram,id=hotmem0,size=512G,share=on
>> device_add virtio-mem-pci,id=vmem1,memdev=hotmem0,bus=port1
>> qom-set vmem1 requested-size 512G
>>
>> [2] Hardware : Intel Icelake server
>> Guest Kernel : v6.18-rc2
>> Qemu : v9.0.0
>>
>> Launch VM :
>> qemu-system-x86_64 -accel kvm -cpu host \
>> -drive file=./Centos10_cloud.qcow2,format=qcow2,if=virtio \
>> -drive file=./seed.img,format=raw,if=virtio \
>> -smp 3,cores=3,threads=1,sockets=1,maxcpus=3 \
>> -m 2G,slots=10,maxmem=2052472M \
>> -device
>> pcie-root-port,id=port1,bus=pcie.0,slot=1,multifunction=on \
>> -device pcie-root-port,id=port2,bus=pcie.0,slot=2 \
>> -nographic -machine q35 \
>> -nic user,hostfwd=tcp::3000-:22
>>
>> Guest kernel auto-onlines newly added memory blocks:
>> echo online > /sys/devices/system/memory/auto_online_blocks
>>
>> [3] The time from typing the QEMU commands in [1] to when the output of
>> 'grep MemTotal /proc/meminfo' on Guest reflects that all hotplugged
>> memory is recognized.
>>
>> Reported-by: Nanhai Zou <nanhai.zou@intel.com>
>> Reported-by: Chen Zhang <zhangchen.kidd@jd.com>
>> Tested-by: Yuan Liu <yuan1.liu@intel.com>
>> Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
>> Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
>> Reviewed-by: Yu C Chen <yu.c.chen@intel.com>
>> Reviewed-by: Pan Deng <pan.deng@intel.com>
>> Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
>> Reviewed-by: Yuan Liu <yuan1.liu@intel.com>
>> Signed-off-by: Tianyou Li <tianyou.li@intel.com>
>> ---
>> mm/memory_hotplug.c | 51 ++++++++++++++++++++++++++++++++++++++++++---
>> 1 file changed, 48 insertions(+), 3 deletions(-)
>>
>> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
>> index 0be83039c3b5..aed1827a2778 100644
>> --- a/mm/memory_hotplug.c
>> +++ b/mm/memory_hotplug.c
>> @@ -723,6 +723,51 @@ static void __meminit resize_pgdat_range(struct
>> pglist_data *pgdat, unsigned lon
>> }
>> +static bool __meminit check_zone_contiguous_fast(struct zone *zone,
>> + unsigned long start_pfn, unsigned long nr_pages)
>> +{
>> + const unsigned long end_pfn = start_pfn + nr_pages;
>> +
>> + /*
>> + * Given the moved pfn range's contiguous property is always true,
>> + * under the conditional of empty zone, the contiguous property
>> should
>> + * be true.
>> + */
>> + if (zone_is_empty(zone)) {
>> + zone->contiguous = true;
>> + return true;
>> + }
>> +
>> + /*
>> + * If the moved pfn range does not intersect with the original
>> zone span,
>> + * the contiguous property is surely false.
>> + */
>> + if (end_pfn < zone->zone_start_pfn || start_pfn >
>> zone_end_pfn(zone)) {
>> + zone->contiguous = false;
>> + return true;
>> + }
>> +
>> + /*
>> + * If the moved pfn range is adjacent to the original zone span,
>> given
>> + * the moved pfn range's contiguous property is always true, the
>> zone's
>> + * contiguous property inherited from the original value.
>> + */
>> + if (end_pfn == zone->zone_start_pfn || start_pfn ==
>> zone_end_pfn(zone))
>> + return true;
>> +
>> + /*
>> + * If the original zone's hole larger than the moved pages in
>> the range,
>> + * the contiguous property is surely false.
>> + */
>> + if (nr_pages < (zone->spanned_pages - zone->present_pages)) {
>> + zone->contiguous = false;
>> + return true;
>> + }
>> +
>> + clear_zone_contiguous(zone);
>> + return false;
>> +}
>> +
>> #ifdef CONFIG_ZONE_DEVICE
>> static void section_taint_zone_device(unsigned long pfn)
>> {
>> @@ -752,8 +797,7 @@ void move_pfn_range_to_zone(struct zone *zone,
>> unsigned long start_pfn,
>> {
>> struct pglist_data *pgdat = zone->zone_pgdat;
>> int nid = pgdat->node_id;
>> -
>> - clear_zone_contiguous(zone);
>> + const bool fast_path = check_zone_contiguous_fast(zone,
>> start_pfn, nr_pages);
>> if (zone_is_empty(zone))
>> init_currently_empty_zone(zone, start_pfn, nr_pages);
>> @@ -783,7 +827,8 @@ void move_pfn_range_to_zone(struct zone *zone,
>> unsigned long start_pfn,
>> MEMINIT_HOTPLUG, altmap, migratetype,
>> isolate_pageblock);
>> - set_zone_contiguous(zone);
>> + if (!fast_path)
>> + set_zone_contiguous(zone);
>> }
>> struct auto_movable_stats {
>
> Agreed with Mike that we should keep clearing+resetting the bit.
Got it. I worked with Yuan Liu to understand the risk of setting
zone->contiguous before the pfn range is fully initialized. It seems the
pageblock_pfn_to_page() code path could be affected, so it is potentially
not safe.
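
For context, the consumer in question looks roughly like the following
(paraphrased from memory, not copied verbatim from the tree). If
zone->contiguous were set before the moved range's memory map is fully
initialized, this fast path would skip the per-pfn validity checks in
__pageblock_pfn_to_page() and could hand back a struct page that is not
yet initialized:

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
		unsigned long end_pfn, struct zone *zone)
{
	/* Contiguous zones can skip the per-block validity checks. */
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}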
> Also, I don't particularly enjoy the "fast_path" terminology. Probably we
> want in the end something high-level like:
>
>
> bool definetly_contig;
>
> definetly_contig = clear_zone_contiguous_for_growing(zone, start_pfn,
> nr_pages);
>
> ...
>
> set_zone_contiguous(zone, definetly_contig);
>
>
> We could do something similar on the removal path then, where the zone
> will for sure stay contiguous if we are removing the first/last part.
>
>
> bool definetly_contig;
>
> stays_contiguous = clear_zone_contiguous_for_shrinking(zone,
> start_pfn, nr_pages);
>
> ...
>
> set_zone_contiguous(zone, definetly_contig);
>
>
>
> If we can come up for a better name for definetly_contig that would be
> nice.
>
Instead of a bool value, could clear_zone_contiguous_for_growing
and clear_zone_contiguous_for_shrinking return an enum value indicating
one of three states: 1. DEFINITELY_CONTIGUOUS;
2. DEFINITELY_NOT_CONTIGUOUS; 3. UNDETERMINED_CONTIGUOUS? Then
set_zone_contiguous() would take the state and skip the contiguous check
when it is DEFINITELY_CONTIGUOUS or DEFINITELY_NOT_CONTIGUOUS.
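
A minimal sketch of that idea, purely illustrative (the enum and the
two-argument set_zone_contiguous() below do not exist in the tree):

enum zone_contig_state {
	UNDETERMINED_CONTIGUOUS,	/* zero value: caller must scan */
	DEFINITELY_CONTIGUOUS,
	DEFINITELY_NOT_CONTIGUOUS,
};

void set_zone_contiguous(struct zone *zone, enum zone_contig_state state)
{
	if (state != UNDETERMINED_CONTIGUOUS) {
		/* Outcome already known: skip the pfn-range walk entirely. */
		zone->contiguous = (state == DEFINITELY_CONTIGUOUS);
		return;
	}
	/* ... fall back to the existing full contiguity scan ... */
}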
Regards,
Tianyou
>
> Instead of a bool value, could clear_zone_contiguous_for_growing
> and clear_zone_contiguous_for_shrinking return an enum value indicating
> one of three states: 1. DEFINITELY_CONTIGUOUS;
> 2. DEFINITELY_NOT_CONTIGUOUS; 3. UNDETERMINED_CONTIGUOUS? Then
> set_zone_contiguous() would take the state and skip the contiguous check
> when it is DEFINITELY_CONTIGUOUS or DEFINITELY_NOT_CONTIGUOUS.

I had the exact same thought while writing my reply, so it's worth
investigating.

If that helps to come up with even better+descriptive variable/function
names, even better :)

--
Cheers
David
Thanks David for your time to review.

On 11/29/2025 12:04 AM, David Hildenbrand (Red Hat) wrote:
>>
>> Instead of a bool value, could clear_zone_contiguous_for_growing
>> and clear_zone_contiguous_for_shrinking return an enum value indicating
>> one of three states: 1. DEFINITELY_CONTIGUOUS;
>> 2. DEFINITELY_NOT_CONTIGUOUS; 3. UNDETERMINED_CONTIGUOUS? Then
>> set_zone_contiguous() would take the state and skip the contiguous check
>> when it is DEFINITELY_CONTIGUOUS or DEFINITELY_NOT_CONTIGUOUS.
>
> I had the exact same thought while writing my reply, so it's worth
> investigating.
>
> If that helps to come up with even better+descriptive
> variable/function names, even better :)
>
I've created a patch v4 for review in a new thread, as previously
suggested; Yuan Liu added the test results for memory plug and unplug.
Any comments or suggestions are welcome. Appreciated.

Regards,
Tianyou
Hi,
Please start a new thread when sending a new version of a patch next time.
And as Wei mentioned, wait a bit for the discussion on vN to settle before
sending vN+1.
On Wed, Nov 19, 2025 at 10:06:57PM +0800, Tianyou Li wrote:
> When invoke move_pfn_range_to_zone, it will update the zone->contiguous by
> checking the new zone's pfn range from the beginning to the end, regardless
> the previous state of the old zone. When the zone's pfn range is large, the
> cost of traversing the pfn range to update the zone->contiguous could be
> significant.
>
> Add fast paths to quickly detect cases where zone is definitely not
> contiguous without scanning the new zone. The cases are: when the new range
> did not overlap with previous range, the contiguous should be false; if the
> new range adjacent with the previous range, just need to check the new
> range; if the new added pages could not fill the hole of previous zone, the
> contiguous should be false.
>
> The following test cases of memory hotplug for a VM [1], tested in the
> environment [2], show that this optimization can significantly reduce the
> memory hotplug time [3].
>
> +----------------+------+---------------+--------------+----------------+
> | | Size | Time (before) | Time (after) | Time Reduction |
> | +------+---------------+--------------+----------------+
> | Memory Hotplug | 256G | 10s | 2s | 80% |
> | +------+---------------+--------------+----------------+
> | | 512G | 33s | 6s | 81% |
> +----------------+------+---------------+--------------+----------------+
>
> [1] Qemu commands to hotplug 512G memory for a VM:
> object_add memory-backend-ram,id=hotmem0,size=512G,share=on
> device_add virtio-mem-pci,id=vmem1,memdev=hotmem0,bus=port1
> qom-set vmem1 requested-size 512G
>
> [2] Hardware : Intel Icelake server
> Guest Kernel : v6.18-rc2
> Qemu : v9.0.0
>
> Launch VM :
> qemu-system-x86_64 -accel kvm -cpu host \
> -drive file=./Centos10_cloud.qcow2,format=qcow2,if=virtio \
> -drive file=./seed.img,format=raw,if=virtio \
> -smp 3,cores=3,threads=1,sockets=1,maxcpus=3 \
> -m 2G,slots=10,maxmem=2052472M \
> -device pcie-root-port,id=port1,bus=pcie.0,slot=1,multifunction=on \
> -device pcie-root-port,id=port2,bus=pcie.0,slot=2 \
> -nographic -machine q35 \
> -nic user,hostfwd=tcp::3000-:22
>
> Guest kernel auto-onlines newly added memory blocks:
> echo online > /sys/devices/system/memory/auto_online_blocks
>
> [3] The time from typing the QEMU commands in [1] to when the output of
> 'grep MemTotal /proc/meminfo' on Guest reflects that all hotplugged
> memory is recognized.
>
> Reported-by: Nanhai Zou <nanhai.zou@intel.com>
> Reported-by: Chen Zhang <zhangchen.kidd@jd.com>
> Tested-by: Yuan Liu <yuan1.liu@intel.com>
> Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
> Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
> Reviewed-by: Yu C Chen <yu.c.chen@intel.com>
> Reviewed-by: Pan Deng <pan.deng@intel.com>
> Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
> Reviewed-by: Yuan Liu <yuan1.liu@intel.com>
> Signed-off-by: Tianyou Li <tianyou.li@intel.com>
> ---
> mm/memory_hotplug.c | 51 ++++++++++++++++++++++++++++++++++++++++++---
> 1 file changed, 48 insertions(+), 3 deletions(-)
>
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 0be83039c3b5..aed1827a2778 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -723,6 +723,51 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
>
> }
>
> +static bool __meminit check_zone_contiguous_fast(struct zone *zone,
> + unsigned long start_pfn, unsigned long nr_pages)
> +{
> + const unsigned long end_pfn = start_pfn + nr_pages;
> +
> + /*
> + * Given the moved pfn range's contiguous property is always true,
> + * under the conditional of empty zone, the contiguous property should
> + * be true.
> + */
> + if (zone_is_empty(zone)) {
> + zone->contiguous = true;
I don't think it's safe to set zone->contiguous until the end of
move_pfn_range_to_zone(). See commit feee6b298916 ("mm/memory_hotplug:
shrink zones when offlining memory").
check_zone_contiguous_fast() should only check if the zone remains
contiguous after hotplug or it's certainly discontinuous, but should not
set zone->contiguous. It still must be cleared before resizing the zone and
set after the initialization of the memory map.
> + return true;
> + }
> +
> + /*
> + * If the moved pfn range does not intersect with the original zone span,
> + * the contiguous property is surely false.
> + */
> + if (end_pfn < zone->zone_start_pfn || start_pfn > zone_end_pfn(zone)) {
> + zone->contiguous = false;
> + return true;
> + }
> +
> + /*
> + * If the moved pfn range is adjacent to the original zone span, given
> + * the moved pfn range's contiguous property is always true, the zone's
> + * contiguous property inherited from the original value.
> + */
> + if (end_pfn == zone->zone_start_pfn || start_pfn == zone_end_pfn(zone))
> + return true;
> +
> + /*
> + * If the original zone's hole larger than the moved pages in the range,
> + * the contiguous property is surely false.
> + */
> + if (nr_pages < (zone->spanned_pages - zone->present_pages)) {
> + zone->contiguous = false;
> + return true;
> + }
> +
> + clear_zone_contiguous(zone);
> + return false;
> +}
> +
> #ifdef CONFIG_ZONE_DEVICE
> static void section_taint_zone_device(unsigned long pfn)
> {
> @@ -752,8 +797,7 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
> {
> struct pglist_data *pgdat = zone->zone_pgdat;
> int nid = pgdat->node_id;
> -
> - clear_zone_contiguous(zone);
> + const bool fast_path = check_zone_contiguous_fast(zone, start_pfn, nr_pages);
>
> if (zone_is_empty(zone))
> init_currently_empty_zone(zone, start_pfn, nr_pages);
> @@ -783,7 +827,8 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
> MEMINIT_HOTPLUG, altmap, migratetype,
> isolate_pageblock);
>
> - set_zone_contiguous(zone);
> + if (!fast_path)
> + set_zone_contiguous(zone);
> }
>
> struct auto_movable_stats {
> --
> 2.47.1
>
--
Sincerely yours,
Mike.
Hi Mike,
Thanks for your review. Appreciated.
On 11/20/2025 8:00 PM, Mike Rapoport wrote:
> Hi,
>
> Please start a new thread when sending a new version of a patch next time.
Got it.
> And as Wei mentioned, wait a bit for the discussion on vN to settle before
> sending vN+1.
Will do. Thanks.
> On Wed, Nov 19, 2025 at 10:06:57PM +0800, Tianyou Li wrote:
>> When invoke move_pfn_range_to_zone, it will update the zone->contiguous by
>> checking the new zone's pfn range from the beginning to the end, regardless
>> the previous state of the old zone. When the zone's pfn range is large, the
>> cost of traversing the pfn range to update the zone->contiguous could be
>> significant.
>>
>> Add fast paths to quickly detect cases where zone is definitely not
>> contiguous without scanning the new zone. The cases are: when the new range
>> did not overlap with previous range, the contiguous should be false; if the
>> new range adjacent with the previous range, just need to check the new
>> range; if the new added pages could not fill the hole of previous zone, the
>> contiguous should be false.
>>
>> The following test cases of memory hotplug for a VM [1], tested in the
>> environment [2], show that this optimization can significantly reduce the
>> memory hotplug time [3].
>>
>> +----------------+------+---------------+--------------+----------------+
>> | | Size | Time (before) | Time (after) | Time Reduction |
>> | +------+---------------+--------------+----------------+
>> | Memory Hotplug | 256G | 10s | 2s | 80% |
>> | +------+---------------+--------------+----------------+
>> | | 512G | 33s | 6s | 81% |
>> +----------------+------+---------------+--------------+----------------+
>>
>> [1] Qemu commands to hotplug 512G memory for a VM:
>> object_add memory-backend-ram,id=hotmem0,size=512G,share=on
>> device_add virtio-mem-pci,id=vmem1,memdev=hotmem0,bus=port1
>> qom-set vmem1 requested-size 512G
>>
>> [2] Hardware : Intel Icelake server
>> Guest Kernel : v6.18-rc2
>> Qemu : v9.0.0
>>
>> Launch VM :
>> qemu-system-x86_64 -accel kvm -cpu host \
>> -drive file=./Centos10_cloud.qcow2,format=qcow2,if=virtio \
>> -drive file=./seed.img,format=raw,if=virtio \
>> -smp 3,cores=3,threads=1,sockets=1,maxcpus=3 \
>> -m 2G,slots=10,maxmem=2052472M \
>> -device pcie-root-port,id=port1,bus=pcie.0,slot=1,multifunction=on \
>> -device pcie-root-port,id=port2,bus=pcie.0,slot=2 \
>> -nographic -machine q35 \
>> -nic user,hostfwd=tcp::3000-:22
>>
>> Guest kernel auto-onlines newly added memory blocks:
>> echo online > /sys/devices/system/memory/auto_online_blocks
>>
>> [3] The time from typing the QEMU commands in [1] to when the output of
>> 'grep MemTotal /proc/meminfo' on Guest reflects that all hotplugged
>> memory is recognized.
>>
>> Reported-by: Nanhai Zou <nanhai.zou@intel.com>
>> Reported-by: Chen Zhang <zhangchen.kidd@jd.com>
>> Tested-by: Yuan Liu <yuan1.liu@intel.com>
>> Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
>> Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
>> Reviewed-by: Yu C Chen <yu.c.chen@intel.com>
>> Reviewed-by: Pan Deng <pan.deng@intel.com>
>> Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
>> Reviewed-by: Yuan Liu <yuan1.liu@intel.com>
>> Signed-off-by: Tianyou Li <tianyou.li@intel.com>
>> ---
>> mm/memory_hotplug.c | 51 ++++++++++++++++++++++++++++++++++++++++++---
>> 1 file changed, 48 insertions(+), 3 deletions(-)
>>
>> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
>> index 0be83039c3b5..aed1827a2778 100644
>> --- a/mm/memory_hotplug.c
>> +++ b/mm/memory_hotplug.c
>> @@ -723,6 +723,51 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
>>
>> }
>>
>> +static bool __meminit check_zone_contiguous_fast(struct zone *zone,
>> + unsigned long start_pfn, unsigned long nr_pages)
>> +{
>> + const unsigned long end_pfn = start_pfn + nr_pages;
>> +
>> + /*
>> + * Given the moved pfn range's contiguous property is always true,
>> + * under the conditional of empty zone, the contiguous property should
>> + * be true.
>> + */
>> + if (zone_is_empty(zone)) {
>> + zone->contiguous = true;
> I don't think it's safe to set zone->contiguous until the end of
> move_pfn_range_to_zone(). See commit feee6b298916 ("mm/memory_hotplug:
> shrink zones when offlining memory").
>
> check_zone_contiguous_fast() should only check if the zone remains
> contiguous after hotplug or it's certainly discontinuous, but should not
> set zone->contiguous. It still must be cleared before resizing the zone and
> set after the initialization of the memory map.
Thanks for the pointer. Allow me to learn more about the context and get
back to you soon. Thanks.
>> + return true;
>> + }
>> +
>> + /*
>> + * If the moved pfn range does not intersect with the original zone span,
>> + * the contiguous property is surely false.
>> + */
>> + if (end_pfn < zone->zone_start_pfn || start_pfn > zone_end_pfn(zone)) {
>> + zone->contiguous = false;
>> + return true;
>> + }
>> +
>> + /*
>> + * If the moved pfn range is adjacent to the original zone span, given
>> + * the moved pfn range's contiguous property is always true, the zone's
>> + * contiguous property inherited from the original value.
>> + */
>> + if (end_pfn == zone->zone_start_pfn || start_pfn == zone_end_pfn(zone))
>> + return true;
>> +
>> + /*
>> + * If the original zone's hole larger than the moved pages in the range,
>> + * the contiguous property is surely false.
>> + */
>> + if (nr_pages < (zone->spanned_pages - zone->present_pages)) {
>> + zone->contiguous = false;
>> + return true;
>> + }
>> +
>> + clear_zone_contiguous(zone);
>> + return false;
>> +}
>> +
>> #ifdef CONFIG_ZONE_DEVICE
>> static void section_taint_zone_device(unsigned long pfn)
>> {
>> @@ -752,8 +797,7 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
>> {
>> struct pglist_data *pgdat = zone->zone_pgdat;
>> int nid = pgdat->node_id;
>> -
>> - clear_zone_contiguous(zone);
>> + const bool fast_path = check_zone_contiguous_fast(zone, start_pfn, nr_pages);
>>
>> if (zone_is_empty(zone))
>> init_currently_empty_zone(zone, start_pfn, nr_pages);
>> @@ -783,7 +827,8 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
>> MEMINIT_HOTPLUG, altmap, migratetype,
>> isolate_pageblock);
>>
>> - set_zone_contiguous(zone);
>> + if (!fast_path)
>> + set_zone_contiguous(zone);
>> }
>>
>> struct auto_movable_stats {
>> --
>> 2.47.1
>>