From nobody Sun Feb 8 01:31:04 2026
From: Tianyou Li
To: David Hildenbrand, Oscar Salvador, Mike Rapoport, Wei Yang
Cc: linux-mm@kvack.org, Yong Hu, Nanhai Zou, Yuan Liu, Tim Chen,
    Qiuxu Zhuo, Yu C Chen, Pan Deng, Tianyou Li, Chen Zhang,
    linux-kernel@vger.kernel.org
Subject: [PATCH v7 1/2] mm/memory hotplug: fix zone->contiguous always false when hotplug
Date: Mon, 22 Dec 2025 22:58:06 +0800
Message-ID: <20251222145807.11351-2-tianyou.li@intel.com>
In-Reply-To: <20251222145807.11351-1-tianyou.li@intel.com>
References: <20251222145807.11351-1-tianyou.li@intel.com>

set_zone_contiguous() uses __pageblock_pfn_to_page() to check that every
pageblock lies entirely within the same zone. This assumes the
corresponding memory section is already online; otherwise
__pageblock_pfn_to_page() returns NULL and the zone is treated as not
contiguous. Because move_pfn_range_to_zone() called set_zone_contiguous()
before the memory sections were onlined, zone->contiguous always ended up
false.

Fix this by removing the set_zone_contiguous() call from
move_pfn_range_to_zone() and invoking it only after the memory sections
have been onlined.

remove_pfn_range_from_zone() does not have this problem, because the
memory sections are still online at the time it calls
set_zone_contiguous().
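A rough userspace sketch of the ordering issue (the section-online state
and the pageblock walk are simplified stand-ins, not the kernel code):

  /* Simplified model: contiguity computed before vs. after onlining. */
  #include <stdbool.h>
  #include <stdio.h>

  #define SECTIONS 4
  static bool section_online[SECTIONS];

  /* Stand-in for __pageblock_pfn_to_page(): fails for offline sections. */
  static bool pageblock_ok(int section)
  {
  	return section_online[section];
  }

  /* Stand-in for set_zone_contiguous(): walks every "section" of the zone. */
  static bool zone_contiguous(void)
  {
  	for (int i = 0; i < SECTIONS; i++)
  		if (!pageblock_ok(i))
  			return false;
  	return true;
  }

  int main(void)
  {
  	/* Old flow: check runs before the sections go online. */
  	bool early = zone_contiguous();

  	for (int i = 0; i < SECTIONS; i++)	/* like online_mem_sections() */
  		section_online[i] = true;

  	/* Fixed flow: check runs only after onlining. */
  	bool late = zone_contiguous();

  	printf("before onlining: %d, after onlining: %d\n", early, late);
  	return 0;
  }

With the old ordering the check always reports false; running it after
onlining gives the expected result.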
Reviewed-by: Nanhai Zou
Signed-off-by: Yuan Liu
Signed-off-by: Tianyou Li
---
 mm/memory_hotplug.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a63ec679d861..ce6caf8674a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -782,8 +782,6 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 	memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
 			  MEMINIT_HOTPLUG, altmap, migratetype,
 			  isolate_pageblock);
-
-	set_zone_contiguous(zone);
 }
 
 struct auto_movable_stats {
@@ -1115,6 +1113,7 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
 	if (nr_pages >= PAGES_PER_SECTION)
 		online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
 
+	set_zone_contiguous(zone);
 	return ret;
 }
 
@@ -1205,6 +1204,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages,
 	}
 
 	online_pages_range(pfn, nr_pages);
+	set_zone_contiguous(zone);
 	adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
 
 	if (node_arg.nid >= 0)
-- 
2.47.1

From nobody Sun Feb 8 01:31:04 2026
From: Tianyou Li
To: David Hildenbrand, Oscar Salvador, Mike Rapoport, Wei Yang
Cc: linux-mm@kvack.org, Yong Hu, Nanhai Zou, Yuan Liu, Tim Chen,
    Qiuxu Zhuo, Yu C Chen, Pan Deng, Tianyou Li, Chen Zhang,
    linux-kernel@vger.kernel.org
Subject: [PATCH v7 2/2] mm/memory hotplug/unplug: Optimize zone->contiguous update when changing pfn range
Date: Mon, 22 Dec 2025 22:58:07 +0800
Message-ID: <20251222145807.11351-3-tianyou.li@intel.com>
In-Reply-To: <20251222145807.11351-1-tianyou.li@intel.com>
References: <20251222145807.11351-1-tianyou.li@intel.com>

move_pfn_range_to_zone() and remove_pfn_range_from_zone() update
zone->contiguous by walking the resulting zone's pfn range from beginning
to end, regardless of the zone's previous state. When the zone's pfn range
is large, the cost of this traversal can be significant.

Add fast paths that decide the result without scanning the whole zone:
if the new range does not overlap the previous range, the zone cannot be
contiguous; if the new range is adjacent to the previous range, only the
new range needs to be considered; and if the newly added pages cannot fill
the holes of the previous zone, the zone cannot be contiguous.
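A rough userspace sketch of these decision rules (simplified types and
made-up pfn values; the real logic is zone_contig_state_after_growing()
in the diff below):

  #include <stdbool.h>
  #include <stdio.h>

  /* 0 = not contiguous, 1 = contiguous, 2 = full scan still needed */
  enum contig_state { CONTIG_NO, CONTIG_YES, CONTIG_MAYBE };

  struct zone_model {
  	unsigned long start_pfn, end_pfn;	/* current zone span */
  	unsigned long present, spanned;
  	bool contiguous;
  };

  static enum contig_state after_growing(const struct zone_model *z,
  					 unsigned long start, unsigned long nr)
  {
  	unsigned long end = start + nr;

  	if (end < z->start_pfn || start > z->end_pfn)
  		return CONTIG_NO;	/* disjoint range creates a hole */
  	if (end == z->start_pfn || start == z->end_pfn)
  		return z->contiguous ? CONTIG_YES : CONTIG_NO;	/* adjacent */
  	if (nr < z->spanned - z->present)
  		return CONTIG_NO;	/* too few pages to fill existing holes */
  	return CONTIG_MAYBE;
  }

  int main(void)
  {
  	struct zone_model z = {
  		.start_pfn = 0x1000, .end_pfn = 0x2000,
  		.present = 0x1000, .spanned = 0x1000,
  		.contiguous = true,
  	};

  	printf("disjoint: %d\n", after_growing(&z, 0x3000, 0x100));	/* NO */
  	printf("adjacent: %d\n", after_growing(&z, 0x2000, 0x100));	/* YES */
  	printf("overlaps: %d\n", after_growing(&z, 0x1800, 0x100));	/* MAYBE */
  	return 0;
  }

Only the MAYBE case still requires the full pageblock walk.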
The following test cases of memory hotplug for a VM [1], tested in the
environment [2], show that this optimization can significantly reduce the
memory hotplug time [3].

+----------------+------+---------------+--------------+----------------+
|                | Size | Time (before) | Time (after) | Time Reduction |
|                +------+---------------+--------------+----------------+
|  Plug Memory   | 256G |      10s      |      2s      |      80%       |
|                +------+---------------+--------------+----------------+
|                | 512G |      33s      |      6s      |      81%       |
+----------------+------+---------------+--------------+----------------+

+----------------+------+---------------+--------------+----------------+
|                | Size | Time (before) | Time (after) | Time Reduction |
|                +------+---------------+--------------+----------------+
| Unplug Memory  | 256G |      10s      |      2s      |      80%       |
|                +------+---------------+--------------+----------------+
|                | 512G |      34s      |      6s      |      82%       |
+----------------+------+---------------+--------------+----------------+

[1] QEMU commands to hotplug 256G/512G memory for a VM:
    object_add memory-backend-ram,id=hotmem0,size=256G/512G,share=on
    device_add virtio-mem-pci,id=vmem1,memdev=hotmem0,bus=port1
    qom-set vmem1 requested-size 256G/512G  (Plug Memory)
    qom-set vmem1 requested-size 0G         (Unplug Memory)

[2] Hardware     : Intel Icelake server
    Guest Kernel : v6.18-rc2
    Qemu         : v9.0.0
    Launch VM    :
      qemu-system-x86_64 -accel kvm -cpu host \
        -drive file=./Centos10_cloud.qcow2,format=qcow2,if=virtio \
        -drive file=./seed.img,format=raw,if=virtio \
        -smp 3,cores=3,threads=1,sockets=1,maxcpus=3 \
        -m 2G,slots=10,maxmem=2052472M \
        -device pcie-root-port,id=port1,bus=pcie.0,slot=1,multifunction=on \
        -device pcie-root-port,id=port2,bus=pcie.0,slot=2 \
        -nographic -machine q35 \
        -nic user,hostfwd=tcp::3000-:22
    The guest kernel auto-onlines newly added memory blocks:
      echo online > /sys/devices/system/memory/auto_online_blocks

[3] The time from typing the QEMU commands in [1] to when the output of
    'grep MemTotal /proc/meminfo' on the guest shows that all hotplugged
    memory has been recognized.

Reported-by: Nanhai Zou
Reported-by: Chen Zhang
Tested-by: Yuan Liu
Reviewed-by: Tim Chen
Reviewed-by: Qiuxu Zhuo
Reviewed-by: Yu C Chen
Reviewed-by: Pan Deng
Reviewed-by: Nanhai Zou
Reviewed-by: Yuan Liu
Signed-off-by: Tianyou Li
---
 mm/internal.h       |  8 ++++-
 mm/memory_hotplug.c | 75 +++++++++++++++++++++++++++++++++++++++++++--
 mm/mm_init.c        | 12 ++++++--
 3 files changed, 89 insertions(+), 6 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index e430da900430..828aed5c2fef 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -730,7 +730,13 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
 	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
 }
 
-void set_zone_contiguous(struct zone *zone);
+enum zone_contig_state {
+	ZONE_CONTIG_YES,
+	ZONE_CONTIG_NO,
+	ZONE_CONTIG_MAYBE,
+};
+
+void set_zone_contiguous(struct zone *zone, enum zone_contig_state state);
 bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
 		unsigned long nr_pages);
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ce6caf8674a5..f51293be12eb 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -544,6 +544,28 @@ static void update_pgdat_span(struct pglist_data *pgdat)
 	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
 }
 
+static enum zone_contig_state zone_contig_state_after_shrinking(struct zone *zone,
+		unsigned long start_pfn, unsigned long nr_pages)
+{
+	const unsigned long end_pfn = start_pfn + nr_pages;
+
+	/*
+	 * If the removed pfn range is inside the original zone span, the
+	 * contiguous property is surely false.
+	 */
+	if (start_pfn > zone->zone_start_pfn && end_pfn < zone_end_pfn(zone))
+		return ZONE_CONTIG_NO;
+
+	/* If the removed pfn range is at the beginning or end of the
+	 * original zone span, the contiguous property is preserved when
+	 * the original zone is contiguous.
+	 */
+	if (start_pfn == zone->zone_start_pfn || end_pfn == zone_end_pfn(zone))
+		return zone->contiguous ? ZONE_CONTIG_YES : ZONE_CONTIG_MAYBE;
+
+	return ZONE_CONTIG_MAYBE;
+}
+
 void remove_pfn_range_from_zone(struct zone *zone,
 				unsigned long start_pfn,
 				unsigned long nr_pages)
@@ -551,6 +573,7 @@ void remove_pfn_range_from_zone(struct zone *zone,
 	const unsigned long end_pfn = start_pfn + nr_pages;
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	unsigned long pfn, cur_nr_pages;
+	enum zone_contig_state new_contiguous_state = ZONE_CONTIG_MAYBE;
 
 	/* Poison struct pages because they are now uninitialized again. */
 	for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
@@ -571,12 +594,14 @@ void remove_pfn_range_from_zone(struct zone *zone,
 	if (zone_is_zone_device(zone))
 		return;
 
+	new_contiguous_state = zone_contig_state_after_shrinking(zone, start_pfn,
+			nr_pages);
 	clear_zone_contiguous(zone);
 
 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
 	update_pgdat_span(pgdat);
 
-	set_zone_contiguous(zone);
+	set_zone_contiguous(zone, new_contiguous_state);
 }
 
 /**
@@ -736,6 +761,39 @@ static inline void section_taint_zone_device(unsigned long pfn)
 }
 #endif
 
+static enum zone_contig_state zone_contig_state_after_growing(struct zone *zone,
+		unsigned long start_pfn, unsigned long nr_pages)
+{
+	const unsigned long end_pfn = start_pfn + nr_pages;
+
+	if (zone_is_empty(zone))
+		return ZONE_CONTIG_YES;
+
+	/*
+	 * If the moved pfn range does not intersect with the original zone span,
+	 * the contiguous property is surely false.
+	 */
+	if (end_pfn < zone->zone_start_pfn || start_pfn > zone_end_pfn(zone))
+		return ZONE_CONTIG_NO;
+
+	/*
+	 * If the moved pfn range is adjacent to the original zone span, given
+	 * that the moved pfn range itself is always contiguous, the zone's
+	 * contiguous property is inherited from the original value.
+	 */
+	if (end_pfn == zone->zone_start_pfn || start_pfn == zone_end_pfn(zone))
+		return zone->contiguous ? ZONE_CONTIG_YES : ZONE_CONTIG_NO;
+
+	/*
+	 * If the original zone's holes are larger than the moved pages in the
+	 * range, the contiguous property is surely false.
+	 */
+	if (nr_pages < (zone->spanned_pages - zone->present_pages))
+		return ZONE_CONTIG_NO;
+
+	return ZONE_CONTIG_MAYBE;
+}
+
 /*
  * Associate the pfn range with the given zone, initializing the memmaps
  * and resizing the pgdat/zone data to span the added pages. After this
@@ -1090,11 +1148,20 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
 {
 	unsigned long end_pfn = pfn + nr_pages;
 	int ret, i;
+	enum zone_contig_state new_contiguous_state = ZONE_CONTIG_NO;
 
 	ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
 	if (ret)
 		return ret;
 
+	/*
+	 * If the allocated memmap pages are not in a full section, keep the
+	 * contiguous state as ZONE_CONTIG_NO.
+	 */
+	if (IS_ALIGNED(end_pfn, PAGES_PER_SECTION))
+		new_contiguous_state = zone_contig_state_after_growing(zone,
+							pfn, nr_pages);
+
 	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE,
 				false);
 
@@ -1113,7 +1180,7 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
 	if (nr_pages >= PAGES_PER_SECTION)
 		online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
 
-	set_zone_contiguous(zone);
+	set_zone_contiguous(zone, new_contiguous_state);
 	return ret;
 }
 
@@ -1153,6 +1220,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages,
 	const int nid = zone_to_nid(zone);
 	int need_zonelists_rebuild = 0;
 	unsigned long flags;
+	enum zone_contig_state new_contiguous_state = ZONE_CONTIG_NO;
 	int ret;
 
 	/*
@@ -1166,6 +1234,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages,
 	    !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
 		return -EINVAL;
 
+	new_contiguous_state = zone_contig_state_after_growing(zone, pfn, nr_pages);
 
 	/* associate pfn range with the zone */
 	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE,
@@ -1204,7 +1273,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages,
 	}
 
 	online_pages_range(pfn, nr_pages);
-	set_zone_contiguous(zone);
+	set_zone_contiguous(zone, new_contiguous_state);
 	adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
 
 	if (node_arg.nid >= 0)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index fc2a6f1e518f..0c41f1004847 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2263,11 +2263,19 @@ void __init init_cma_pageblock(struct page *page)
 }
 #endif
 
-void set_zone_contiguous(struct zone *zone)
+void set_zone_contiguous(struct zone *zone, enum zone_contig_state state)
 {
 	unsigned long block_start_pfn = zone->zone_start_pfn;
 	unsigned long block_end_pfn;
 
+	if (state == ZONE_CONTIG_YES) {
+		zone->contiguous = true;
+		return;
+	}
+
+	if (state == ZONE_CONTIG_NO)
+		return;
+
 	block_end_pfn = pageblock_end_pfn(block_start_pfn);
 	for (; block_start_pfn < zone_end_pfn(zone);
 			block_start_pfn = block_end_pfn,
@@ -2348,7 +2356,7 @@ void __init page_alloc_init_late(void)
 		shuffle_free_memory(NODE_DATA(nid));
 
 	for_each_populated_zone(zone)
-		set_zone_contiguous(zone);
+		set_zone_contiguous(zone, ZONE_CONTIG_MAYBE);
 
 	/* Initialize page ext after all struct pages are initialized. */
 	if (deferred_struct_pages)
-- 
2.47.1