Since migratetype is no longer overwritten during pageblock isolation,
moving a pageblock out of MIGRATE_ISOLATE no longer needs a new
migratetype.

Add pageblock_isolate_and_move_free_pages() and
pageblock_unisolate_and_move_free_pages() to be explicit about the page
isolation operations. Both share the common code in
__move_freepages_block_isolate(), which is renamed from
move_freepages_block_isolate().

Add toggle_pageblock_isolate() to flip pageblock isolation bit in
__move_freepages_block_isolate().

Make set_pageblock_migratetype() only accept non MIGRATE_ISOLATE types,
so that one should use set_pageblock_isolate() to isolate pageblocks.
As a result, move pageblock migratetype code out of
__move_freepages_block().

Signed-off-by: Zi Yan <ziy@nvidia.com>
---
include/linux/page-isolation.h | 5 +-
mm/page_alloc.c | 97 ++++++++++++++++++++++++++++------
mm/page_isolation.c | 21 ++++----
3 files changed, 92 insertions(+), 31 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 14c6a5f691c2..7241a6719618 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -44,10 +44,9 @@ static inline void set_pageblock_isolate(struct page *page)
void __meminit init_pageblock_migratetype(struct page *page,
enum migratetype migratetype,
bool isolate);
-void set_pageblock_migratetype(struct page *page, enum migratetype migratetype);
-bool move_freepages_block_isolate(struct zone *zone, struct page *page,
- int migratetype);
+bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
+bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int migratetype, int flags);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8fcbd7fa13c2..44a08b1a9de4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -524,13 +524,36 @@ void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
__clear_bit(bitidx + pb_bit, bitmap_word);
}
+#ifdef CONFIG_MEMORY_ISOLATION
+/**
+ * toggle_pfnblock_bit - Toggle a standalone bit of a pageblock
+ * @page: The page within the block of interest
+ * @pfn: The target page frame number
+ * @pb_bit: pageblock bit to toggle
+ */
+static void toggle_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit)
+{
+ unsigned long *bitmap_word;
+ unsigned long bitidx;
+
+ if (WARN_ON_ONCE(pb_bit <= PB_migrate_end ||
+ pb_bit >= __NR_PAGEBLOCK_BITS))
+ return;
+
+ get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
+
+ __change_bit(bitidx + pb_bit, bitmap_word);
+}
+#endif
+
/**
* set_pageblock_migratetype - Set the migratetype of a pageblock
* @page: The page within the block of interest
* @migratetype: migratetype to set
*/
-__always_inline void set_pageblock_migratetype(struct page *page,
- enum migratetype migratetype)
+static void set_pageblock_migratetype(struct page *page,
+ enum migratetype migratetype)
{
unsigned long mask = MIGRATETYPE_MASK;
@@ -540,11 +563,15 @@ __always_inline void set_pageblock_migratetype(struct page *page,
#ifdef CONFIG_MEMORY_ISOLATION
if (migratetype == MIGRATE_ISOLATE) {
- set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate);
+ VM_WARN_ONCE(1,
+ "Use set_pageblock_isolate() for pageblock isolation");
return;
}
/* change mask to clear PB_migrate_isolate if it is set */
mask = MIGRATETYPE_AND_ISO_MASK;
+ VM_WARN_ONCE(get_pfnblock_bit(page, page_to_pfn(page),
+ PB_migrate_isolate),
+ "Use clear_pageblock_isolate() to unisolate pageblock");
#endif
__set_pfnblock_flags_mask(page, page_to_pfn(page),
(unsigned long)migratetype, mask);
@@ -1931,8 +1958,8 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
#endif
/*
- * Change the type of a block and move all its free pages to that
- * type's freelist.
+ * Move all free pages of a block to new type's freelist. Caller needs to
+ * change the block type.
*/
static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
int old_mt, int new_mt)
@@ -1964,8 +1991,6 @@ static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
pages_moved += 1 << order;
}
- set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
-
return pages_moved;
}
@@ -2023,11 +2048,16 @@ static int move_freepages_block(struct zone *zone, struct page *page,
int old_mt, int new_mt)
{
unsigned long start_pfn;
+ int res;
if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
return -1;
- return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
+ res = __move_freepages_block(zone, start_pfn, old_mt, new_mt);
+ set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
+
+ return res;
+
}
#ifdef CONFIG_MEMORY_ISOLATION
@@ -2055,11 +2085,16 @@ static unsigned long find_large_buddy(unsigned long start_pfn)
return start_pfn;
}
+static inline void toggle_pageblock_isolate(struct page *page)
+{
+ toggle_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate);
+}
+
/**
- * move_freepages_block_isolate - move free pages in block for page isolation
+ * __move_freepages_block_isolate - move free pages in block for page isolation
* @zone: the zone
* @page: the pageblock page
- * @migratetype: migratetype to set on the pageblock
+ * @isolate: to isolate the given pageblock or unisolate it
*
* This is similar to move_freepages_block(), but handles the special
* case encountered in page isolation, where the block of interest
@@ -2074,10 +2109,15 @@ static unsigned long find_large_buddy(unsigned long start_pfn)
*
* Returns %true if pages could be moved, %false otherwise.
*/
-bool move_freepages_block_isolate(struct zone *zone, struct page *page,
- int migratetype)
+static bool __move_freepages_block_isolate(struct zone *zone,
+ struct page *page, bool isolate)
{
unsigned long start_pfn, pfn;
+ int from_mt;
+ int to_mt;
+
+ if (isolate == get_pageblock_isolate(page))
+ return false;
if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
return false;
@@ -2094,7 +2134,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
del_page_from_free_list(buddy, zone, order,
get_pfnblock_migratetype(buddy, pfn));
- set_pageblock_migratetype(page, migratetype);
+ toggle_pageblock_isolate(page);
split_large_buddy(zone, buddy, pfn, order, FPI_NONE);
return true;
}
@@ -2105,16 +2145,38 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
del_page_from_free_list(page, zone, order,
get_pfnblock_migratetype(page, pfn));
- set_pageblock_migratetype(page, migratetype);
+ toggle_pageblock_isolate(page);
split_large_buddy(zone, page, pfn, order, FPI_NONE);
return true;
}
move:
- __move_freepages_block(zone, start_pfn,
- get_pfnblock_migratetype(page, start_pfn),
- migratetype);
+ /* Use MIGRATETYPE_MASK to get non-isolate migratetype */
+ if (isolate) {
+ from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
+ MIGRATETYPE_MASK);
+ to_mt = MIGRATE_ISOLATE;
+ } else {
+ from_mt = MIGRATE_ISOLATE;
+ to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page),
+ MIGRATETYPE_MASK);
+ }
+
+ __move_freepages_block(zone, start_pfn, from_mt, to_mt);
+ toggle_pageblock_isolate(pfn_to_page(start_pfn));
+
return true;
}
+
+bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
+{
+ return __move_freepages_block_isolate(zone, page, true);
+}
+
+bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page)
+{
+ return __move_freepages_block_isolate(zone, page, false);
+}
+
#endif /* CONFIG_MEMORY_ISOLATION */
static void change_pageblock_range(struct page *pageblock_page,
@@ -2306,6 +2368,7 @@ try_to_claim_block(struct zone *zone, struct page *page,
if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
page_group_by_mobility_disabled) {
__move_freepages_block(zone, start_pfn, block_type, start_type);
+ set_pageblock_migratetype(pfn_to_page(start_pfn), start_type);
return __rmqueue_smallest(zone, order, start_type);
}
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index b2fc5266e3d2..08f627a5032f 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -188,7 +188,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
migratetype, isol_flags);
if (!unmovable) {
- if (!move_freepages_block_isolate(zone, page, MIGRATE_ISOLATE)) {
+ if (!pageblock_isolate_and_move_free_pages(zone, page)) {
spin_unlock_irqrestore(&zone->lock, flags);
return -EBUSY;
}
@@ -209,7 +209,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
return -EBUSY;
}
-static void unset_migratetype_isolate(struct page *page, int migratetype)
+static void unset_migratetype_isolate(struct page *page)
{
struct zone *zone;
unsigned long flags;
@@ -262,10 +262,10 @@ static void unset_migratetype_isolate(struct page *page, int migratetype)
* Isolating this block already succeeded, so this
* should not fail on zone boundaries.
*/
- WARN_ON_ONCE(!move_freepages_block_isolate(zone, page, migratetype));
+ WARN_ON_ONCE(!pageblock_unisolate_and_move_free_pages(zone, page));
} else {
- set_pageblock_migratetype(page, migratetype);
- __putback_isolated_page(page, order, migratetype);
+ clear_pageblock_isolate(page);
+ __putback_isolated_page(page, order, get_pageblock_migratetype(page));
}
zone->nr_isolate_pageblock--;
out:
@@ -383,7 +383,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
if (PageBuddy(page)) {
int order = buddy_order(page);
- /* move_freepages_block_isolate() handled this */
+ /* pageblock_isolate_and_move_free_pages() handled this */
VM_WARN_ON_ONCE(pfn + (1 << order) > boundary_pfn);
pfn += 1UL << order;
@@ -433,7 +433,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
failed:
/* restore the original migratetype */
if (!skip_isolation)
- unset_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype);
+ unset_migratetype_isolate(pfn_to_page(isolate_pageblock));
return -EBUSY;
}
@@ -504,7 +504,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
ret = isolate_single_pageblock(isolate_end, flags, true,
skip_isolation, migratetype);
if (ret) {
- unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
+ unset_migratetype_isolate(pfn_to_page(isolate_start));
return ret;
}
@@ -517,8 +517,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
start_pfn, end_pfn)) {
undo_isolate_page_range(isolate_start, pfn, migratetype);
unset_migratetype_isolate(
- pfn_to_page(isolate_end - pageblock_nr_pages),
- migratetype);
+ pfn_to_page(isolate_end - pageblock_nr_pages));
return -EBUSY;
}
}
@@ -548,7 +547,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
page = __first_valid_page(pfn, pageblock_nr_pages);
if (!page || !is_migrate_isolate_page(page))
continue;
- unset_migratetype_isolate(page, migratetype);
+ unset_migratetype_isolate(page);
}
}
/*
--
2.47.2

On 5/23/25 21:12, Zi Yan wrote:
> Since migratetype is no longer overwritten during pageblock isolation,
> moving a pageblock out of MIGRATE_ISOLATE no longer needs a new
> migratetype.
>
> Add pageblock_isolate_and_move_free_pages() and
> pageblock_unisolate_and_move_free_pages() to be explicit about the page
> isolation operations. Both share the common code in
> __move_freepages_block_isolate(), which is renamed from
> move_freepages_block_isolate().
>
> Add toggle_pageblock_isolate() to flip pageblock isolation bit in
> __move_freepages_block_isolate().
>
> Make set_pageblock_migratetype() only accept non MIGRATE_ISOLATE types,
> so that one should use set_pageblock_isolate() to isolate pageblocks.
> As a result, move pageblock migratetype code out of
> __move_freepages_block().
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> include/linux/page-isolation.h | 5 +-
> mm/page_alloc.c | 97 ++++++++++++++++++++++++++++------
> mm/page_isolation.c | 21 ++++----
> 3 files changed, 92 insertions(+), 31 deletions(-)
>
> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
> index 14c6a5f691c2..7241a6719618 100644
> --- a/include/linux/page-isolation.h
> +++ b/include/linux/page-isolation.h
> @@ -44,10 +44,9 @@ static inline void set_pageblock_isolate(struct page *page)
> void __meminit init_pageblock_migratetype(struct page *page,
> enum migratetype migratetype,
> bool isolate);
> -void set_pageblock_migratetype(struct page *page, enum migratetype migratetype);
>
> -bool move_freepages_block_isolate(struct zone *zone, struct page *page,
> - int migratetype);
> +bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
> +bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
>
> int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
> int migratetype, int flags);
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 8fcbd7fa13c2..44a08b1a9de4 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -524,13 +524,36 @@ void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
> __clear_bit(bitidx + pb_bit, bitmap_word);
> }
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> +/**
> + * toggle_pfnblock_bit - Toggle a standalone bit of a pageblock
> + * @page: The page within the block of interest
> + * @pfn: The target page frame number
> + * @pb_bit: pageblock bit to toggle
> + */
> +static void toggle_pfnblock_bit(const struct page *page, unsigned long pfn,
> + enum pageblock_bits pb_bit)
> +{
> + unsigned long *bitmap_word;
> + unsigned long bitidx;
> +
> + if (WARN_ON_ONCE(pb_bit <= PB_migrate_end ||
> + pb_bit >= __NR_PAGEBLOCK_BITS))
> + return;
> +
> + get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
> +
> + __change_bit(bitidx + pb_bit, bitmap_word);

Again the non-atomic variant, but actually below I suggest we drop this.

> +}
> +#endif
> +
> /**
> * set_pageblock_migratetype - Set the migratetype of a pageblock
> * @page: The page within the block of interest
> * @migratetype: migratetype to set
> */
> -__always_inline void set_pageblock_migratetype(struct page *page,
> - enum migratetype migratetype)
> +static void set_pageblock_migratetype(struct page *page,
> + enum migratetype migratetype)
> {
> unsigned long mask = MIGRATETYPE_MASK;
>
> @@ -540,11 +563,15 @@ __always_inline void set_pageblock_migratetype(struct page *page,
>
> #ifdef CONFIG_MEMORY_ISOLATION
> if (migratetype == MIGRATE_ISOLATE) {
> - set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate);
> + VM_WARN_ONCE(1,
> + "Use set_pageblock_isolate() for pageblock isolation");
> return;
> }
> /* change mask to clear PB_migrate_isolate if it is set */
> mask = MIGRATETYPE_AND_ISO_MASK;
> + VM_WARN_ONCE(get_pfnblock_bit(page, page_to_pfn(page),
> + PB_migrate_isolate),
> + "Use clear_pageblock_isolate() to unisolate pageblock");
> #endif

We might be too paranoid with the warnings, given these are all local
functions to this file, so the risk of misuse should be low. Maybe we
could remove them later...

> __set_pfnblock_flags_mask(page, page_to_pfn(page),
> (unsigned long)migratetype, mask);
> @@ -1931,8 +1958,8 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
> #endif
>
> /*
> - * Change the type of a block and move all its free pages to that
> - * type's freelist.
> + * Move all free pages of a block to new type's freelist. Caller needs to
> + * change the block type.
> */
> static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
> int old_mt, int new_mt)
> @@ -1964,8 +1991,6 @@ static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
> pages_moved += 1 << order;
> }
>
> - set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
> -
> return pages_moved;
> }
>
> @@ -2023,11 +2048,16 @@ static int move_freepages_block(struct zone *zone, struct page *page,
> int old_mt, int new_mt)
> {
> unsigned long start_pfn;
> + int res;
>
> if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
> return -1;
>
> - return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
> + res = __move_freepages_block(zone, start_pfn, old_mt, new_mt);
> + set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
> +
> + return res;
> +
> }
>
> #ifdef CONFIG_MEMORY_ISOLATION
> @@ -2055,11 +2085,16 @@ static unsigned long find_large_buddy(unsigned long start_pfn)
> return start_pfn;
> }
>
> +static inline void toggle_pageblock_isolate(struct page *page)
> +{
> + toggle_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate);
> +}

I'm wary about the toggle action, as we should always know what action we
want to do anyway. So we could just add a "bool isolate" parameter and call
set or clear explicitly? Allows for some hypothetical DEBUG_VM checks too
(pageblock is not already in the state we want it to be).
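
Something along these lines, purely as an illustration (the helper name
below is made up; it reuses the set_pfnblock_bit()/clear_pfnblock_bit()
helpers from earlier in the series):

static inline void set_pageblock_isolate_state(struct page *page,
					       bool isolate)
{
	/*
	 * Hypothetical DEBUG_VM check: catch callers asking for a state
	 * the pageblock is already in.
	 */
	VM_WARN_ON_ONCE(get_pageblock_isolate(page) == isolate);

	if (isolate)
		set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate);
	else
		clear_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate);
}

__move_freepages_block_isolate() could then pass its isolate argument
straight through instead of toggling.
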
On 27 May 2025, at 6:50, Vlastimil Babka wrote:
> On 5/23/25 21:12, Zi Yan wrote:
>> Since migratetype is no longer overwritten during pageblock isolation,
>> moving a pageblock out of MIGRATE_ISOLATE no longer needs a new
>> migratetype.
>>
>> Add pageblock_isolate_and_move_free_pages() and
>> pageblock_unisolate_and_move_free_pages() to be explicit about the page
>> isolation operations. Both share the common code in
>> __move_freepages_block_isolate(), which is renamed from
>> move_freepages_block_isolate().
>>
>> Add toggle_pageblock_isolate() to flip pageblock isolation bit in
>> __move_freepages_block_isolate().
>>
>> Make set_pageblock_migratetype() only accept non MIGRATE_ISOLATE types,
>> so that one should use set_pageblock_isolate() to isolate pageblocks.
>> As a result, move pageblock migratetype code out of
>> __move_freepages_block().
>>
>> Signed-off-by: Zi Yan <ziy@nvidia.com>
>> ---
>> include/linux/page-isolation.h | 5 +-
>> mm/page_alloc.c | 97 ++++++++++++++++++++++++++++------
>> mm/page_isolation.c | 21 ++++----
>> 3 files changed, 92 insertions(+), 31 deletions(-)
>>
>> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
>> index 14c6a5f691c2..7241a6719618 100644
>> --- a/include/linux/page-isolation.h
>> +++ b/include/linux/page-isolation.h
>> @@ -44,10 +44,9 @@ static inline void set_pageblock_isolate(struct page *page)
>> void __meminit init_pageblock_migratetype(struct page *page,
>> enum migratetype migratetype,
>> bool isolate);
>> -void set_pageblock_migratetype(struct page *page, enum migratetype migratetype);
>>
>> -bool move_freepages_block_isolate(struct zone *zone, struct page *page,
>> - int migratetype);
>> +bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
>> +bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
>>
>> int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
>> int migratetype, int flags);
>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>> index 8fcbd7fa13c2..44a08b1a9de4 100644
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -524,13 +524,36 @@ void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
>> __clear_bit(bitidx + pb_bit, bitmap_word);
>> }
>>
>> +#ifdef CONFIG_MEMORY_ISOLATION
>> +/**
>> + * toggle_pfnblock_bit - Toggle a standalone bit of a pageblock
>> + * @page: The page within the block of interest
>> + * @pfn: The target page frame number
>> + * @pb_bit: pageblock bit to toggle
>> + */
>> +static void toggle_pfnblock_bit(const struct page *page, unsigned long pfn,
>> + enum pageblock_bits pb_bit)
>> +{
>> + unsigned long *bitmap_word;
>> + unsigned long bitidx;
>> +
>> + if (WARN_ON_ONCE(pb_bit <= PB_migrate_end ||
>> + pb_bit >= __NR_PAGEBLOCK_BITS))
>> + return;
>> +
>> + get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
>> +
>> + __change_bit(bitidx + pb_bit, bitmap_word);
>
> Again the non-atomic variant, but actually below I suggest we drop this.
Yep.
>
>> +}
>> +#endif
>> +
>> /**
>> * set_pageblock_migratetype - Set the migratetype of a pageblock
>> * @page: The page within the block of interest
>> * @migratetype: migratetype to set
>> */
>> -__always_inline void set_pageblock_migratetype(struct page *page,
>> - enum migratetype migratetype)
>> +static void set_pageblock_migratetype(struct page *page,
>> + enum migratetype migratetype)
>> {
>> unsigned long mask = MIGRATETYPE_MASK;
>>
>> @@ -540,11 +563,15 @@ __always_inline void set_pageblock_migratetype(struct page *page,
>>
>> #ifdef CONFIG_MEMORY_ISOLATION
>> if (migratetype == MIGRATE_ISOLATE) {
>> - set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate);
>> + VM_WARN_ONCE(1,
>> + "Use set_pageblock_isolate() for pageblock isolation");
>> return;
>> }
>> /* change mask to clear PB_migrate_isolate if it is set */
>> mask = MIGRATETYPE_AND_ISO_MASK;
>> + VM_WARN_ONCE(get_pfnblock_bit(page, page_to_pfn(page),
>> + PB_migrate_isolate),
>> + "Use clear_pageblock_isolate() to unisolate pageblock");
>> #endif
>
> We might be too paranoid with the warnings given these are all local
> functions to this file so risk of misuse should be low. Maybe we could
> remove later...

Yeah. In the next step, when struct pageblock_info is used to change a
pageblock's migratetype and isolation state, these warnings should go
away, since the caller will need to be explicit about isolation
operations.
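
Just to illustrate the direction (nothing like this exists in the series
yet; the name and fields below are made up), the rough idea is:

struct pageblock_info {
	enum migratetype migratetype;	/* always a non-MIGRATE_ISOLATE type */
	bool isolated;			/* the PB_migrate_isolate state */
};

so that changing the migratetype and (un)isolating a block become two
separate, explicit operations.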
>
>> __set_pfnblock_flags_mask(page, page_to_pfn(page),
>> (unsigned long)migratetype, mask);
>> @@ -1931,8 +1958,8 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
>> #endif
>>
>> /*
>> - * Change the type of a block and move all its free pages to that
>> - * type's freelist.
>> + * Move all free pages of a block to new type's freelist. Caller needs to
>> + * change the block type.
>> */
>> static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
>> int old_mt, int new_mt)
>> @@ -1964,8 +1991,6 @@ static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
>> pages_moved += 1 << order;
>> }
>>
>> - set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
>> -
>> return pages_moved;
>> }
>>
>> @@ -2023,11 +2048,16 @@ static int move_freepages_block(struct zone *zone, struct page *page,
>> int old_mt, int new_mt)
>> {
>> unsigned long start_pfn;
>> + int res;
>>
>> if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
>> return -1;
>>
>> - return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
>> + res = __move_freepages_block(zone, start_pfn, old_mt, new_mt);
>> + set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
>> +
>> + return res;
>> +
>> }
>>
>> #ifdef CONFIG_MEMORY_ISOLATION
>> @@ -2055,11 +2085,16 @@ static unsigned long find_large_buddy(unsigned long start_pfn)
>> return start_pfn;
>> }
>>
>> +static inline void toggle_pageblock_isolate(struct page *page)
>> +{
>> + toggle_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate);
>> +}
>
> I'm wary about the togle action, as we should always know what action we
> want to do anyway. So we could just add a "bool isolate" parameter and call
> set or clear explicitly? Allows for some hypothetical DEBUG_VM checks too
> (pageblock is not already in the state we want it to be).

This function was added to follow Johannes’ suggestion of getting rid of
the if statement. I can change it back, make it explicit, and add a
VM_WARN_ONCE.

Hi Johannes,

If you want the non-if-statement version to stay, please let me know.

Best Regards,
Yan, Zi