We always do the zone_watermark_ok check and the compaction_suitable check
together to test whether compaction for the target order should be run.
Factor this code out into a helper to remove the repeated code.
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
mm/compaction.c | 63 ++++++++++++++++++++++++++++---------------------
1 file changed, 36 insertions(+), 27 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 00b7bba6c72e..6f2b87b026b8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2374,6 +2374,30 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
return false;
}
+/*
+ * Should we do compaction for target allocation order.
+ * Return COMPACT_SUCCESS if allocation for target order can already be
+ * satisfied
+ * Return COMPACT_SKIPPED if compaction for target order is likely to fail
+ * Return COMPACT_CONTINUE if compaction for target order should be run
+ */
+static inline enum compact_result
+compaction_suit_allocation_order(struct zone *zone, unsigned int order,
+ int highest_zoneidx, unsigned int alloc_flags)
+{
+ unsigned long watermark;
+
+ watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
+ if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
+ alloc_flags))
+ return COMPACT_SUCCESS;
+
+ if (!compaction_suitable(zone, order, highest_zoneidx))
+ return COMPACT_SKIPPED;
+
+ return COMPACT_CONTINUE;
+}
+
static enum compact_result
compact_zone(struct compact_control *cc, struct capture_control *capc)
{
@@ -2399,19 +2423,11 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
cc->migratetype = gfp_migratetype(cc->gfp_mask);
if (!is_via_compact_memory(cc->order)) {
- unsigned long watermark;
-
- /* Allocation can already succeed, nothing to do */
- watermark = wmark_pages(cc->zone,
- cc->alloc_flags & ALLOC_WMARK_MASK);
- if (zone_watermark_ok(cc->zone, cc->order, watermark,
- cc->highest_zoneidx, cc->alloc_flags))
- return COMPACT_SUCCESS;
-
- /* Compaction is likely to fail */
- if (!compaction_suitable(cc->zone, cc->order,
- cc->highest_zoneidx))
- return COMPACT_SKIPPED;
+ ret = compaction_suit_allocation_order(cc->zone, cc->order,
+ cc->highest_zoneidx,
+ cc->alloc_flags);
+ if (ret != COMPACT_CONTINUE)
+ return ret;
}
/*
@@ -2917,14 +2933,10 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
if (!populated_zone(zone))
continue;
- /* Allocation can already succeed, check other zones */
- if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
- min_wmark_pages(zone),
- highest_zoneidx, 0))
- continue;
-
- if (compaction_suitable(zone, pgdat->kcompactd_max_order,
- highest_zoneidx))
+ if (compaction_suit_allocation_order(zone,
+ pgdat->kcompactd_max_order,
+ highest_zoneidx, ALLOC_WMARK_MIN) ==
+ COMPACT_CONTINUE)
return true;
}
@@ -2961,12 +2973,9 @@ static void kcompactd_do_work(pg_data_t *pgdat)
if (compaction_deferred(zone, cc.order))
continue;
- /* Allocation can already succeed, nothing to do */
- if (zone_watermark_ok(zone, cc.order,
- min_wmark_pages(zone), zoneid, 0))
- continue;
-
- if (!compaction_suitable(zone, cc.order, zoneid))
+ if (compaction_suit_allocation_order(zone,
+ cc.order, zoneid, ALLOC_WMARK_MIN) !=
+ COMPACT_CONTINUE)
continue;
if (kthread_should_stop())
--
2.30.0
On Sat, Aug 26, 2023 at 11:36:17PM +0800, Kemeng Shi wrote:
> +		if (compaction_suit_allocation_order(zone,
> +				pgdat->kcompactd_max_order,
> +				highest_zoneidx, ALLOC_WMARK_MIN) ==
> +				COMPACT_CONTINUE)

The indentation is confusing here. It looks like COMPACT_CONTINUE is
an argument of compaction_suit_allocation_order(). How about:

	ret = compaction_suit_allocation_order(zone,
			pgdat->kcompactd_max_order,
			highest_zoneidx, ALLOC_WMARK_MIN);
	if (ret == COMPACT_CONTINUE)

(assuming there's a handy variable called ret)

You could also distinguish it by indenting COMPACT_CONTINUE by an
extra tab, but I think it's worth an extra variable just because this is
such a long line.

> +		if (compaction_suit_allocation_order(zone,
> +				cc.order, zoneid, ALLOC_WMARK_MIN) !=
> +				COMPACT_CONTINUE)
>  			continue;

Same here.
on 8/29/2023 11:54 AM, Matthew Wilcox wrote:
> On Sat, Aug 26, 2023 at 11:36:17PM +0800, Kemeng Shi wrote:
>> +		if (compaction_suit_allocation_order(zone,
>> +				pgdat->kcompactd_max_order,
>> +				highest_zoneidx, ALLOC_WMARK_MIN) ==
>> +				COMPACT_CONTINUE)
>
> The indentation is confusing here. It looks like COMPACT_CONTINUE is
> an argument of compaction_suit_allocation_order(). How about:
>
> 	ret = compaction_suit_allocation_order(zone,
> 			pgdat->kcompactd_max_order,
> 			highest_zoneidx, ALLOC_WMARK_MIN);
> 	if (ret == COMPACT_CONTINUE)
>
Thanks for the information, I will fix it this way in the next version.
> (assuming there's a handy variable called ret)
>
> You could also distinguish it by indenting COMPACT_CONTINUE by an
> extra tab, but I think it's worth an extra variable just because this is
> such a long line.
>
>> +		if (compaction_suit_allocation_order(zone,
>> +				cc.order, zoneid, ALLOC_WMARK_MIN) !=
>> +				COMPACT_CONTINUE)
>>  			continue;
>
> Same here.
>
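For reference, here is a minimal sketch of how the kcompactd_node_suitable()
hunk could look with a local "ret" variable, as suggested above. The enclosing
loop is paraphrased from the existing function for context and is not taken
from a posted revision of this patch, so treat it as illustrative only:

static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;
	enum compact_result ret;

	for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
		zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		/*
		 * Combined watermark/suitability check: compaction is worth
		 * running for this node if any zone says "continue".
		 */
		ret = compaction_suit_allocation_order(zone,
				pgdat->kcompactd_max_order,
				highest_zoneidx, ALLOC_WMARK_MIN);
		if (ret == COMPACT_CONTINUE)
			return true;
	}

	return false;
}

The kcompactd_do_work() call site would be reworked the same way, storing the
result in ret and doing "if (ret != COMPACT_CONTINUE) continue;".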
On 8/26/2023 11:36 PM, Kemeng Shi wrote:
> We always do zone_watermark_ok check and compaction_suitable check
> together to test if compaction for target order should be runned.
> Factor these code out to remove repeat code.
>
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
> ---
> mm/compaction.c | 63 ++++++++++++++++++++++++++++---------------------
> 1 file changed, 36 insertions(+), 27 deletions(-)
>
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 00b7bba6c72e..6f2b87b026b8 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -2374,6 +2374,30 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
> return false;
> }
>
> +/*
> + * Should we do compaction for target allocation order.
> + * Return COMPACT_SUCCESS if allocation for target order can be already
> + * satisfied
> + * Return COMPACT_SKIPPED if compaction for target order is likely to fail
> + * Return COMPACT_CONTINUE if compaction for target order should be runned
> + */
> +static inline enum compact_result
I think you should drop the 'inline' to let the compiler make the decision.
> +compaction_suit_allocation_order(struct zone *zone, unsigned int order,
> + int highest_zoneidx, unsigned int alloc_flags)
The changes look good to me. So please feel free to add:
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> +{
> + unsigned long watermark;
> +
> + watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
> + if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
> + alloc_flags))
> + return COMPACT_SUCCESS;
> +
> + if (!compaction_suitable(zone, order, highest_zoneidx))
> + return COMPACT_SKIPPED;
> +
> + return COMPACT_CONTINUE;
> +}
> +
> static enum compact_result
> compact_zone(struct compact_control *cc, struct capture_control *capc)
> {
> @@ -2399,19 +2423,11 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
> cc->migratetype = gfp_migratetype(cc->gfp_mask);
>
> if (!is_via_compact_memory(cc->order)) {
> - unsigned long watermark;
> -
> - /* Allocation can already succeed, nothing to do */
> - watermark = wmark_pages(cc->zone,
> - cc->alloc_flags & ALLOC_WMARK_MASK);
> - if (zone_watermark_ok(cc->zone, cc->order, watermark,
> - cc->highest_zoneidx, cc->alloc_flags))
> - return COMPACT_SUCCESS;
> -
> - /* Compaction is likely to fail */
> - if (!compaction_suitable(cc->zone, cc->order,
> - cc->highest_zoneidx))
> - return COMPACT_SKIPPED;
> + ret = compaction_suit_allocation_order(cc->zone, cc->order,
> + cc->highest_zoneidx,
> + cc->alloc_flags);
> + if (ret != COMPACT_CONTINUE)
> + return ret;
> }
>
> /*
> @@ -2917,14 +2933,10 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
> if (!populated_zone(zone))
> continue;
>
> - /* Allocation can already succeed, check other zones */
> - if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
> - min_wmark_pages(zone),
> - highest_zoneidx, 0))
> - continue;
> -
> - if (compaction_suitable(zone, pgdat->kcompactd_max_order,
> - highest_zoneidx))
> + if (compaction_suit_allocation_order(zone,
> + pgdat->kcompactd_max_order,
> + highest_zoneidx, ALLOC_WMARK_MIN) ==
> + COMPACT_CONTINUE)
> return true;
> }
>
> @@ -2961,12 +2973,9 @@ static void kcompactd_do_work(pg_data_t *pgdat)
> if (compaction_deferred(zone, cc.order))
> continue;
>
> - /* Allocation can already succeed, nothing to do */
> - if (zone_watermark_ok(zone, cc.order,
> - min_wmark_pages(zone), zoneid, 0))
> - continue;
> -
> - if (!compaction_suitable(zone, cc.order, zoneid))
> + if (compaction_suit_allocation_order(zone,
> + cc.order, zoneid, ALLOC_WMARK_MIN) !=
> + COMPACT_CONTINUE)
> continue;
>
> if (kthread_should_stop())
on 8/29/2023 11:48 AM, Baolin Wang wrote:
>
>
> On 8/26/2023 11:36 PM, Kemeng Shi wrote:
>> We always do zone_watermark_ok check and compaction_suitable check
>> together to test if compaction for target order should be runned.
>> Factor these code out to remove repeat code.
>>
>> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
>> ---
>> mm/compaction.c | 63 ++++++++++++++++++++++++++++---------------------
>> 1 file changed, 36 insertions(+), 27 deletions(-)
>>
>> diff --git a/mm/compaction.c b/mm/compaction.c
>> index 00b7bba6c72e..6f2b87b026b8 100644
>> --- a/mm/compaction.c
>> +++ b/mm/compaction.c
>> @@ -2374,6 +2374,30 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
>> return false;
>> }
>> +/*
>> + * Should we do compaction for target allocation order.
>> + * Return COMPACT_SUCCESS if allocation for target order can be already
>> + * satisfied
>> + * Return COMPACT_SKIPPED if compaction for target order is likely to fail
>> + * Return COMPACT_CONTINUE if compaction for target order should be runned
>> + */
>> +static inline enum compact_result
>
> I think you should drop the 'inline' to let the compiler make the decision.
>
Sure, I will drop this in the next version. Thanks for the feedback.
>> +compaction_suit_allocation_order(struct zone *zone, unsigned int order,
>> + int highest_zoneidx, unsigned int alloc_flags)
>
> The changes look good to me. So please feel free to add:
> Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>
>> +{
>> + unsigned long watermark;
>> +
>> + watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
>> + if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
>> + alloc_flags))
>> + return COMPACT_SUCCESS;
>> +
>> + if (!compaction_suitable(zone, order, highest_zoneidx))
>> + return COMPACT_SKIPPED;
>> +
>> + return COMPACT_CONTINUE;
>> +}
>> +
>> static enum compact_result
>> compact_zone(struct compact_control *cc, struct capture_control *capc)
>> {
>> @@ -2399,19 +2423,11 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
>> cc->migratetype = gfp_migratetype(cc->gfp_mask);
>> if (!is_via_compact_memory(cc->order)) {
>> - unsigned long watermark;
>> -
>> - /* Allocation can already succeed, nothing to do */
>> - watermark = wmark_pages(cc->zone,
>> - cc->alloc_flags & ALLOC_WMARK_MASK);
>> - if (zone_watermark_ok(cc->zone, cc->order, watermark,
>> - cc->highest_zoneidx, cc->alloc_flags))
>> - return COMPACT_SUCCESS;
>> -
>> - /* Compaction is likely to fail */
>> - if (!compaction_suitable(cc->zone, cc->order,
>> - cc->highest_zoneidx))
>> - return COMPACT_SKIPPED;
>> + ret = compaction_suit_allocation_order(cc->zone, cc->order,
>> + cc->highest_zoneidx,
>> + cc->alloc_flags);
>> + if (ret != COMPACT_CONTINUE)
>> + return ret;
>> }
>> /*
>> @@ -2917,14 +2933,10 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
>> if (!populated_zone(zone))
>> continue;
>> - /* Allocation can already succeed, check other zones */
>> - if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
>> - min_wmark_pages(zone),
>> - highest_zoneidx, 0))
>> - continue;
>> -
>> - if (compaction_suitable(zone, pgdat->kcompactd_max_order,
>> - highest_zoneidx))
>> + if (compaction_suit_allocation_order(zone,
>> + pgdat->kcompactd_max_order,
>> + highest_zoneidx, ALLOC_WMARK_MIN) ==
>> + COMPACT_CONTINUE)
>> return true;
>> }
>> @@ -2961,12 +2973,9 @@ static void kcompactd_do_work(pg_data_t *pgdat)
>> if (compaction_deferred(zone, cc.order))
>> continue;
>> - /* Allocation can already succeed, nothing to do */
>> - if (zone_watermark_ok(zone, cc.order,
>> - min_wmark_pages(zone), zoneid, 0))
>> - continue;
>> -
>> - if (!compaction_suitable(zone, cc.order, zoneid))
>> + if (compaction_suit_allocation_order(zone,
>> + cc.order, zoneid, ALLOC_WMARK_MIN) !=
>> + COMPACT_CONTINUE)
>> continue;
>> if (kthread_should_stop())
>