Expand is_refcount_suitable() to support reference checks for file folios,
in preparation for supporting shmem mTHP collapse.
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
mm/khugepaged.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index cdd1d8655a76..f11b4f172e61 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -549,8 +549,14 @@ static bool is_refcount_suitable(struct folio *folio)
int expected_refcount;
expected_refcount = folio_mapcount(folio);
- if (folio_test_swapcache(folio))
+ if (folio_test_anon(folio)) {
+ expected_refcount += folio_test_swapcache(folio) ?
+ folio_nr_pages(folio) : 0;
+ } else {
expected_refcount += folio_nr_pages(folio);
+ if (folio_test_private(folio))
+ expected_refcount++;
+ }
return folio_ref_count(folio) == expected_refcount;
}
@@ -2285,8 +2291,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
break;
}
- if (folio_ref_count(folio) !=
- 1 + folio_mapcount(folio) + folio_test_private(folio)) {
+ if (!is_refcount_suitable(folio)) {
result = SCAN_PAGE_COUNT;
break;
}
--
2.39.3
On 19.08.24 10:14, Baolin Wang wrote:
> Expand the is_refcount_suitable() to support reference checks for file folios,
> as preparation for supporting shmem mTHP collapse.
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
> mm/khugepaged.c | 11 ++++++++---
> 1 file changed, 8 insertions(+), 3 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index cdd1d8655a76..f11b4f172e61 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -549,8 +549,14 @@ static bool is_refcount_suitable(struct folio *folio)
> int expected_refcount;
>
> expected_refcount = folio_mapcount(folio);
> - if (folio_test_swapcache(folio))
> + if (folio_test_anon(folio)) {
> + expected_refcount += folio_test_swapcache(folio) ?
> + folio_nr_pages(folio) : 0;
> + } else {
> expected_refcount += folio_nr_pages(folio);
> + if (folio_test_private(folio))
> + expected_refcount++;
> + }
Alternatively, a bit neater
if (!folio_test_anon(folio) || folio_test_swapcache(folio))
expected_refcount += folio_nr_pages(folio);
if (folio_test_private(folio))
expected_refcount++;
The latter check should be fine even for anon folios (although always false)
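
Folded into the helper from the patch, the restructured function would read
roughly as follows (a sketch only; the final v2 may differ):

    static bool is_refcount_suitable(struct folio *folio)
    {
            int expected_refcount;

            expected_refcount = folio_mapcount(folio);
            if (!folio_test_anon(folio) || folio_test_swapcache(folio))
                    expected_refcount += folio_nr_pages(folio);
            if (folio_test_private(folio))
                    expected_refcount++;

            return folio_ref_count(folio) == expected_refcount;
    }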
>
> return folio_ref_count(folio) == expected_refcount;
> }
> @@ -2285,8 +2291,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
> break;
> }
>
> - if (folio_ref_count(folio) !=
> - 1 + folio_mapcount(folio) + folio_test_private(folio)) {
The "1" is due to the pagecache, right? IIUC, we don't hold a raised
folio refcount as we do the xas_for_each().
--
Cheers,
David / dhildenb
On 2024/8/19 16:36, David Hildenbrand wrote:
> On 19.08.24 10:14, Baolin Wang wrote:
>> Expand the is_refcount_suitable() to support reference checks for file
>> folios,
>> as preparation for supporting shmem mTHP collapse.
>>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>> mm/khugepaged.c | 11 ++++++++---
>> 1 file changed, 8 insertions(+), 3 deletions(-)
>>
>> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
>> index cdd1d8655a76..f11b4f172e61 100644
>> --- a/mm/khugepaged.c
>> +++ b/mm/khugepaged.c
>> @@ -549,8 +549,14 @@ static bool is_refcount_suitable(struct folio
>> *folio)
>> int expected_refcount;
>> expected_refcount = folio_mapcount(folio);
>> - if (folio_test_swapcache(folio))
>> + if (folio_test_anon(folio)) {
>> + expected_refcount += folio_test_swapcache(folio) ?
>> + folio_nr_pages(folio) : 0;
>> + } else {
>> expected_refcount += folio_nr_pages(folio);
>> + if (folio_test_private(folio))
>> + expected_refcount++;
>> + }
>
> Alternatively, a bit neater
>
> if (!folio_test_anon(folio) || folio_test_swapcache(folio))
> expected_refcount += folio_nr_pages(folio);
> if (folio_test_private(folio))
> expected_refcount++;
>
> The latter check should be fine even for anon folios (although always
> false)
Looks better. Will do in v2.
>> return folio_ref_count(folio) == expected_refcount;
>> }
>> @@ -2285,8 +2291,7 @@ static int hpage_collapse_scan_file(struct
>> mm_struct *mm, unsigned long addr,
>> break;
>> }
>> - if (folio_ref_count(folio) !=
>> - 1 + folio_mapcount(folio) + folio_test_private(folio)) {
>
> The "1" is due to the pagecache, right? IIUC, we don't hold a raised
> folio refcount as we do the xas_for_each().
Right.
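
To spell out the accounting being discussed: for a file or shmem folio that
khugepaged scans via xas_for_each() without taking its own reference, the
expected references are the page cache's (folio_nr_pages(), which reduces to
the literal "1" of the old check for an order-0 folio), one per page-table
mapping, and one more if filesystem-private data is attached. A hypothetical
helper, for illustration only and not part of the patch, would look like:

    /*
     * Hypothetical, for illustration only: the expected reference count of
     * a file folio that khugepaged has not pinned itself.
     */
    static int file_folio_expected_refs(struct folio *folio)
    {
            /* the page cache holds one reference per page in the folio */
            int refs = folio_nr_pages(folio);

            /* one reference per page-table mapping */
            refs += folio_mapcount(folio);
            /* filesystem-private data (e.g. buffer heads) pins one more */
            if (folio_test_private(folio))
                    refs++;

            return refs;
    }

Any count above that means another user holds a transient reference and the
scan bails out with SCAN_PAGE_COUNT, which is what the new
is_refcount_suitable() check encodes.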