The helper gathers folio order statistics of folios within a virtual
address range and checks it against a given order list. It aims to provide
a more precise folio order check instead of just checking the existence of
PMD folios.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
tools/testing/selftests/mm/vm_util.c | 139 +++++++++++++++++++++++++++
tools/testing/selftests/mm/vm_util.h | 2 +
2 files changed, 141 insertions(+)
diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
index 9dafa7669ef9..373621145b2a 100644
--- a/tools/testing/selftests/mm/vm_util.c
+++ b/tools/testing/selftests/mm/vm_util.c
@@ -17,6 +17,12 @@
#define STATUS_FILE_PATH "/proc/self/status"
#define MAX_LINE_LENGTH 500
+#define PGMAP_PRESENT (1UL << 63)
+#define KPF_COMPOUND_HEAD (1UL << 15)
+#define KPF_COMPOUND_TAIL (1UL << 16)
+#define KPF_THP (1UL << 22)
+#define PFN_MASK ((1UL<<55)-1)
+
unsigned int __page_size;
unsigned int __page_shift;
@@ -338,6 +344,139 @@ int detect_hugetlb_page_sizes(size_t sizes[], int max)
return count;
}
+static int get_page_flags(uint64_t vpn, int pagemap_file, int kpageflags_file,
+ uint64_t *flags)
+{
+ uint64_t pfn;
+ size_t count;
+
+ count = pread(pagemap_file, &pfn, sizeof(pfn),
+ vpn * sizeof(pfn));
+
+ if (count != sizeof(pfn))
+ return -1;
+
+ /*
+ * Treat a non-present page as a page without any flag, so that
+ * gather_folio_orders() just records the current folio order.
+ */
+ if (!(pfn & PGMAP_PRESENT)) {
+ *flags = 0;
+ return 0;
+ }
+
+ count = pread(kpageflags_file, flags, sizeof(*flags),
+ (pfn & PFN_MASK) * sizeof(*flags));
+
+ if (count != sizeof(*flags))
+ return -1;
+
+ return 0;
+}
+
+static int gather_folio_orders(uint64_t vpn_start, size_t nr_pages,
+ int pagemap_file, int kpageflags_file,
+ int orders[], int nr_orders)
+{
+ uint64_t page_flags = 0;
+ int cur_order = -1;
+ uint64_t vpn;
+
+ if (!pagemap_file || !kpageflags_file)
+ return -1;
+ if (nr_orders <= 0)
+ return -1;
+
+ for (vpn = vpn_start; vpn < vpn_start + nr_pages; ) {
+ uint64_t next_folio_vpn;
+ int status;
+
+ if (get_page_flags(vpn, pagemap_file, kpageflags_file, &page_flags))
+ return -1;
+
+ /* all order-0 pages with possible false positive (non-folio) */
+ if (!(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) {
+ orders[0]++;
+ vpn++;
+ continue;
+ }
+
+ /* skip non thp compound pages */
+ if (!(page_flags & KPF_THP)) {
+ vpn++;
+ continue;
+ }
+
+ /* vpn points to part of a THP at this point */
+ if (page_flags & KPF_COMPOUND_HEAD)
+ cur_order = 1;
+ else {
+ /* not a head nor a tail in a THP? */
+ if (!(page_flags & KPF_COMPOUND_TAIL))
+ return -1;
+ continue;
+ }
+
+ next_folio_vpn = vpn + (1 << cur_order);
+
+ if (next_folio_vpn >= vpn_start + nr_pages)
+ break;
+
+ while (!(status = get_page_flags(next_folio_vpn, pagemap_file,
+ kpageflags_file,
+ &page_flags))) {
+ /* next compound head page or order-0 page */
+ if ((page_flags & KPF_COMPOUND_HEAD) ||
+ !(page_flags & (KPF_COMPOUND_HEAD |
+ KPF_COMPOUND_TAIL))) {
+ if (cur_order < nr_orders) {
+ orders[cur_order]++;
+ cur_order = -1;
+ vpn = next_folio_vpn;
+ }
+ break;
+ }
+
+ /* not a head nor a tail in a THP? */
+ if (!(page_flags & KPF_COMPOUND_TAIL))
+ return -1;
+
+ cur_order++;
+ next_folio_vpn = vpn + (1 << cur_order);
+ }
+
+ if (status)
+ return status;
+ }
+ if (cur_order > 0 && cur_order < nr_orders)
+ orders[cur_order]++;
+ return 0;
+}
+
+int check_folio_orders(uint64_t vpn_start, size_t nr_pages, int pagemap_file,
+ int kpageflags_file, int orders[], int nr_orders)
+{
+ int vpn_orders[nr_orders];
+ int status;
+ int i;
+
+ memset(vpn_orders, 0, sizeof(int) * nr_orders);
+ status = gather_folio_orders(vpn_start, nr_pages, pagemap_file,
+ kpageflags_file, vpn_orders, nr_orders);
+ if (status)
+ return status;
+
+ status = 0;
+ for (i = 0; i < nr_orders; i++)
+ if (vpn_orders[i] != orders[i]) {
+ ksft_print_msg("order %d: expected: %d got %d\n", i,
+ orders[i], vpn_orders[i]);
+ status = -1;
+ }
+
+ return status;
+}
+
/* If `ioctls' non-NULL, the allowed ioctls will be returned into the var */
int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
bool miss, bool wp, bool minor, uint64_t *ioctls)
diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
index b55d1809debc..dee9504a6129 100644
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -85,6 +85,8 @@ bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size);
int64_t allocate_transhuge(void *ptr, int pagemap_fd);
unsigned long default_huge_page_size(void);
int detect_hugetlb_page_sizes(size_t sizes[], int max);
+int check_folio_orders(uint64_t vpn_start, size_t nr_pages, int pagemap_file,
+ int kpageflags_file, int orders[], int nr_orders);
int uffd_register(int uffd, void *addr, uint64_t len,
bool miss, bool wp, bool minor);
--
2.47.2
On 2025/8/6 10:20, Zi Yan wrote:
> The helper gathers an folio order statistics of folios within a virtual
> address range and checks it against a given order list. It aims to provide
> a more precise folio order check instead of just checking the existence of
> PMD folios.
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> tools/testing/selftests/mm/vm_util.c | 139 +++++++++++++++++++++++++++
> tools/testing/selftests/mm/vm_util.h | 2 +
> 2 files changed, 141 insertions(+)
>
> diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
> index 9dafa7669ef9..373621145b2a 100644
> --- a/tools/testing/selftests/mm/vm_util.c
> +++ b/tools/testing/selftests/mm/vm_util.c
> @@ -17,6 +17,12 @@
> #define STATUS_FILE_PATH "/proc/self/status"
> #define MAX_LINE_LENGTH 500
>
> +#define PGMAP_PRESENT (1UL << 63)
> +#define KPF_COMPOUND_HEAD (1UL << 15)
> +#define KPF_COMPOUND_TAIL (1UL << 16)
> +#define KPF_THP (1UL << 22)
> +#define PFN_MASK ((1UL<<55)-1)
> +
> unsigned int __page_size;
> unsigned int __page_shift;
>
> @@ -338,6 +344,139 @@ int detect_hugetlb_page_sizes(size_t sizes[], int max)
> return count;
> }
>
> +static int get_page_flags(uint64_t vpn, int pagemap_file, int kpageflags_file,
> + uint64_t *flags)
> +{
> + uint64_t pfn;
> + size_t count;
> +
> + count = pread(pagemap_file, &pfn, sizeof(pfn),
> + vpn * sizeof(pfn));
> +
> + if (count != sizeof(pfn))
> + return -1;
> +
> + /*
> + * Treat non-present page as a page without any flag, so that
> + * gather_folio_orders() just record the current folio order.
> + */
> + if (!(pfn & PGMAP_PRESENT)) {
> + *flags = 0;
> + return 0;
> + }
It looks like you can reuse the helper pagemap_get_pfn() in this file?
> +
> + count = pread(kpageflags_file, flags, sizeof(*flags),
> + (pfn & PFN_MASK) * sizeof(*flags));
> +
> + if (count != sizeof(*flags))
> + return -1;
> +
> + return 0;
> +}
> +
> +static int gather_folio_orders(uint64_t vpn_start, size_t nr_pages,
In this file, other helper functions use userspace virtual address as
parameters, so can we consistently use virtual address for calculations
instead of the 'vpn_start'?
> + int pagemap_file, int kpageflags_file,
> + int orders[], int nr_orders)
> +{
> + uint64_t page_flags = 0;
> + int cur_order = -1;
> + uint64_t vpn;
> +
> + if (!pagemap_file || !kpageflags_file)
> + return -1;
> + if (nr_orders <= 0)
> + return -1;
> +
> + for (vpn = vpn_start; vpn < vpn_start + nr_pages; ) {
> + uint64_t next_folio_vpn;
> + int status;
> +
> + if (get_page_flags(vpn, pagemap_file, kpageflags_file, &page_flags))
> + return -1;
> +
> + /* all order-0 pages with possible false postive (non folio) */
> + if (!(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) {
> + orders[0]++;
> + vpn++;
> + continue;
> + }
> +
> + /* skip non thp compound pages */
> + if (!(page_flags & KPF_THP)) {
> + vpn++;
> + continue;
> + }
> +
> + /* vpn points to part of a THP at this point */
> + if (page_flags & KPF_COMPOUND_HEAD)
> + cur_order = 1;
> + else {
> + /* not a head nor a tail in a THP? */
> + if (!(page_flags & KPF_COMPOUND_TAIL))
> + return -1;
> + continue;
> + }
> +
> + next_folio_vpn = vpn + (1 << cur_order);
> +
> + if (next_folio_vpn >= vpn_start + nr_pages)
> + break;
> +
> + while (!(status = get_page_flags(next_folio_vpn, pagemap_file,
> + kpageflags_file,
> + &page_flags))) {
> + /* next compound head page or order-0 page */
> + if ((page_flags & KPF_COMPOUND_HEAD) ||
> + !(page_flags & (KPF_COMPOUND_HEAD |
> + KPF_COMPOUND_TAIL))) {
> + if (cur_order < nr_orders) {
> + orders[cur_order]++;
> + cur_order = -1;
> + vpn = next_folio_vpn;
> + }
> + break;
> + }
> +
> + /* not a head nor a tail in a THP? */
> + if (!(page_flags & KPF_COMPOUND_TAIL))
> + return -1;
> +
> + cur_order++;
> + next_folio_vpn = vpn + (1 << cur_order);
> + }
> +
> + if (status)
> + return status;
> + }
> + if (cur_order > 0 && cur_order < nr_orders)
> + orders[cur_order]++;
> + return 0;
> +}
> +
> +int check_folio_orders(uint64_t vpn_start, size_t nr_pages, int pagemap_file,
> + int kpageflags_file, int orders[], int nr_orders)
> +{
> + int vpn_orders[nr_orders];
IIRC, we should avoid using VLA (variable length arrays)?
> + int status;
> + int i;
> +
> + memset(vpn_orders, 0, sizeof(int) * nr_orders);
> + status = gather_folio_orders(vpn_start, nr_pages, pagemap_file,
> + kpageflags_file, vpn_orders, nr_orders);
> + if (status)
> + return status;
> +
> + status = 0;
> + for (i = 0; i < nr_orders; i++)
> + if (vpn_orders[i] != orders[i]) {
> + ksft_print_msg("order %d: expected: %d got %d\n", i,
> + orders[i], vpn_orders[i]);
> + status = -1;
> + }
> +
> + return status;
> +}
> +
> /* If `ioctls' non-NULL, the allowed ioctls will be returned into the var */
> int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
> bool miss, bool wp, bool minor, uint64_t *ioctls)
> diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
> index b55d1809debc..dee9504a6129 100644
> --- a/tools/testing/selftests/mm/vm_util.h
> +++ b/tools/testing/selftests/mm/vm_util.h
> @@ -85,6 +85,8 @@ bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size);
> int64_t allocate_transhuge(void *ptr, int pagemap_fd);
> unsigned long default_huge_page_size(void);
> int detect_hugetlb_page_sizes(size_t sizes[], int max);
> +int check_folio_orders(uint64_t vpn_start, size_t nr_pages, int pagemap_file,
> + int kpageflags_file, int orders[], int nr_orders);
>
> int uffd_register(int uffd, void *addr, uint64_t len,
> bool miss, bool wp, bool minor);
On 7 Aug 2025, at 2:49, Baolin Wang wrote:
> On 2025/8/6 10:20, Zi Yan wrote:
>> The helper gathers an folio order statistics of folios within a virtual
>> address range and checks it against a given order list. It aims to provide
>> a more precise folio order check instead of just checking the existence of
>> PMD folios.
>>
>> Signed-off-by: Zi Yan <ziy@nvidia.com>
>> ---
>> tools/testing/selftests/mm/vm_util.c | 139 +++++++++++++++++++++++++++
>> tools/testing/selftests/mm/vm_util.h | 2 +
>> 2 files changed, 141 insertions(+)
>>
>> diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
>> index 9dafa7669ef9..373621145b2a 100644
>> --- a/tools/testing/selftests/mm/vm_util.c
>> +++ b/tools/testing/selftests/mm/vm_util.c
>> @@ -17,6 +17,12 @@
>> #define STATUS_FILE_PATH "/proc/self/status"
>> #define MAX_LINE_LENGTH 500
>> +#define PGMAP_PRESENT (1UL << 63)
>> +#define KPF_COMPOUND_HEAD (1UL << 15)
>> +#define KPF_COMPOUND_TAIL (1UL << 16)
>> +#define KPF_THP (1UL << 22)
>> +#define PFN_MASK ((1UL<<55)-1)
>> +
>> unsigned int __page_size;
>> unsigned int __page_shift;
>> @@ -338,6 +344,139 @@ int detect_hugetlb_page_sizes(size_t sizes[], int max)
>> return count;
>> }
>> +static int get_page_flags(uint64_t vpn, int pagemap_file, int kpageflags_file,
>> + uint64_t *flags)
>> +{
>> + uint64_t pfn;
>> + size_t count;
>> +
>> + count = pread(pagemap_file, &pfn, sizeof(pfn),
>> + vpn * sizeof(pfn));
>> +
>> + if (count != sizeof(pfn))
>> + return -1;
>> +
>> + /*
>> + * Treat non-present page as a page without any flag, so that
>> + * gather_folio_orders() just record the current folio order.
>> + */
>> + if (!(pfn & PGMAP_PRESENT)) {
>> + *flags = 0;
>> + return 0;
>> + }
>
> It looks like you can reuse the helper pagemap_get_pfn() in this file?
Sure.
>
>> +
>> + count = pread(kpageflags_file, flags, sizeof(*flags),
>> + (pfn & PFN_MASK) * sizeof(*flags));
>> +
>> + if (count != sizeof(*flags))
>> + return -1;
>> +
>> + return 0;
>> +}
>> +
>> +static int gather_folio_orders(uint64_t vpn_start, size_t nr_pages,
>
> In this file, other helper functions use userspace virtual address as parameters, so can we consistently use virtual address for calculations instead of the 'vpn_start'?
>
Sure.
>> + int pagemap_file, int kpageflags_file,
>> + int orders[], int nr_orders)
>> +{
>> + uint64_t page_flags = 0;
>> + int cur_order = -1;
>> + uint64_t vpn;
>> +
>> + if (!pagemap_file || !kpageflags_file)
>> + return -1;
>> + if (nr_orders <= 0)
>> + return -1;
>> +
>> + for (vpn = vpn_start; vpn < vpn_start + nr_pages; ) {
>> + uint64_t next_folio_vpn;
>> + int status;
>> +
>> + if (get_page_flags(vpn, pagemap_file, kpageflags_file, &page_flags))
>> + return -1;
>> +
>> + /* all order-0 pages with possible false postive (non folio) */
>> + if (!(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) {
>> + orders[0]++;
>> + vpn++;
>> + continue;
>> + }
>> +
>> + /* skip non thp compound pages */
>> + if (!(page_flags & KPF_THP)) {
>> + vpn++;
>> + continue;
>> + }
>> +
>> + /* vpn points to part of a THP at this point */
>> + if (page_flags & KPF_COMPOUND_HEAD)
>> + cur_order = 1;
>> + else {
>> + /* not a head nor a tail in a THP? */
>> + if (!(page_flags & KPF_COMPOUND_TAIL))
>> + return -1;
>> + continue;
>> + }
>> +
>> + next_folio_vpn = vpn + (1 << cur_order);
>> +
>> + if (next_folio_vpn >= vpn_start + nr_pages)
>> + break;
>> +
>> + while (!(status = get_page_flags(next_folio_vpn, pagemap_file,
>> + kpageflags_file,
>> + &page_flags))) {
>> + /* next compound head page or order-0 page */
>> + if ((page_flags & KPF_COMPOUND_HEAD) ||
>> + !(page_flags & (KPF_COMPOUND_HEAD |
>> + KPF_COMPOUND_TAIL))) {
>> + if (cur_order < nr_orders) {
>> + orders[cur_order]++;
>> + cur_order = -1;
>> + vpn = next_folio_vpn;
>> + }
>> + break;
>> + }
>> +
>> + /* not a head nor a tail in a THP? */
>> + if (!(page_flags & KPF_COMPOUND_TAIL))
>> + return -1;
>> +
>> + cur_order++;
>> + next_folio_vpn = vpn + (1 << cur_order);
>> + }
>> +
>> + if (status)
>> + return status;
>> + }
>> + if (cur_order > 0 && cur_order < nr_orders)
>> + orders[cur_order]++;
>> + return 0;
>> +}
>> +
>> +int check_folio_orders(uint64_t vpn_start, size_t nr_pages, int pagemap_file,
>> + int kpageflags_file, int orders[], int nr_orders)
>> +{
>> + int vpn_orders[nr_orders];
>
> IIRC, we should avoid using VLA (variable length arrays)?
OK. I can change it to malloc.
Thanks.
>
>> + int status;
>> + int i;
>> +
>> + memset(vpn_orders, 0, sizeof(int) * nr_orders);
>> + status = gather_folio_orders(vpn_start, nr_pages, pagemap_file,
>> + kpageflags_file, vpn_orders, nr_orders);
>> + if (status)
>> + return status;
>> +
>> + status = 0;
>> + for (i = 0; i < nr_orders; i++)
>> + if (vpn_orders[i] != orders[i]) {
>> + ksft_print_msg("order %d: expected: %d got %d\n", i,
>> + orders[i], vpn_orders[i]);
>> + status = -1;
>> + }
>> +
>> + return status;
>> +}
>> +
>> /* If `ioctls' non-NULL, the allowed ioctls will be returned into the var */
>> int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
>> bool miss, bool wp, bool minor, uint64_t *ioctls)
>> diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
>> index b55d1809debc..dee9504a6129 100644
>> --- a/tools/testing/selftests/mm/vm_util.h
>> +++ b/tools/testing/selftests/mm/vm_util.h
>> @@ -85,6 +85,8 @@ bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size);
>> int64_t allocate_transhuge(void *ptr, int pagemap_fd);
>> unsigned long default_huge_page_size(void);
>> int detect_hugetlb_page_sizes(size_t sizes[], int max);
>> +int check_folio_orders(uint64_t vpn_start, size_t nr_pages, int pagemap_file,
>> + int kpageflags_file, int orders[], int nr_orders);
>> int uffd_register(int uffd, void *addr, uint64_t len,
>> bool miss, bool wp, bool minor);
--
Best Regards,
Yan, Zi
Hi Zi,
Thanks for the patch.
I have a nit suggestion to centralize some of the macro definitions
for better consistency and reusability.
On [Date of patch], Zi Yan wrote:
> diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
> ...
> +#define PGMAP_PRESENT (1UL << 63)
> +#define KPF_COMPOUND_HEAD (1UL << 15)
> +#define KPF_COMPOUND_TAIL (1UL << 16)
> +#define KPF_THP (1UL << 22)
> +#define PFN_MASK ((1UL<<55)-1)
Currently, these macros and `PGMAP_PRESENT` are defined locally in
`vm_util.c`. It would be cleaner to move them to the shared header
`vm_util.h`.
This would also allow us to consistently use `PM_PRESENT` (from the
header) instead of the local `PGMAP_PRESENT` duplicate. I noticed the
patch is already moving in this direction, and we can complete this
cleanup.
How about a change like this?
--- a/tools/testing/selftests/mm/vm_util.c
+++ b/tools/testing/selftests/mm/vm_util.c
@@ -17,17 +17,6 @@
#define STATUS_FILE_PATH "/proc/self/status"
#define MAX_LINE_LENGTH 500
-#define PGMAP_PRESENT (1UL << 63)
-#define KPF_COMPOUND_HEAD (1UL << 15)
-#define KPF_COMPOUND_TAIL (1UL << 16)
-#define KPF_THP (1UL << 22)
-#define PFN_MASK ((1UL<<55)-1)
-
unsigned int __page_size;
unsigned int __page_shift;
@@ -360,7 +349,7 @@ static int get_page_flags(uint64_t vpn, int pagemap_file, int kpageflags_file,
* Treat non-present page as a page without any flag, so that
* gather_folio_orders() just record the current folio order.
*/
- if (!(pfn & PGMAP_PRESENT)) {
+ if (!(pfn & PM_PRESENT)) {
*flags = 0;
return 0;
}
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -17,6 +17,11 @@
#define PM_FILE BIT_ULL(61)
#define PM_SWAP BIT_ULL(62)
#define PM_PRESENT BIT_ULL(63)
+#define KPF_COMPOUND_HEAD (1UL << 15)
+#define KPF_COMPOUND_TAIL (1UL << 16)
+#define KPF_THP (1UL << 22)
+#define PFN_MASK ((1UL<<55)-1)
extern unsigned int __page_size;
extern unsigned int __page_shift;
Best regards,
wang lian
On 6 Aug 2025, at 23:00, wang lian wrote:
> Hi Zi,
>
> Thanks for the patch.
>
> I have a nit suggestion to centralize some of the macro definitions
> for better consistency and reusability.
>
> On [Date of patch], Zi Yan wrote:
>> diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
>> ...
>> +#define PGMAP_PRESENT (1UL << 63)
>> +#define KPF_COMPOUND_HEAD (1UL << 15)
>> +#define KPF_COMPOUND_TAIL (1UL << 16)
>> +#define KPF_THP (1UL << 22)
>> +#define PFN_MASK ((1UL<<55)-1)
>
> Currently, these macros and `PGMAP_PRESENT` are defined locally in
> `vm_util.c`. It would be cleaner to move them to the shared header
> `vm_util.h`.
>
> This would also allow us to consistently use `PM_PRESENT` (from the
> header) instead of the local `PGMAP_PRESENT` duplicate. I noticed the
> patch is already moving in this direction, and we can complete this
> cleanup.
>
> How about a change like this?
I did not know about PM_PRESENT. Sure, will move the code like you
did below. Thanks.
>
> --- a/tools/testing/selftests/mm/vm_util.c
> +++ b/tools/testing/selftests/mm/vm_util.c
> @@ -17,17 +17,6 @@
> #define STATUS_FILE_PATH "/proc/self/status"
> #define MAX_LINE_LENGTH 500
>
> -#define PGMAP_PRESENT (1UL << 63)
> -#define KPF_COMPOUND_HEAD (1UL << 15)
> -#define KPF_COMPOUND_TAIL (1UL << 16)
> -#define KPF_THP (1UL << 22)
> -#define PFN_MASK ((1UL<<55)-1)
> -
> unsigned int __page_size;
> unsigned int __page_shift;
>
> @@ -360,7 +349,7 @@ static int get_page_flags(uint64_t vpn, int pagemap_file, int kpageflags_file,
> * Treat non-present page as a page without any flag, so that
> * gather_folio_orders() just record the current folio order.
> */
> - if (!(pfn & PGMAP_PRESENT)) {
> + if (!(pfn & PM_PRESENT)) {
> *flags = 0;
> return 0;
> }
> --- a/tools/testing/selftests/mm/vm_util.h
> +++ b/tools/testing/selftests/mm/vm_util.h
> @@ -17,6 +17,11 @@
> #define PM_FILE BIT_ULL(61)
> #define PM_SWAP BIT_ULL(62)
> #define PM_PRESENT BIT_ULL(63)
> +#define KPF_COMPOUND_HEAD (1UL << 15)
> +#define KPF_COMPOUND_TAIL (1UL << 16)
> +#define KPF_THP (1UL << 22)
> +#define PFN_MASK ((1UL<<55)-1)
>
> extern unsigned int __page_size;
> extern unsigned int __page_shift;
>
>
> Best regards,
> wang lian
--
Best Regards,
Yan, Zi
© 2016 - 2026 Red Hat, Inc.