The page_ext_next() function assumes that page extension objects for a
page order allocation always reside in the same memory section, which
may not be true and could lead to crashes. Use the new page_ext
iteration API instead.
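For reference, page_ext_next() is plain pointer arithmetic (roughly, per
include/linux/page_ext.h):

    static inline struct page_ext *page_ext_next(struct page_ext *curr)
    {
            void *next = curr;

            /* assumes the next page_ext object sits page_ext_size bytes away */
            next += page_ext_size;
            return next;
    }

With sparse memory, page_ext objects are allocated per memory section, so
stepping a pointer like this across a section boundary (as can happen for a
gigantic folio) walks off the end of one section's page_ext array.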
Fixes: cf54f310d0d3 ("mm/hugetlb: use __GFP_COMP for gigantic folios")
Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
---
mm/page_table_check.c | 39 ++++++++++++---------------------------
1 file changed, 12 insertions(+), 27 deletions(-)
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 509c6ef8de400..b52e04d31c809 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -62,24 +62,20 @@ static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
*/
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
+ struct page_ext_iter iter;
struct page_ext *page_ext;
struct page *page;
- unsigned long i;
bool anon;
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
- page_ext = page_ext_get(page);
-
- if (!page_ext)
- return;
-
BUG_ON(PageSlab(page));
anon = PageAnon(page);
- for (i = 0; i < pgcnt; i++) {
+ rcu_read_lock();
+ for_each_page_ext(page, pgcnt, page_ext, iter) {
struct page_table_check *ptc = get_page_table_check(page_ext);
if (anon) {
@@ -89,9 +85,8 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
BUG_ON(atomic_read(&ptc->anon_map_count));
BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
}
- page_ext = page_ext_next(page_ext);
}
- page_ext_put(page_ext);
+ rcu_read_unlock();
}
/*
@@ -102,24 +97,20 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
bool rw)
{
+ struct page_ext_iter iter;
struct page_ext *page_ext;
struct page *page;
- unsigned long i;
bool anon;
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
- page_ext = page_ext_get(page);
-
- if (!page_ext)
- return;
-
BUG_ON(PageSlab(page));
anon = PageAnon(page);
- for (i = 0; i < pgcnt; i++) {
+ rcu_read_lock();
+ for_each_page_ext(page, pgcnt, page_ext, iter) {
struct page_table_check *ptc = get_page_table_check(page_ext);
if (anon) {
@@ -129,9 +120,8 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
BUG_ON(atomic_read(&ptc->anon_map_count));
BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
}
- page_ext = page_ext_next(page_ext);
}
- page_ext_put(page_ext);
+ rcu_read_unlock();
}
/*
@@ -140,24 +130,19 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
*/
void __page_table_check_zero(struct page *page, unsigned int order)
{
+ struct page_ext_iter iter;
struct page_ext *page_ext;
- unsigned long i;
BUG_ON(PageSlab(page));
- page_ext = page_ext_get(page);
-
- if (!page_ext)
- return;
-
- for (i = 0; i < (1ul << order); i++) {
+ rcu_read_lock();
+ for_each_page_ext_order(page, order, page_ext, iter) {
struct page_table_check *ptc = get_page_table_check(page_ext);
BUG_ON(atomic_read(&ptc->anon_map_count));
BUG_ON(atomic_read(&ptc->file_map_count));
- page_ext = page_ext_next(page_ext);
}
- page_ext_put(page_ext);
+ rcu_read_unlock();
}
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
--
2.48.1
On 19.02.25 03:17, Luiz Capitulino wrote:
> The page_ext_next() function assumes that page extension objects for a
> page order allocation always reside in the same memory section, which
> may not be true and could lead to crashes. Use the new page_ext
> iteration API instead.
>
> Fixes: cf54f310d0d3 ("mm/hugetlb: use __GFP_COMP for gigantic folios")
> Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
> ---
> mm/page_table_check.c | 39 ++++++++++++---------------------------
> 1 file changed, 12 insertions(+), 27 deletions(-)
>
> diff --git a/mm/page_table_check.c b/mm/page_table_check.c
> index 509c6ef8de400..b52e04d31c809 100644
> --- a/mm/page_table_check.c
> +++ b/mm/page_table_check.c
> @@ -62,24 +62,20 @@ static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
> */
> static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
> {
> + struct page_ext_iter iter;
> struct page_ext *page_ext;
> struct page *page;
> - unsigned long i;
> bool anon;
>
> if (!pfn_valid(pfn))
> return;
>
> page = pfn_to_page(pfn);
> - page_ext = page_ext_get(page);
> -
> - if (!page_ext)
> - return;
> -
> BUG_ON(PageSlab(page));
> anon = PageAnon(page);
>
> - for (i = 0; i < pgcnt; i++) {
> + rcu_read_lock();
> + for_each_page_ext(page, pgcnt, page_ext, iter) {
> struct page_table_check *ptc = get_page_table_check(page_ext);
>
> if (anon) {
> @@ -89,9 +85,8 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
> BUG_ON(atomic_read(&ptc->anon_map_count));
> BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
> }
> - page_ext = page_ext_next(page_ext);
> }
> - page_ext_put(page_ext);
> + rcu_read_unlock();
> }
[...]
>
> /*
> @@ -140,24 +130,19 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
> */
> void __page_table_check_zero(struct page *page, unsigned int order)
> {
> + struct page_ext_iter iter;
> struct page_ext *page_ext;
> - unsigned long i;
>
> BUG_ON(PageSlab(page));
>
> - page_ext = page_ext_get(page);
> -
> - if (!page_ext)
> - return;
> -
> - for (i = 0; i < (1ul << order); i++) {
> + rcu_read_lock();
> + for_each_page_ext_order(page, order, page_ext, iter) {
I would avoid introducing for_each_page_ext_order() and just pass "1 << order" as the page count.
> struct page_table_check *ptc = get_page_table_check(page_ext);
>
> BUG_ON(atomic_read(&ptc->anon_map_count));
> BUG_ON(atomic_read(&ptc->file_map_count));
> - page_ext = page_ext_next(page_ext);
> }
> - page_ext_put(page_ext);
> + rcu_read_unlock();
> }
>
> void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
Apart from that, this looks very nice to me.
Acked-by: David Hildenbrand <david@redhat.com>
--
Cheers,
David / dhildenb
On 2025-02-20 06:05, David Hildenbrand wrote:
> On 19.02.25 03:17, Luiz Capitulino wrote:
>> The page_ext_next() function assumes that page extension objects for a
>> page order allocation always reside in the same memory section, which
>> may not be true and could lead to crashes. Use the new page_ext
>> iteration API instead.
>>
>> Fixes: cf54f310d0d3 ("mm/hugetlb: use __GFP_COMP for gigantic folios")
>> Signed-off-by: Luiz Capitulino <luizcap@redhat.com>
>> ---
>> mm/page_table_check.c | 39 ++++++++++++---------------------------
>> 1 file changed, 12 insertions(+), 27 deletions(-)
>>
>> diff --git a/mm/page_table_check.c b/mm/page_table_check.c
>> index 509c6ef8de400..b52e04d31c809 100644
>> --- a/mm/page_table_check.c
>> +++ b/mm/page_table_check.c
>> @@ -62,24 +62,20 @@ static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
>> */
>> static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
>> {
>> + struct page_ext_iter iter;
>> struct page_ext *page_ext;
>> struct page *page;
>> - unsigned long i;
>> bool anon;
>> if (!pfn_valid(pfn))
>> return;
>> page = pfn_to_page(pfn);
>> - page_ext = page_ext_get(page);
>> -
>> - if (!page_ext)
>> - return;
>> -
>> BUG_ON(PageSlab(page));
>> anon = PageAnon(page);
>> - for (i = 0; i < pgcnt; i++) {
>> + rcu_read_lock();
>> + for_each_page_ext(page, pgcnt, page_ext, iter) {
>> struct page_table_check *ptc = get_page_table_check(page_ext);
>> if (anon) {
>> @@ -89,9 +85,8 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
>> BUG_ON(atomic_read(&ptc->anon_map_count));
>> BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
>> }
>> - page_ext = page_ext_next(page_ext);
>> }
>> - page_ext_put(page_ext);
>> + rcu_read_unlock();
>> }
>
> [...]
>
>> /*
>> @@ -140,24 +130,19 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
>> */
>> void __page_table_check_zero(struct page *page, unsigned int order)
>> {
>> + struct page_ext_iter iter;
>> struct page_ext *page_ext;
>> - unsigned long i;
>> BUG_ON(PageSlab(page));
>> - page_ext = page_ext_get(page);
>> -
>> - if (!page_ext)
>> - return;
>> -
>> - for (i = 0; i < (1ul << order); i++) {
>> + rcu_read_lock();
>> + for_each_page_ext_order(page, order, page_ext, iter) {
>
> I would avoid introducing for_each_page_ext_order() and just pass "1 << order" as the page count.
OK, I'll drop it.
>
>> struct page_table_check *ptc = get_page_table_check(page_ext);
>> BUG_ON(atomic_read(&ptc->anon_map_count));
>> BUG_ON(atomic_read(&ptc->file_map_count));
>> - page_ext = page_ext_next(page_ext);
>> }
>> - page_ext_put(page_ext);
>> + rcu_read_unlock();
>> }
>> void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
>
> Apart from that, this looks very nice to me
>
> Acked-by: David Hildenbrand <david@redhat.com>
>