Now that mTHP can also be split or added to the deferred list, add a
folio_test_pmd_mappable() check for PMD-mapped THP, to avoid confusing
the PMD-mapped THP statistics.
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
mm/huge_memory.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1683de78c313..3ca9282a0dc9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3109,7 +3109,8 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
i_mmap_unlock_read(mapping);
out:
xas_destroy(&xas);
- count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
+ if (folio_test_pmd_mappable(folio))
+ count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
return ret;
}
@@ -3171,7 +3172,8 @@ void deferred_split_folio(struct folio *folio)
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (list_empty(&folio->_deferred_list)) {
- count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+ if (folio_test_pmd_mappable(folio))
+ count_vm_event(THP_DEFERRED_SPLIT_PAGE);
list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
--
2.39.3
Hey Baolin,

Maybe I spotted a bug and made a change to this patch as follows.

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 824eff9211db..f3c4e08c6d30 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2994,6 +2994,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
+	int order = folio_order(folio);
 	int extra_pins, ret;
 	pgoff_t end;
 	bool is_hzp;
@@ -3172,7 +3173,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		i_mmap_unlock_read(mapping);
 out:
 	xas_destroy(&xas);
-	if (folio_test_pmd_mappable(folio))

IIUC, if we split the folio successfully, it won’t be large here.

+	if (order >= HPAGE_PMD_ORDER)
 		count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
 	return ret;
 }

Thanks,
Lance
On 2024/4/22 14:17, Lance Yang wrote:
> Hey Baolin,
>
> Maybe I spotted a bug and made a change to this patch as follows.
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 824eff9211db..f3c4e08c6d30 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2994,6 +2994,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>  	XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
>  	struct anon_vma *anon_vma = NULL;
>  	struct address_space *mapping = NULL;
> +	int order = folio_order(folio);
>  	int extra_pins, ret;
>  	pgoff_t end;
>  	bool is_hzp;
> @@ -3172,7 +3173,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>  		i_mmap_unlock_read(mapping);
> out:
>  	xas_destroy(&xas);
> -	if (folio_test_pmd_mappable(folio))
>
> IIUC, if we split the folio successfully, it won’t be large here.

Yes, good catch. I did a blind copy-paste. Will fix in the next version. Thanks for reviewing.

> +	if (order >= HPAGE_PMD_ORDER)
>  		count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
>  	return ret;
>  }
>
> Thanks,
> Lance
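To make the subtlety above easy to see outside the kernel, here is a minimal
userspace C sketch. It is illustration only, not kernel code: toy_folio and
toy_split() are invented stand-ins, and HPAGE_PMD_ORDER is hard-coded to the
common x86-64 value. It models why the order must be sampled before the split:
once the split succeeds, the folio is no longer PMD-mappable, so a check made
afterwards never counts the success case.

/*
 * Standalone illustration only -- not kernel code. toy_folio and
 * toy_split() are made-up stand-ins; HPAGE_PMD_ORDER is hard-coded
 * to the common x86-64 value (2MB THP with 4K base pages).
 */
#include <stdio.h>
#include <stdbool.h>

#define HPAGE_PMD_ORDER 9

struct toy_folio {
	int order;	/* log2 of the number of base pages */
};

/* A successful split leaves the folio at the (smaller) new order. */
static int toy_split(struct toy_folio *folio, int new_order)
{
	folio->order = new_order;
	return 0;
}

int main(void)
{
	struct toy_folio folio = { .order = HPAGE_PMD_ORDER };
	int order = folio.order;	/* snapshot taken before splitting */
	int ret = toy_split(&folio, 0);

	/* Check made after the split: folio is order-0 now, event is missed. */
	bool after = !ret && folio.order >= HPAGE_PMD_ORDER;
	/* Check against the snapshot: the successful split is still counted. */
	bool before = !ret && order >= HPAGE_PMD_ORDER;

	printf("count with post-split check:   %s\n", after ? "yes" : "no (bug)");
	printf("count with pre-split snapshot: %s\n", before ? "yes" : "no");
	return 0;
}

This mirrors why the change above records folio_order(folio) up front and
compares it against HPAGE_PMD_ORDER after the split attempt.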
On 29.03.24 07:59, Baolin Wang wrote:
> Now that mTHP can also be split or added to the deferred list, add a
> folio_test_pmd_mappable() check for PMD-mapped THP, to avoid confusing
> the PMD-mapped THP statistics.
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
> mm/huge_memory.c | 6 ++++--
> 1 file changed, 4 insertions(+), 2 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 1683de78c313..3ca9282a0dc9 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -3109,7 +3109,8 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> i_mmap_unlock_read(mapping);
> out:
> xas_destroy(&xas);
> - count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
> + if (folio_test_pmd_mappable(folio))
> + count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
> return ret;
> }
>
> @@ -3171,7 +3172,8 @@ void deferred_split_folio(struct folio *folio)
>
> spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
> if (list_empty(&folio->_deferred_list)) {
> - count_vm_event(THP_DEFERRED_SPLIT_PAGE);
> + if (folio_test_pmd_mappable(folio))
> + count_vm_event(THP_DEFERRED_SPLIT_PAGE);
> list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
> ds_queue->split_queue_len++;
> #ifdef CONFIG_MEMCG
Acked-by: David Hildenbrand <david@redhat.com>
--
Cheers,
David / dhildenb