Allows __prep_new_huge_page() to operate on a folio by converting
set_hugetlb_cgroup*() to take in a folio.
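
For example, a call site that still has a struct page now converts once
with page_folio() and passes the folio to the helpers (a minimal sketch
of the new calling convention, not a hunk from this patch):

	struct folio *folio = page_folio(page);

	set_hugetlb_cgroup(folio, NULL);
	set_hugetlb_cgroup_rsvd(folio, NULL);
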
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
 include/linux/hugetlb_cgroup.h | 12 ++++++------
 mm/hugetlb.c                   | 33 +++++++++++++++++++--------------
 mm/hugetlb_cgroup.c            | 11 ++++++-----
 3 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index feb2edafc8b6..a7e3540f7f38 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -112,16 +112,16 @@ static inline void __set_hugetlb_cgroup(struct folio *folio,
(unsigned long)h_cg);
}
-static inline void set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
- __set_hugetlb_cgroup(page_folio(page), h_cg, false);
+ __set_hugetlb_cgroup(folio, h_cg, false);
}
-static inline void set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
- __set_hugetlb_cgroup(page_folio(page), h_cg, true);
+ __set_hugetlb_cgroup(folio, h_cg, true);
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -199,12 +199,12 @@ hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
return NULL;
}
-static inline void set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
}
-static inline void set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 27b87dc85c48..a6384fb0b57f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1758,19 +1758,21 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
h->nr_huge_pages_node[nid]++;
}
-static void __prep_new_huge_page(struct hstate *h, struct page *page)
+static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
- hugetlb_vmemmap_optimize(h, page);
- INIT_LIST_HEAD(&page->lru);
- set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
- hugetlb_set_page_subpool(page, NULL);
- set_hugetlb_cgroup(page, NULL);
- set_hugetlb_cgroup_rsvd(page, NULL);
+ hugetlb_vmemmap_optimize(h, &folio->page);
+ INIT_LIST_HEAD(&folio->lru);
+ folio->_folio_dtor = HUGETLB_PAGE_DTOR;
+ hugetlb_set_folio_subpool(folio, NULL);
+ set_hugetlb_cgroup(folio, NULL);
+ set_hugetlb_cgroup_rsvd(folio, NULL);
}
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
- __prep_new_huge_page(h, page);
+ struct folio *folio = page_folio(page);
+
+ __prep_new_hugetlb_folio(h, folio);
spin_lock_irq(&hugetlb_lock);
__prep_account_new_huge_page(h, nid);
spin_unlock_irq(&hugetlb_lock);
@@ -2731,8 +2733,10 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
struct list_head *list)
{
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
- int nid = page_to_nid(old_page);
+ struct folio *old_folio = page_folio(old_page);
+ int nid = folio_nid(old_folio);
struct page *new_page;
+ struct folio *new_folio;
int ret = 0;
/*
@@ -2745,16 +2749,17 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
if (!new_page)
return -ENOMEM;
- __prep_new_huge_page(h, new_page);
+ new_folio = page_folio(new_page);
+ __prep_new_hugetlb_folio(h, new_folio);
retry:
spin_lock_irq(&hugetlb_lock);
- if (!PageHuge(old_page)) {
+ if (!folio_test_hugetlb(old_folio)) {
/*
* Freed from under us. Drop new_page too.
*/
goto free_new;
- } else if (page_count(old_page)) {
+ } else if (folio_ref_count(old_folio)) {
/*
* Someone has grabbed the page, try to isolate it here.
* Fail with -EBUSY if not possible.
@@ -2763,7 +2768,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
ret = isolate_hugetlb(old_page, list);
spin_lock_irq(&hugetlb_lock);
goto free_new;
- } else if (!HPageFreed(old_page)) {
+ } else if (!folio_test_hugetlb_freed(old_folio)) {
/*
* Page's refcount is 0 but it has not been enqueued in the
* freelist yet. Race window is small, so we can succeed here if
@@ -2801,7 +2806,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
free_new:
spin_unlock_irq(&hugetlb_lock);
/* Page has a zero ref count, but needs a ref to be freed */
- set_page_refcounted(new_page);
+ folio_ref_unfreeze(new_folio, 1);
update_and_free_page(h, new_page, false);
return ret;
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 600c98560a0f..692b23b5d423 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -212,7 +212,7 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
/* Take the pages off the local counter */
page_counter_cancel(counter, nr_pages);
- set_hugetlb_cgroup(page, parent);
+ set_hugetlb_cgroup(folio, parent);
out:
return;
}
@@ -894,6 +894,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
struct hugetlb_cgroup *h_cg_rsvd;
struct hstate *h = page_hstate(oldhpage);
struct folio *old_folio = page_folio(oldhpage);
+ struct folio *new_folio = page_folio(newhpage);
if (hugetlb_cgroup_disabled())
return;
@@ -901,12 +902,12 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
spin_lock_irq(&hugetlb_lock);
h_cg = hugetlb_cgroup_from_folio(old_folio);
h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
- set_hugetlb_cgroup(oldhpage, NULL);
- set_hugetlb_cgroup_rsvd(oldhpage, NULL);
+ set_hugetlb_cgroup(old_folio, NULL);
+ set_hugetlb_cgroup_rsvd(old_folio, NULL);
/* move the h_cg details to new cgroup */
- set_hugetlb_cgroup(newhpage, h_cg);
- set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
+ set_hugetlb_cgroup(new_folio, h_cg);
+ set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
list_move(&newhpage->lru, &h->hugepage_activelist);
spin_unlock_irq(&hugetlb_lock);
return;
--
2.31.1
On 11/01/22 15:30, Sidhartha Kumar wrote:
> Allows __prep_new_huge_page() to operate on a folio by converting
> set_hugetlb_cgroup*() to take in a folio.
>
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
> ---
>  include/linux/hugetlb_cgroup.h | 12 ++++++------
>  mm/hugetlb.c                   | 33 +++++++++++++++++++--------------
>  mm/hugetlb_cgroup.c            | 11 ++++++-----
>  3 files changed, 31 insertions(+), 25 deletions(-)

Muchun had the same comment I had in v1 about the need for a routine to
set the destructor of a folio. Since this will be added in a subsequent
series,

Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
--
Mike Kravetz
> On Nov 2, 2022, at 06:30, Sidhartha Kumar <sidhartha.kumar@oracle.com> wrote:
>
> Allows __prep_new_huge_page() to operate on a folio by converting
> set_hugetlb_cgroup*() to take in a folio.
>
[...]
> -static void __prep_new_huge_page(struct hstate *h, struct page *page)
> +static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
> {
> - hugetlb_vmemmap_optimize(h, page);
> - INIT_LIST_HEAD(&page->lru);
> - set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
> - hugetlb_set_page_subpool(page, NULL);
> - set_hugetlb_cgroup(page, NULL);
> - set_hugetlb_cgroup_rsvd(page, NULL);
> + hugetlb_vmemmap_optimize(h, &folio->page);
> + INIT_LIST_HEAD(&folio->lru);
> + folio->_folio_dtor = HUGETLB_PAGE_DTOR;
Seems like a variant of set_compound_page_dtor() for folio is missing,
e.g. set_large_folio_dtor(). It's better to add it in this series.
Thanks.
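
For reference, a minimal sketch of what such a folio variant could look
like, mirroring the existing set_compound_page_dtor() (set_large_folio_dtor
is only the name suggested above, and the VM_BUG_ON_FOLIO sanity check is
an assumption, not code from this series):

	static inline void set_large_folio_dtor(struct folio *folio,
			enum compound_dtor_id compound_dtor)
	{
		VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio);
		folio->_folio_dtor = compound_dtor;
	}

__prep_new_hugetlb_folio() could then call set_large_folio_dtor(folio,
HUGETLB_PAGE_DTOR) instead of writing folio->_folio_dtor directly.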
On 11/1/22 11:45 PM, Muchun Song wrote:
>
>> On Nov 2, 2022, at 06:30, Sidhartha Kumar <sidhartha.kumar@oracle.com> wrote:
>>
>> Allows __prep_new_huge_page() to operate on a folio by converting
>> set_hugetlb_cgroup*() to take in a folio.
>>
[...]
>> -static void __prep_new_huge_page(struct hstate *h, struct page *page)
>> +static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
>> {
>> - hugetlb_vmemmap_optimize(h, page);
>> - INIT_LIST_HEAD(&page->lru);
>> - set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
>> - hugetlb_set_page_subpool(page, NULL);
>> - set_hugetlb_cgroup(page, NULL);
>> - set_hugetlb_cgroup_rsvd(page, NULL);
>> + hugetlb_vmemmap_optimize(h, &folio->page);
>> + INIT_LIST_HEAD(&folio->lru);
>> + folio->_folio_dtor = HUGETLB_PAGE_DTOR;
> Seems like a variant of set_compound_page_dtor() for folio is missing,
> e.g. set_large_folio_dtor(). It's better to add it in this series.
>
> Thanks.
Hi Muchun, thanks for taking a look.

Would it be ok to add this functionality in a separate patch series?
Some of the earlier patches in this series were modified by Hugh's
series[1], so I'm not sure how a v3 of this series would look now. Let me
know which approach you would prefer.
Thanks,
Sidhartha Kumar
[1]
https://lore.kernel.org/linux-mm/3818cc9a-9999-d064-d778-9c94c5911e6@google.com/
> On Nov 10, 2022, at 08:20, Sidhartha Kumar <sidhartha.kumar@oracle.com> wrote:
>
>
> On 11/1/22 11:45 PM, Muchun Song wrote:
>>
>>> On Nov 2, 2022, at 06:30, Sidhartha Kumar <sidhartha.kumar@oracle.com> wrote:
>>>
>>> Allows __prep_new_huge_page() to operate on a folio by converting
>>> set_hugetlb_cgroup*() to take in a folio.
>>>
[...]
>>> -static void __prep_new_huge_page(struct hstate *h, struct page *page)
>>> +static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
>>> {
>>> - hugetlb_vmemmap_optimize(h, page);
>>> - INIT_LIST_HEAD(&page->lru);
>>> - set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
>>> - hugetlb_set_page_subpool(page, NULL);
>>> - set_hugetlb_cgroup(page, NULL);
>>> - set_hugetlb_cgroup_rsvd(page, NULL);
>>> + hugetlb_vmemmap_optimize(h, &folio->page);
>>> + INIT_LIST_HEAD(&folio->lru);
>>> + folio->_folio_dtor = HUGETLB_PAGE_DTOR;
>> Seems like a variant of set_compound_page_dtor() for folio is missing,
>> e.g. set_large_folio_dtor(). It's better to add it in this series.
>>
>> Thanks.
>
> Hi Muchun, thanks for taking a look.
>
> Would it be ok to add this functionality in a separate patch series? Some of the earlier patches in this series were
I'm ok.
Thanks.
> modified by Hugh's series[1], so I'm not sure how a v3 of this series would look now. Let me know which approach you would prefer.
>
>
> Thanks,
>
> Sidhartha Kumar
>
> [1] https://lore.kernel.org/linux-mm/3818cc9a-9999-d064-d778-9c94c5911e6@google.com/