[RFC PATCH v3 09/13] mm/damon/vaddr: Add vaddr versions of migrate_{hot,cold}

Posted by Bijan Tabatabai 3 months, 1 week ago
From: Bijan Tabatabai <bijantabatab@micron.com>

migrate_{hot,cold} are paddr schemes that are used to migrate hot/cold
data to a specified node. However, these schemes are only available when
doing physical address monitoring. This patch adds an implementation of
them for virtual address monitoring as well.

Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
---
 mm/damon/vaddr.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 102 insertions(+)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 46554e49a478..5cdfdc47c5ff 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -15,6 +15,7 @@
 #include <linux/pagewalk.h>
 #include <linux/sched/mm.h>
 
+#include "../internal.h"
 #include "ops-common.h"
 
 #ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
@@ -610,6 +611,65 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
 	return max_nr_accesses;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
+		unsigned long next, struct mm_walk *walk)
+{
+	struct list_head *migration_list = walk->private;
+	struct folio *folio;
+	spinlock_t *ptl;
+	pmd_t pmde;
+
+	ptl = pmd_lock(walk->mm, pmd);
+	pmde = pmdp_get(pmd);
+
+	if (!pmd_present(pmde) || !pmd_trans_huge(pmde))
+		goto unlock;
+
+	folio = damon_get_folio(pmd_pfn(pmde));
+	if (!folio)
+		goto unlock;
+
+	if (!folio_isolate_lru(folio))
+		goto put_folio;
+
+	list_add(&folio->lru, migration_list);
+
+put_folio:
+	folio_put(folio);
+unlock:
+	spin_unlock(ptl);
+	return 0;
+}
+#else
+#define damos_va_migrate_pmd_entry NULL
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
+		unsigned long enxt, struct mm_walk *walk)
+{
+	struct list_head *migration_list = walk->private;
+	struct folio *folio;
+	pte_t ptent;
+
+	ptent = ptep_get(pte);
+	if (pte_none(*pte) || !pte_present(*pte))
+		return 0;
+
+	folio = damon_get_folio(pte_pfn(ptent));
+	if (!folio)
+		return 0;
+
+	if (!folio_isolate_lru(folio))
+		goto out;
+
+	list_add(&folio->lru, migration_list);
+
+out:
+	folio_put(folio);
+	return 0;
+}
+
 /*
  * Functions for the target validity check and cleanup
  */
@@ -653,6 +713,41 @@ static unsigned long damos_madvise(struct damon_target *target,
 }
 #endif	/* CONFIG_ADVISE_SYSCALLS */
 
+static unsigned long damos_va_migrate(struct damon_target *target,
+		struct damon_region *r, struct damos *s,
+		unsigned long *sz_filter_passed)
+{
+	LIST_HEAD(folio_list);
+	struct task_struct *task;
+	struct mm_struct *mm;
+	unsigned long applied = 0;
+	struct mm_walk_ops walk_ops = {
+		.pmd_entry = damos_va_migrate_pmd_entry,
+		.pte_entry = damos_va_migrate_pte_entry,
+		.walk_lock = PGWALK_RDLOCK,
+	};
+
+	task = damon_get_task_struct(target);
+	if (!task)
+		return 0;
+
+	mm = damon_get_mm(target);
+	if (!mm)
+		goto put_task;
+
+	mmap_read_lock(mm);
+	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &folio_list);
+	mmap_read_unlock(mm);
+	mmput(mm);
+
+	applied = damon_migrate_pages(&folio_list, s->target_nid);
+	cond_resched();
+
+put_task:
+	put_task_struct(task);
+	return applied * PAGE_SIZE;
+}
+
 static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
 		struct damon_target *t, struct damon_region *r,
 		struct damos *scheme, unsigned long *sz_filter_passed)
@@ -675,6 +770,9 @@ static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
 	case DAMOS_NOHUGEPAGE:
 		madv_action = MADV_NOHUGEPAGE;
 		break;
+	case DAMOS_MIGRATE_HOT:
+	case DAMOS_MIGRATE_COLD:
+		return damos_va_migrate(t, r, scheme, sz_filter_passed);
 	case DAMOS_STAT:
 		return 0;
 	default:
@@ -695,6 +793,10 @@ static int damon_va_scheme_score(struct damon_ctx *context,
 	switch (scheme->action) {
 	case DAMOS_PAGEOUT:
 		return damon_cold_score(context, r, scheme);
+	case DAMOS_MIGRATE_HOT:
+		return damon_hot_score(context, r, scheme);
+	case DAMOS_MIGRATE_COLD:
+		return damon_cold_score(context, r, scheme);
 	default:
 		break;
 	}
-- 
2.43.5
Re: [RFC PATCH v3 09/13] mm/damon/vaddr: Add vaddr versions of migrate_{hot,cold}
Posted by SeongJae Park 3 months, 1 week ago
On Wed,  2 Jul 2025 15:13:32 -0500 Bijan Tabatabai <bijan311@gmail.com> wrote:

> From: Bijan Tabatabai <bijantabatab@micron.com>
> 
> migrate_{hot,cold} are paddr schemes that are used to migrate hot/cold
> data to a specified node. However, these schemes are only available when
> doing physical address monitoring. This patch adds an implementation of
> them for virtual address monitoring as well.
> 
> Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
> Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
> Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
> ---
>  mm/damon/vaddr.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 102 insertions(+)
> 
> diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
> index 46554e49a478..5cdfdc47c5ff 100644
> --- a/mm/damon/vaddr.c
> +++ b/mm/damon/vaddr.c
> @@ -15,6 +15,7 @@
>  #include <linux/pagewalk.h>
>  #include <linux/sched/mm.h>
>  
> +#include "../internal.h"
>  #include "ops-common.h"
>  
>  #ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
> @@ -610,6 +611,65 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
>  	return max_nr_accesses;
>  }
>  
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
> +		unsigned long next, struct mm_walk *walk)

I'd suggest putting the CONFIG_TRANSPARENT_HUGEPAGE check into the body of this
function and handling both pmd and pte here, consistent with
damon_young_pmd_entry().

> +{
> +	struct list_head *migration_list = walk->private;
> +	struct folio *folio;
> +	spinlock_t *ptl;
> +	pmd_t pmde;
> +
> +	ptl = pmd_lock(walk->mm, pmd);
> +	pmde = pmdp_get(pmd);
> +
> +	if (!pmd_present(pmde) || !pmd_trans_huge(pmde))
> +		goto unlock;
> +
> +	folio = damon_get_folio(pmd_pfn(pmde));
> +	if (!folio)
> +		goto unlock;
> +
> +	if (!folio_isolate_lru(folio))
> +		goto put_folio;
> +
> +	list_add(&folio->lru, migration_list);
> +
> +put_folio:
> +	folio_put(folio);
> +unlock:
> +	spin_unlock(ptl);
> +	return 0;
> +}
> +#else
> +#define damos_va_migrate_pmd_entry NULL
> +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
> +
> +static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
> +		unsigned long enxt, struct mm_walk *walk)

Nit.  s/enxt/next/ ?

> +{
> +	struct list_head *migration_list = walk->private;
> +	struct folio *folio;
> +	pte_t ptent;
> +
> +	ptent = ptep_get(pte);
> +	if (pte_none(*pte) || !pte_present(*pte))
> +		return 0;

Shouldn't we use the cached pte value (ptent) instead of *pte?  I'd suggest
merging this into damos_va_migrate_pmd_entry(), consistent with
damon_young_pmd_entry().
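
For concreteness, with the rename (s/enxt/next/) and the cached value
applied, the whole callback could look like below.  Just a sketch, not
tested:

static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	struct list_head *migration_list = walk->private;
	struct folio *folio;
	pte_t ptent;

	/* test the value that was read once, instead of re-reading *pte */
	ptent = ptep_get(pte);
	if (pte_none(ptent) || !pte_present(ptent))
		return 0;

	folio = damon_get_folio(pte_pfn(ptent));
	if (!folio)
		return 0;

	if (!folio_isolate_lru(folio))
		goto out;

	list_add(&folio->lru, migration_list);

out:
	folio_put(folio);
	return 0;
}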

> +
> +	folio = damon_get_folio(pte_pfn(ptent));
> +	if (!folio)
> +		return 0;
> +
> +	if (!folio_isolate_lru(folio))
> +		goto out;
> +
> +	list_add(&folio->lru, migration_list);
> +
> +out:
> +	folio_put(folio);
> +	return 0;
> +}
> +
>  /*
>   * Functions for the target validity check and cleanup
>   */
> @@ -653,6 +713,41 @@ static unsigned long damos_madvise(struct damon_target *target,
>  }
>  #endif	/* CONFIG_ADVISE_SYSCALLS */
>  
> +static unsigned long damos_va_migrate(struct damon_target *target,
> +		struct damon_region *r, struct damos *s,
> +		unsigned long *sz_filter_passed)
> +{
> +	LIST_HEAD(folio_list);
> +	struct task_struct *task;
> +	struct mm_struct *mm;
> +	unsigned long applied = 0;
> +	struct mm_walk_ops walk_ops = {
> +		.pmd_entry = damos_va_migrate_pmd_entry,
> +		.pte_entry = damos_va_migrate_pte_entry,
> +		.walk_lock = PGWALK_RDLOCK,
> +	};
> +
> +	task = damon_get_task_struct(target);
> +	if (!task)
> +		return 0;
> +
> +	mm = damon_get_mm(target);
> +	if (!mm)
> +		goto put_task;
> +
> +	mmap_read_lock(mm);
> +	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &folio_list);
> +	mmap_read_unlock(mm);
> +	mmput(mm);
> +
> +	applied = damon_migrate_pages(&folio_list, s->target_nid);
> +	cond_resched();
> +
> +put_task:
> +	put_task_struct(task);

Seems task is not actually used, so this variable and the related code in
this function can be removed?  Or, am I missing something?

> +	return applied * PAGE_SIZE;
> +}
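
If the task reference is indeed unneeded, the function could shrink to
something like below.  Just a sketch, assuming damon_get_mm() alone is
enough to pin the mm, as in the other vaddr primitives:

static unsigned long damos_va_migrate(struct damon_target *target,
		struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	LIST_HEAD(folio_list);
	struct mm_struct *mm;
	unsigned long applied = 0;
	struct mm_walk_ops walk_ops = {
		.pmd_entry = damos_va_migrate_pmd_entry,
		.pte_entry = damos_va_migrate_pte_entry,
		.walk_lock = PGWALK_RDLOCK,
	};

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	/* isolate folios in the region first, then migrate them in a batch */
	mmap_read_lock(mm);
	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &folio_list);
	mmap_read_unlock(mm);
	mmput(mm);

	applied = damon_migrate_pages(&folio_list, s->target_nid);
	cond_resched();

	return applied * PAGE_SIZE;
}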
> +
>  static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
>  		struct damon_target *t, struct damon_region *r,
>  		struct damos *scheme, unsigned long *sz_filter_passed)
> @@ -675,6 +770,9 @@ static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
>  	case DAMOS_NOHUGEPAGE:
>  		madv_action = MADV_NOHUGEPAGE;
>  		break;
> +	case DAMOS_MIGRATE_HOT:
> +	case DAMOS_MIGRATE_COLD:
> +		return damos_va_migrate(t, r, scheme, sz_filter_passed);
>  	case DAMOS_STAT:
>  		return 0;
>  	default:
> @@ -695,6 +793,10 @@ static int damon_va_scheme_score(struct damon_ctx *context,
>  	switch (scheme->action) {
>  	case DAMOS_PAGEOUT:
>  		return damon_cold_score(context, r, scheme);
> +	case DAMOS_MIGRATE_HOT:
> +		return damon_hot_score(context, r, scheme);
> +	case DAMOS_MIGRATE_COLD:
> +		return damon_cold_score(context, r, scheme);
>  	default:
>  		break;
>  	}
> -- 
> 2.43.5


Thanks,
SJ
Re: [RFC PATCH v3 09/13] mm/damon/vaddr: Add vaddr versions of migrate_{hot,cold}
Posted by SeongJae Park 3 months, 1 week ago
On Wed,  2 Jul 2025 16:51:38 -0700 SeongJae Park <sj@kernel.org> wrote:

> On Wed,  2 Jul 2025 15:13:32 -0500 Bijan Tabatabai <bijan311@gmail.com> wrote:
> 
> > From: Bijan Tabatabai <bijantabatab@micron.com>
> > 
> > migrate_{hot,cold} are paddr schemes that are used to migrate hot/cold
> > data to a specified node. However, these schemes are only available when
> > doing physical address monitoring. This patch adds an implementation of
> > them for virtual address monitoring as well.
> > 
> > Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
> > Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
> > Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
> > ---
> >  mm/damon/vaddr.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 102 insertions(+)
> > 
> > diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
> > index 46554e49a478..5cdfdc47c5ff 100644
> > --- a/mm/damon/vaddr.c
> > +++ b/mm/damon/vaddr.c
> > @@ -15,6 +15,7 @@
> >  #include <linux/pagewalk.h>
> >  #include <linux/sched/mm.h>
> >  
> > +#include "../internal.h"
> >  #include "ops-common.h"
> >  
> >  #ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
> > @@ -610,6 +611,65 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
> >  	return max_nr_accesses;
> >  }
> >  
> > +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> > +static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
> > +		unsigned long next, struct mm_walk *walk)
> 
> I'd suggest putting the CONFIG_TRANSPARENT_HUGEPAGE check into the body of this
> function and handling both pmd and pte here, consistent with
> damon_young_pmd_entry().

Ah, unlike damon_young_pmd_entry(), which is for a single address, this is for
walking the range of a given DAMON region, and hence should have a separate pte
entry function.  Please ignore the above comment.

[...]
> > +static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
> > +		unsigned long enxt, struct mm_walk *walk)
> 
> Nit.  s/enxt/next/ ?
> 
> > +{
> > +	struct list_head *migration_list = walk->private;
> > +	struct folio *folio;
> > +	pte_t ptent;
> > +
> > +	ptent = ptep_get(pte);
> > +	if (pte_none(*pte) || !pte_present(*pte))
> > +		return 0;
> 
> Shouldn't we use the cached pte value (ptent) instead of *pte?  I'd suggest
> merging this into damos_va_migrate_pmd_entry(), consistent with
> damon_young_pmd_entry().

Again, I overlooked the fact that this is for walking not just a single
address but a whole range.  Please ignore the latter suggestion.


Thanks,
SJ

[...]