[PATCH v2 2/4] mm/madvise: batch tlb flushes for MADV_FREE

SeongJae Park posted 4 patches 10 months, 1 week ago
There is a newer version of this series
[PATCH v2 2/4] mm/madvise: batch tlb flushes for MADV_FREE
Posted by SeongJae Park 10 months, 1 week ago
MADV_FREE handling for [process_]madvise() flushes tlb for each vma of
each address range.  Update the logic to do tlb flushes in a batched
way.  Initialize an mmu_gather object from do_madvise() and
vector_madvise(), which are the entry level functions for
[process_]madvise(), respectively.  And pass those objects to the
function for per-vma work, via madvise_behavior struct.  Make the
per-vma logic not flush the tlb on its own but just save the tlb
entries to the received mmu_gather object.  Finally, the entry level
functions flush the tlb entries gathered for the entire user
request, at once.

Signed-off-by: SeongJae Park <sj@kernel.org>
---
 mm/madvise.c | 59 +++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 47 insertions(+), 12 deletions(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index 8bcfdd995d18..564095e381b2 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -799,12 +799,13 @@ static const struct mm_walk_ops madvise_free_walk_ops = {
 	.walk_lock		= PGWALK_RDLOCK,
 };
 
-static int madvise_free_single_vma(struct vm_area_struct *vma,
-			unsigned long start_addr, unsigned long end_addr)
+static int madvise_free_single_vma(
+		struct madvise_behavior *behavior, struct vm_area_struct *vma,
+		unsigned long start_addr, unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_notifier_range range;
-	struct mmu_gather tlb;
+	struct mmu_gather *tlb = behavior->tlb;
 
 	/* MADV_FREE works for only anon vma at the moment */
 	if (!vma_is_anonymous(vma))
@@ -820,17 +821,14 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 				range.start, range.end);
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
 
 	mmu_notifier_invalidate_range_start(&range);
-	tlb_start_vma(&tlb, vma);
+	tlb_start_vma(tlb, vma);
 	walk_page_range(vma->vm_mm, range.start, range.end,
-			&madvise_free_walk_ops, &tlb);
-	tlb_end_vma(&tlb, vma);
+			&madvise_free_walk_ops, tlb);
+	tlb_end_vma(tlb, vma);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_finish_mmu(&tlb);
-
 	return 0;
 }
 
@@ -953,7 +951,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
 	if (action == MADV_DONTNEED || action == MADV_DONTNEED_LOCKED)
 		return madvise_dontneed_single_vma(vma, start, end);
 	else if (action == MADV_FREE)
-		return madvise_free_single_vma(vma, start, end);
+		return madvise_free_single_vma(behavior, vma, start, end);
 	else
 		return -EINVAL;
 }
@@ -1626,6 +1624,29 @@ static void madvise_unlock(struct mm_struct *mm, int behavior)
 		mmap_read_unlock(mm);
 }
 
+static bool madvise_batch_tlb_flush(int behavior)
+{
+	switch (behavior) {
+	case MADV_FREE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static void madvise_init_tlb(struct madvise_behavior *madv_behavior,
+		struct mm_struct *mm)
+{
+	if (madvise_batch_tlb_flush(madv_behavior->behavior))
+		tlb_gather_mmu(madv_behavior->tlb, mm);
+}
+
+static void madvise_finish_tlb(struct madvise_behavior *madv_behavior)
+{
+	if (madvise_batch_tlb_flush(madv_behavior->behavior))
+		tlb_finish_mmu(madv_behavior->tlb);
+}
+
 static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior)
 {
 	size_t len;
@@ -1782,14 +1803,20 @@ static int madvise_do_behavior(struct mm_struct *mm,
 int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
 {
 	int error;
-	struct madvise_behavior madv_behavior = {.behavior = behavior};
+	struct mmu_gather tlb;
+	struct madvise_behavior madv_behavior = {
+		.behavior = behavior,
+		.tlb = &tlb,
+	};
 
 	if (madvise_should_skip(start, len_in, behavior, &error))
 		return error;
 	error = madvise_lock(mm, behavior);
 	if (error)
 		return error;
+	madvise_init_tlb(&madv_behavior, mm);
 	error = madvise_do_behavior(mm, start, len_in, &madv_behavior);
+	madvise_finish_tlb(&madv_behavior);
 	madvise_unlock(mm, behavior);
 
 	return error;
@@ -1806,13 +1833,18 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
 {
 	ssize_t ret = 0;
 	size_t total_len;
-	struct madvise_behavior madv_behavior = {.behavior = behavior};
+	struct mmu_gather tlb;
+	struct madvise_behavior madv_behavior = {
+		.behavior = behavior,
+		.tlb = &tlb,
+	};
 
 	total_len = iov_iter_count(iter);
 
 	ret = madvise_lock(mm, behavior);
 	if (ret)
 		return ret;
+	madvise_init_tlb(&madv_behavior, mm);
 
 	while (iov_iter_count(iter)) {
 		unsigned long start = (unsigned long)iter_iov_addr(iter);
@@ -1841,14 +1873,17 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
 			}
 
 			/* Drop and reacquire lock to unwind race. */
+			madvise_finish_tlb(&madv_behavior);
 			madvise_unlock(mm, behavior);
 			madvise_lock(mm, behavior);
+			madvise_init_tlb(&madv_behavior, mm);
 			continue;
 		}
 		if (ret < 0)
 			break;
 		iov_iter_advance(iter, iter_iov_len(iter));
 	}
+	madvise_finish_tlb(&madv_behavior);
 	madvise_unlock(mm, behavior);
 
 	ret = (total_len - iov_iter_count(iter)) ? : ret;
-- 
2.39.5
Re: [PATCH v2 2/4] mm/madvise: batch tlb flushes for MADV_FREE
Posted by Lorenzo Stoakes 10 months ago
On Fri, Apr 04, 2025 at 02:06:58PM -0700, SeongJae Park wrote:
> MADV_FREE handling for [process_]madvise() flushes tlb for each vma of
> each address range.  Update the logic to do tlb flushes in a batched
> way.  Initialize an mmu_gather object from do_madvise() and
> vector_madvise(), which are the entry level functions for
> [process_]madvise(), respectively.  And pass those objects to the
> function for per-vma work, via madvise_behavior struct.  Make the
> per-vma logic not flushes tlb on their own but just saves the tlb
> entries to the received mmu_gather object.  Finally, the entry level
> functions flush the tlb entries that gathered for the entire user
> request, at once.
>
> Signed-off-by: SeongJae Park <sj@kernel.org>

Other than some nitty stuff, and a desire for some careful testing of the
horrid edge case that err... I introduced :P this looks fine, so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

> ---
>  mm/madvise.c | 59 +++++++++++++++++++++++++++++++++++++++++-----------
>  1 file changed, 47 insertions(+), 12 deletions(-)
>
> diff --git a/mm/madvise.c b/mm/madvise.c
> index 8bcfdd995d18..564095e381b2 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -799,12 +799,13 @@ static const struct mm_walk_ops madvise_free_walk_ops = {
>  	.walk_lock		= PGWALK_RDLOCK,
>  };
>
> -static int madvise_free_single_vma(struct vm_area_struct *vma,
> -			unsigned long start_addr, unsigned long end_addr)
> +static int madvise_free_single_vma(
> +		struct madvise_behavior *behavior, struct vm_area_struct *vma,

This is pedantic, but elsewhere you differentiate between int behavior and
struct madvise_behavior by referring to the latter as madv_behavior.

The naming kind of sucks in general though.

But for consistency, let's maybe rename this to madv_behavior, and we can
maybe do a commit later to do a rename across the board?

> +		unsigned long start_addr, unsigned long end_addr)
>  {
>  	struct mm_struct *mm = vma->vm_mm;
>  	struct mmu_notifier_range range;
> -	struct mmu_gather tlb;
> +	struct mmu_gather *tlb = behavior->tlb;
>
>  	/* MADV_FREE works for only anon vma at the moment */
>  	if (!vma_is_anonymous(vma))
> @@ -820,17 +821,14 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
>  				range.start, range.end);
>
>  	lru_add_drain();
> -	tlb_gather_mmu(&tlb, mm);
>  	update_hiwater_rss(mm);
>
>  	mmu_notifier_invalidate_range_start(&range);
> -	tlb_start_vma(&tlb, vma);
> +	tlb_start_vma(tlb, vma);
>  	walk_page_range(vma->vm_mm, range.start, range.end,
> -			&madvise_free_walk_ops, &tlb);
> -	tlb_end_vma(&tlb, vma);
> +			&madvise_free_walk_ops, tlb);
> +	tlb_end_vma(tlb, vma);
>  	mmu_notifier_invalidate_range_end(&range);
> -	tlb_finish_mmu(&tlb);
> -
>  	return 0;
>  }
>
> @@ -953,7 +951,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
>  	if (action == MADV_DONTNEED || action == MADV_DONTNEED_LOCKED)
>  		return madvise_dontneed_single_vma(vma, start, end);
>  	else if (action == MADV_FREE)
> -		return madvise_free_single_vma(vma, start, end);
> +		return madvise_free_single_vma(behavior, vma, start, end);
>  	else
>  		return -EINVAL;

On error paths, do we correctly finish the batched (botched? :P) TLB
operation?

>  }
> @@ -1626,6 +1624,29 @@ static void madvise_unlock(struct mm_struct *mm, int behavior)
>  		mmap_read_unlock(mm);
>  }
>
> +static bool madvise_batch_tlb_flush(int behavior)
> +{
> +	switch (behavior) {
> +	case MADV_FREE:
> +		return true;
> +	default:
> +		return false;
> +	}
> +}
> +
> +static void madvise_init_tlb(struct madvise_behavior *madv_behavior,
> +		struct mm_struct *mm)
> +{
> +	if (madvise_batch_tlb_flush(madv_behavior->behavior))
> +		tlb_gather_mmu(madv_behavior->tlb, mm);
> +}
> +
> +static void madvise_finish_tlb(struct madvise_behavior *madv_behavior)
> +{
> +	if (madvise_batch_tlb_flush(madv_behavior->behavior))
> +		tlb_finish_mmu(madv_behavior->tlb);
> +}

These are nice.

> +
>  static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior)
>  {
>  	size_t len;
> @@ -1782,14 +1803,20 @@ static int madvise_do_behavior(struct mm_struct *mm,
>  int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
>  {
>  	int error;
> -	struct madvise_behavior madv_behavior = {.behavior = behavior};
> +	struct mmu_gather tlb;
> +	struct madvise_behavior madv_behavior = {
> +		.behavior = behavior,
> +		.tlb = &tlb,
> +	};
>
>  	if (madvise_should_skip(start, len_in, behavior, &error))
>  		return error;
>  	error = madvise_lock(mm, behavior);
>  	if (error)
>  		return error;
> +	madvise_init_tlb(&madv_behavior, mm);
>  	error = madvise_do_behavior(mm, start, len_in, &madv_behavior);
> +	madvise_finish_tlb(&madv_behavior);
>  	madvise_unlock(mm, behavior);
>
>  	return error;
> @@ -1806,13 +1833,18 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
>  {
>  	ssize_t ret = 0;
>  	size_t total_len;
> -	struct madvise_behavior madv_behavior = {.behavior = behavior};
> +	struct mmu_gather tlb;
> +	struct madvise_behavior madv_behavior = {
> +		.behavior = behavior,
> +		.tlb = &tlb,
> +	};

Again the naming is kinda yucky, but let's just yeah for now stick with
'madv_behavior' for values of this helper struct and 'behavior' for the
actual int value, and we can revisit that later.

>
>  	total_len = iov_iter_count(iter);
>
>  	ret = madvise_lock(mm, behavior);
>  	if (ret)
>  		return ret;
> +	madvise_init_tlb(&madv_behavior, mm);
>
>  	while (iov_iter_count(iter)) {
>  		unsigned long start = (unsigned long)iter_iov_addr(iter);
> @@ -1841,14 +1873,17 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
>  			}
>
>  			/* Drop and reacquire lock to unwind race. */
> +			madvise_finish_tlb(&madv_behavior);
>  			madvise_unlock(mm, behavior);
>  			madvise_lock(mm, behavior);
> +			madvise_init_tlb(&madv_behavior, mm);
>  			continue;

Have you found a way in which to test this? Perhaps force this case and
find a means of asserting the TLB flushing behaves as expected? I think
we're ok from the logic, but it's such a tricky one it'd be good to find a
means of doing so, albeit in a manual way.

>  		}
>  		if (ret < 0)
>  			break;
>  		iov_iter_advance(iter, iter_iov_len(iter));
>  	}
> +	madvise_finish_tlb(&madv_behavior);
>  	madvise_unlock(mm, behavior);
>
>  	ret = (total_len - iov_iter_count(iter)) ? : ret;
> --
> 2.39.5
Re: [PATCH v2 2/4] mm/madvise: batch tlb flushes for MADV_FREE
Posted by SeongJae Park 10 months ago
On Tue, 8 Apr 2025 13:58:18 +0100 Lorenzo Stoakes <lorenzo.stoakes@oracle.com> wrote:

> On Fri, Apr 04, 2025 at 02:06:58PM -0700, SeongJae Park wrote:
> > MADV_FREE handling for [process_]madvise() flushes tlb for each vma of
> > each address range.  Update the logic to do tlb flushes in a batched
> > way.  Initialize an mmu_gather object from do_madvise() and
> > vector_madvise(), which are the entry level functions for
> > [process_]madvise(), respectively.  And pass those objects to the
> > function for per-vma work, via madvise_behavior struct.  Make the
> > per-vma logic not flushes tlb on their own but just saves the tlb
> > entries to the received mmu_gather object.  Finally, the entry level
> > functions flush the tlb entries that gathered for the entire user
> > request, at once.
> >
> > Signed-off-by: SeongJae Park <sj@kernel.org>
> 
> Other than some nitty stuff, and a desire for some careful testing of the
> horrid edge case that err... I introduced :P this looks fine, so:
> 
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

Thank you for your kind review!  I will make the next revision following your
suggestions as I answered below.

> 
> > ---
> >  mm/madvise.c | 59 +++++++++++++++++++++++++++++++++++++++++-----------
> >  1 file changed, 47 insertions(+), 12 deletions(-)
> >
> > diff --git a/mm/madvise.c b/mm/madvise.c
> > index 8bcfdd995d18..564095e381b2 100644
> > --- a/mm/madvise.c
> > +++ b/mm/madvise.c
> > @@ -799,12 +799,13 @@ static const struct mm_walk_ops madvise_free_walk_ops = {
> >  	.walk_lock		= PGWALK_RDLOCK,
> >  };
> >
> > -static int madvise_free_single_vma(struct vm_area_struct *vma,
> > -			unsigned long start_addr, unsigned long end_addr)
> > +static int madvise_free_single_vma(
> > +		struct madvise_behavior *behavior, struct vm_area_struct *vma,
> 
> This is pedantic, but elsewhere you differentiate between int behavior and
> struct madvise_behavior by referringt to the later as madv_behavior.
> 
> The naming kind of sucks in general though.
> 
> But for consistency, let's maybe rename this to madv_behavior, and we can
> maybe do a commit later to do a rename across the board?

I completely agree.  I will rename so in the next spin.

> 
> > +		unsigned long start_addr, unsigned long end_addr)
> >  {
> >  	struct mm_struct *mm = vma->vm_mm;
> >  	struct mmu_notifier_range range;
> > -	struct mmu_gather tlb;
> > +	struct mmu_gather *tlb = behavior->tlb;
> >
> >  	/* MADV_FREE works for only anon vma at the moment */
> >  	if (!vma_is_anonymous(vma))
[...]
> > @@ -953,7 +951,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
> >  	if (action == MADV_DONTNEED || action == MADV_DONTNEED_LOCKED)
> >  		return madvise_dontneed_single_vma(vma, start, end);
> >  	else if (action == MADV_FREE)
> > -		return madvise_free_single_vma(vma, start, end);
> > +		return madvise_free_single_vma(behavior, vma, start, end);
> >  	else
> >  		return -EINVAL;
> 
> On error paths, do we correctly finish the batched (botched? :P) TLB
> operation?

Yes, the change calls tlb_finish_mmu() and tlb_gather_mmu() as needed in the
error paths.  Of course I might have forgotten to call those in some edge cases.
Please let me know if you find such mistakes.

> 
> >  }
[...]
> > @@ -1841,14 +1873,17 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
> >  			}
> >
> >  			/* Drop and reacquire lock to unwind race. */
> > +			madvise_finish_tlb(&madv_behavior);
> >  			madvise_unlock(mm, behavior);
> >  			madvise_lock(mm, behavior);
> > +			madvise_init_tlb(&madv_behavior, mm);
> >  			continue;
> 
> Have you found a way in which to test this? Perhaps force this case and
> find a means of asserting the TLB flushing behaves as expected? I think
> we're ok from the logic, but it's such a tricky one it'd be good to find a
> means of doing so, albeit in a manual way.

No, unfortunately I haven't found a good way to test this case.

> 
> >  		}
> >  		if (ret < 0)
> >  			break;
> >  		iov_iter_advance(iter, iter_iov_len(iter));
> >  	}
> > +	madvise_finish_tlb(&madv_behavior);
> >  	madvise_unlock(mm, behavior);
> >
> >  	ret = (total_len - iov_iter_count(iter)) ? : ret;
> > --
> > 2.39.5


Thanks,
SJ