From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
Split the code that finds, locks and verifies VMA from mfill_atomic()
into a helper function.
This function will be used later during refactoring of
mfill_atomic_pte_copy().
Add a counterpart mfill_put_vma() helper that unlocks the VMA and
releases map_changing_lock.
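
For example, callers then pair the helpers roughly like this (illustration
only, not part of the diff below):

	err = mfill_get_vma(&state);
	if (err)
		goto out;

	/* ... fill memory in state.vma ... */

	mfill_put_vma(&state);
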
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
mm/userfaultfd.c | 124 ++++++++++++++++++++++++++++-------------------
1 file changed, 73 insertions(+), 51 deletions(-)

diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 9dd285b13f3b..45d8f04aaf4f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -157,6 +157,73 @@ static void uffd_mfill_unlock(struct vm_area_struct *vma)
}
#endif

+static void mfill_put_vma(struct mfill_state *state)
+{
+ up_read(&state->ctx->map_changing_lock);
+ uffd_mfill_unlock(state->vma);
+ state->vma = NULL;
+}
+
+static int mfill_get_vma(struct mfill_state *state)
+{
+ struct userfaultfd_ctx *ctx = state->ctx;
+ uffd_flags_t flags = state->flags;
+ struct vm_area_struct *dst_vma;
+ int err;
+
+ /*
+ * Make sure the vma is not shared, that the dst range is
+ * both valid and fully within a single existing vma.
+ */
+ dst_vma = uffd_mfill_lock(ctx->mm, state->dst_start, state->len);
+ if (IS_ERR(dst_vma))
+ return PTR_ERR(dst_vma);
+
+ /*
+ * If memory mappings are changing because of non-cooperative
+ * operation (e.g. mremap) running in parallel, bail out and
+ * request the user to retry later
+ */
+ down_read(&ctx->map_changing_lock);
+ err = -EAGAIN;
+ if (atomic_read(&ctx->mmap_changing))
+ goto out_unlock;
+
+ err = -EINVAL;
+
+ /*
+ * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
+ * it will overwrite vm_ops, so vma_is_anonymous must return false.
+ */
+ if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
+ dst_vma->vm_flags & VM_SHARED))
+ goto out_unlock;
+
+ /*
+ * validate 'mode' now that we know the dst_vma: don't allow
+ * a wrprotect copy if the userfaultfd didn't register as WP.
+ */
+ if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
+ goto out_unlock;
+
+ if (is_vm_hugetlb_page(dst_vma))
+ goto out;
+
+ if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
+ goto out_unlock;
+ if (!vma_is_shmem(dst_vma) &&
+ uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
+ goto out_unlock;
+
+out:
+ state->vma = dst_vma;
+ return 0;
+
+out_unlock:
+ mfill_put_vma(state);
+ return err;
+}
+
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
@@ -768,8 +835,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
.src_addr = src_start,
.dst_addr = dst_start,
};
- struct mm_struct *dst_mm = ctx->mm;
- struct vm_area_struct *dst_vma;
long copied = 0;
ssize_t err;

@@ -784,57 +849,17 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
VM_WARN_ON_ONCE(dst_start + len <= dst_start);

retry:
- /*
- * Make sure the vma is not shared, that the dst range is
- * both valid and fully within a single existing vma.
- */
- dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
- if (IS_ERR(dst_vma)) {
- err = PTR_ERR(dst_vma);
+ err = mfill_get_vma(&state);
+ if (err)
goto out;
- }
-
- /*
- * If memory mappings are changing because of non-cooperative
- * operation (e.g. mremap) running in parallel, bail out and
- * request the user to retry later
- */
- down_read(&ctx->map_changing_lock);
- err = -EAGAIN;
- if (atomic_read(&ctx->mmap_changing))
- goto out_unlock;
-
- err = -EINVAL;
- /*
- * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
- * it will overwrite vm_ops, so vma_is_anonymous must return false.
- */
- if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
- dst_vma->vm_flags & VM_SHARED))
- goto out_unlock;
-
- /*
- * validate 'mode' now that we know the dst_vma: don't allow
- * a wrprotect copy if the userfaultfd didn't register as WP.
- */
- if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
- goto out_unlock;

/*
* If this is a HUGETLB vma, pass off to appropriate routine
*/
- if (is_vm_hugetlb_page(dst_vma))
- return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
+ if (is_vm_hugetlb_page(state.vma))
+ return mfill_atomic_hugetlb(ctx, state.vma, dst_start,
src_start, len, flags);

- if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
- goto out_unlock;
- if (!vma_is_shmem(dst_vma) &&
- uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
- goto out_unlock;
-
- state.vma = dst_vma;
-
while (state.src_addr < src_start + len) {
VM_WARN_ON_ONCE(state.dst_addr >= dst_start + len);

@@ -853,8 +878,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
if (unlikely(err == -ENOENT)) {
void *kaddr;

- up_read(&ctx->map_changing_lock);
- uffd_mfill_unlock(state.vma);
+ mfill_put_vma(&state);
VM_WARN_ON_ONCE(!state.folio);

kaddr = kmap_local_folio(state.folio, 0);
@@ -883,9 +907,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
break;
}

-out_unlock:
- up_read(&ctx->map_changing_lock);
- uffd_mfill_unlock(state.vma);
+ mfill_put_vma(&state);
out:
if (state.folio)
folio_put(state.folio);
--
2.51.0

Hi, Mike,

On Tue, Jan 27, 2026 at 09:29:23PM +0200, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
>
> Split the code that finds, locks and verifies VMA from mfill_atomic()
> into a helper function.
>
> This function will be used later during refactoring of
> mfill_atomic_pte_copy().
>
> Add a counterpart mfill_put_vma() helper that unlocks the VMA and
> releases map_changing_lock.
>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---
> mm/userfaultfd.c | 124 ++++++++++++++++++++++++++++-------------------
> 1 file changed, 73 insertions(+), 51 deletions(-)
>
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index 9dd285b13f3b..45d8f04aaf4f 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -157,6 +157,73 @@ static void uffd_mfill_unlock(struct vm_area_struct *vma)
> }
> #endif
>
> +static void mfill_put_vma(struct mfill_state *state)
> +{
> + up_read(&state->ctx->map_changing_lock);
> + uffd_mfill_unlock(state->vma);
> + state->vma = NULL;
> +}
> +
> +static int mfill_get_vma(struct mfill_state *state)
> +{
> + struct userfaultfd_ctx *ctx = state->ctx;
> + uffd_flags_t flags = state->flags;
> + struct vm_area_struct *dst_vma;
> + int err;
> +
> + /*
> + * Make sure the vma is not shared, that the dst range is
> + * both valid and fully within a single existing vma.
> + */
> + dst_vma = uffd_mfill_lock(ctx->mm, state->dst_start, state->len);
> + if (IS_ERR(dst_vma))
> + return PTR_ERR(dst_vma);
> +
> + /*
> + * If memory mappings are changing because of non-cooperative
> + * operation (e.g. mremap) running in parallel, bail out and
> + * request the user to retry later
> + */
> + down_read(&ctx->map_changing_lock);
> + err = -EAGAIN;
> + if (atomic_read(&ctx->mmap_changing))
> + goto out_unlock;
> +
> + err = -EINVAL;
> +
> + /*
> + * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
> + * it will overwrite vm_ops, so vma_is_anonymous must return false.
> + */
> + if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
> + dst_vma->vm_flags & VM_SHARED))
> + goto out_unlock;
> +
> + /*
> + * validate 'mode' now that we know the dst_vma: don't allow
> + * a wrprotect copy if the userfaultfd didn't register as WP.
> + */
> + if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
> + goto out_unlock;
> +
> + if (is_vm_hugetlb_page(dst_vma))
> + goto out;
> +
> + if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
> + goto out_unlock;
> + if (!vma_is_shmem(dst_vma) &&
> + uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
> + goto out_unlock;

IMHO it's a bit weird to check for vma permissions in a get_vma() function.

Also, in the follow-up patch it'll also be reused in
mfill_copy_folio_retry(), which doesn't need to check vma permissions.

Maybe we can introduce mfill_vma_check() for these two checks? Then we can
also drop the slightly weird is_vm_hugetlb_page() check (and "out" label)
above.
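
Something like this, perhaps (a rough sketch, untested):

	static int mfill_vma_check(struct vm_area_struct *dst_vma,
				   uffd_flags_t flags)
	{
		/* the two mode/permission checks lifted out of mfill_get_vma() */
		if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
			return -EINVAL;
		if (!vma_is_shmem(dst_vma) &&
		    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
			return -EINVAL;
		return 0;
	}
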
> +
> +out:
> + state->vma = dst_vma;
> + return 0;
> +
> +out_unlock:
> + mfill_put_vma(state);
> + return err;
> +}
> +
> static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
> {
> pgd_t *pgd;
> @@ -768,8 +835,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
> .src_addr = src_start,
> .dst_addr = dst_start,
> };
> - struct mm_struct *dst_mm = ctx->mm;
> - struct vm_area_struct *dst_vma;
> long copied = 0;
> ssize_t err;
>
> @@ -784,57 +849,17 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
> VM_WARN_ON_ONCE(dst_start + len <= dst_start);
>
> retry:
> - /*
> - * Make sure the vma is not shared, that the dst range is
> - * both valid and fully within a single existing vma.
> - */
> - dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
> - if (IS_ERR(dst_vma)) {
> - err = PTR_ERR(dst_vma);
> + err = mfill_get_vma(&state);
> + if (err)
> goto out;
> - }
> -
> - /*
> - * If memory mappings are changing because of non-cooperative
> - * operation (e.g. mremap) running in parallel, bail out and
> - * request the user to retry later
> - */
> - down_read(&ctx->map_changing_lock);
> - err = -EAGAIN;
> - if (atomic_read(&ctx->mmap_changing))
> - goto out_unlock;
> -
> - err = -EINVAL;
> - /*
> - * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
> - * it will overwrite vm_ops, so vma_is_anonymous must return false.
> - */
> - if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
> - dst_vma->vm_flags & VM_SHARED))
> - goto out_unlock;
> -
> - /*
> - * validate 'mode' now that we know the dst_vma: don't allow
> - * a wrprotect copy if the userfaultfd didn't register as WP.
> - */
> - if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
> - goto out_unlock;
>
> /*
> * If this is a HUGETLB vma, pass off to appropriate routine
> */
> - if (is_vm_hugetlb_page(dst_vma))
> - return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
> + if (is_vm_hugetlb_page(state.vma))
> + return mfill_atomic_hugetlb(ctx, state.vma, dst_start,
> src_start, len, flags);
>
> - if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
> - goto out_unlock;
> - if (!vma_is_shmem(dst_vma) &&
> - uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
> - goto out_unlock;
> -
> - state.vma = dst_vma;
> -
> while (state.src_addr < src_start + len) {
> VM_WARN_ON_ONCE(state.dst_addr >= dst_start + len);
>
> @@ -853,8 +878,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
> if (unlikely(err == -ENOENT)) {
> void *kaddr;
>
> - up_read(&ctx->map_changing_lock);
> - uffd_mfill_unlock(state.vma);
> + mfill_put_vma(&state);
> VM_WARN_ON_ONCE(!state.folio);
>
> kaddr = kmap_local_folio(state.folio, 0);
> @@ -883,9 +907,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
> break;
> }
>
> -out_unlock:
> - up_read(&ctx->map_changing_lock);
> - uffd_mfill_unlock(state.vma);
> + mfill_put_vma(&state);
> out:
> if (state.folio)
> folio_put(state.folio);
> --
> 2.51.0
>
--
Peter Xu

Hi Peter,

On Mon, Feb 02, 2026 at 04:49:09PM -0500, Peter Xu wrote:
> Hi, Mike,
>
> On Tue, Jan 27, 2026 at 09:29:23PM +0200, Mike Rapoport wrote:
> > From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> >
> > Split the code that finds, locks and verifies VMA from mfill_atomic()
> > into a helper function.
> >
> > This function will be used later during refactoring of
> > mfill_atomic_pte_copy().
> >
> > Add a counterpart mfill_put_vma() helper that unlocks the VMA and
> > releases map_changing_lock.
> >
> > Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> > ---
> > mm/userfaultfd.c | 124 ++++++++++++++++++++++++++++-------------------
> > 1 file changed, 73 insertions(+), 51 deletions(-)
> >
> > diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> > index 9dd285b13f3b..45d8f04aaf4f 100644
> > --- a/mm/userfaultfd.c
> > +++ b/mm/userfaultfd.c
> > @@ -157,6 +157,73 @@ static void uffd_mfill_unlock(struct vm_area_struct *vma)
> > }
> > #endif
> >
> > +static void mfill_put_vma(struct mfill_state *state)
> > +{
> > + up_read(&state->ctx->map_changing_lock);
> > + uffd_mfill_unlock(state->vma);
> > + state->vma = NULL;
> > +}
> > +
> > +static int mfill_get_vma(struct mfill_state *state)
> > +{
> > + struct userfaultfd_ctx *ctx = state->ctx;
> > + uffd_flags_t flags = state->flags;
> > + struct vm_area_struct *dst_vma;
> > + int err;
> > +
> > + /*
> > + * Make sure the vma is not shared, that the dst range is
> > + * both valid and fully within a single existing vma.
> > + */
> > + dst_vma = uffd_mfill_lock(ctx->mm, state->dst_start, state->len);
> > + if (IS_ERR(dst_vma))
> > + return PTR_ERR(dst_vma);
> > +
> > + /*
> > + * If memory mappings are changing because of non-cooperative
> > + * operation (e.g. mremap) running in parallel, bail out and
> > + * request the user to retry later
> > + */
> > + down_read(&ctx->map_changing_lock);
> > + err = -EAGAIN;
> > + if (atomic_read(&ctx->mmap_changing))
> > + goto out_unlock;
> > +
> > + err = -EINVAL;
> > +
> > + /*
> > + * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
> > + * it will overwrite vm_ops, so vma_is_anonymous must return false.
> > + */
> > + if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
> > + dst_vma->vm_flags & VM_SHARED))
> > + goto out_unlock;
> > +
> > + /*
> > + * validate 'mode' now that we know the dst_vma: don't allow
> > + * a wrprotect copy if the userfaultfd didn't register as WP.
> > + */
> > + if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
> > + goto out_unlock;
> > +
> > + if (is_vm_hugetlb_page(dst_vma))
> > + goto out;
> > +
> > + if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
> > + goto out_unlock;
> > + if (!vma_is_shmem(dst_vma) &&
> > + uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
> > + goto out_unlock;
>
> IMHO it's a bit weird to check for vma permissions in a get_vma() function.
>
> Also, in the follow-up patch it'll also be reused in
> mfill_copy_folio_retry(), which doesn't need to check vma permissions.
>
> Maybe we can introduce mfill_vma_check() for these two checks? Then we can
> also drop the slightly weird is_vm_hugetlb_page() check (and "out" label)
> above.

This version of get_vma() keeps the checks exactly as they were when we
were retrying after dropping the lock, and I prefer to have them this way
to begin with.
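
That is, the checks are re-run every time we re-take the vma, roughly
(heavily simplified from the code above):

	retry:
		err = mfill_get_vma(&state);	/* runs all the vma checks */
		if (err)
			goto out;
		...
		if (unlikely(err == -ENOENT)) {
			/* drop the locks to copy the folio, then re-check */
			mfill_put_vma(&state);
			...
			goto retry;
		}
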
Later we can optimize this further, once the dust settles after these
changes.

--
Sincerely yours,
Mike.