From: Mike Rapoport
To: linux-mm@kvack.org
Subject: [PATCH 1/5] userfaultfd: move vma_can_userfault out of line
Date: Sun, 23 Nov 2025 12:27:03 +0200
Message-ID: <20251123102707.559422-2-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

vma_can_userfault() has grown pretty big and it is not called on a
performance-critical path. Move it out of line.

No functional changes.
Reviewed-by: David Hildenbrand (Red Hat)
Signed-off-by: Mike Rapoport (Microsoft)
---
 include/linux/userfaultfd_k.h | 36 ++---------------------------------
 mm/userfaultfd.c              | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+), 34 deletions(-)

diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index c0e716aec26a..e4f43e7b063f 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -208,40 +208,8 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
 	return vma->vm_flags & __VM_UFFD_FLAGS;
 }
 
-static inline bool vma_can_userfault(struct vm_area_struct *vma,
-				     vm_flags_t vm_flags,
-				     bool wp_async)
-{
-	vm_flags &= __VM_UFFD_FLAGS;
-
-	if (vma->vm_flags & VM_DROPPABLE)
-		return false;
-
-	if ((vm_flags & VM_UFFD_MINOR) &&
-	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
-		return false;
-
-	/*
-	 * If wp async enabled, and WP is the only mode enabled, allow any
-	 * memory type.
-	 */
-	if (wp_async && (vm_flags == VM_UFFD_WP))
-		return true;
-
-#ifndef CONFIG_PTE_MARKER_UFFD_WP
-	/*
-	 * If user requested uffd-wp but not enabled pte markers for
-	 * uffd-wp, then shmem & hugetlbfs are not supported but only
-	 * anonymous.
-	 */
-	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
-		return false;
-#endif
-
-	/* By default, allow any of anon|shmem|hugetlb */
-	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
-	       vma_is_shmem(vma);
-}
+bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
+		       bool wp_async);
 
 static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
 {
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index af61b95c89e4..8dc964389b0d 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1977,6 +1977,40 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
 	return moved ? moved : err;
 }
 
+bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
+		       bool wp_async)
+{
+	vm_flags &= __VM_UFFD_FLAGS;
+
+	if (vma->vm_flags & VM_DROPPABLE)
+		return false;
+
+	if ((vm_flags & VM_UFFD_MINOR) &&
+	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
+		return false;
+
+	/*
+	 * If wp async enabled, and WP is the only mode enabled, allow any
+	 * memory type.
+	 */
+	if (wp_async && (vm_flags == VM_UFFD_WP))
+		return true;
+
+#ifndef CONFIG_PTE_MARKER_UFFD_WP
+	/*
+	 * If user requested uffd-wp but not enabled pte markers for
+	 * uffd-wp, then shmem & hugetlbfs are not supported but only
+	 * anonymous.
+	 */
+	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
+		return false;
+#endif
+
+	/* By default, allow any of anon|shmem|hugetlb */
+	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
+	       vma_is_shmem(vma);
+}
+
 static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
 				     vm_flags_t vm_flags)
 {
-- 
2.50.1
Howlett" , Lorenzo Stoakes , Michal Hocko , Mike Rapoport , Nikita Kalyazin , Paolo Bonzini , Peter Xu , Sean Christopherson , Shuah Khan , Suren Baghdasaryan , Vlastimil Babka , linux-kernel@vger.kernel.org, kvm@vger.kernel.org, linux-kselftest@vger.kernel.org Subject: [PATCH 2/5] userfaultfd, shmem: use a VMA callback to handle UFFDIO_CONTINUE Date: Sun, 23 Nov 2025 12:27:04 +0200 Message-ID: <20251123102707.559422-3-rppt@kernel.org> X-Mailer: git-send-email 2.50.1 In-Reply-To: <20251123102707.559422-1-rppt@kernel.org> References: <20251123102707.559422-1-rppt@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Mike Rapoport (Microsoft)" When userspace resolves a page fault in a shmem VMA with UFFDIO_CONTINUE it needs to get a folio that already exists in the pagecache backing that VMA. Instead of using shmem_get_folio() for that, add a get_pagecache_folio() method to 'struct vm_operations_struct' that will return a folio if it exists in the VMA's pagecache at given pgoff. Implement get_pagecache_folio() method for shmem and slightly refactor userfaultfd's mfill_atomic() and mfill_atomic_pte_continue() to support this new API. Signed-off-by: Mike Rapoport (Microsoft) --- include/linux/mm.h | 9 ++++++++ mm/shmem.c | 19 +++++++++++++++++ mm/userfaultfd.c | 52 +++++++++++++++++++++++++++++----------------- 3 files changed, 61 insertions(+), 19 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 7c79b3369b82..a5747c306cc2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -690,6 +690,15 @@ struct vm_operations_struct { struct page *(*find_normal_page)(struct vm_area_struct *vma, unsigned long addr); #endif /* CONFIG_FIND_NORMAL_PAGE */ +#ifdef CONFIG_USERFAULTFD + /* + * Called by userfault to resolve UFFDIO_CONTINUE request. + * Should return the folio found at pgoff in the VMA's pagecache if it + * exists or ERR_PTR otherwise. + * The returned folio is locked and with reference held. 
+	 */
+	struct folio *(*get_shared_folio)(struct inode *inode, pgoff_t pgoff);
+#endif
 };
 
 #ifdef CONFIG_NUMA_BALANCING
diff --git a/mm/shmem.c b/mm/shmem.c
index 58701d14dd96..aaa21bb60f51 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3263,6 +3263,19 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 	shmem_inode_unacct_blocks(inode, 1);
 	return ret;
 }
+
+static struct folio *shmem_get_shared_folio(struct inode *inode,
+					    pgoff_t pgoff)
+{
+	struct folio *folio;
+	int err;
+
+	err = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
+	if (err)
+		return ERR_PTR(err);
+
+	return folio;
+}
 #endif /* CONFIG_USERFAULTFD */
 
 #ifdef CONFIG_TMPFS
@@ -5295,6 +5308,9 @@ static const struct vm_operations_struct shmem_vm_ops = {
 	.set_policy = shmem_set_policy,
 	.get_policy = shmem_get_policy,
 #endif
+#ifdef CONFIG_USERFAULTFD
+	.get_shared_folio = shmem_get_shared_folio,
+#endif
 };
 
 static const struct vm_operations_struct shmem_anon_vm_ops = {
@@ -5304,6 +5320,9 @@ static const struct vm_operations_struct shmem_anon_vm_ops = {
 	.set_policy = shmem_set_policy,
 	.get_policy = shmem_get_policy,
 #endif
+#ifdef CONFIG_USERFAULTFD
+	.get_shared_folio = shmem_get_shared_folio,
+#endif
 };
 
 int shmem_init_fs_context(struct fs_context *fc)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 8dc964389b0d..04563f88aab5 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -388,15 +388,12 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
 	struct page *page;
 	int ret;
 
-	ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
+	folio = dst_vma->vm_ops->get_shared_folio(inode, pgoff);
 	/* Our caller expects us to return -EFAULT if we failed to find folio */
-	if (ret == -ENOENT)
-		ret = -EFAULT;
-	if (ret)
-		goto out;
-	if (!folio) {
-		ret = -EFAULT;
-		goto out;
+	if (IS_ERR_OR_NULL(folio)) {
+		if (PTR_ERR(folio) == -ENOENT || !folio)
+			return -EFAULT;
+		return PTR_ERR(folio);
 	}
 
 	page = folio_file_page(folio, pgoff);
@@ -411,13 +408,12 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
 		goto out_release;
 
 	folio_unlock(folio);
-	ret = 0;
-out:
-	return ret;
+	return 0;
+
 out_release:
 	folio_unlock(folio);
 	folio_put(folio);
-	goto out;
+	return ret;
 }
 
 /* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
@@ -694,6 +690,15 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
 	return err;
 }
 
+static __always_inline bool vma_can_mfill_atomic(struct vm_area_struct *vma,
+						 uffd_flags_t flags)
+{
+	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
+		return vma->vm_ops && vma->vm_ops->get_shared_folio;
+
+	return vma_is_anonymous(vma) || vma_is_shmem(vma);
+}
+
 static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
 					    unsigned long dst_start,
 					    unsigned long src_start,
@@ -766,10 +771,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
 		return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
 					    src_start, len, flags);
 
-	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
-		goto out_unlock;
-	if (!vma_is_shmem(dst_vma) &&
-	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
+	if (!vma_can_mfill_atomic(dst_vma, flags))
 		goto out_unlock;
 
 	while (src_addr < src_start + len) {
@@ -1985,9 +1987,21 @@ bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
 	if (vma->vm_flags & VM_DROPPABLE)
 		return false;
 
-	if ((vm_flags & VM_UFFD_MINOR) &&
-	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
-		return false;
+	if (vm_flags & VM_UFFD_MINOR) {
+		/*
+		 * If only MINOR mode is requested and we can request an
+		 * existing folio from VMA's page cache, allow it
+		 */
+		if (vm_flags == VM_UFFD_MINOR && vma->vm_ops &&
+		    vma->vm_ops->get_shared_folio)
+			return true;
+		/*
+		 * Only hugetlb and shmem can support MINOR mode in combination
+		 * with other modes
+		 */
+		if (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma))
+			return false;
+	}
 
 	/*
 	 * If wp async enabled, and WP is the only mode enabled, allow any
-- 
2.50.1
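For illustration only, not part of the series: a minimal sketch of how
another in-kernel filesystem could opt in to UFFDIO_CONTINUE through the
new callback. The myfs_* names are hypothetical; the contract follows the
comment added to struct vm_operations_struct above: return the folio at
pgoff locked and with a reference held, or an ERR_PTR() on failure
(mfill_atomic_pte_continue() maps -ENOENT to -EFAULT for userspace).

/* Hypothetical sketch, not from the series. */
static struct folio *myfs_get_shared_folio(struct inode *inode, pgoff_t pgoff)
{
	/*
	 * Look up an existing pagecache folio without allocating;
	 * filemap_lock_folio() returns it locked with a reference held,
	 * or ERR_PTR(-ENOENT) when nothing is cached at pgoff.
	 */
	return filemap_lock_folio(inode->i_mapping, pgoff);
}

static const struct vm_operations_struct myfs_vm_ops = {
	.fault		  = filemap_fault,
#ifdef CONFIG_USERFAULTFD
	.get_shared_folio = myfs_get_shared_folio,
#endif
};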
From: Mike Rapoport
To: linux-mm@kvack.org
Subject: [PATCH 3/5] mm: introduce VM_FAULT_UFFD_MINOR fault reason
Date: Sun, 23 Nov 2025 12:27:05 +0200
Message-ID: <20251123102707.559422-4-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

When a VMA is registered with userfaultfd in minor mode, its ->fault()
method should check whether a folio exists in the page cache and, if it
does, call handle_userfault(VM_UFFD_MINOR).

Instead of calling handle_userfault() directly from a specific ->fault()
implementation, introduce a new fault reason, VM_FAULT_UFFD_MINOR, that
notifies the core page fault handler that it should call
handle_userfault(VM_UFFD_MINOR) to complete the page fault.

Replace the call to handle_userfault(VM_UFFD_MINOR) in shmem with the
new VM_FAULT_UFFD_MINOR.
Suggested-by: David Hildenbrand (Red Hat)
Signed-off-by: Mike Rapoport (Microsoft)
---
 include/linux/mm_types.h | 3 +++
 mm/memory.c              | 2 ++
 mm/shmem.c               | 2 +-
 3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 90e5790c318f..eb135369940f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1523,6 +1523,8 @@ typedef __bitwise unsigned int vm_fault_t;
  *			fsync() to complete (for synchronous page faults
  *			in DAX)
  * @VM_FAULT_COMPLETED:	->fault completed, meanwhile mmap lock released
+ * @VM_FAULT_UFFD_MINOR: ->fault did not modify page tables and needs
+ *			handle_userfault(VM_UFFD_MINOR) to complete
  * @VM_FAULT_HINDEX_MASK: mask HINDEX value
  *
  */
@@ -1540,6 +1542,7 @@ enum vm_fault_reason {
 	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
 	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
 	VM_FAULT_COMPLETED      = (__force vm_fault_t)0x004000,
+	VM_FAULT_UFFD_MINOR     = (__force vm_fault_t)0x008000,
 	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
 };
 
diff --git a/mm/memory.c b/mm/memory.c
index b59ae7ce42eb..94acbac8cefb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5279,6 +5279,8 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
 	}
 
 	ret = vma->vm_ops->fault(vmf);
+	if (unlikely(ret & VM_FAULT_UFFD_MINOR))
+		return handle_userfault(vmf, VM_UFFD_MINOR);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
 			    VM_FAULT_DONE_COW)))
 		return ret;
diff --git a/mm/shmem.c b/mm/shmem.c
index aaa21bb60f51..6dcb73b52bcc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2461,7 +2461,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 	if (folio && vma && userfaultfd_minor(vma)) {
 		if (!xa_is_value(folio))
 			folio_put(folio);
-		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
+		*fault_type = VM_FAULT_UFFD_MINOR;
 		return 0;
 	}
 
-- 
2.50.1
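A sketch of the ->fault() pattern the new reason enables, modeled on the
guest_memfd change in the next patch (myfs_fault() and its lookup are
hypothetical): the handler drops its folio references and returns
VM_FAULT_UFFD_MINOR, and __do_fault() completes the fault via
handle_userfault(vmf, VM_UFFD_MINOR).

/* Hypothetical sketch, not from the series. */
static vm_fault_t myfs_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct folio *folio;

	folio = filemap_lock_folio(inode->i_mapping, vmf->pgoff);
	if (IS_ERR(folio))
		return VM_FAULT_SIGBUS;

	if (userfaultfd_minor(vmf->vma)) {
		/*
		 * A folio exists but the VMA is registered in minor mode:
		 * release it and let the core handler notify userspace.
		 */
		folio_unlock(folio);
		folio_put(folio);
		return VM_FAULT_UFFD_MINOR;
	}

	vmf->page = folio_file_page(folio, vmf->pgoff);
	return VM_FAULT_LOCKED;
}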
From: Mike Rapoport
To: linux-mm@kvack.org
Subject: [PATCH 4/5] guest_memfd: add support for userfaultfd minor mode
Date: Sun, 23 Nov 2025 12:27:06 +0200
Message-ID: <20251123102707.559422-5-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

userfaultfd notifications about minor page faults are used for live
migration and snapshotting of VMs with memory backed by shared
hugetlbfs or tmpfs mappings, as described in detail in commit
7677f7fd8be7 ("userfaultfd: add minor fault registration mode").

To use the same mechanism for VMs that use guest_memfd to map their
memory, guest_memfd should support userfaultfd minor mode.

Extend the ->fault() method of guest_memfd with the ability to notify
the core page fault handler that a page fault requires
handle_userfault(VM_UFFD_MINOR) to complete, and add an implementation
of ->get_shared_folio() to the guest_memfd vm_ops.
Signed-off-by: Mike Rapoport (Microsoft)
---
 virt/kvm/guest_memfd.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index ffadc5ee8e04..bc8337f104ce 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include
 
 #include "kvm_mm.h"
 
@@ -369,6 +370,12 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
 		return vmf_error(err);
 	}
 
+	if (userfaultfd_minor(vmf->vma)) {
+		folio_unlock(folio);
+		folio_put(folio);
+		return VM_FAULT_UFFD_MINOR;
+	}
+
 	if (WARN_ON_ONCE(folio_test_large(folio))) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_folio;
@@ -390,8 +397,30 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
 	return ret;
 }
 
+#ifdef CONFIG_USERFAULTFD
+static struct folio *kvm_gmem_get_shared_folio(struct inode *inode,
+					       pgoff_t pgoff)
+{
+	struct folio *folio;
+
+	folio = kvm_gmem_get_folio(inode, pgoff);
+	if (IS_ERR_OR_NULL(folio))
+		return folio;
+
+	if (!folio_test_uptodate(folio)) {
+		clear_highpage(folio_page(folio, 0));
+		kvm_gmem_mark_prepared(folio);
+	}
+
+	return folio;
+}
+#endif
+
 static const struct vm_operations_struct kvm_gmem_vm_ops = {
 	.fault = kvm_gmem_fault_user_mapping,
+#ifdef CONFIG_USERFAULTFD
+	.get_shared_folio = kvm_gmem_get_shared_folio,
+#endif
 };
 
 static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
-- 
2.50.1
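Condensed, the userspace side of resolving such a minor fault looks as
follows; the selftest in the next patch exercises the same sequence with
full error checking. resolve_minor_fault() is an illustrative helper, not
an API from the series; "registered" and "alias" are two MAP_SHARED
mappings of the same guest_memfd, with only the first registered with
UFFDIO_REGISTER_MODE_MINOR.

/* Hypothetical sketch, not from the series. */
#include <linux/userfaultfd.h>
#include <string.h>
#include <sys/ioctl.h>

static int resolve_minor_fault(int uffd, char *registered, char *alias,
			       const char *src, size_t off, size_t page_size)
{
	struct uffdio_continue cont = {
		.range = {
			.start = (unsigned long)registered + off,
			.len = page_size,
		},
	};

	/* Populate the shared pagecache through the unregistered alias... */
	memcpy(alias + off, src + off, page_size);

	/* ...then install the now-present folio into the faulting mapping. */
	return ioctl(uffd, UFFDIO_CONTINUE, &cont);
}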
From: Mike Rapoport
To: linux-mm@kvack.org
Subject: [PATCH 5/5] KVM: selftests: test userfaultfd minor for guest_memfd
Date: Sun, 23 Nov 2025 12:27:07 +0200
Message-ID: <20251123102707.559422-6-rppt@kernel.org>

From: Nikita Kalyazin

The test demonstrates that a minor userfaultfd event in guest_memfd can
be resolved via a memcpy followed by a UFFDIO_CONTINUE ioctl.

Signed-off-by: Nikita Kalyazin
Co-developed-by: Mike Rapoport (Microsoft)
Signed-off-by: Mike Rapoport (Microsoft)
---
 .../testing/selftests/kvm/guest_memfd_test.c | 103 ++++++++++++++++++
 1 file changed, 103 insertions(+)

diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index e7d9aeb418d3..a5d3ed21d7bb 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -10,13 +10,17 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include
 
 #include "kvm_util.h"
 #include "test_util.h"
@@ -254,6 +258,104 @@ static void test_guest_memfd_flags(struct kvm_vm *vm)
 	}
 }
 
+struct fault_args {
+	char *addr;
+	volatile char value;
+};
+
+static void *fault_thread_fn(void *arg)
+{
+	struct fault_args *args = arg;
+
+	/* Trigger page fault */
+	args->value = *args->addr;
+	return NULL;
+}
+
+static void test_uffd_minor(int fd, size_t total_size)
+{
+	struct uffdio_api uffdio_api = {
+		.api = UFFD_API,
+		.features = UFFD_FEATURE_MINOR_GENERIC,
+	};
+	struct uffdio_register uffd_reg;
+	struct uffdio_continue uffd_cont;
+	struct uffd_msg msg;
+	struct fault_args args;
+	pthread_t fault_thread;
+	void *mem, *mem_nofault, *buf = NULL;
+	int uffd, ret;
+	off_t offset = page_size;
+	void *fault_addr;
+
+	ret = posix_memalign(&buf, page_size, total_size);
+	TEST_ASSERT_EQ(ret, 0);
+
+	memset(buf, 0xaa, total_size);
+
+	uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
+	TEST_ASSERT(uffd != -1, "userfaultfd creation should succeed");
+
+	ret = ioctl(uffd, UFFDIO_API, &uffdio_api);
+	TEST_ASSERT(ret != -1, "ioctl(UFFDIO_API) should succeed");
+
+	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	TEST_ASSERT(mem != MAP_FAILED, "mmap should succeed");
+
+	mem_nofault = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	TEST_ASSERT(mem_nofault != MAP_FAILED, "mmap should succeed");
+
+	uffd_reg.range.start = (unsigned long)mem;
+	uffd_reg.range.len = total_size;
+	uffd_reg.mode = UFFDIO_REGISTER_MODE_MINOR;
+	ret = ioctl(uffd, UFFDIO_REGISTER, &uffd_reg);
+	TEST_ASSERT(ret != -1, "ioctl(UFFDIO_REGISTER) should succeed");
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+			offset, page_size);
+	TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) should succeed");
succeed"); + + fault_addr =3D mem + offset; + args.addr =3D fault_addr; + + ret =3D pthread_create(&fault_thread, NULL, fault_thread_fn, &args); + TEST_ASSERT(ret =3D=3D 0, "pthread_create should succeed"); + + ret =3D read(uffd, &msg, sizeof(msg)); + TEST_ASSERT(ret !=3D -1, "read from userfaultfd should succeed"); + TEST_ASSERT(msg.event =3D=3D UFFD_EVENT_PAGEFAULT, "event type should be = pagefault"); + TEST_ASSERT((void *)(msg.arg.pagefault.address & ~(page_size - 1)) =3D=3D= fault_addr, + "pagefault should occur at expected address"); + + memcpy(mem_nofault + offset, buf + offset, page_size); + + uffd_cont.range.start =3D (unsigned long)fault_addr; + uffd_cont.range.len =3D page_size; + uffd_cont.mode =3D 0; + ret =3D ioctl(uffd, UFFDIO_CONTINUE, &uffd_cont); + TEST_ASSERT(ret !=3D -1, "ioctl(UFFDIO_CONTINUE) should succeed"); + + /* + * wait for fault_thread to finish to make sure fault happened and was + * resolved before we verify the values + */ + ret =3D pthread_join(fault_thread, NULL); + TEST_ASSERT(ret =3D=3D 0, "pthread_join should succeed"); + + TEST_ASSERT(args.value =3D=3D *(char *)(mem_nofault + offset), + "memory should contain the value that was copied"); + TEST_ASSERT(args.value =3D=3D *(char *)(mem + offset), + "no further fault is expected"); + + ret =3D munmap(mem_nofault, total_size); + TEST_ASSERT(!ret, "munmap should succeed"); + + ret =3D munmap(mem, total_size); + TEST_ASSERT(!ret, "munmap should succeed"); + free(buf); + close(uffd); +} + #define gmem_test(__test, __vm, __flags) \ do { \ int fd =3D vm_create_guest_memfd(__vm, page_size * 4, __flags); \ @@ -273,6 +375,7 @@ static void __test_guest_memfd(struct kvm_vm *vm, uint6= 4_t flags) if (flags & GUEST_MEMFD_FLAG_INIT_SHARED) { gmem_test(mmap_supported, vm, flags); gmem_test(fault_overflow, vm, flags); + gmem_test(uffd_minor, vm, flags); } else { gmem_test(fault_private, vm, flags); } --=20 2.50.1