From: Peng Zhang <zhangpeng362@huawei.com>
Subject: [PATCH v6 1/6] userfaultfd: convert mfill_atomic_pte_copy() to use a folio
Date: Mon, 10 Apr 2023 21:39:27 +0800
Message-ID: <20230410133932.32288-2-zhangpeng362@huawei.com>
In-Reply-To: <20230410133932.32288-1-zhangpeng362@huawei.com>

From: ZhangPeng <zhangpeng362@huawei.com>

Call vma_alloc_folio() directly instead of alloc_page_vma() and convert
page_kaddr to kaddr in mfill_atomic_pte_copy(). This removes several
calls to compound_head().

Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar
Reviewed-by: Mike Kravetz
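
To illustrate the conversion, the allocation-and-copy core looks like
the sketch below (condensed from the patched mfill_atomic_pte_copy() in
the diff that follows; error handling and the fallback path are
omitted):

    struct folio *folio;
    void *kaddr;
    int ret;

    /* Allocate an order-0 folio instead of alloc_page_vma(). */
    folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma, dst_addr, false);
    if (!folio)
            return -ENOMEM;

    /* Map page 0 of the folio. The mmap_lock is held, so page faults
     * must be disabled around the copy from userspace. */
    kaddr = kmap_local_folio(folio, 0);
    pagefault_disable();
    ret = copy_from_user(kaddr, (const void __user *)src_addr, PAGE_SIZE);
    pagefault_enable();
    kunmap_local(kaddr);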
---
 mm/userfaultfd.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 7f1b5f8b712c..313bc683c2b6 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -135,17 +135,18 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 			 uffd_flags_t flags,
 			 struct page **pagep)
 {
-	void *page_kaddr;
+	void *kaddr;
 	int ret;
-	struct page *page;
+	struct folio *folio;
 
 	if (!*pagep) {
 		ret = -ENOMEM;
-		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
-		if (!page)
+		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
+					dst_addr, false);
+		if (!folio)
 			goto out;
 
-		page_kaddr = kmap_local_page(page);
+		kaddr = kmap_local_folio(folio, 0);
 		/*
 		 * The read mmap_lock is held here.  Despite the
 		 * mmap_lock being read recursive a deadlock is still
@@ -162,45 +163,44 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 		 * and retry the copy outside the mmap_lock.
 		 */
 		pagefault_disable();
-		ret = copy_from_user(page_kaddr,
-				     (const void __user *) src_addr,
+		ret = copy_from_user(kaddr, (const void __user *) src_addr,
 				     PAGE_SIZE);
 		pagefault_enable();
-		kunmap_local(page_kaddr);
+		kunmap_local(kaddr);
 
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			ret = -ENOENT;
-			*pagep = page;
+			*pagep = &folio->page;
 			/* don't free the page */
 			goto out;
 		}
 
-		flush_dcache_page(page);
+		flush_dcache_folio(folio);
 	} else {
-		page = *pagep;
+		folio = page_folio(*pagep);
 		*pagep = NULL;
 	}
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
 	 * preceding stores to the page contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
 	ret = -ENOMEM;
-	if (mem_cgroup_charge(page_folio(page), dst_vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
 		goto out_release;
 
 	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-				       page, true, flags);
+				       &folio->page, true, flags);
 	if (ret)
 		goto out_release;
out:
 	return ret;
out_release:
-	put_page(page);
+	folio_put(folio);
 	goto out;
 }
 
-- 
2.25.1

From: Peng Zhang <zhangpeng362@huawei.com>
Subject: [PATCH v6 2/6] userfaultfd: use kmap_local_page() in copy_huge_page_from_user()
Date: Mon, 10 Apr 2023 21:39:28 +0800
Message-ID: <20230410133932.32288-3-zhangpeng362@huawei.com>
In-Reply-To: <20230410133932.32288-1-zhangpeng362@huawei.com>

From: ZhangPeng <zhangpeng362@huawei.com>

kmap() and kmap_atomic() are being deprecated in favor of
kmap_local_page(), which is appropriate for any thread-local
context. [1]

Let's replace kmap() and kmap_atomic() with kmap_local_page() in
copy_huge_page_from_user(). When allow_pagefault is false, disable page
faults to prevent potential deadlock. [2]

[1] https://lore.kernel.org/all/20220813220034.806698-1-ira.weiny@intel.com/
[2] https://lkml.kernel.org/r/20221025220136.2366143-1-ira.weiny@intel.com

Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Mike Kravetz
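
Condensed from the hunk below, the per-subpage pattern becomes:
kmap_local_page() replaces both kmap() and kmap_atomic(), and an
explicit pagefault_disable()/pagefault_enable() pair stands in for the
implicit fault disabling that kmap_atomic() used to provide:

    page_kaddr = kmap_local_page(subpage);
    if (!allow_pagefault)
            pagefault_disable();
    rc = copy_from_user(page_kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
    if (!allow_pagefault)
            pagefault_enable();
    kunmap_local(page_kaddr);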
---
 mm/memory.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 387226d6094d..808f354bce65 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5880,16 +5880,14 @@ long copy_huge_page_from_user(struct page *dst_page,
 
 	for (i = 0; i < pages_per_huge_page; i++) {
 		subpage = nth_page(dst_page, i);
-		if (allow_pagefault)
-			page_kaddr = kmap(subpage);
-		else
-			page_kaddr = kmap_atomic(subpage);
+		page_kaddr = kmap_local_page(subpage);
+		if (!allow_pagefault)
+			pagefault_disable();
 		rc = copy_from_user(page_kaddr,
 				usr_src + i * PAGE_SIZE, PAGE_SIZE);
-		if (allow_pagefault)
-			kunmap(subpage);
-		else
-			kunmap_atomic(page_kaddr);
+		if (!allow_pagefault)
+			pagefault_enable();
+		kunmap_local(page_kaddr);
 
 		ret_val -= (PAGE_SIZE - rc);
 		if (rc)
-- 
2.25.1

From: Peng Zhang <zhangpeng362@huawei.com>
Subject: [PATCH v6 3/6] userfaultfd: convert copy_huge_page_from_user() to copy_folio_from_user()
Date: Mon, 10 Apr 2023 21:39:29 +0800
Message-ID: <20230410133932.32288-4-zhangpeng362@huawei.com>
In-Reply-To: <20230410133932.32288-1-zhangpeng362@huawei.com>

From: ZhangPeng <zhangpeng362@huawei.com>

Replace copy_huge_page_from_user() with copy_folio_from_user().
copy_folio_from_user() does the same as copy_huge_page_from_user(), but
takes in a folio instead of a page. Convert page_kaddr to kaddr in
copy_folio_from_user() as an indentation cleanup.

Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar
Reviewed-by: Mike Kravetz
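
A condensed sketch of the new helper (the complete body is in the
mm/memory.c hunk below); the page count now comes from
folio_nr_pages() instead of a parameter:

    long copy_folio_from_user(struct folio *dst_folio,
                              const void __user *usr_src,
                              bool allow_pagefault)
    {
            /* Derive the subpage count from the folio itself. */
            unsigned int nr_pages = folio_nr_pages(dst_folio);
            unsigned long i, rc = 0, ret_val = nr_pages * PAGE_SIZE;

            for (i = 0; i < nr_pages; i++) {
                    void *kaddr = kmap_local_page(folio_page(dst_folio, i));

                    if (!allow_pagefault)
                            pagefault_disable();
                    rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
                    if (!allow_pagefault)
                            pagefault_enable();
                    kunmap_local(kaddr);

                    ret_val -= (PAGE_SIZE - rc);
                    if (rc)
                            break;
            }
            return ret_val;
    }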
---
 include/linux/mm.h |  7 +++----
 mm/hugetlb.c       |  5 ++---
 mm/memory.c        | 23 +++++++++++------------
 mm/userfaultfd.c   |  6 ++----
 4 files changed, 18 insertions(+), 23 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 243bfba378c5..a978413b40a4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3698,10 +3698,9 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned long addr_hint,
 				struct vm_area_struct *vma,
 				unsigned int pages_per_huge_page);
-extern long copy_huge_page_from_user(struct page *dst_page,
-				const void __user *usr_src,
-				unsigned int pages_per_huge_page,
-				bool allow_pagefault);
+long copy_folio_from_user(struct folio *dst_folio,
+			  const void __user *usr_src,
+			  bool allow_pagefault);
 
 /**
  * vma_is_special_huge - Are transhuge page-table entries considered special?
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7e4a80769c9e..aade1b513474 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6217,9 +6217,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			goto out;
 		}
 
-		ret = copy_huge_page_from_user(&folio->page,
-						(const void __user *) src_addr,
-						pages_per_huge_page(h), false);
+		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
+					   false);
 
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
diff --git a/mm/memory.c b/mm/memory.c
index 808f354bce65..021cab989703 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5868,26 +5868,25 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
 }
 
-long copy_huge_page_from_user(struct page *dst_page,
-				const void __user *usr_src,
-				unsigned int pages_per_huge_page,
-				bool allow_pagefault)
+long copy_folio_from_user(struct folio *dst_folio,
+			  const void __user *usr_src,
+			  bool allow_pagefault)
 {
-	void *page_kaddr;
+	void *kaddr;
 	unsigned long i, rc = 0;
-	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+	unsigned int nr_pages = folio_nr_pages(dst_folio);
+	unsigned long ret_val = nr_pages * PAGE_SIZE;
 	struct page *subpage;
 
-	for (i = 0; i < pages_per_huge_page; i++) {
-		subpage = nth_page(dst_page, i);
-		page_kaddr = kmap_local_page(subpage);
+	for (i = 0; i < nr_pages; i++) {
+		subpage = folio_page(dst_folio, i);
+		kaddr = kmap_local_page(subpage);
 		if (!allow_pagefault)
 			pagefault_disable();
-		rc = copy_from_user(page_kaddr,
-				usr_src + i * PAGE_SIZE, PAGE_SIZE);
+		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
 		if (!allow_pagefault)
 			pagefault_enable();
-		kunmap_local(page_kaddr);
+		kunmap_local(kaddr);
 
 		ret_val -= (PAGE_SIZE - rc);
 		if (rc)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 313bc683c2b6..1e7dba6c4c5f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -421,10 +421,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 			mmap_read_unlock(dst_mm);
 			BUG_ON(!page);
 
-			err = copy_huge_page_from_user(page,
-						(const void __user *)src_addr,
-						vma_hpagesize / PAGE_SIZE,
-						true);
+			err = copy_folio_from_user(page_folio(page),
+						   (const void __user *)src_addr, true);
 			if (unlikely(err)) {
 				err = -EFAULT;
 				goto out;
-- 
2.25.1

From: Peng Zhang <zhangpeng362@huawei.com>
Subject: [PATCH v6 4/6] userfaultfd: convert mfill_atomic_hugetlb() to use a folio
Date: Mon, 10 Apr 2023 21:39:30 +0800
Message-ID: <20230410133932.32288-5-zhangpeng362@huawei.com>
In-Reply-To: <20230410133932.32288-1-zhangpeng362@huawei.com>

From: ZhangPeng <zhangpeng362@huawei.com>

Convert hugetlb_mfill_atomic_pte() to take in a folio pointer instead
of a page pointer. Convert mfill_atomic_hugetlb() to use a folio.

Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar
Reviewed-by: Mike Kravetz
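
Condensed from the caller side of the diff below, the -ENOENT handoff
between mfill_atomic_hugetlb() and hugetlb_mfill_atomic_pte() now
carries a folio instead of a page (retry bookkeeping omitted):

    struct folio *folio = NULL;

    err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
                                   src_addr, flags, &folio);
    if (unlikely(err == -ENOENT)) {
            /* The callee parked the new folio in *foliop; copy from
             * userspace outside the mmap_lock, then retry. */
            mmap_read_unlock(dst_mm);
            err = copy_folio_from_user(folio,
                                       (const void __user *)src_addr, true);
    }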
---
 include/linux/hugetlb.h |  4 ++--
 mm/hugetlb.c            | 26 +++++++++++++-------------
 mm/userfaultfd.c        | 16 ++++++++--------
 3 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2a758bcd6719..28703fe22386 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -163,7 +163,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 				unsigned long dst_addr,
 				unsigned long src_addr,
 				uffd_flags_t flags,
-				struct page **pagep);
+				struct folio **foliop);
 #endif /* CONFIG_USERFAULTFD */
 bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						struct vm_area_struct *vma,
@@ -397,7 +397,7 @@ static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 						unsigned long dst_addr,
 						unsigned long src_addr,
 						uffd_flags_t flags,
-						struct page **pagep)
+						struct folio **foliop)
 {
 	BUG();
 	return 0;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aade1b513474..c88f856ec2e2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6178,7 +6178,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			     unsigned long dst_addr,
 			     unsigned long src_addr,
 			     uffd_flags_t flags,
-			     struct page **pagep)
+			     struct folio **foliop)
 {
 	struct mm_struct *dst_mm = dst_vma->vm_mm;
 	bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
@@ -6201,8 +6201,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 		if (IS_ERR(folio))
 			goto out;
 		folio_in_pagecache = true;
-	} else if (!*pagep) {
-		/* If a page already exists, then it's UFFDIO_COPY for
+	} else if (!*foliop) {
+		/* If a folio already exists, then it's UFFDIO_COPY for
 		 * a non-missing case. Return -EEXIST.
 		 */
 		if (vm_shared &&
@@ -6237,33 +6237,33 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 				ret = -ENOMEM;
 				goto out;
 			}
-			*pagep = &folio->page;
-			/* Set the outparam pagep and return to the caller to
+			*foliop = folio;
+			/* Set the outparam foliop and return to the caller to
 			 * copy the contents outside the lock. Don't free the
-			 * page.
+			 * folio.
 			 */
 			goto out;
 		}
 	} else {
 		if (vm_shared &&
 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
-			put_page(*pagep);
+			folio_put(*foliop);
 			ret = -EEXIST;
-			*pagep = NULL;
+			*foliop = NULL;
 			goto out;
 		}
 
 		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
 		if (IS_ERR(folio)) {
-			put_page(*pagep);
+			folio_put(*foliop);
 			ret = -ENOMEM;
-			*pagep = NULL;
+			*foliop = NULL;
 			goto out;
 		}
-		copy_user_huge_page(&folio->page, *pagep, dst_addr, dst_vma,
+		copy_user_huge_page(&folio->page, &(*foliop)->page, dst_addr, dst_vma,
 				    pages_per_huge_page(h));
-		put_page(*pagep);
-		*pagep = NULL;
+		folio_put(*foliop);
+		*foliop = NULL;
 	}
 
 	/*
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 1e7dba6c4c5f..2f263afb823d 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -321,7 +321,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 	pte_t *dst_pte;
 	unsigned long src_addr, dst_addr;
 	long copied;
-	struct page *page;
+	struct folio *folio;
 	unsigned long vma_hpagesize;
 	pgoff_t idx;
 	u32 hash;
@@ -341,7 +341,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 	src_addr = src_start;
 	dst_addr = dst_start;
 	copied = 0;
-	page = NULL;
+	folio = NULL;
 	vma_hpagesize = vma_kernel_pagesize(dst_vma);
 
 	/*
@@ -410,7 +410,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 		}
 
 		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
-					       src_addr, flags, &page);
+					       src_addr, flags, &folio);
 
 		hugetlb_vma_unlock_read(dst_vma);
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
@@ -419,9 +419,9 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 
 		if (unlikely(err == -ENOENT)) {
 			mmap_read_unlock(dst_mm);
-			BUG_ON(!page);
+			BUG_ON(!folio);
 
-			err = copy_folio_from_user(page_folio(page),
+			err = copy_folio_from_user(folio,
 						   (const void __user *)src_addr, true);
 			if (unlikely(err)) {
 				err = -EFAULT;
@@ -432,7 +432,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 			dst_vma = NULL;
 			goto retry;
 		} else
-			BUG_ON(page);
+			BUG_ON(folio);
 
 		if (!err) {
 			dst_addr += vma_hpagesize;
@@ -449,8 +449,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
out_unlock:
 	mmap_read_unlock(dst_mm);
out:
-	if (page)
-		put_page(page);
+	if (folio)
+		folio_put(folio);
 	BUG_ON(copied < 0);
 	BUG_ON(err > 0);
 	BUG_ON(!copied && !err);
-- 
2.25.1
From: Peng Zhang <zhangpeng362@huawei.com>
Subject: [PATCH v6 5/6] mm: convert copy_user_huge_page() to copy_user_large_folio()
Date: Mon, 10 Apr 2023 21:39:31 +0800
Message-ID: <20230410133932.32288-6-zhangpeng362@huawei.com>
In-Reply-To: <20230410133932.32288-1-zhangpeng362@huawei.com>

From: ZhangPeng <zhangpeng362@huawei.com>

Replace copy_user_huge_page() with copy_user_large_folio().
copy_user_large_folio() does the same as copy_user_huge_page(), but
takes in folios instead of pages. Remove pages_per_huge_page from
copy_user_large_folio(), because we can get that from
folio_nr_pages(dst). Convert copy_user_gigantic_page() to take in
folios.

Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
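
A sketch of the converted gigantic-page helper (condensed from the
mm/memory.c hunk below; the _sketch suffix is illustrative, not the
function's real name) shows how the subpage count is now derived from
the destination folio rather than passed in:

    static void copy_user_gigantic_page_sketch(struct folio *dst,
                                               struct folio *src,
                                               unsigned long addr,
                                               struct vm_area_struct *vma)
    {
            /* Subpage count comes from the folio, not a parameter. */
            unsigned int nr_pages = folio_nr_pages(dst);
            int i;

            for (i = 0; i < nr_pages; i++) {
                    struct page *dst_page = folio_page(dst, i);
                    struct page *src_page = folio_page(src, i);

                    cond_resched();
                    copy_user_highpage(dst_page, src_page,
                                       addr + i * PAGE_SIZE, vma);
            }
    }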
---
 include/linux/mm.h |  7 +++----
 mm/hugetlb.c       | 11 +++++------
 mm/memory.c        | 28 ++++++++++++++--------------
 3 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a978413b40a4..c8f05c3e1acb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3694,10 +3694,9 @@ extern const struct attribute_group memory_failure_attr_group;
 extern void clear_huge_page(struct page *page,
 			    unsigned long addr_hint,
 			    unsigned int pages_per_huge_page);
-extern void copy_user_huge_page(struct page *dst, struct page *src,
-				unsigned long addr_hint,
-				struct vm_area_struct *vma,
-				unsigned int pages_per_huge_page);
+void copy_user_large_folio(struct folio *dst, struct folio *src,
+			   unsigned long addr_hint,
+			   struct vm_area_struct *vma);
 long copy_folio_from_user(struct folio *dst_folio,
 			  const void __user *usr_src,
 			  bool allow_pagefault);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c88f856ec2e2..f16b25b1a6b9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5097,8 +5097,9 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 					ret = PTR_ERR(new_folio);
 					break;
 				}
-				copy_user_huge_page(&new_folio->page, ptepage, addr, dst_vma,
-						    npages);
+				copy_user_large_folio(new_folio,
+						      page_folio(ptepage),
+						      addr, dst_vma);
 				put_page(ptepage);
 
 				/* Install the new hugetlb folio if src pte stable */
@@ -5616,8 +5617,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_release_all;
 	}
 
-	copy_user_huge_page(&new_folio->page, old_page, address, vma,
-			    pages_per_huge_page(h));
+	copy_user_large_folio(new_folio, page_folio(old_page), address, vma);
 	__folio_mark_uptodate(new_folio);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
@@ -6260,8 +6260,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			*foliop = NULL;
 			goto out;
 		}
-		copy_user_huge_page(&folio->page, &(*foliop)->page, dst_addr, dst_vma,
-				    pages_per_huge_page(h));
+		copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
 	}
diff --git a/mm/memory.c b/mm/memory.c
index 021cab989703..f315c2198098 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5815,21 +5815,21 @@ void clear_huge_page(struct page *page,
 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
 }
 
-static void copy_user_gigantic_page(struct page *dst, struct page *src,
-				    unsigned long addr,
-				    struct vm_area_struct *vma,
-				    unsigned int pages_per_huge_page)
+static void copy_user_gigantic_page(struct folio *dst, struct folio *src,
+				    unsigned long addr,
+				    struct vm_area_struct *vma,
+				    unsigned int pages_per_huge_page)
 {
 	int i;
-	struct page *dst_base = dst;
-	struct page *src_base = src;
+	struct page *dst_page;
+	struct page *src_page;
 
 	for (i = 0; i < pages_per_huge_page; i++) {
-		dst = nth_page(dst_base, i);
-		src = nth_page(src_base, i);
+		dst_page = folio_page(dst, i);
+		src_page = folio_page(src, i);
 
 		cond_resched();
-		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+		copy_user_highpage(dst_page, src_page, addr + i*PAGE_SIZE, vma);
 	}
 }
 
@@ -5847,15 +5847,15 @@ static void copy_subpage(unsigned long addr, int idx, void *arg)
 			   addr, copy_arg->vma);
 }
 
-void copy_user_huge_page(struct page *dst, struct page *src,
-			 unsigned long addr_hint, struct vm_area_struct *vma,
-			 unsigned int pages_per_huge_page)
+void copy_user_large_folio(struct folio *dst, struct folio *src,
+			   unsigned long addr_hint, struct vm_area_struct *vma)
 {
+	unsigned int pages_per_huge_page = folio_nr_pages(dst);
 	unsigned long addr = addr_hint &
 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 	struct copy_subpage_arg arg = {
-		.dst = dst,
-		.src = src,
+		.dst = &dst->page,
+		.src = &src->page,
 		.vma = vma,
 	};
 
-- 
2.25.1
From: Peng Zhang <zhangpeng362@huawei.com>
Subject: [PATCH v6 6/6] userfaultfd: convert mfill_atomic() to use a folio
Date: Mon, 10 Apr 2023 21:39:32 +0800
Message-ID: <20230410133932.32288-7-zhangpeng362@huawei.com>
In-Reply-To: <20230410133932.32288-1-zhangpeng362@huawei.com>

From: ZhangPeng <zhangpeng362@huawei.com>

Convert mfill_atomic_pte_copy(), shmem_mfill_atomic_pte() and
mfill_atomic_pte() to take in a folio pointer. Convert mfill_atomic()
to use a folio. Convert page_kaddr to kaddr in mfill_atomic().

Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Mike Kravetz
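
Condensed from the mfill_atomic() hunk below, the caller-side fallback
now maps and flushes a folio through the retry loop:

    if (unlikely(err == -ENOENT)) {
            void *kaddr;

            mmap_read_unlock(dst_mm);
            BUG_ON(!folio);

            /* Copy from userspace with the mmap_lock dropped, using the
             * folio the callee handed back, then retry the fill. */
            kaddr = kmap_local_folio(folio, 0);
            err = copy_from_user(kaddr, (const void __user *)src_addr,
                                 PAGE_SIZE);
            kunmap_local(kaddr);
            if (unlikely(err)) {
                    err = -EFAULT;
                    goto out;
            }
            flush_dcache_folio(folio);
            goto retry;
    }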
---
 include/linux/shmem_fs.h |  4 ++--
 mm/shmem.c               | 16 ++++++++--------
 mm/userfaultfd.c         | 40 ++++++++++++++++++++--------------------
 3 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 3bb8d21edbb3..9e151ba45068 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -158,10 +158,10 @@ extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 				  unsigned long dst_addr,
 				  unsigned long src_addr,
 				  uffd_flags_t flags,
-				  struct page **pagep);
+				  struct folio **foliop);
 #else /* !CONFIG_SHMEM */
 #define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
-			       src_addr, flags, pagep) ({ BUG(); 0; })
+			       src_addr, flags, foliop) ({ BUG(); 0; })
 #endif /* CONFIG_SHMEM */
 #endif /* CONFIG_USERFAULTFD */
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 6c08f5a75d3a..9218c955f482 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2548,7 +2548,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 			   unsigned long dst_addr,
 			   unsigned long src_addr,
 			   uffd_flags_t flags,
-			   struct page **pagep)
+			   struct folio **foliop)
 {
 	struct inode *inode = file_inode(dst_vma->vm_file);
 	struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2566,14 +2566,14 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 		 * and now we find ourselves with -ENOMEM. Release the page, to
 		 * avoid a BUG_ON in our caller.
 		 */
-		if (unlikely(*pagep)) {
-			put_page(*pagep);
-			*pagep = NULL;
+		if (unlikely(*foliop)) {
+			folio_put(*foliop);
+			*foliop = NULL;
 		}
 		return -ENOMEM;
 	}
 
-	if (!*pagep) {
+	if (!*foliop) {
 		ret = -ENOMEM;
 		folio = shmem_alloc_folio(gfp, info, pgoff);
 		if (!folio)
@@ -2605,7 +2605,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 
 			/* fallback to copy_from_user outside mmap_lock */
 			if (unlikely(ret)) {
-				*pagep = &folio->page;
+				*foliop = folio;
 				ret = -ENOENT;
 				/* don't free the page */
 				goto out_unacct_blocks;
@@ -2616,9 +2616,9 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 			clear_user_highpage(&folio->page, dst_addr);
 		}
 	} else {
-		folio = page_folio(*pagep);
+		folio = *foliop;
 		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-		*pagep = NULL;
+		*foliop = NULL;
 	}
 
 	VM_BUG_ON(folio_test_locked(folio));
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 2f263afb823d..11cfd82c6726 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -133,13 +133,13 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 				 unsigned long dst_addr,
 				 unsigned long src_addr,
 				 uffd_flags_t flags,
-				 struct page **pagep)
+				 struct folio **foliop)
 {
 	void *kaddr;
 	int ret;
 	struct folio *folio;
 
-	if (!*pagep) {
+	if (!*foliop) {
 		ret = -ENOMEM;
 		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
 					dst_addr, false);
@@ -171,15 +171,15 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			ret = -ENOENT;
-			*pagep = &folio->page;
+			*foliop = folio;
 			/* don't free the page */
 			goto out;
 		}
 
 		flush_dcache_folio(folio);
 	} else {
-		folio = page_folio(*pagep);
-		*pagep = NULL;
+		folio = *foliop;
+		*foliop = NULL;
 	}
 
 	/*
@@ -470,7 +470,7 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
 					      unsigned long dst_addr,
 					      unsigned long src_addr,
 					      uffd_flags_t flags,
-					      struct page **pagep)
+					      struct folio **foliop)
 {
 	ssize_t err;
 
@@ -493,14 +493,14 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
 		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
 			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
 						    dst_addr, src_addr,
-						    flags, pagep);
+						    flags, foliop);
 		else
 			err = mfill_atomic_pte_zeropage(dst_pmd,
 						 dst_vma, dst_addr);
 	} else {
 		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
 					     dst_addr, src_addr,
-					     flags, pagep);
+					     flags, foliop);
 	}
 
 	return err;
@@ -518,7 +518,7 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 	pmd_t *dst_pmd;
 	unsigned long src_addr, dst_addr;
 	long copied;
-	struct page *page;
+	struct folio *folio;
 
 	/*
 	 * Sanitize the command parameters:
@@ -533,7 +533,7 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 	src_addr = src_start;
 	dst_addr = dst_start;
 	copied = 0;
-	page = NULL;
+	folio = NULL;
retry:
 	mmap_read_lock(dst_mm);
 
@@ -629,28 +629,28 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 		BUG_ON(pmd_trans_huge(*dst_pmd));
 
 		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
-				       src_addr, flags, &page);
+				       src_addr, flags, &folio);
 		cond_resched();
 
 		if (unlikely(err == -ENOENT)) {
-			void *page_kaddr;
+			void *kaddr;
 
 			mmap_read_unlock(dst_mm);
-			BUG_ON(!page);
+			BUG_ON(!folio);
 
-			page_kaddr = kmap_local_page(page);
-			err = copy_from_user(page_kaddr,
+			kaddr = kmap_local_folio(folio, 0);
+			err = copy_from_user(kaddr,
 					     (const void __user *) src_addr,
 					     PAGE_SIZE);
-			kunmap_local(page_kaddr);
+			kunmap_local(kaddr);
 			if (unlikely(err)) {
 				err = -EFAULT;
 				goto out;
 			}
-			flush_dcache_page(page);
+			flush_dcache_folio(folio);
 			goto retry;
 		} else
-			BUG_ON(page);
+			BUG_ON(folio);
 
 		if (!err) {
 			dst_addr += PAGE_SIZE;
@@ -667,8 +667,8 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
out_unlock:
 	mmap_read_unlock(dst_mm);
out:
-	if (page)
-		put_page(page);
+	if (folio)
+		folio_put(folio);
 	BUG_ON(copied < 0);
 	BUG_ON(err > 0);
 	BUG_ON(!copied && !err);
-- 
2.25.1