From: Peng Zhang
Subject: [PATCH v6 6/6] userfaultfd: convert mfill_atomic() to use a folio
Date: Mon, 10 Apr 2023 21:39:32 +0800
Message-ID: <20230410133932.32288-7-zhangpeng362@huawei.com>
In-Reply-To: <20230410133932.32288-1-zhangpeng362@huawei.com>
References: <20230410133932.32288-1-zhangpeng362@huawei.com>

From: ZhangPeng

Convert mfill_atomic_pte_copy(), shmem_mfill_atomic_pte() and
mfill_atomic_pte() to take in a folio pointer. Convert mfill_atomic()
itself to use a folio, and rename its local variable page_kaddr to
kaddr.

Signed-off-by: ZhangPeng
Reviewed-by: Mike Kravetz
---
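[Editorial note, not part of the commit: every folio on this path is
order-0 (a single page), so the folio calls are drop-in replacements
for the page calls they supersede. Below is a minimal sketch of the
post-conversion copy step; copy_into_folio() is a hypothetical helper
name used purely for illustration, not code from this patch.]

	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/uaccess.h>

	/* Illustrative sketch of the -ENOENT retry copy in mfill_atomic(). */
	static int copy_into_folio(struct folio *folio, unsigned long src_addr)
	{
		void *kaddr;
		int err;

		/*
		 * kmap_local_folio() takes a byte offset into the folio;
		 * offset 0 maps the first (here the only) page, matching
		 * the old kmap_local_page(page) behaviour.
		 */
		kaddr = kmap_local_folio(folio, 0);
		err = copy_from_user(kaddr, (const void __user *)src_addr,
				     PAGE_SIZE);
		kunmap_local(kaddr);
		if (unlikely(err))
			return -EFAULT;
		/* flush_dcache_folio() flushes every page in the folio. */
		flush_dcache_folio(folio);
		return 0;
	}

Likewise, folio_put() on an order-0 folio drops the same reference that
put_page() used to drop, so the cleanup at mfill_atomic()'s out label is
unchanged in behaviour.
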
 include/linux/shmem_fs.h |  4 ++--
 mm/shmem.c               | 16 ++++++++--------
 mm/userfaultfd.c         | 40 ++++++++++++++++++++--------------------
 3 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 3bb8d21edbb3..9e151ba45068 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -158,10 +158,10 @@ extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 				  unsigned long dst_addr,
 				  unsigned long src_addr,
 				  uffd_flags_t flags,
-				  struct page **pagep);
+				  struct folio **foliop);
 #else /* !CONFIG_SHMEM */
 #define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
-			       src_addr, flags, pagep) ({ BUG(); 0; })
+			       src_addr, flags, foliop) ({ BUG(); 0; })
 #endif /* CONFIG_SHMEM */
 #endif /* CONFIG_USERFAULTFD */
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 6c08f5a75d3a..9218c955f482 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2548,7 +2548,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 			   unsigned long dst_addr,
 			   unsigned long src_addr,
 			   uffd_flags_t flags,
-			   struct page **pagep)
+			   struct folio **foliop)
 {
 	struct inode *inode = file_inode(dst_vma->vm_file);
 	struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2566,14 +2566,14 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 		 * and now we find ourselves with -ENOMEM. Release the page, to
 		 * avoid a BUG_ON in our caller.
 		 */
-		if (unlikely(*pagep)) {
-			put_page(*pagep);
-			*pagep = NULL;
+		if (unlikely(*foliop)) {
+			folio_put(*foliop);
+			*foliop = NULL;
 		}
 		return -ENOMEM;
 	}
 
-	if (!*pagep) {
+	if (!*foliop) {
 		ret = -ENOMEM;
 		folio = shmem_alloc_folio(gfp, info, pgoff);
 		if (!folio)
@@ -2605,7 +2605,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
-			*pagep = &folio->page;
+			*foliop = folio;
 			ret = -ENOENT;
 			/* don't free the page */
 			goto out_unacct_blocks;
@@ -2616,9 +2616,9 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 			clear_user_highpage(&folio->page, dst_addr);
 		}
 	} else {
-		folio = page_folio(*pagep);
+		folio = *foliop;
 		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-		*pagep = NULL;
+		*foliop = NULL;
 	}
 
 	VM_BUG_ON(folio_test_locked(folio));
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 2f263afb823d..11cfd82c6726 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -133,13 +133,13 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 				 unsigned long dst_addr,
 				 unsigned long src_addr,
 				 uffd_flags_t flags,
-				 struct page **pagep)
+				 struct folio **foliop)
 {
 	void *kaddr;
 	int ret;
 	struct folio *folio;
 
-	if (!*pagep) {
+	if (!*foliop) {
 		ret = -ENOMEM;
 		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
 					dst_addr, false);
@@ -171,15 +171,15 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			ret = -ENOENT;
-			*pagep = &folio->page;
+			*foliop = folio;
 			/* don't free the page */
 			goto out;
 		}
 
 		flush_dcache_folio(folio);
 	} else {
-		folio = page_folio(*pagep);
-		*pagep = NULL;
+		folio = *foliop;
+		*foliop = NULL;
 	}
 
 	/*
@@ -470,7 +470,7 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
 					  unsigned long dst_addr,
 					  unsigned long src_addr,
 					  uffd_flags_t flags,
-					  struct page **pagep)
+					  struct folio **foliop)
 {
 	ssize_t err;
 
@@ -493,14 +493,14 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
 		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
 			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
 						    dst_addr, src_addr,
-						    flags, pagep);
+						    flags, foliop);
 		else
 			err = mfill_atomic_pte_zeropage(dst_pmd,
 						 dst_vma, dst_addr);
 	} else {
 		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
 					     dst_addr, src_addr,
-					     flags, pagep);
+					     flags, foliop);
 	}
 
 	return err;
@@ -518,7 +518,7 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 	pmd_t *dst_pmd;
 	unsigned long src_addr, dst_addr;
 	long copied;
-	struct page *page;
+	struct folio *folio;
 
 	/*
 	 * Sanitize the command parameters:
@@ -533,7 +533,7 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 	src_addr = src_start;
 	dst_addr = dst_start;
 	copied = 0;
-	page = NULL;
+	folio = NULL;
 retry:
 	mmap_read_lock(dst_mm);
 
@@ -629,28 +629,28 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 		BUG_ON(pmd_trans_huge(*dst_pmd));
 
 		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
-				       src_addr, flags, &page);
+				       src_addr, flags, &folio);
 		cond_resched();
 
 		if (unlikely(err == -ENOENT)) {
-			void *page_kaddr;
+			void *kaddr;
 
 			mmap_read_unlock(dst_mm);
-			BUG_ON(!page);
+			BUG_ON(!folio);
 
-			page_kaddr = kmap_local_page(page);
-			err = copy_from_user(page_kaddr,
+			kaddr = kmap_local_folio(folio, 0);
+			err = copy_from_user(kaddr,
 					     (const void __user *) src_addr,
 					     PAGE_SIZE);
-			kunmap_local(page_kaddr);
+			kunmap_local(kaddr);
 			if (unlikely(err)) {
 				err = -EFAULT;
 				goto out;
 			}
-			flush_dcache_page(page);
+			flush_dcache_folio(folio);
 			goto retry;
 		} else
-			BUG_ON(page);
+			BUG_ON(folio);
 
 		if (!err) {
 			dst_addr += PAGE_SIZE;
@@ -667,8 +667,8 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 out_unlock:
 	mmap_read_unlock(dst_mm);
 out:
-	if (page)
-		put_page(page);
+	if (folio)
+		folio_put(folio);
 	BUG_ON(copied < 0);
 	BUG_ON(err > 0);
 	BUG_ON(!copied && !err);
-- 
2.25.1