From: Kefeng Wang <wangkefeng.wang@huawei.com>
To: Andrew Morton
Cc: Russell King, Catalin Marinas, Will Deacon, Huacai Chen, WANG Xuerui,
    Michael Ellerman, Nicholas Piggin, Christophe Leroy, Paul Walmsley,
    Palmer Dabbelt, Albert Ou, Alexander Gordeev, Gerald Schaefer,
    Heiko Carstens, Vasily Gorbik, Christian Borntraeger, Sven Schnelle,
    Dave Hansen, Andy Lutomirski, Peter Zijlstra, Thomas Gleixner,
    Ingo Molnar, Borislav Petkov, H. Peter Anvin, Kefeng Wang
Subject: [PATCH rfc v2 04/10] s390: mm: use try_vma_locked_page_fault()
Date: Mon, 21 Aug 2023 20:30:50 +0800
Message-ID: <20230821123056.2109942-5-wangkefeng.wang@huawei.com>
In-Reply-To: <20230821123056.2109942-1-wangkefeng.wang@huawei.com>
References: <20230821123056.2109942-1-wangkefeng.wang@huawei.com>

Use the new try_vma_locked_page_fault() helper to simplify the code.
No functional change intended.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
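A note for reviewers (after the "---", so it stays out of the commit
message): for anyone reading this patch without the rest of the series at
hand, below is a rough sketch of what try_vma_locked_page_fault()
presumably does, reconstructed from the open-coded per-VMA locking
sequence this patch removes from do_exception(). The helper, the
VM_FAULT_NONE return value (assumed here to be a sentinel distinct from a
plain 0 "handled" result) and the vm_flags member of struct vm_fault are
all introduced earlier in this series; every detail below is an
assumption, not the actual implementation from the earlier patch.

static vm_fault_t try_vma_locked_page_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	/* The per-VMA lock fast path is only tried for user-mode faults. */
	if (!(vmf->flags & FAULT_FLAG_USER))
		return VM_FAULT_NONE;

	/* Find and read-lock the VMA under RCU, without taking mmap_lock. */
	vma = lock_vma_under_rcu(current->mm, vmf->real_address);
	if (!vma)
		return VM_FAULT_NONE;

	/* Access check against the caller-supplied vm_flags. */
	if (!(vma->vm_flags & vmf->vm_flags)) {
		vma_end_read(vma);
		return VM_FAULT_NONE;
	}

	/*
	 * A NULL pt_regs merely skips perf accounting; how the real
	 * helper threads regs through is not visible in this patch.
	 */
	fault = handle_mm_fault(vma, vmf->real_address,
				vmf->flags | FAULT_FLAG_VMA_LOCK, NULL);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (fault & VM_FAULT_RETRY) {
		count_vm_vma_lock_event(VMA_LOCK_RETRY);
	} else {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		/* Fully handled: the caller then takes its "goto out". */
		if (likely(!(fault & VM_FAULT_ERROR)))
			fault = 0;
	}

	return fault;
}

A VM_FAULT_NONE return means the architecture falls back to the classic
mmap_lock path, which is the lock_mm label in the hunks below.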
 arch/s390/mm/fault.c | 66 ++++++++++++++++++--------------------
 1 file changed, 27 insertions(+), 39 deletions(-)

diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 099c4824dd8a..fbbdebde6ea7 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -357,16 +357,18 @@ static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
 static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 {
 	struct gmap *gmap;
-	struct task_struct *tsk;
-	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	enum fault_type type;
-	unsigned long address;
-	unsigned int flags;
+	struct mm_struct *mm = current->mm;
+	unsigned long address = get_fault_address(regs);
 	vm_fault_t fault;
 	bool is_write;
+	struct vm_fault vmf = {
+		.real_address = address,
+		.flags = FAULT_FLAG_DEFAULT,
+		.vm_flags = access,
+	};
 
-	tsk = current;
 	/*
 	 * The instruction that caused the program check has
 	 * been nullified. Don't signal single step via SIGTRAP.
@@ -376,8 +378,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	if (kprobe_page_fault(regs, 14))
 		return 0;
 
-	mm = tsk->mm;
-	address = get_fault_address(regs);
 	is_write = fault_is_write(regs);
 
 	/*
@@ -398,45 +398,33 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	}
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-	flags = FAULT_FLAG_DEFAULT;
 	if (user_mode(regs))
-		flags |= FAULT_FLAG_USER;
+		vmf.flags |= FAULT_FLAG_USER;
 	if (is_write)
-		access = VM_WRITE;
-	if (access == VM_WRITE)
-		flags |= FAULT_FLAG_WRITE;
-	if (!(flags & FAULT_FLAG_USER))
-		goto lock_mmap;
-	vma = lock_vma_under_rcu(mm, address);
-	if (!vma)
-		goto lock_mmap;
-	if (!(vma->vm_flags & access)) {
-		vma_end_read(vma);
-		goto lock_mmap;
-	}
-	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
-	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
-		vma_end_read(vma);
-	if (!(fault & VM_FAULT_RETRY)) {
-		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
-		if (likely(!(fault & VM_FAULT_ERROR)))
-			fault = 0;
+		vmf.vm_flags = VM_WRITE;
+	if (vmf.vm_flags == VM_WRITE)
+		vmf.flags |= FAULT_FLAG_WRITE;
+
+	fault = try_vma_locked_page_fault(&vmf);
+	if (fault == VM_FAULT_NONE)
+		goto lock_mm;
+	if (!(fault & VM_FAULT_RETRY))
 		goto out;
-	}
-	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
 		fault = VM_FAULT_SIGNAL;
 		goto out;
 	}
-lock_mmap:
+
+lock_mm:
 	mmap_read_lock(mm);
 
 	gmap = NULL;
 	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
 		gmap = (struct gmap *) S390_lowcore.gmap;
 		current->thread.gmap_addr = address;
-		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
+		current->thread.gmap_write_flag = !!(vmf.flags & FAULT_FLAG_WRITE);
 		current->thread.gmap_int_code = regs->int_code & 0xffff;
 		address = __gmap_translate(gmap, address);
 		if (address == -EFAULT) {
@@ -444,7 +432,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 			goto out_up;
 		}
 		if (gmap->pfault_enabled)
-			flags |= FAULT_FLAG_RETRY_NOWAIT;
+			vmf.flags |= FAULT_FLAG_RETRY_NOWAIT;
 	}
 
 retry:
@@ -466,7 +454,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	 * we can handle it..
 	 */
 	fault = VM_FAULT_BADACCESS;
-	if (unlikely(!(vma->vm_flags & access)))
+	if (unlikely(!(vma->vm_flags & vmf.vm_flags)))
 		goto out_up;
 
 	/*
@@ -474,10 +462,10 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags, regs);
+	fault = handle_mm_fault(vma, address, vmf.flags, regs);
 	if (fault_signal_pending(fault, regs)) {
 		fault = VM_FAULT_SIGNAL;
-		if (flags & FAULT_FLAG_RETRY_NOWAIT)
+		if (vmf.flags & FAULT_FLAG_RETRY_NOWAIT)
 			goto out_up;
 		goto out;
 	}
@@ -497,7 +485,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 
 	if (fault & VM_FAULT_RETRY) {
 		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
-		    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
+		    (vmf.flags & FAULT_FLAG_RETRY_NOWAIT)) {
 			/*
 			 * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
 			 * not been released
@@ -506,8 +494,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 			fault = VM_FAULT_PFAULT;
 			goto out_up;
 		}
-		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
-		flags |= FAULT_FLAG_TRIED;
+		vmf.flags &= ~FAULT_FLAG_RETRY_NOWAIT;
+		vmf.flags |= FAULT_FLAG_TRIED;
 		mmap_read_lock(mm);
 		goto retry;
 	}
-- 
2.27.0