From: Dev Jain <dev.jain@arm.com>
To: akpm@linux-foundation.org, david@redhat.com, willy@infradead.org,
 kirill.shutemov@linux.intel.com
Cc: ryan.roberts@arm.com, anshuman.khandual@arm.com, catalin.marinas@arm.com,
 cl@gentwo.org, vbabka@suse.cz, mhocko@suse.com, apopple@nvidia.com,
 dave.hansen@linux.intel.com, will@kernel.org, baohua@kernel.org,
 jack@suse.cz, mark.rutland@arm.com, hughd@google.com,
 aneesh.kumar@kernel.org, yang@os.amperecomputing.com, peterx@redhat.com,
 ioworker0@gmail.com, jglisse@google.com,
 linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
 linux-mm@kvack.org, Dev Jain <dev.jain@arm.com>
Subject: [PATCH 1/2] mm: Abstract THP allocation
Date: Fri, 30 Aug 2024 14:11:16 +0530
Message-Id: <20240830084117.4079805-2-dev.jain@arm.com>
In-Reply-To: <20240830084117.4079805-1-dev.jain@arm.com>
References: <20240830084117.4079805-1-dev.jain@arm.com>

In preparation for the second patch, abstract away the THP allocation
logic present in the create_huge_pmd() path, which corresponds to the
faulting case when no page is present. There should be no functional
change as a result of applying this patch.
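As a reviewer aid (not part of the patch), a condensed sketch of the
fault path after this change; the helper names are exactly those
introduced in the diff below, with locals and error handling elided:

	static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
	{
		/* Reserve the page table page to deposit at map time */
		pgtable = pte_alloc_one(vma->vm_mm);

		/* Allocate, charge, throttle and zero the huge folio */
		ret = thp_fault_alloc(gfp, HPAGE_PMD_ORDER, vma, haddr,
				      &folio, vmf->address);

		/* Under the PMD lock: build the PMD entry, add rmap/LRU,
		 * deposit the page table, set the PMD; the helper unlocks
		 * and bumps the success counters itself.
		 */
		map_pmd_thp(folio, vmf, vma, haddr, pgtable);
	}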
Signed-off-by: Dev Jain <dev.jain@arm.com>
---
 mm/huge_memory.c | 113 +++++++++++++++++++++++++++++------------------
 1 file changed, 69 insertions(+), 44 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 67c86a5d64a6..e5b568e2bb34 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -943,47 +943,92 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 
-static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
-			struct page *page, gfp_t gfp)
+static vm_fault_t thp_fault_alloc(gfp_t gfp, int order, struct vm_area_struct *vma,
+				  unsigned long haddr, struct folio **foliop,
+				  unsigned long addr)
 {
-	struct vm_area_struct *vma = vmf->vma;
-	struct folio *folio = page_folio(page);
-	pgtable_t pgtable;
-	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
-	vm_fault_t ret = 0;
+	struct folio *folio = vma_alloc_folio(gfp, order, vma, haddr, true);
 
-	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+	*foliop = folio;
+	if (unlikely(!folio)) {
+		count_vm_event(THP_FAULT_FALLBACK);
+		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
+		return VM_FAULT_FALLBACK;
+	}
 
+	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
 		folio_put(folio);
 		count_vm_event(THP_FAULT_FALLBACK);
 		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
-		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
-		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
+		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
 		return VM_FAULT_FALLBACK;
 	}
 	folio_throttle_swaprate(folio, gfp);
 
-	pgtable = pte_alloc_one(vma->vm_mm);
-	if (unlikely(!pgtable)) {
-		ret = VM_FAULT_OOM;
-		goto release;
-	}
-
-	folio_zero_user(folio, vmf->address);
+	folio_zero_user(folio, addr);
 	/*
 	 * The memory barrier inside __folio_mark_uptodate makes sure that
 	 * folio_zero_user writes become visible before the set_pmd_at()
 	 * write.
	 */
	__folio_mark_uptodate(folio);
+	return 0;
+}
+
+static void __thp_fault_success_stats(struct vm_area_struct *vma, int order)
+{
+	count_vm_event(THP_FAULT_ALLOC);
+	count_mthp_stat(order, MTHP_STAT_ANON_FAULT_ALLOC);
+	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
+}
+
+static void map_pmd_thp(struct folio *folio, struct vm_fault *vmf,
+			struct vm_area_struct *vma, unsigned long haddr,
+			pgtable_t pgtable)
+	__releases(vmf->ptl)
+{
+	pmd_t entry;
+
+	entry = mk_huge_pmd(&folio->page, vma->vm_page_prot);
+	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+	folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
+	folio_add_lru_vma(folio, vma);
+	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
+	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
+	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+	mm_inc_nr_ptes(vma->vm_mm);
+	spin_unlock(vmf->ptl);
+	__thp_fault_success_stats(vma, HPAGE_PMD_ORDER);
+}
+
+static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct folio *folio = NULL;
+	pgtable_t pgtable;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+	vm_fault_t ret = 0;
+	gfp_t gfp = vma_thp_gfp_mask(vma);
+
+	pgtable = pte_alloc_one(vma->vm_mm);
+	if (unlikely(!pgtable)) {
+		ret = VM_FAULT_OOM;
+		goto release;
+	}
+
+	ret = thp_fault_alloc(gfp, HPAGE_PMD_ORDER, vma, haddr, &folio,
+			      vmf->address);
+	if (ret)
+		goto release;
 
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+
 	if (unlikely(!pmd_none(*vmf->pmd))) {
 		goto unlock_release;
 	} else {
-		pmd_t entry;
-
 		ret = check_stable_address_space(vma->vm_mm);
 		if (ret)
 			goto unlock_release;
@@ -997,20 +1042,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
 			return ret;
 		}
-
-		entry = mk_huge_pmd(page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
-		folio_add_lru_vma(folio, vma);
-		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
-		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
-		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
-		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
-		mm_inc_nr_ptes(vma->vm_mm);
-		spin_unlock(vmf->ptl);
-		count_vm_event(THP_FAULT_ALLOC);
-		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
-		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
+		map_pmd_thp(folio, vmf, vma, haddr, pgtable);
 	}
 
 	return 0;
@@ -1019,7 +1051,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 release:
 	if (pgtable)
 		pte_free(vma->vm_mm, pgtable);
-	folio_put(folio);
+	if (folio)
+		folio_put(folio);
 	return ret;
 
 }
@@ -1077,8 +1110,6 @@ static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	gfp_t gfp;
-	struct folio *folio;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	vm_fault_t ret;
 
@@ -1129,14 +1160,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		}
 		return ret;
 	}
-	gfp = vma_thp_gfp_mask(vma);
-	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
-	if (unlikely(!folio)) {
-		count_vm_event(THP_FAULT_FALLBACK);
-		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
-		return VM_FAULT_FALLBACK;
-	}
-	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
+
+	return __do_huge_pmd_anonymous_page(vmf);
 }
 
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-- 
2.30.2

From: Dev Jain <dev.jain@arm.com>
To: akpm@linux-foundation.org, david@redhat.com, willy@infradead.org,
 kirill.shutemov@linux.intel.com
Cc: ryan.roberts@arm.com, anshuman.khandual@arm.com, catalin.marinas@arm.com,
 cl@gentwo.org, vbabka@suse.cz, mhocko@suse.com, apopple@nvidia.com,
 dave.hansen@linux.intel.com, will@kernel.org, baohua@kernel.org,
 jack@suse.cz, mark.rutland@arm.com, hughd@google.com,
 aneesh.kumar@kernel.org, yang@os.amperecomputing.com, peterx@redhat.com,
 ioworker0@gmail.com, jglisse@google.com,
 linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
 linux-mm@kvack.org, Dev Jain <dev.jain@arm.com>
Subject: [PATCH 2/2] mm: Allocate THP on hugezeropage wp-fault
Date: Fri, 30 Aug 2024 14:11:17 +0530
Message-Id: <20240830084117.4079805-3-dev.jain@arm.com>
In-Reply-To: <20240830084117.4079805-1-dev.jain@arm.com>
References: <20240830084117.4079805-1-dev.jain@arm.com>

Introduce do_huge_zero_wp_pmd() to handle write-protect faults on the
huge zero page and replace it with a PMD-mapped THP. Change the helpers
introduced in the previous patch to flush the TLB entry corresponding
to the huge zero page and to preserve the PMD uffd-wp marker. On
allocation failure, fall back to splitting the PMD.
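As a reviewer aid (not part of the patch), a condensed sketch of the new
write-protect fault path; all names are as introduced in the diff below:

	do_huge_pmd_wp_page()
	  if (is_huge_zero_pmd(orig_pmd))
	    do_huge_zero_wp_pmd()
	      mmu_notifier_invalidate_range_start(MMU_NOTIFY_CLEAR)
	      pmd_lock(); recheck pmd_same() against vmf->orig_pmd
	        do_huge_zero_wp_pmd_locked()
	          thp_fault_alloc()	/* may return VM_FAULT_FALLBACK */
	          map_pmd_thp(folio, vmf, vma, haddr, NULL)
	            /* flushes the zero-page PMD, carries the uffd-wp
	             * marker over, deposits no page table (NULL) */
	      mmu_notifier_invalidate_range_end()
	    if (ret & VM_FAULT_FALLBACK)
	      goto fallback;	/* split the PMD */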
Signed-off-by: Dev Jain <dev.jain@arm.com>
---
 include/linux/huge_mm.h |  7 ++++
 mm/huge_memory.c        | 76 +++++++++++++++++++++++++++++++++++------
 mm/memory.c             |  5 +--
 3 files changed, 76 insertions(+), 12 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e25d9ebfdf89..375dba4fb130 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -9,6 +9,13 @@
 #include
 
 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
+vm_fault_t thp_fault_alloc(gfp_t gfp, int order, struct vm_area_struct *vma,
+			   unsigned long haddr, struct folio **foliop,
+			   unsigned long addr);
+void map_pmd_thp(struct folio *folio, struct vm_fault *vmf,
+		 struct vm_area_struct *vma, unsigned long haddr,
+		 pgtable_t pgtable)
+	__releases(vmf->ptl);
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5b568e2bb34..0f8b2e224795 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -943,9 +943,9 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 
-static vm_fault_t thp_fault_alloc(gfp_t gfp, int order, struct vm_area_struct *vma,
-				  unsigned long haddr, struct folio **foliop,
-				  unsigned long addr)
+vm_fault_t thp_fault_alloc(gfp_t gfp, int order, struct vm_area_struct *vma,
+			   unsigned long haddr, struct folio **foliop,
+			   unsigned long addr)
 {
 	struct folio *folio = vma_alloc_folio(gfp, order, vma, haddr, true);
 
@@ -984,22 +984,30 @@ static void __thp_fault_success_stats(struct vm_area_struct *vma, int order)
 	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
 }
 
-static void map_pmd_thp(struct folio *folio, struct vm_fault *vmf,
-			struct vm_area_struct *vma, unsigned long haddr,
-			pgtable_t pgtable)
+void map_pmd_thp(struct folio *folio, struct vm_fault *vmf,
+		 struct vm_area_struct *vma, unsigned long haddr,
+		 pgtable_t pgtable)
 	__releases(vmf->ptl)
 {
-	pmd_t entry;
+	pmd_t entry, old_pmd;
+	bool is_pmd_none = pmd_none(*vmf->pmd);
 
 	entry = mk_huge_pmd(&folio->page, vma->vm_page_prot);
 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 	folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
 	folio_add_lru_vma(folio, vma);
-	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+	if (!is_pmd_none) {
+		old_pmd = pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
+		if (pmd_uffd_wp(old_pmd))
+			entry = pmd_mkuffd_wp(entry);
+	}
+	if (pgtable)
+		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
-	mm_inc_nr_ptes(vma->vm_mm);
+	if (is_pmd_none)
+		mm_inc_nr_ptes(vma->vm_mm);
 	spin_unlock(vmf->ptl);
 	__thp_fault_success_stats(vma, HPAGE_PMD_ORDER);
 }
@@ -1577,6 +1585,47 @@ void huge_pmd_set_accessed(struct vm_fault *vmf)
 	spin_unlock(vmf->ptl);
 }
 
+static vm_fault_t do_huge_zero_wp_pmd_locked(struct vm_fault *vmf,
+					     unsigned long haddr)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	gfp_t gfp = vma_thp_gfp_mask(vma);
+	struct folio *folio = NULL;
+	vm_fault_t ret;
+
+	ret = thp_fault_alloc(gfp, HPAGE_PMD_ORDER, vma, haddr, &folio,
+			      vmf->address);
+	if (ret)
+		goto unlock;
+	ret = check_stable_address_space(vma->vm_mm);
+	if (ret)
+		goto unlock;
+	map_pmd_thp(folio, vmf, vma, haddr, NULL);
+	return 0;
+
+unlock:
+	spin_unlock(vmf->ptl);
+	return ret;
+}
+
+static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf, unsigned long haddr)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct mmu_notifier_range range;
+	vm_fault_t ret = 0;
+
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr,
+				haddr + HPAGE_PMD_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
+	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (likely(pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)))
+		ret = do_huge_zero_wp_pmd_locked(vmf, haddr);
+	else
+		spin_unlock(vmf->ptl);
+	mmu_notifier_invalidate_range_end(&range);
+	return ret;
+}
+
 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 {
 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
@@ -1589,8 +1638,15 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
 
-	if (is_huge_zero_pmd(orig_pmd))
+	if (is_huge_zero_pmd(orig_pmd)) {
+		vm_fault_t ret = do_huge_zero_wp_pmd(vmf, haddr);
+
+		if (!(ret & VM_FAULT_FALLBACK))
+			return ret;
+
+		/* Fallback to splitting PMD if THP cannot be allocated */
 		goto fallback;
+	}
 
 	spin_lock(vmf->ptl);
 
diff --git a/mm/memory.c b/mm/memory.c
index 3c01d68065be..c081a25f5173 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5409,9 +5409,10 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
 	if (vma_is_anonymous(vma)) {
 		if (likely(!unshare) &&
 		    userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
-			if (userfaultfd_wp_async(vmf->vma))
+			if (!userfaultfd_wp_async(vmf->vma))
+				return handle_userfault(vmf, VM_UFFD_WP);
+			if (!is_huge_zero_pmd(vmf->orig_pmd))
 				goto split;
-			return handle_userfault(vmf, VM_UFFD_WP);
 		}
 		return do_huge_pmd_wp_page(vmf);
 	}
-- 
2.30.2
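Note (reviewer aid, not part of the patch): the wp_huge_pmd() rewrite in
mm/memory.c keeps the non-async uffd-wp behaviour unchanged, and for
async uffd-wp lets only the huge zero page fall through to
do_huge_pmd_wp_page(), so the new allocation path can handle it; the
restructured condition is equivalent to:

	if (uffd-wp is armed on this PMD) {
		if (!userfaultfd_wp_async(vma))
			return handle_userfault(vmf, VM_UFFD_WP);
		if (!is_huge_zero_pmd(vmf->orig_pmd))
			goto split;	/* async, non-zero-page: split as before */
	}
	return do_huge_pmd_wp_page(vmf);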