From: Ryan Roberts
To: Catalin Marinas, Will Deacon, Pasha Tatashin, Andrew Morton, Uladzislau Rezki, Christoph Hellwig, David Hildenbrand, "Matthew Wilcox (Oracle)", Mark Rutland, Anshuman Khandual, Alexandre Ghiti, Kevin Brodsky
Cc: Ryan Roberts, linux-arm-kernel@lists.infradead.org, linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH v3 10/11] mm/vmalloc: Enter lazy mmu mode while manipulating vmalloc ptes
Date: Tue, 4 Mar 2025 15:04:40 +0000
Message-ID: <20250304150444.3788920-11-ryan.roberts@arm.com>
In-Reply-To: <20250304150444.3788920-1-ryan.roberts@arm.com>
References: <20250304150444.3788920-1-ryan.roberts@arm.com>

Wrap vmalloc's pte table manipulation loops with arch_enter_lazy_mmu_mode() /
arch_leave_lazy_mmu_mode(). This gives the arch code the opportunity to
optimize the pte manipulations.

Note that vmap_pfn() already uses lazy mmu mode, since it delegates to
apply_to_page_range(), which enters lazy mmu mode for both user and kernel
mappings.

These hooks will shortly be used by arm64 to improve vmalloc performance.
Signed-off-by: Ryan Roberts
Reviewed-by: Anshuman Khandual
Reviewed-by: Catalin Marinas
Reviewed-by: Uladzislau Rezki (Sony)
---
 mm/vmalloc.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6111ce900ec4..b63ca0b7dd40 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -104,6 +104,9 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte = pte_alloc_kernel_track(pmd, addr, mask);
 	if (!pte)
 		return -ENOMEM;
+
+	arch_enter_lazy_mmu_mode();
+
 	do {
 		if (unlikely(!pte_none(ptep_get(pte)))) {
 			if (pfn_valid(pfn)) {
@@ -127,6 +130,8 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
 		pfn++;
 	} while (pte += PFN_DOWN(size), addr += size, addr != end);
+
+	arch_leave_lazy_mmu_mode();
 	*mask |= PGTBL_PTE_MODIFIED;
 	return 0;
 }
@@ -354,6 +359,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	unsigned long size = PAGE_SIZE;
 
 	pte = pte_offset_kernel(pmd, addr);
+	arch_enter_lazy_mmu_mode();
+
 	do {
 #ifdef CONFIG_HUGETLB_PAGE
 		size = arch_vmap_pte_range_unmap_size(addr, pte);
@@ -370,6 +377,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		ptent = ptep_get_and_clear(&init_mm, addr, pte);
 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
 	} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
+
+	arch_leave_lazy_mmu_mode();
 	*mask |= PGTBL_PTE_MODIFIED;
 }
 
@@ -515,6 +524,9 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	pte = pte_alloc_kernel_track(pmd, addr, mask);
 	if (!pte)
 		return -ENOMEM;
+
+	arch_enter_lazy_mmu_mode();
+
 	do {
 		struct page *page = pages[*nr];
 
@@ -528,6 +540,8 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 		(*nr)++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	arch_leave_lazy_mmu_mode();
 	*mask |= PGTBL_PTE_MODIFIED;
 	return 0;
 }
-- 
2.43.0
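
[Editorial note, not part of the patch: for readers unfamiliar with lazy mmu mode, the generic fallbacks for these hooks in include/linux/pgtable.h are no-ops, so architectures that do nothing special pay no cost. The sketch below illustrates the general batching idea the commit message describes: an arch can note that a run of pte updates is in progress and defer per-pte synchronisation until the leave hook. It is not the arm64 implementation from this series; the names lazy_mmu_active and emit_pte_barriers are hypothetical, and a real implementation would also have to account for preemption, e.g. by tracking the state per task rather than per CPU.]

/*
 * Illustrative sketch only: one possible shape of an arch's lazy mmu
 * hooks. All helper names here are hypothetical.
 */
static DEFINE_PER_CPU(bool, lazy_mmu_active);

static inline void arch_enter_lazy_mmu_mode(void)
{
	/* A batch of pte updates follows; defer per-pte synchronisation. */
	__this_cpu_write(lazy_mmu_active, true);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	__this_cpu_write(lazy_mmu_active, false);
	/* One synchronisation point covers the whole batch of updates. */
	emit_pte_barriers();		/* hypothetical helper */
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
	if (!__this_cpu_read(lazy_mmu_active))
		emit_pte_barriers();	/* outside lazy mode: sync immediately */
}

[With hooks shaped like this, the three vmalloc loops patched above would issue one synchronisation per pte table traversal instead of one per pte, which is the performance win the cover letter targets for arm64.]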