From nobody Tue Sep 16 09:01:47 2025
Date: Thu, 5 Jan 2023 10:18:16 +0000
In-Reply-To: <20230105101844.1893104-1-jthoughton@google.com>
Mime-Version: 1.0
References: <20230105101844.1893104-1-jthoughton@google.com>
X-Mailer: git-send-email 2.39.0.314.g84b9a713c41-goog
Message-ID: <20230105101844.1893104-19-jthoughton@google.com>
Subject: [PATCH 18/46] hugetlb: add HGM support for hugetlb_change_protection
From: James Houghton <jthoughton@google.com>
To: Mike Kravetz, Muchun Song, Peter Xu
Cc: David Hildenbrand, David Rientjes, Axel Rasmussen, Mina Almasry,
	"Zach O'Keefe", Manish Mishra, Naoya Horiguchi,
	"Dr. David Alan Gilbert", "Matthew Wilcox (Oracle)", Vlastimil Babka,
	Baolin Wang, Miaohe Lin, Yang Shi, Andrew Morton,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org, James Houghton
Content-Type: text/plain; charset="utf-8"

The main change here is to do a high-granularity walk and to pull the
shift from the walk (not from the hstate).

Signed-off-by: James Houghton <jthoughton@google.com>
---
 mm/hugetlb.c | 59 ++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 38 insertions(+), 21 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dfd6c1491ac3..73672d806172 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6798,15 +6798,15 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long start = address;
-	pte_t *ptep;
 	pte_t pte;
 	struct hstate *h = hstate_vma(vma);
-	unsigned long pages = 0, psize = huge_page_size(h);
+	unsigned long base_pages = 0, psize = huge_page_size(h);
 	bool shared_pmd = false;
 	struct mmu_notifier_range range;
 	unsigned long last_addr_mask;
 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+	struct hugetlb_pte hpte;
 
 	/*
 	 * In the case of shared PMDs, the area to flush could be beyond
@@ -6824,28 +6824,30 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	hugetlb_vma_lock_write(vma);
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 	last_addr_mask = hugetlb_mask_last_page(h);
-	for (; address < end; address += psize) {
+	while (address < end) {
 		spinlock_t *ptl;
-		ptep = hugetlb_walk(vma, address, psize);
-		if (!ptep) {
-			address |= last_addr_mask;
+
+		if (hugetlb_full_walk(&hpte, vma, address)) {
+			address = (address | last_addr_mask) + psize;
 			continue;
 		}
-		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, vma, address, ptep)) {
+
+		ptl = hugetlb_pte_lock(&hpte);
+		if (hugetlb_pte_size(&hpte) == psize &&
+		    huge_pmd_unshare(mm, vma, address, hpte.ptep)) {
 			/*
 			 * When uffd-wp is enabled on the vma, unshare
 			 * shouldn't happen at all.  Warn about it if it
 			 * happened due to some reason.
 			 */
 			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
-			pages++;
+			base_pages += psize / PAGE_SIZE;
 			spin_unlock(ptl);
 			shared_pmd = true;
-			address |= last_addr_mask;
+			address = (address | last_addr_mask) + psize;
 			continue;
 		}
-		pte = huge_ptep_get(ptep);
+		pte = huge_ptep_get(hpte.ptep);
 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
 			/* Nothing to do. */
 		} else if (unlikely(is_hugetlb_entry_migration(pte))) {
@@ -6861,7 +6863,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 				entry = make_readable_migration_entry(
 							swp_offset(entry));
 				newpte = swp_entry_to_pte(entry);
-				pages++;
+				base_pages += hugetlb_pte_size(&hpte) / PAGE_SIZE;
 			}
 
 			if (uffd_wp)
@@ -6869,34 +6871,49 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			else if (uffd_wp_resolve)
 				newpte = pte_swp_clear_uffd_wp(newpte);
 			if (!pte_same(pte, newpte))
-				set_huge_pte_at(mm, address, ptep, newpte);
+				set_huge_pte_at(mm, address, hpte.ptep, newpte);
 		} else if (unlikely(is_pte_marker(pte))) {
 			/* No other markers apply for now. */
 			WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
 			if (uffd_wp_resolve)
 				/* Safe to modify directly (non-present->none). */
-				huge_pte_clear(mm, address, ptep, psize);
+				huge_pte_clear(mm, address, hpte.ptep,
+						hugetlb_pte_size(&hpte));
 		} else if (!huge_pte_none(pte)) {
 			pte_t old_pte;
-			unsigned int shift = huge_page_shift(hstate_vma(vma));
+			unsigned int shift = hpte.shift;
 
-			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
+			if (unlikely(!hugetlb_pte_present_leaf(&hpte, pte))) {
+				/*
+				 * Someone split the PTE from under us, so retry
+				 * the walk.
+				 */
+				spin_unlock(ptl);
+				continue;
+			}
+
+			old_pte = huge_ptep_modify_prot_start(
+					vma, address, hpte.ptep);
 			pte = huge_pte_modify(old_pte, newprot);
-			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
+			pte = arch_make_huge_pte(
+					pte, shift, vma->vm_flags);
 			if (uffd_wp)
 				pte = huge_pte_mkuffd_wp(pte);
 			else if (uffd_wp_resolve)
 				pte = huge_pte_clear_uffd_wp(pte);
-			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
-			pages++;
+			huge_ptep_modify_prot_commit(
+					vma, address, hpte.ptep,
+					old_pte, pte);
+			base_pages += hugetlb_pte_size(&hpte) / PAGE_SIZE;
 		} else {
 			/* None pte */
 			if (unlikely(uffd_wp))
 				/* Safe to modify directly (none->non-present). */
-				set_huge_pte_at(mm, address, ptep,
+				set_huge_pte_at(mm, address, hpte.ptep,
 						make_pte_marker(PTE_MARKER_UFFD_WP));
 		}
 		spin_unlock(ptl);
+		address += hugetlb_pte_size(&hpte);
 	}
 	/*
 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
@@ -6919,7 +6936,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	hugetlb_vma_unlock_write(vma);
 	mmu_notifier_invalidate_range_end(&range);
 
-	return pages << h->order;
+	return base_pages;
 }
 
 /* Return true if reservation was successful, false otherwise. */
-- 
2.39.0.314.g84b9a713c41-goog
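
[Editor's note, for readers following the series] The heart of this patch
is that the loop now strides by whatever mapping size the walk actually
found, and counts base (4 KiB) pages directly; the old `return pages <<
h->order` only worked when every entry was hstate-sized. Below is a
minimal, self-contained sketch of that loop shape. It is illustrative
only: `struct hugetlb_pte`, `hugetlb_pte_size()`, and the fake walk here
are simplified stand-ins for the helpers introduced earlier in this
series, not the kernel's definitions.

/*
 * Sketch of the variable-stride loop, with stubbed-out hugetlb_pte
 * helpers.  Build with: cc -o hgm_sketch hgm_sketch.c
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SHIFT	21	/* 2 MiB mappings on x86-64 */

struct hugetlb_pte {
	unsigned int shift;	/* level at which the walk found a mapping */
};

static unsigned long hugetlb_pte_size(const struct hugetlb_pte *hpte)
{
	return 1UL << hpte->shift;
}

/*
 * Pretend walk: the first half of the range is still mapped at PMD
 * granularity; the second half has been split down to 4 KiB PTEs.
 * Returns 0 when a mapping is found, nonzero otherwise (mirroring the
 * calling convention the patch uses for hugetlb_full_walk()).
 */
static int fake_full_walk(struct hugetlb_pte *hpte, unsigned long addr,
			  unsigned long half)
{
	hpte->shift = (addr < half) ? PMD_SHIFT : PAGE_SHIFT;
	return 0;
}

int main(void)
{
	unsigned long address = 0, end = 4UL << 20;	/* 4 MiB range */
	unsigned long base_pages = 0;
	struct hugetlb_pte hpte;

	while (address < end) {
		if (fake_full_walk(&hpte, address, end / 2)) {
			address += PAGE_SIZE;	/* nothing mapped here */
			continue;
		}
		/* ...the protection change itself would happen here... */
		base_pages += hugetlb_pte_size(&hpte) / PAGE_SIZE;
		address += hugetlb_pte_size(&hpte);	/* variable stride */
	}

	/* One 2 MiB PMD (512 pages) + 512 split PTEs = 1024 base pages. */
	printf("changed %lu base pages\n", base_pages);
	return 0;
}

Once entries can sit at mixed levels there is no single shift to multiply
an entry count by after the fact, which is why the patch accumulates
`base_pages` inside the loop instead of converting once at return.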