From: Anshuman Khandual
To: linux-mm@kvack.org
Cc: Anshuman Khandual, Andrew Morton, David Hildenbrand, Ryan Roberts, "Mike Rapoport (IBM)", Arnd Bergmann, x86@kernel.org, linux-m68k@lists.linux-m68k.org, linux-fsdevel@vger.kernel.org, kasan-dev@googlegroups.com, linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Dimitri Sivanich, Muchun Song, Andrey Ryabinin, Miaohe Lin, Naoya Horiguchi, Pasha Tatashin, Dennis Zhou, Tejun Heo, Christoph Lameter, Uladzislau Rezki, Christoph Hellwig
Subject: [PATCH V2 4/7] mm: Use pmdp_get() for accessing PMD entries
Date: Tue, 17 Sep 2024 13:01:14 +0530
Message-Id: <20240917073117.1531207-5-anshuman.khandual@arm.com>
In-Reply-To: <20240917073117.1531207-1-anshuman.khandual@arm.com>
References: <20240917073117.1531207-1-anshuman.khandual@arm.com>

Convert PMD accesses to go via the pmdp_get() helper, which defaults to READ_ONCE() but also gives the platform an opportunity to override it when required. The page table entry value that is read gets stored in a local variable and reused wherever it is needed afterwards, which avoids multiple memory loads of the entry as well as possible race conditions between those loads.
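For illustration, a typical conversion in this series has the shape sketched below. The helper pmd_is_mapped() is hypothetical and not taken from any hunk in the diff; only the pattern of replacing direct *pmd dereferences with a single pmdp_get() read is what the patch actually does.

	/* Before: every check dereferences the PMD entry again. */
	static bool pmd_is_mapped(pmd_t *pmd)
	{
		if (pmd_none(*pmd))
			return false;
		return pmd_present(*pmd);
	}

	/*
	 * After: read the entry once through pmdp_get() (READ_ONCE()
	 * by default, overridable by the platform) and reuse the local
	 * copy for every subsequent check.
	 */
	static bool pmd_is_mapped(pmd_t *pmd)
	{
		pmd_t old_pmd = pmdp_get(pmd);

		if (pmd_none(old_pmd))
			return false;
		return pmd_present(old_pmd);
	}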
Cc: Dimitri Sivanich Cc: Muchun Song Cc: Andrey Ryabinin Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Pasha Tatashin Cc: Dennis Zhou Cc: Tejun Heo Cc: Christoph Lameter Cc: Uladzislau Rezki Cc: Christoph Hellwig Cc: Andrew Morton Cc: David Hildenbrand Cc: Ryan Roberts Cc: "Mike Rapoport (IBM)" Cc: linux-kernel@vger.kernel.org Cc: linux-fsdevel@vger.kernel.org Cc: linux-mm@kvack.org Cc: kasan-dev@googlegroups.com Signed-off-by: Anshuman Khandual --- drivers/misc/sgi-gru/grufault.c | 7 ++-- fs/proc/task_mmu.c | 28 +++++++------- include/linux/huge_mm.h | 4 +- include/linux/mm.h | 2 +- include/linux/pgtable.h | 15 ++++---- mm/gup.c | 14 +++---- mm/huge_memory.c | 66 +++++++++++++++++---------------- mm/hugetlb_vmemmap.c | 4 +- mm/kasan/init.c | 10 ++--- mm/kasan/shadow.c | 4 +- mm/khugepaged.c | 4 +- mm/madvise.c | 6 +-- mm/memory-failure.c | 6 +-- mm/memory.c | 25 +++++++------ mm/mempolicy.c | 4 +- mm/migrate.c | 4 +- mm/migrate_device.c | 10 ++--- mm/mlock.c | 6 +-- mm/mprotect.c | 2 +- mm/mremap.c | 4 +- mm/page_table_check.c | 2 +- mm/pagewalk.c | 4 +- mm/percpu.c | 2 +- mm/pgtable-generic.c | 20 +++++----- mm/ptdump.c | 2 +- mm/rmap.c | 4 +- mm/sparse-vmemmap.c | 4 +- mm/vmalloc.c | 15 ++++---- 28 files changed, 145 insertions(+), 133 deletions(-) diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufaul= t.c index 3557d78ee47a..804f275ece99 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -208,7 +208,7 @@ static int atomic_pte_lookup(struct vm_area_struct *vma= , unsigned long vaddr, pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; - pmd_t *pmdp; + pmd_t *pmdp, old_pmd; pte_t pte; =20 pgdp =3D pgd_offset(vma->vm_mm, vaddr); @@ -224,10 +224,11 @@ static int atomic_pte_lookup(struct vm_area_struct *v= ma, unsigned long vaddr, goto err; =20 pmdp =3D pmd_offset(pudp, vaddr); - if (unlikely(pmd_none(*pmdp))) + old_pmd =3D pmdp_get(pmdp); + if (unlikely(pmd_none(old_pmd))) goto err; #ifdef CONFIG_X86_64 - if (unlikely(pmd_leaf(*pmdp))) + if (unlikely(pmd_leaf(old_pmd))) pte =3D ptep_get((pte_t *)pmdp); else #endif diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 5f171ad7b436..f0c63884d008 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -861,12 +861,13 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long= addr, struct page *page =3D NULL; bool present =3D false; struct folio *folio; + pmd_t old_pmd =3D pmdp_get(pmd); =20 - if (pmd_present(*pmd)) { - page =3D vm_normal_page_pmd(vma, addr, *pmd); + if (pmd_present(old_pmd)) { + page =3D vm_normal_page_pmd(vma, addr, old_pmd); present =3D true; - } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) { - swp_entry_t entry =3D pmd_to_swp_entry(*pmd); + } else if (unlikely(thp_migration_supported() && is_swap_pmd(old_pmd))) { + swp_entry_t entry =3D pmd_to_swp_entry(old_pmd); =20 if (is_pfn_swap_entry(entry)) page =3D pfn_swap_entry_to_page(entry); @@ -883,7 +884,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long a= ddr, else mss->file_thp +=3D HPAGE_PMD_SIZE; =20 - smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), + smaps_account(mss, page, true, pmd_young(old_pmd), pmd_dirty(old_pmd), locked, present); } #else @@ -1426,7 +1427,7 @@ static inline void clear_soft_dirty(struct vm_area_st= ruct *vma, static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { - pmd_t old, pmd =3D *pmdp; + pmd_t old, pmd =3D pmdp_get(pmdp); =20 if (pmd_present(pmd)) { /* See comment in change_huge_pmd() */ @@ -1468,10 
+1469,10 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigne= d long addr, goto out; } =20 - if (!pmd_present(*pmd)) + if (!pmd_present(pmdp_get(pmd))) goto out; =20 - folio =3D pmd_folio(*pmd); + folio =3D pmd_folio(pmdp_get(pmd)); =20 /* Clear accessed and referenced bits. */ pmdp_test_and_clear_young(vma, addr, pmd); @@ -1769,7 +1770,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned lo= ng addr, unsigned long end, if (ptl) { unsigned int idx =3D (addr & ~PMD_MASK) >> PAGE_SHIFT; u64 flags =3D 0, frame =3D 0; - pmd_t pmd =3D *pmdp; + pmd_t pmd =3D pmdp_get(pmdp); struct page *page =3D NULL; struct folio *folio =3D NULL; =20 @@ -2189,7 +2190,7 @@ static unsigned long pagemap_thp_category(struct page= map_scan_private *p, static void make_uffd_wp_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { - pmd_t old, pmd =3D *pmdp; + pmd_t old, pmd =3D pmdp_get(pmdp); =20 if (pmd_present(pmd)) { old =3D pmdp_invalidate_ad(vma, addr, pmdp); @@ -2416,7 +2417,7 @@ static int pagemap_scan_thp_entry(pmd_t *pmd, unsigne= d long start, return -ENOENT; =20 categories =3D p->cur_vma_category | - pagemap_thp_category(p, vma, start, *pmd); + pagemap_thp_category(p, vma, start, pmdp_get(pmd)); =20 if (!pagemap_scan_is_interesting_page(categories, p)) goto out_unlock; @@ -2946,10 +2947,11 @@ static int gather_pte_stats(pmd_t *pmd, unsigned lo= ng addr, ptl =3D pmd_trans_huge_lock(pmd, vma); if (ptl) { struct page *page; + pmd_t old_pmd =3D pmdp_get(pmd); =20 - page =3D can_gather_numa_stats_pmd(*pmd, vma, addr); + page =3D can_gather_numa_stats_pmd(old_pmd, vma, addr); if (page) - gather_stats(page, md, pmd_dirty(*pmd), + gather_stats(page, md, pmd_dirty(old_pmd), HPAGE_PMD_SIZE/PAGE_SIZE); spin_unlock(ptl); return 0; diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index e25d9ebfdf89..38b5de040d02 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -369,7 +369,9 @@ static inline int is_swap_pmd(pmd_t pmd) static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { - if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) + pmd_t old_pmd =3D pmdp_get(pmd); + + if (is_swap_pmd(old_pmd) || pmd_trans_huge(old_pmd) || pmd_devmap(old_pmd= )) return __pmd_trans_huge_lock(pmd, vma); else return NULL; diff --git a/include/linux/mm.h b/include/linux/mm.h index 147073601716..258e49323306 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2921,7 +2921,7 @@ static inline spinlock_t *ptlock_ptr(struct ptdesc *p= tdesc) =20 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) { - return ptlock_ptr(page_ptdesc(pmd_page(*pmd))); + return ptlock_ptr(page_ptdesc(pmd_page(pmdp_get(pmd)))); } =20 static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 547eeae8c43f..ea283ce958a7 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -367,7 +367,7 @@ static inline int pmdp_test_and_clear_young(struct vm_a= rea_struct *vma, unsigned long address, pmd_t *pmdp) { - pmd_t pmd =3D *pmdp; + pmd_t pmd =3D pmdp_get(pmdp); int r =3D 1; if (!pmd_young(pmd)) r =3D 0; @@ -598,7 +598,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_s= truct *mm, unsigned long address, pmd_t *pmdp) { - pmd_t pmd =3D *pmdp; + pmd_t pmd =3D pmdp_get(pmdp); =20 pmd_clear(pmdp); page_table_check_pmd_clear(mm, pmd); @@ -876,7 +876,7 @@ static inline pte_t pte_sw_mkyoung(pte_t pte) static inline void pmdp_set_wrprotect(struct mm_struct 
*mm, unsigned long address, pmd_t *pmdp) { - pmd_t old_pmd =3D *pmdp; + pmd_t old_pmd =3D pmdp_get(pmdp); set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); } #else @@ -945,7 +945,7 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_= struct *mm, pmd_t *pmdp); static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t pmd) { - pmd_t old_pmd =3D *pmdp; + pmd_t old_pmd =3D pmdp_get(pmdp); set_pmd_at(vma->vm_mm, address, pmdp, pmd); return old_pmd; } @@ -1067,7 +1067,8 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) =20 #define set_pmd_safe(pmdp, pmd) \ ({ \ - WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ + pmd_t __old =3D pmdp_get(pmdp); \ + WARN_ON_ONCE(pmd_present(__old) && !pmd_same(__old, pmd)); \ set_pmd(pmdp, pmd); \ }) =20 @@ -1271,9 +1272,9 @@ static inline int pud_none_or_clear_bad(pud_t *pud) =20 static inline int pmd_none_or_clear_bad(pmd_t *pmd) { - if (pmd_none(*pmd)) + if (pmd_none(pmdp_get(pmd))) return 1; - if (unlikely(pmd_bad(*pmd))) { + if (unlikely(pmd_bad(pmdp_get(pmd)))) { pmd_clear_bad(pmd); return 1; } diff --git a/mm/gup.c b/mm/gup.c index 54d0dc3831fb..aeeac0a54944 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -699,7 +699,7 @@ static struct page *follow_huge_pmd(struct vm_area_stru= ct *vma, struct follow_page_context *ctx) { struct mm_struct *mm =3D vma->vm_mm; - pmd_t pmdval =3D *pmd; + pmd_t pmdval =3D pmdp_get(pmd); struct page *page; int ret; =20 @@ -714,7 +714,7 @@ static struct page *follow_huge_pmd(struct vm_area_stru= ct *vma, if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval)) return ERR_PTR(-EFAULT); =20 - if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags)) + if (pmd_protnone(pmdp_get(pmd)) && !gup_can_follow_protnone(vma, flags)) return NULL; =20 if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page)) @@ -957,7 +957,7 @@ static struct page *follow_pmd_mask(struct vm_area_stru= ct *vma, return no_page_table(vma, flags, address); =20 ptl =3D pmd_lock(mm, pmd); - pmdval =3D *pmd; + pmdval =3D pmdp_get(pmd); if (unlikely(!pmd_present(pmdval))) { spin_unlock(ptl); return no_page_table(vma, flags, address); @@ -1120,7 +1120,7 @@ static int get_gate_page(struct mm_struct *mm, unsign= ed long address, if (pud_none(*pud)) return -EFAULT; pmd =3D pmd_offset(pud, address); - if (!pmd_present(*pmd)) + if (!pmd_present(pmdp_get(pmd))) return -EFAULT; pte =3D pte_offset_map(pmd, address); if (!pte) @@ -2898,7 +2898,7 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp,= unsigned long addr, if (!folio) goto pte_unmap; =20 - if (unlikely(pmd_val(pmd) !=3D pmd_val(*pmdp)) || + if (unlikely(pmd_val(pmd) !=3D pmd_val(pmdp_get(pmdp))) || unlikely(pte_val(pte) !=3D pte_val(ptep_get(ptep)))) { gup_put_folio(folio, 1, flags); goto pte_unmap; @@ -3007,7 +3007,7 @@ static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t= *pmdp, unsigned long addr, if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) return 0; =20 - if (unlikely(pmd_val(orig) !=3D pmd_val(*pmdp))) { + if (unlikely(pmd_val(orig) !=3D pmd_val(pmdp_get(pmdp)))) { gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); return 0; } @@ -3074,7 +3074,7 @@ static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp,= unsigned long addr, if (!folio) return 0; =20 - if (unlikely(pmd_val(orig) !=3D pmd_val(*pmdp))) { + if (unlikely(pmd_val(orig) !=3D pmd_val(pmdp_get(pmdp)))) { gup_put_folio(folio, refs, flags); return 0; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 67c86a5d64a6..bb63de935937 100644 --- 
a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1065,7 +1065,7 @@ static void set_huge_zero_folio(pgtable_t pgtable, st= ruct mm_struct *mm, struct folio *zero_folio) { pmd_t entry; - if (!pmd_none(*pmd)) + if (!pmd_none(pmdp_get(pmd))) return; entry =3D mk_pmd(&zero_folio->page, vma->vm_page_prot); entry =3D pmd_mkhuge(entry); @@ -1144,17 +1144,17 @@ static void insert_pfn_pmd(struct vm_area_struct *v= ma, unsigned long addr, pgtable_t pgtable) { struct mm_struct *mm =3D vma->vm_mm; - pmd_t entry; + pmd_t entry, old_pmd =3D pmdp_get(pmd); spinlock_t *ptl; =20 ptl =3D pmd_lock(mm, pmd); - if (!pmd_none(*pmd)) { + if (!pmd_none(old_pmd)) { if (write) { - if (pmd_pfn(*pmd) !=3D pfn_t_to_pfn(pfn)) { - WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); + if (pmd_pfn(old_pmd) !=3D pfn_t_to_pfn(pfn)) { + WARN_ON_ONCE(!is_huge_zero_pmd(old_pmd)); goto out_unlock; } - entry =3D pmd_mkyoung(*pmd); + entry =3D pmd_mkyoung(old_pmd); entry =3D maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) update_mmu_cache_pmd(vma, addr, pmd); @@ -1318,7 +1318,7 @@ void touch_pmd(struct vm_area_struct *vma, unsigned l= ong addr, { pmd_t _pmd; =20 - _pmd =3D pmd_mkyoung(*pmd); + _pmd =3D pmd_mkyoung(pmdp_get(pmd)); if (write) _pmd =3D pmd_mkdirty(_pmd); if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, @@ -1329,17 +1329,18 @@ void touch_pmd(struct vm_area_struct *vma, unsigned= long addr, struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long a= ddr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) { - unsigned long pfn =3D pmd_pfn(*pmd); + pmd_t old_pmd =3D pmdp_get(pmd); + unsigned long pfn =3D pmd_pfn(old_pmd); struct mm_struct *mm =3D vma->vm_mm; struct page *page; int ret; =20 assert_spin_locked(pmd_lockptr(mm, pmd)); =20 - if (flags & FOLL_WRITE && !pmd_write(*pmd)) + if (flags & FOLL_WRITE && !pmd_write(old_pmd)) return NULL; =20 - if (pmd_present(*pmd) && pmd_devmap(*pmd)) + if (pmd_present(old_pmd) && pmd_devmap(old_pmd)) /* pass */; else return NULL; @@ -1772,7 +1773,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, st= ruct vm_area_struct *vma, if (!ptl) goto out_unlocked; =20 - orig_pmd =3D *pmd; + orig_pmd =3D pmdp_get(pmd); if (is_huge_zero_pmd(orig_pmd)) goto out; =20 @@ -1990,7 +1991,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm= _area_struct *vma, { struct mm_struct *mm =3D vma->vm_mm; spinlock_t *ptl; - pmd_t oldpmd, entry; + pmd_t oldpmd, entry, old_pmd; bool prot_numa =3D cp_flags & MM_CP_PROT_NUMA; bool uffd_wp =3D cp_flags & MM_CP_UFFD_WP; bool uffd_wp_resolve =3D cp_flags & MM_CP_UFFD_WP_RESOLVE; @@ -2005,13 +2006,14 @@ int change_huge_pmd(struct mmu_gather *tlb, struct = vm_area_struct *vma, if (!ptl) return 0; =20 + old_pmd =3D pmdp_get(pmd); #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION - if (is_swap_pmd(*pmd)) { - swp_entry_t entry =3D pmd_to_swp_entry(*pmd); + if (is_swap_pmd(old_pmd)) { + swp_entry_t entry =3D pmd_to_swp_entry(old_pmd); struct folio *folio =3D pfn_swap_entry_folio(entry); pmd_t newpmd; =20 - VM_BUG_ON(!is_pmd_migration_entry(*pmd)); + VM_BUG_ON(!is_pmd_migration_entry(old_pmd)); if (is_writable_migration_entry(entry)) { /* * A protection check is difficult so @@ -2022,17 +2024,17 @@ int change_huge_pmd(struct mmu_gather *tlb, struct = vm_area_struct *vma, else entry =3D make_readable_migration_entry(swp_offset(entry)); newpmd =3D swp_entry_to_pmd(entry); - if (pmd_swp_soft_dirty(*pmd)) + if (pmd_swp_soft_dirty(old_pmd)) newpmd =3D pmd_swp_mksoft_dirty(newpmd); } else { - newpmd =3D *pmd; + newpmd =3D 
old_pmd; } =20 if (uffd_wp) newpmd =3D pmd_swp_mkuffd_wp(newpmd); else if (uffd_wp_resolve) newpmd =3D pmd_swp_clear_uffd_wp(newpmd); - if (!pmd_same(*pmd, newpmd)) + if (!pmd_same(old_pmd, newpmd)) set_pmd_at(mm, addr, pmd, newpmd); goto unlock; } @@ -2046,13 +2048,13 @@ int change_huge_pmd(struct mmu_gather *tlb, struct = vm_area_struct *vma, * data is likely to be read-cached on the local CPU and * local/remote hits to the zero page are not interesting. */ - if (is_huge_zero_pmd(*pmd)) + if (is_huge_zero_pmd(old_pmd)) goto unlock; =20 - if (pmd_protnone(*pmd)) + if (pmd_protnone(old_pmd)) goto unlock; =20 - folio =3D pmd_folio(*pmd); + folio =3D pmd_folio(old_pmd); toptier =3D node_is_toptier(folio_nid(folio)); /* * Skip scanning top tier node if normal numa @@ -2266,8 +2268,8 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct = vm_area_struct *vma) { spinlock_t *ptl; ptl =3D pmd_lock(vma->vm_mm, pmd); - if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || - pmd_devmap(*pmd))) + if (likely(is_swap_pmd(pmdp_get(pmd)) || pmd_trans_huge(pmdp_get(pmd)) || + pmd_devmap(pmdp_get(pmd)))) return ptl; spin_unlock(ptl); return NULL; @@ -2404,8 +2406,8 @@ static void __split_huge_pmd_locked(struct vm_area_st= ruct *vma, pmd_t *pmd, VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); VM_BUG_ON_VMA(vma->vm_start > haddr, vma); VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); - VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) - && !pmd_devmap(*pmd)); + VM_BUG_ON(!is_pmd_migration_entry(pmdp_get(pmd)) && !pmd_trans_huge(pmdp_= get(pmd)) + && !pmd_devmap(pmdp_get(pmd))); =20 count_vm_event(THP_SPLIT_PMD); =20 @@ -2438,7 +2440,7 @@ static void __split_huge_pmd_locked(struct vm_area_st= ruct *vma, pmd_t *pmd, return; } =20 - if (is_huge_zero_pmd(*pmd)) { + if (is_huge_zero_pmd(pmdp_get(pmd))) { /* * FIXME: Do we want to invalidate secondary mmu by calling * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below @@ -2451,11 +2453,11 @@ static void __split_huge_pmd_locked(struct vm_area_= struct *vma, pmd_t *pmd, return __split_huge_zero_page_pmd(vma, haddr, pmd); } =20 - pmd_migration =3D is_pmd_migration_entry(*pmd); + pmd_migration =3D is_pmd_migration_entry(pmdp_get(pmd)); if (unlikely(pmd_migration)) { swp_entry_t entry; =20 - old_pmd =3D *pmd; + old_pmd =3D pmdp_get(pmd); entry =3D pmd_to_swp_entry(old_pmd); page =3D pfn_swap_entry_to_page(entry); write =3D is_writable_migration_entry(entry); @@ -2620,9 +2622,9 @@ void split_huge_pmd_locked(struct vm_area_struct *vma= , unsigned long address, * require a folio to check the PMD against. Otherwise, there * is a risk of replacing the wrong folio. 
*/ - if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || - is_pmd_migration_entry(*pmd)) { - if (folio && folio !=3D pmd_folio(*pmd)) + if (pmd_trans_huge(pmdp_get(pmd)) || pmd_devmap(pmdp_get(pmd)) || + is_pmd_migration_entry(pmdp_get(pmd))) { + if (folio && folio !=3D pmd_folio(pmdp_get(pmd))) return; __split_huge_pmd_locked(vma, pmd, address, freeze); } @@ -2719,7 +2721,7 @@ static bool __discard_anon_folio_pmd_locked(struct vm= _area_struct *vma, { struct mm_struct *mm =3D vma->vm_mm; int ref_count, map_count; - pmd_t orig_pmd =3D *pmdp; + pmd_t orig_pmd =3D pmdp_get(pmdp); =20 if (folio_test_dirty(folio) || pmd_dirty(orig_pmd)) return false; diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 0c3f56b3578e..9deb82654d5b 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -70,7 +70,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *hea= d, unsigned long start, } =20 spin_lock(&init_mm.page_table_lock); - if (likely(pmd_leaf(*pmd))) { + if (likely(pmd_leaf(pmdp_get(pmd)))) { /* * Higher order allocations from buddy allocator must be able to * be treated as indepdenent small pages (as they can be freed @@ -104,7 +104,7 @@ static int vmemmap_pmd_entry(pmd_t *pmd, unsigned long = addr, walk->action =3D ACTION_CONTINUE; =20 spin_lock(&init_mm.page_table_lock); - head =3D pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL; + head =3D pmd_leaf(pmdp_get(pmd)) ? pmd_page(pmdp_get(pmd)) : NULL; /* * Due to HugeTLB alignment requirements and the vmemmap * pages being at the start of the hotplugged memory diff --git a/mm/kasan/init.c b/mm/kasan/init.c index 89895f38f722..4418bcdcb2aa 100644 --- a/mm/kasan/init.c +++ b/mm/kasan/init.c @@ -121,7 +121,7 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned= long addr, continue; } =20 - if (pmd_none(*pmd)) { + if (pmd_none(pmdp_get(pmd))) { pte_t *p; =20 if (slab_is_available()) @@ -300,7 +300,7 @@ static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd) return; } =20 - pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd))); + pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(pmdp_get(pmd)))); pmd_clear(pmd); } =20 @@ -311,7 +311,7 @@ static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud) =20 for (i =3D 0; i < PTRS_PER_PMD; i++) { pmd =3D pmd_start + i; - if (!pmd_none(*pmd)) + if (!pmd_none(pmdp_get(pmd))) return; } =20 @@ -381,10 +381,10 @@ static void kasan_remove_pmd_table(pmd_t *pmd, unsign= ed long addr, =20 next =3D pmd_addr_end(addr, end); =20 - if (!pmd_present(*pmd)) + if (!pmd_present(pmdp_get(pmd))) continue; =20 - if (kasan_pte_table(*pmd)) { + if (kasan_pte_table(pmdp_get(pmd))) { if (IS_ALIGNED(addr, PMD_SIZE) && IS_ALIGNED(next, PMD_SIZE)) { pmd_clear(pmd); diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index d6210ca48dda..aec16a7236f7 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -202,9 +202,9 @@ static bool shadow_mapped(unsigned long addr) if (pud_leaf(*pud)) return true; pmd =3D pmd_offset(pud, addr); - if (pmd_none(*pmd)) + if (pmd_none(pmdp_get(pmd))) return false; - if (pmd_leaf(*pmd)) + if (pmd_leaf(pmdp_get(pmd))) return true; pte =3D pte_offset_kernel(pmd, addr); return !pte_none(ptep_get(pte)); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index cdd1d8655a76..793da996313f 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1192,7 +1192,7 @@ static int collapse_huge_page(struct mm_struct *mm, u= nsigned long address, if (pte) pte_unmap(pte); spin_lock(pmd_ptl); - BUG_ON(!pmd_none(*pmd)); + BUG_ON(!pmd_none(pmdp_get(pmd))); /* * We can only use set_pmd_at when 
establishing * hugepmds and never for establishing regular pmds that @@ -1229,7 +1229,7 @@ static int collapse_huge_page(struct mm_struct *mm, u= nsigned long address, _pmd =3D maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); =20 spin_lock(pmd_ptl); - BUG_ON(!pmd_none(*pmd)); + BUG_ON(!pmd_none(pmdp_get(pmd))); folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); pgtable_trans_huge_deposit(mm, pmd, pgtable); diff --git a/mm/madvise.c b/mm/madvise.c index 89089d84f8df..382c55d2ec94 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -357,7 +357,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, !can_do_file_pageout(vma); =20 #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (pmd_trans_huge(*pmd)) { + if (pmd_trans_huge(pmdp_get(pmd))) { pmd_t orig_pmd; unsigned long next =3D pmd_addr_end(addr, end); =20 @@ -366,7 +366,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, if (!ptl) return 0; =20 - orig_pmd =3D *pmd; + orig_pmd =3D pmdp_get(pmd); if (is_huge_zero_pmd(orig_pmd)) goto huge_unlock; =20 @@ -655,7 +655,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned = long addr, int nr, max_nr; =20 next =3D pmd_addr_end(addr, end); - if (pmd_trans_huge(*pmd)) + if (pmd_trans_huge(pmdp_get(pmd))) if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) return 0; =20 diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 7066fc84f351..305dbef3cc4d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -422,9 +422,9 @@ static unsigned long dev_pagemap_mapping_shift(struct v= m_area_struct *vma, if (pud_devmap(*pud)) return PUD_SHIFT; pmd =3D pmd_offset(pud, address); - if (!pmd_present(*pmd)) + if (!pmd_present(pmdp_get(pmd))) return 0; - if (pmd_devmap(*pmd)) + if (pmd_devmap(pmdp_get(pmd))) return PMD_SHIFT; pte =3D pte_offset_map(pmd, address); if (!pte) @@ -775,7 +775,7 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned l= ong addr, short shift, static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, struct hwpoison_walk *hwp) { - pmd_t pmd =3D *pmdp; + pmd_t pmd =3D pmdp_get(pmdp); unsigned long pfn; unsigned long hwpoison_vaddr; =20 diff --git a/mm/memory.c b/mm/memory.c index ebfc9768f801..5520e1f6a1b9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -189,7 +189,7 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member) static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) { - pgtable_t token =3D pmd_pgtable(*pmd); + pgtable_t token =3D pmd_pgtable(pmdp_get(pmd)); pmd_clear(pmd); pte_free_tlb(tlb, token, addr); mm_dec_nr_ptes(tlb->mm); @@ -421,7 +421,7 @@ void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgta= ble_t *pte) { spinlock_t *ptl =3D pmd_lock(mm, pmd); =20 - if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ + if (likely(pmd_none(pmdp_get(pmd)))) { /* Has another populated it ? */ mm_inc_nr_ptes(mm); /* * Ensure all pte setup (eg. pte page lock and page clearing) are @@ -462,7 +462,7 @@ int __pte_alloc_kernel(pmd_t *pmd) return -ENOMEM; =20 spin_lock(&init_mm.page_table_lock); - if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ + if (likely(pmd_none(pmdp_get(pmd)))) { /* Has another populated it ? 
*/ smp_wmb(); /* See comment in pmd_install() */ pmd_populate_kernel(&init_mm, pmd, new); new =3D NULL; @@ -1710,7 +1710,8 @@ static inline unsigned long zap_pmd_range(struct mmu_= gather *tlb, pmd =3D pmd_offset(pud, addr); do { next =3D pmd_addr_end(addr, end); - if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { + if (is_swap_pmd(pmdp_get(pmd)) || pmd_trans_huge(pmdp_get(pmd)) || + pmd_devmap(pmdp_get(pmd))) { if (next - addr !=3D HPAGE_PMD_SIZE) __split_huge_pmd(vma, pmd, addr, false, NULL); else if (zap_huge_pmd(tlb, vma, pmd, addr)) { @@ -1720,7 +1721,7 @@ static inline unsigned long zap_pmd_range(struct mmu_= gather *tlb, /* fall through */ } else if (details && details->single_folio && folio_test_pmd_mappable(details->single_folio) && - next - addr =3D=3D HPAGE_PMD_SIZE && pmd_none(*pmd)) { + next - addr =3D=3D HPAGE_PMD_SIZE && pmd_none(pmdp_get(pmd))) { spinlock_t *ptl =3D pmd_lock(tlb->mm, pmd); /* * Take and drop THP pmd lock so that we cannot return @@ -1729,7 +1730,7 @@ static inline unsigned long zap_pmd_range(struct mmu_= gather *tlb, */ spin_unlock(ptl); } - if (pmd_none(*pmd)) { + if (pmd_none(pmdp_get(pmd))) { addr =3D next; continue; } @@ -1975,7 +1976,7 @@ static pmd_t *walk_to_pmd(struct mm_struct *mm, unsig= ned long addr) if (!pmd) return NULL; =20 - VM_BUG_ON(pmd_trans_huge(*pmd)); + VM_BUG_ON(pmd_trans_huge(pmdp_get(pmd))); return pmd; } =20 @@ -2577,7 +2578,7 @@ static inline int remap_pmd_range(struct mm_struct *m= m, pud_t *pud, pmd =3D pmd_alloc(mm, pud, addr); if (!pmd) return -ENOMEM; - VM_BUG_ON(pmd_trans_huge(*pmd)); + VM_BUG_ON(pmd_trans_huge(pmdp_get(pmd))); do { next =3D pmd_addr_end(addr, end); err =3D remap_pte_range(mm, pmd, addr, next, @@ -2846,11 +2847,11 @@ static int apply_to_pmd_range(struct mm_struct *mm,= pud_t *pud, } do { next =3D pmd_addr_end(addr, end); - if (pmd_none(*pmd) && !create) + if (pmd_none(pmdp_get(pmd)) && !create) continue; - if (WARN_ON_ONCE(pmd_leaf(*pmd))) + if (WARN_ON_ONCE(pmd_leaf(pmdp_get(pmd)))) return -EINVAL; - if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) { + if (!pmd_none(pmdp_get(pmd)) && WARN_ON_ONCE(pmd_bad(pmdp_get(pmd)))) { if (!create) continue; pmd_clear_bad(pmd); @@ -6167,7 +6168,7 @@ int follow_pte(struct vm_area_struct *vma, unsigned l= ong address, goto out; =20 pmd =3D pmd_offset(pud, address); - VM_BUG_ON(pmd_trans_huge(*pmd)); + VM_BUG_ON(pmd_trans_huge(pmdp_get(pmd))); =20 ptep =3D pte_offset_map_lock(mm, pmd, address, ptlp); if (!ptep) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index b858e22b259d..03f2df44b07f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -505,11 +505,11 @@ static void queue_folios_pmd(pmd_t *pmd, struct mm_wa= lk *walk) struct folio *folio; struct queue_pages *qp =3D walk->private; =20 - if (unlikely(is_pmd_migration_entry(*pmd))) { + if (unlikely(is_pmd_migration_entry(pmdp_get(pmd)))) { qp->nr_failed++; return; } - folio =3D pmd_folio(*pmd); + folio =3D pmd_folio(pmdp_get(pmd)); if (is_huge_zero_folio(folio)) { walk->action =3D ACTION_CONTINUE; return; diff --git a/mm/migrate.c b/mm/migrate.c index 923ea80ba744..a1dd5c8f88dd 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -369,9 +369,9 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd= _t *pmd) spinlock_t *ptl; =20 ptl =3D pmd_lock(mm, pmd); - if (!is_pmd_migration_entry(*pmd)) + if (!is_pmd_migration_entry(pmdp_get(pmd))) goto unlock; - migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl); + migration_entry_wait_on_locked(pmd_to_swp_entry(pmdp_get(pmd)), ptl); return; unlock: 
spin_unlock(ptl); diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 6d66dc1c6ffa..3a08cef6cd39 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -67,19 +67,19 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, pte_t *ptep; =20 again: - if (pmd_none(*pmdp)) + if (pmd_none(pmdp_get(pmdp))) return migrate_vma_collect_hole(start, end, -1, walk); =20 - if (pmd_trans_huge(*pmdp)) { + if (pmd_trans_huge(pmdp_get(pmdp))) { struct folio *folio; =20 ptl =3D pmd_lock(mm, pmdp); - if (unlikely(!pmd_trans_huge(*pmdp))) { + if (unlikely(!pmd_trans_huge(pmdp_get(pmdp)))) { spin_unlock(ptl); goto again; } =20 - folio =3D pmd_folio(*pmdp); + folio =3D pmd_folio(pmdp_get(pmdp)); if (is_huge_zero_folio(folio)) { spin_unlock(ptl); split_huge_pmd(vma, pmdp, addr); @@ -596,7 +596,7 @@ static void migrate_vma_insert_page(struct migrate_vma = *migrate, pmdp =3D pmd_alloc(mm, pudp, addr); if (!pmdp) goto abort; - if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp)) + if (pmd_trans_huge(pmdp_get(pmdp)) || pmd_devmap(pmdp_get(pmdp))) goto abort; if (pte_alloc(mm, pmdp)) goto abort; diff --git a/mm/mlock.c b/mm/mlock.c index e3e3dc2b2956..c3c479e9d0f8 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -363,11 +363,11 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long = addr, =20 ptl =3D pmd_trans_huge_lock(pmd, vma); if (ptl) { - if (!pmd_present(*pmd)) + if (!pmd_present(pmdp_get(pmd))) goto out; - if (is_huge_zero_pmd(*pmd)) + if (is_huge_zero_pmd(pmdp_get(pmd))) goto out; - folio =3D pmd_folio(*pmd); + folio =3D pmd_folio(pmdp_get(pmd)); if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else diff --git a/mm/mprotect.c b/mm/mprotect.c index 222ab434da54..121fb448b0db 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -381,7 +381,7 @@ static inline long change_pmd_range(struct mmu_gather *= tlb, break; } =20 - if (pmd_none(*pmd)) + if (pmd_none(pmdp_get(pmd))) goto next; =20 /* invoke the mmu notifier if the pmd is populated */ diff --git a/mm/mremap.c b/mm/mremap.c index e7ae140fc640..d42ac62bd34e 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -63,7 +63,7 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned = long addr) return NULL; =20 pmd =3D pmd_offset(pud, addr); - if (pmd_none(*pmd)) + if (pmd_none(pmdp_get(pmd))) return NULL; =20 return pmd; @@ -97,7 +97,7 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct = vm_area_struct *vma, if (!pmd) return NULL; =20 - VM_BUG_ON(pmd_trans_huge(*pmd)); + VM_BUG_ON(pmd_trans_huge(pmdp_get(pmd))); =20 return pmd; } diff --git a/mm/page_table_check.c b/mm/page_table_check.c index 509c6ef8de40..48a2cf56c80e 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -241,7 +241,7 @@ void __page_table_check_pmd_set(struct mm_struct *mm, p= md_t *pmdp, pmd_t pmd) =20 page_table_check_pmd_flags(pmd); =20 - __page_table_check_pmd_clear(mm, *pmdp); + __page_table_check_pmd_clear(mm, pmdp_get(pmdp)); if (pmd_user_accessible_page(pmd)) { page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT, pmd_write(pmd)); diff --git a/mm/pagewalk.c b/mm/pagewalk.c index ae2f08ce991b..c3019a160e77 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -86,7 +86,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr,= unsigned long end, do { again: next =3D pmd_addr_end(addr, end); - if (pmd_none(*pmd)) { + if (pmd_none(pmdp_get(pmd))) { if (ops->pte_hole) err =3D ops->pte_hole(addr, next, depth, walk); if (err) @@ -112,7 +112,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long add= r, unsigned long end, * Check this here so we only break down 
trans_huge * pages when we _need_ to */ - if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) || + if ((!walk->vma && (pmd_leaf(pmdp_get(pmd)) || !pmd_present(pmdp_get(pmd= )))) || walk->action =3D=3D ACTION_CONTINUE || !(ops->pte_entry)) continue; diff --git a/mm/percpu.c b/mm/percpu.c index 20d91af8c033..7ee77c0fd5e3 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -3208,7 +3208,7 @@ void __init __weak pcpu_populate_pte(unsigned long ad= dr) } =20 pmd =3D pmd_offset(pud, addr); - if (!pmd_present(*pmd)) { + if (!pmd_present(pmdp_get(pmd))) { pte_t *new; =20 new =3D memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE); diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index a78a4adf711a..920947bb76cd 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -51,7 +51,7 @@ void pud_clear_bad(pud_t *pud) */ void pmd_clear_bad(pmd_t *pmd) { - pmd_ERROR(*pmd); + pmd_ERROR(pmdp_get(pmd)); pmd_clear(pmd); } =20 @@ -110,7 +110,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty) { - int changed =3D !pmd_same(*pmdp, entry); + int changed =3D !pmd_same(pmdp_get(pmdp), entry); VM_BUG_ON(address & ~HPAGE_PMD_MASK); if (changed) { set_pmd_at(vma->vm_mm, address, pmdp, entry); @@ -137,10 +137,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma, pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long addr= ess, pmd_t *pmdp) { - pmd_t pmd; + pmd_t pmd, old_pmd =3D pmdp_get(pmdp); VM_BUG_ON(address & ~HPAGE_PMD_MASK); - VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) && - !pmd_devmap(*pmdp)); + VM_BUG_ON(pmd_present(old_pmd) && !pmd_trans_huge(old_pmd) && + !pmd_devmap(old_pmd)); pmd =3D pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return pmd; @@ -198,8 +198,10 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct= *mm, pmd_t *pmdp) pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - VM_WARN_ON_ONCE(!pmd_present(*pmdp)); - pmd_t old =3D pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp)); + pmd_t old_pmd =3D pmdp_get(pmdp); + + VM_WARN_ON_ONCE(!pmd_present(old_pmd)); + pmd_t old =3D pmdp_establish(vma, address, pmdp, pmd_mkinvalid(old_pmd)); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return old; } @@ -209,7 +211,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsig= ned long address, pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - VM_WARN_ON_ONCE(!pmd_present(*pmdp)); + VM_WARN_ON_ONCE(!pmd_present(pmdp_get(pmdp))); return pmdp_invalidate(vma, address, pmdp); } #endif @@ -225,7 +227,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, u= nsigned long address, pmd_t pmd; =20 VM_BUG_ON(address & ~HPAGE_PMD_MASK); - VM_BUG_ON(pmd_trans_huge(*pmdp)); + VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp))); pmd =3D pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); =20 /* collapse entails shooting down ptes not pmd */ diff --git a/mm/ptdump.c b/mm/ptdump.c index 106e1d66e9f9..e17588a32012 100644 --- a/mm/ptdump.c +++ b/mm/ptdump.c @@ -99,7 +99,7 @@ static int ptdump_pmd_entry(pmd_t *pmd, unsigned long add= r, unsigned long next, struct mm_walk *walk) { struct ptdump_state *st =3D walk->private; - pmd_t val =3D READ_ONCE(*pmd); + pmd_t val =3D pmdp_get(pmd); =20 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) if (pmd_page(val) =3D=3D virt_to_page(lm_alias(kasan_early_shadow_pte))) diff --git a/mm/rmap.c 
b/mm/rmap.c index 2490e727e2dc..32e4920e419d 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1034,9 +1034,9 @@ static int page_vma_mkclean_one(struct page_vma_mappe= d_walk *pvmw) } else { #ifdef CONFIG_TRANSPARENT_HUGEPAGE pmd_t *pmd =3D pvmw->pmd; - pmd_t entry; + pmd_t entry, old_pmd =3D pmdp_get(pmd); =20 - if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) + if (!pmd_dirty(old_pmd) && !pmd_write(old_pmd)) continue; =20 flush_cache_range(vma, address, diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index edcc7a6b0f6f..c89706e107ce 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -187,7 +187,7 @@ static void * __meminit vmemmap_alloc_block_zero(unsign= ed long size, int node) pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int= node) { pmd_t *pmd =3D pmd_offset(pud, addr); - if (pmd_none(*pmd)) { + if (pmd_none(pmdp_get(pmd))) { void *p =3D vmemmap_alloc_block_zero(PAGE_SIZE, node); if (!p) return NULL; @@ -332,7 +332,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long = start, unsigned long end, return -ENOMEM; =20 pmd =3D pmd_offset(pud, addr); - if (pmd_none(READ_ONCE(*pmd))) { + if (pmd_none(pmdp_get(pmd))) { void *p; =20 p =3D vmemmap_alloc_block_buf(PMD_SIZE, node, altmap); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index a0df1e2e155a..1da56cbe5feb 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -150,7 +150,7 @@ static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long = addr, unsigned long end, if (!IS_ALIGNED(phys_addr, PMD_SIZE)) return 0; =20 - if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) + if (pmd_present(pmdp_get(pmd)) && !pmd_free_pte_page(pmd, addr)) return 0; =20 return pmd_set_huge(pmd, phys_addr, prot); @@ -371,7 +371,7 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long = addr, unsigned long end, next =3D pmd_addr_end(addr, end); =20 cleared =3D pmd_clear_huge(pmd); - if (cleared || pmd_bad(*pmd)) + if (cleared || pmd_bad(pmdp_get(pmd))) *mask |=3D PGTBL_PMD_MODIFIED; =20 if (cleared) @@ -743,7 +743,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) pgd_t *pgd =3D pgd_offset_k(addr); p4d_t *p4d; pud_t *pud; - pmd_t *pmd; + pmd_t *pmd, old_pmd; pte_t *ptep, pte; =20 /* @@ -776,11 +776,12 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) return NULL; =20 pmd =3D pmd_offset(pud, addr); - if (pmd_none(*pmd)) + old_pmd =3D pmdp_get(pmd); + if (pmd_none(old_pmd)) return NULL; - if (pmd_leaf(*pmd)) - return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); - if (WARN_ON_ONCE(pmd_bad(*pmd))) + if (pmd_leaf(old_pmd)) + return pmd_page(old_pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); + if (WARN_ON_ONCE(pmd_bad(old_pmd))) return NULL; =20 ptep =3D pte_offset_kernel(pmd, addr); --=20 2.25.1