[PATCH v3 02/13] mm/huge: avoid big else branch in zap_huge_pmd()

Posted by Lorenzo Stoakes (Oracle) 2 weeks ago
We don't need an extra level of indentation; we can simply exit early
in the first two branches.

No functional change intended.
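For context, the shape of the change is the usual guard-clause style: handle
the special cases up front and return, so the common path stays at the top
level of indentation. A minimal, purely illustrative sketch (not the kernel
code; the function and cases are made up):

	/* Illustrative only: early returns replace nested else branches. */
	static int classify(int x)
	{
		if (x < 0)	/* first special case: exit early */
			return -1;
		if (x == 0)	/* second special case: exit early */
			return 0;

		/* common case runs without extra nesting */
		return 1;
	}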

Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: Qi Zheng <zhengqi.arch@bytedance.com>
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
---
 mm/huge_memory.c | 87 +++++++++++++++++++++++++-----------------------
 1 file changed, 45 insertions(+), 42 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2775309b317a..4e8df3a35cab 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2328,8 +2328,10 @@ static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 pmd_t *pmd, unsigned long addr)
 {
-	pmd_t orig_pmd;
+	struct folio *folio = NULL;
+	int flush_needed = 1;
 	spinlock_t *ptl;
+	pmd_t orig_pmd;
 
 	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
@@ -2350,59 +2352,60 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (arch_needs_pgtable_deposit())
 			zap_deposited_table(tlb->mm, pmd);
 		spin_unlock(ptl);
-	} else if (is_huge_zero_pmd(orig_pmd)) {
+		return 1;
+	}
+	if (is_huge_zero_pmd(orig_pmd)) {
 		if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
 			zap_deposited_table(tlb->mm, pmd);
 		spin_unlock(ptl);
-	} else {
-		struct folio *folio = NULL;
-		int flush_needed = 1;
+		return 1;
+	}
 
-		if (pmd_present(orig_pmd)) {
-			struct page *page = pmd_page(orig_pmd);
+	if (pmd_present(orig_pmd)) {
+		struct page *page = pmd_page(orig_pmd);
 
-			folio = page_folio(page);
-			folio_remove_rmap_pmd(folio, page, vma);
-			WARN_ON_ONCE(folio_mapcount(folio) < 0);
-			VM_BUG_ON_PAGE(!PageHead(page), page);
-		} else if (pmd_is_valid_softleaf(orig_pmd)) {
-			const softleaf_t entry = softleaf_from_pmd(orig_pmd);
+		folio = page_folio(page);
+		folio_remove_rmap_pmd(folio, page, vma);
+		WARN_ON_ONCE(folio_mapcount(folio) < 0);
+		VM_BUG_ON_PAGE(!PageHead(page), page);
+	} else if (pmd_is_valid_softleaf(orig_pmd)) {
+		const softleaf_t entry = softleaf_from_pmd(orig_pmd);
 
-			folio = softleaf_to_folio(entry);
-			flush_needed = 0;
+		folio = softleaf_to_folio(entry);
+		flush_needed = 0;
 
-			if (!thp_migration_supported())
-				WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
-		}
+		if (!thp_migration_supported())
+			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+	}
 
-		if (folio_test_anon(folio)) {
+	if (folio_test_anon(folio)) {
+		zap_deposited_table(tlb->mm, pmd);
+		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+	} else {
+		if (arch_needs_pgtable_deposit())
 			zap_deposited_table(tlb->mm, pmd);
-			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-		} else {
-			if (arch_needs_pgtable_deposit())
-				zap_deposited_table(tlb->mm, pmd);
-			add_mm_counter(tlb->mm, mm_counter_file(folio),
-				       -HPAGE_PMD_NR);
-
-			/*
-			 * Use flush_needed to indicate whether the PMD entry
-			 * is present, instead of checking pmd_present() again.
-			 */
-			if (flush_needed && pmd_young(orig_pmd) &&
-			    likely(vma_has_recency(vma)))
-				folio_mark_accessed(folio);
-		}
+		add_mm_counter(tlb->mm, mm_counter_file(folio),
+			       -HPAGE_PMD_NR);
 
-		if (folio_is_device_private(folio)) {
-			folio_remove_rmap_pmd(folio, &folio->page, vma);
-			WARN_ON_ONCE(folio_mapcount(folio) < 0);
-			folio_put(folio);
-		}
+		/*
+		 * Use flush_needed to indicate whether the PMD entry
+		 * is present, instead of checking pmd_present() again.
+		 */
+		if (flush_needed && pmd_young(orig_pmd) &&
+		    likely(vma_has_recency(vma)))
+			folio_mark_accessed(folio);
+	}
 
-		spin_unlock(ptl);
-		if (flush_needed)
-			tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
+	if (folio_is_device_private(folio)) {
+		folio_remove_rmap_pmd(folio, &folio->page, vma);
+		WARN_ON_ONCE(folio_mapcount(folio) < 0);
+		folio_put(folio);
 	}
+
+	spin_unlock(ptl);
+	if (flush_needed)
+		tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
+
 	return 1;
 }
 
-- 
2.53.0
Re: [PATCH v3 02/13] mm/huge: avoid big else branch in zap_huge_pmd()
Posted by Suren Baghdasaryan 6 days, 8 hours ago
On Fri, Mar 20, 2026 at 11:07 AM Lorenzo Stoakes (Oracle)
<ljs@kernel.org> wrote:
>
> We don't need an extra level of indentation; we can simply exit early
> in the first two branches.
>
> No functional change intended.
>
> Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> Acked-by: Qi Zheng <zhengqi.arch@bytedance.com>
> Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>

Reviewed-by: Suren Baghdasaryan <surenb@google.com>

> [snip]