From: Vern Hao <vernhao@tencent.com>
Subject: [PATCH] mm: multi-gen LRU: Optimize some duplicate codes

lru_gen_look_around() and walk_pte_range() share a lot of similar
PTE-scanning logic, so add a common helper, lru_gen_folio_status_check(),
to remove the duplicated code. The helper folds the shared steps - PFN
lookup, young check, folio lookup, accessed-bit clearing, dirty
propagation and generation update - into one place; walk_pte_range()
passes its walk state, while lru_gen_look_around() may pass a NULL walk.
Signed-off-by: Vern Hao <vernhao@tencent.com>
---
mm/vmscan.c | 125 ++++++++++++++++++++++++----------------------------
1 file changed, 57 insertions(+), 68 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6f13394b112e..2b5d61eeb039 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4000,6 +4000,52 @@ static bool suitable_to_scan(int total, int young)
return young * n >= total;
}
+static bool lru_gen_folio_status_check(pte_t *pte, struct vm_area_struct *vma,
+ struct pglist_data *pgdat,
+ unsigned long addr, int new_gen, int *old_count,
+ struct lru_gen_mm_walk *walk, struct mem_cgroup *memcg)
+{
+ struct folio *folio;
+ int old_gen;
+ unsigned long pfn;
+ pte_t ptent = ptep_get(pte);
+
+ pfn = get_pte_pfn(ptent, vma, addr);
+ if (pfn == -1)
+ return false;
+
+ if (!pte_young(ptent)) {
+ (*old_count)++;
+ return false;
+ }
+
+ folio = get_pfn_folio(pfn, memcg, pgdat, !walk || walk->can_swap);
+ if (!folio)
+ return false;
+
+ if (!ptep_test_and_clear_young(vma, addr, pte))
+ VM_WARN_ON_ONCE(true);
+
+ if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
+ !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+ !folio_test_swapcache(folio)))
+ folio_mark_dirty(folio);
+
+ if (walk) {
+ old_gen = folio_update_gen(folio, new_gen);
+ if (old_gen >= 0 && old_gen != new_gen)
+ update_batch_size(walk, folio, old_gen, new_gen);
+ return true;
+ } else {
+ old_gen = folio_lru_gen(folio);
+ if (old_gen < 0)
+ folio_set_referenced(folio);
+ else if (old_gen != new_gen)
+ folio_activate(folio);
+ }
+ return false;
+}
+
static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
struct mm_walk *args)
{
@@ -4012,7 +4058,8 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
struct lru_gen_mm_walk *walk = args->private;
struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
- int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
+ int new_gen = lru_gen_from_seq(walk->max_seq);
+ int old_count = 0;
pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl);
if (!pte)
@@ -4025,41 +4072,15 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
arch_enter_lazy_mmu_mode();
restart:
for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
- unsigned long pfn;
- struct folio *folio;
- pte_t ptent = ptep_get(pte + i);
-
total++;
- walk->mm_stats[MM_LEAF_TOTAL]++;
-
- pfn = get_pte_pfn(ptent, args->vma, addr);
- if (pfn == -1)
- continue;
-
- if (!pte_young(ptent)) {
- walk->mm_stats[MM_LEAF_OLD]++;
- continue;
- }
-
- folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
- if (!folio)
+ if (!lru_gen_folio_status_check(pte + i, args->vma, pgdat,
+ addr, new_gen, &old_count, walk, memcg))
continue;
-
- if (!ptep_test_and_clear_young(args->vma, addr, pte + i))
- VM_WARN_ON_ONCE(true);
-
young++;
- walk->mm_stats[MM_LEAF_YOUNG]++;
-
- if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
- !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_test_swapcache(folio)))
- folio_mark_dirty(folio);
-
- old_gen = folio_update_gen(folio, new_gen);
- if (old_gen >= 0 && old_gen != new_gen)
- update_batch_size(walk, folio, old_gen, new_gen);
}
+ walk->mm_stats[MM_LEAF_TOTAL] += total;
+ walk->mm_stats[MM_LEAF_YOUNG] += young;
+ walk->mm_stats[MM_LEAF_OLD] += old_count;
if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
goto restart;
@@ -4662,7 +4683,8 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
struct pglist_data *pgdat = folio_pgdat(folio);
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
DEFINE_MAX_SEQ(lruvec);
- int old_gen, new_gen = lru_gen_from_seq(max_seq);
+ int new_gen = lru_gen_from_seq(max_seq);
+ int old_count = 0;
lockdep_assert_held(pvmw->ptl);
VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
@@ -4696,43 +4718,10 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
pte -= (addr - start) / PAGE_SIZE;
for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
- unsigned long pfn;
- pte_t ptent = ptep_get(pte + i);
-
- pfn = get_pte_pfn(ptent, pvmw->vma, addr);
- if (pfn == -1)
- continue;
-
- if (!pte_young(ptent))
+ if (!lru_gen_folio_status_check(pte + i, pvmw->vma, pgdat,
+ addr, new_gen, &old_count, walk, memcg))
continue;
-
- folio = get_pfn_folio(pfn, memcg, pgdat, can_swap);
- if (!folio)
- continue;
-
- if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
- VM_WARN_ON_ONCE(true);
-
young++;
-
- if (pte_dirty(ptent) && !folio_test_dirty(folio) &&
- !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_test_swapcache(folio)))
- folio_mark_dirty(folio);
-
- if (walk) {
- old_gen = folio_update_gen(folio, new_gen);
- if (old_gen >= 0 && old_gen != new_gen)
- update_batch_size(walk, folio, old_gen, new_gen);
-
- continue;
- }
-
- old_gen = folio_lru_gen(folio);
- if (old_gen < 0)
- folio_set_referenced(folio);
- else if (old_gen != new_gen)
- folio_activate(folio);
}
arch_leave_lazy_mmu_mode();
--
2.41.0
Hi Xin,
kernel test robot noticed the following build warnings:
[auto build test WARNING on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Xin-Hao/mm-multi-gen-LRU-Optimize-some-duplicate-codes/20230824-193855
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20230824113538.5160-1-user%40VERNHAO-MC1
patch subject: [PATCH] mm: multi-gen LRU: Optimize some duplicate codes
config: openrisc-randconfig-r025-20230825 (https://download.01.org/0day-ci/archive/20230825/202308251315.UUQoJwKC-lkp@intel.com/config)
compiler: or1k-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20230825/202308251315.UUQoJwKC-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202308251315.UUQoJwKC-lkp@intel.com/
All warnings (new ones prefixed by >>):
mm/vmscan.c: In function 'lru_gen_look_around':
>> mm/vmscan.c:4681:14: warning: unused variable 'can_swap' [-Wunused-variable]
4681 | bool can_swap = !folio_is_file_lru(folio);
| ^~~~~~~~
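The warning is expected with this refactor: the only reader of can_swap in
lru_gen_look_around() moved into lru_gen_folio_status_check(), which derives
swappability as !walk || walk->can_swap instead. A minimal sketch of one
possible fixup, assuming that fallback is the intended replacement for the
local, is simply to drop the now-unused variable:

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4678,7 +4678,6 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 	pte_t *pte = pvmw->pte;
 	unsigned long addr = pvmw->address;
 	struct folio *folio = pfn_folio(pvmw->pfn);
-	bool can_swap = !folio_is_file_lru(folio);
 	struct mem_cgroup *memcg = folio_memcg(folio);
 	struct pglist_data *pgdat = folio_pgdat(folio);
 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

(Whether the !walk fallback, which always passes true, should instead
preserve the old !folio_is_file_lru(folio) value is a separate question
from the warning itself.)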
vim +/can_swap +4681 mm/vmscan.c
ac35a490237446 Yu Zhao 2022-09-18 4659
db19a43d9b3a88 T.J. Alumbaugh 2023-01-18 4660 /******************************************************************************
db19a43d9b3a88 T.J. Alumbaugh 2023-01-18 4661 * rmap/PT walk feedback
db19a43d9b3a88 T.J. Alumbaugh 2023-01-18 4662 ******************************************************************************/
db19a43d9b3a88 T.J. Alumbaugh 2023-01-18 4663
018ee47f14893d Yu Zhao 2022-09-18 4664 /*
49fd9b6df54e61 Matthew Wilcox (Oracle 2022-09-02 4665) * This function exploits spatial locality when shrink_folio_list() walks the
bd74fdaea14602 Yu Zhao 2022-09-18 4666 * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If
bd74fdaea14602 Yu Zhao 2022-09-18 4667 * the scan was done cacheline efficiently, it adds the PMD entry pointing to
bd74fdaea14602 Yu Zhao 2022-09-18 4668 * the PTE table to the Bloom filter. This forms a feedback loop between the
bd74fdaea14602 Yu Zhao 2022-09-18 4669 * eviction and the aging.
018ee47f14893d Yu Zhao 2022-09-18 4670 */
018ee47f14893d Yu Zhao 2022-09-18 4671 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
018ee47f14893d Yu Zhao 2022-09-18 4672 {
018ee47f14893d Yu Zhao 2022-09-18 4673 int i;
018ee47f14893d Yu Zhao 2022-09-18 4674 unsigned long start;
018ee47f14893d Yu Zhao 2022-09-18 4675 unsigned long end;
bd74fdaea14602 Yu Zhao 2022-09-18 4676 struct lru_gen_mm_walk *walk;
bd74fdaea14602 Yu Zhao 2022-09-18 4677 int young = 0;
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4678 pte_t *pte = pvmw->pte;
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4679 unsigned long addr = pvmw->address;
018ee47f14893d Yu Zhao 2022-09-18 4680 struct folio *folio = pfn_folio(pvmw->pfn);
a3235ea2a88b78 Kalesh Singh 2023-08-01 @4681 bool can_swap = !folio_is_file_lru(folio);
018ee47f14893d Yu Zhao 2022-09-18 4682 struct mem_cgroup *memcg = folio_memcg(folio);
018ee47f14893d Yu Zhao 2022-09-18 4683 struct pglist_data *pgdat = folio_pgdat(folio);
018ee47f14893d Yu Zhao 2022-09-18 4684 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
018ee47f14893d Yu Zhao 2022-09-18 4685 DEFINE_MAX_SEQ(lruvec);
5eaa8481da76b4 Vern Hao 2023-08-24 4686 int new_gen = lru_gen_from_seq(max_seq);
5eaa8481da76b4 Vern Hao 2023-08-24 4687 int old_count = 0;
018ee47f14893d Yu Zhao 2022-09-18 4688
018ee47f14893d Yu Zhao 2022-09-18 4689 lockdep_assert_held(pvmw->ptl);
018ee47f14893d Yu Zhao 2022-09-18 4690 VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
018ee47f14893d Yu Zhao 2022-09-18 4691
018ee47f14893d Yu Zhao 2022-09-18 4692 if (spin_is_contended(pvmw->ptl))
018ee47f14893d Yu Zhao 2022-09-18 4693 return;
018ee47f14893d Yu Zhao 2022-09-18 4694
bd74fdaea14602 Yu Zhao 2022-09-18 4695 /* avoid taking the LRU lock under the PTL when possible */
bd74fdaea14602 Yu Zhao 2022-09-18 4696 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
bd74fdaea14602 Yu Zhao 2022-09-18 4697
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4698 start = max(addr & PMD_MASK, pvmw->vma->vm_start);
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4699 end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
018ee47f14893d Yu Zhao 2022-09-18 4700
018ee47f14893d Yu Zhao 2022-09-18 4701 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4702 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
018ee47f14893d Yu Zhao 2022-09-18 4703 end = start + MIN_LRU_BATCH * PAGE_SIZE;
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4704 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
018ee47f14893d Yu Zhao 2022-09-18 4705 start = end - MIN_LRU_BATCH * PAGE_SIZE;
018ee47f14893d Yu Zhao 2022-09-18 4706 else {
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4707 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4708 end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
018ee47f14893d Yu Zhao 2022-09-18 4709 }
018ee47f14893d Yu Zhao 2022-09-18 4710 }
018ee47f14893d Yu Zhao 2022-09-18 4711
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4712 /* folio_update_gen() requires stable folio_memcg() */
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4713 if (!mem_cgroup_trylock_pages(memcg))
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4714 return;
018ee47f14893d Yu Zhao 2022-09-18 4715
018ee47f14893d Yu Zhao 2022-09-18 4716 arch_enter_lazy_mmu_mode();
018ee47f14893d Yu Zhao 2022-09-18 4717
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4718 pte -= (addr - start) / PAGE_SIZE;
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4719
018ee47f14893d Yu Zhao 2022-09-18 4720 for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
5eaa8481da76b4 Vern Hao 2023-08-24 4721 if (!lru_gen_folio_status_check(pte + i, pvmw->vma, pgdat,
5eaa8481da76b4 Vern Hao 2023-08-24 4722 addr, new_gen, &old_count, walk, memcg))
018ee47f14893d Yu Zhao 2022-09-18 4723 continue;
bd74fdaea14602 Yu Zhao 2022-09-18 4724 young++;
018ee47f14893d Yu Zhao 2022-09-18 4725 }
018ee47f14893d Yu Zhao 2022-09-18 4726
018ee47f14893d Yu Zhao 2022-09-18 4727 arch_leave_lazy_mmu_mode();
abf086721a2f1e T.J. Alumbaugh 2023-01-18 4728 mem_cgroup_unlock_pages();
018ee47f14893d Yu Zhao 2022-09-18 4729
bd74fdaea14602 Yu Zhao 2022-09-18 4730 /* feedback from rmap walkers to page table walkers */
bd74fdaea14602 Yu Zhao 2022-09-18 4731 if (suitable_to_scan(i, young))
bd74fdaea14602 Yu Zhao 2022-09-18 4732 update_bloom_filter(lruvec, max_seq, pvmw->pmd);
018ee47f14893d Yu Zhao 2022-09-18 4733 }
018ee47f14893d Yu Zhao 2022-09-18 4734
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki