From: xu xin <xu.xin16@zte.com.cn>
This is a minor performance optimization: addr is computed from
rmap_item->address, which is invariant across the inner
anon_vma_interval_tree_foreach() iterations, so there is no need to
recompute it on every pass. Hoist the initialization out of the inner
loop so addr is computed once per rmap_item. This helps most when the
interval tree yields many VMAs per iteration.
Signed-off-by: xu xin <xu.xin16@zte.com.cn>
---
mm/ksm.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 2d89a7c8b4eb..950e122bcbf4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -3168,6 +3168,8 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
return;
again:
hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
+ /* Ignore the stable/unstable/sqnr flags */
+ const unsigned long addr = rmap_item->address & PAGE_MASK;
struct anon_vma *anon_vma = rmap_item->anon_vma;
struct anon_vma_chain *vmac;
struct vm_area_struct *vma;
@@ -3180,16 +3182,13 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
}
anon_vma_lock_read(anon_vma);
}
+
anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
0, ULONG_MAX) {
- unsigned long addr;
cond_resched();
vma = vmac->vma;
- /* Ignore the stable/unstable/sqnr flags */
- addr = rmap_item->address & PAGE_MASK;
-
if (addr < vma->vm_start || addr >= vma->vm_end)
continue;
/*
--
2.25.1