[RFC PATCH V2 11/13] vmstat: Add vmstat counters

Raghavendra K T posted 13 patches 3 months, 2 weeks ago
There is a newer version of this series
[RFC PATCH V2 11/13] vmstat: Add vmstat counters
Posted by Raghavendra K T 3 months, 2 weeks ago
Add vmstat counters to track page scanning (mm and VMA scans),
migration outcomes (pages added for migration, migrated, failed),
and the type of pages encountered (top-tier, slow-tier, idle).

Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
---
 include/linux/mm.h            | 11 ++++++++
 include/linux/vm_event_item.h | 10 +++++++
 mm/kscand.c                   | 51 ++++++++++++++++++++++++++++++++++-
 mm/vmstat.c                   | 10 +++++++
 4 files changed, 81 insertions(+), 1 deletion(-)

Note: this implementation is expected to change along with the
upcoming vmstat rework.

diff --git a/include/linux/mm.h b/include/linux/mm.h
index fdda6b16263b..b67d06cbc2ed 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -656,6 +656,17 @@ struct vm_operations_struct {
 					  unsigned long addr);
 };
 
+#ifdef CONFIG_KSCAND
+void count_kscand_mm_scans(void);
+void count_kscand_vma_scans(void);
+void count_kscand_migadded(void);
+void count_kscand_migrated(void);
+void count_kscand_migrate_failed(void);
+void count_kscand_slowtier(void);
+void count_kscand_toptier(void);
+void count_kscand_idlepage(void);
+#endif
+
 #ifdef CONFIG_NUMA_BALANCING
 static inline void vma_numab_state_init(struct vm_area_struct *vma)
 {
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 9e15a088ba38..8f324ad73821 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -67,6 +67,16 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		NUMA_HINT_FAULTS_LOCAL,
 		NUMA_PAGE_MIGRATE,
 #endif
+#ifdef CONFIG_KSCAND
+		KSCAND_MM_SCANS,
+		KSCAND_VMA_SCANS,
+		KSCAND_MIGADDED,
+		KSCAND_MIGRATED,
+		KSCAND_MIGRATE_FAILED,
+		KSCAND_SLOWTIER,
+		KSCAND_TOPTIER,
+		KSCAND_IDLEPAGE,
+#endif
 #ifdef CONFIG_MIGRATION
 		PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
 		THP_MIGRATION_SUCCESS,
diff --git a/mm/kscand.c b/mm/kscand.c
index abffcb868447..db7b2f940f36 100644
--- a/mm/kscand.c
+++ b/mm/kscand.c
@@ -337,6 +337,39 @@ struct attribute_group kscand_attr_group = {
 };
 #endif
 
+void count_kscand_mm_scans(void)
+{
+	count_vm_numa_event(KSCAND_MM_SCANS);
+}
+void count_kscand_vma_scans(void)
+{
+	count_vm_numa_event(KSCAND_VMA_SCANS);
+}
+void count_kscand_migadded(void)
+{
+	count_vm_numa_event(KSCAND_MIGADDED);
+}
+void count_kscand_migrated(void)
+{
+	count_vm_numa_event(KSCAND_MIGRATED);
+}
+void count_kscand_migrate_failed(void)
+{
+	count_vm_numa_event(KSCAND_MIGRATE_FAILED);
+}
+void count_kscand_slowtier(void)
+{
+	count_vm_numa_event(KSCAND_SLOWTIER);
+}
+void count_kscand_toptier(void)
+{
+	count_vm_numa_event(KSCAND_TOPTIER);
+}
+void count_kscand_idlepage(void)
+{
+	count_vm_numa_event(KSCAND_IDLEPAGE);
+}
+
 static inline int kscand_has_work(void)
 {
 	return !list_empty(&kscand_scan.mm_head);
@@ -789,6 +822,9 @@ static int hot_vma_idle_pte_entry(pte_t *pte,
 		return 0;
 	}
 
+	if (node_is_toptier(srcnid))
+		count_kscand_toptier();
+
 	if (!folio_test_idle(folio) || folio_test_young(folio) ||
 			mmu_notifier_test_young(mm, addr) ||
 			folio_test_referenced(folio) || pte_young(pteval)) {
@@ -802,11 +838,14 @@ static int hot_vma_idle_pte_entry(pte_t *pte,
 
 		info = kzalloc(sizeof(struct kscand_migrate_info), GFP_NOWAIT);
 		if (info && scanctrl) {
+			count_kscand_slowtier();
 			info->address = addr;
 			info->folio = folio;
 			list_add_tail(&info->migrate_node, &scanctrl->scan_list);
+			count_kscand_migadded();
 		}
-	}
+	} else
+		count_kscand_idlepage();
 
 	folio_set_idle(folio);
 	folio_put(folio);
@@ -997,6 +1036,12 @@ static void kmigrated_migrate_mm(struct kmigrated_mm_slot *mm_slot)
 
 			ret = kmigrated_promote_folio(info, mm, dest);
 
+			/* TBD: encode migrated count here, currently assume folio_nr_pages */
+			if (!ret)
+				count_kscand_migrated();
+			else
+				count_kscand_migrate_failed();
+
 			kfree(info);
 
 			cond_resched();
@@ -1202,6 +1247,7 @@ static unsigned long kscand_scan_mm_slot(void)
 
 	for_each_vma(vmi, vma) {
 		kscand_walk_page_vma(vma, &kscand_scanctrl);
+		count_kscand_vma_scans();
 		vma_scanned_size += vma->vm_end - vma->vm_start;
 
 		if (vma_scanned_size >= mm_slot_scan_size ||
@@ -1237,6 +1283,8 @@ static unsigned long kscand_scan_mm_slot(void)
 
 	update_mmslot_info = true;
 
+	count_kscand_mm_scans();
+
 	total = get_slowtier_accesed(&kscand_scanctrl);
 	target_node = get_target_node(&kscand_scanctrl);
 
@@ -1252,6 +1300,7 @@ static unsigned long kscand_scan_mm_slot(void)
 		kscand_update_mmslot_info(mm_slot, total, target_node);
 	}
 
+
 outerloop:
 	/* exit_mmap will destroy ptes after this */
 	mmap_read_unlock(mm);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4c268ce39ff2..d32e88e4153d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1348,6 +1348,16 @@ const char * const vmstat_text[] = {
 	"numa_hint_faults_local",
 	"numa_pages_migrated",
 #endif
+#ifdef CONFIG_KSCAND
+	"nr_kscand_mm_scans",
+	"nr_kscand_vma_scans",
+	"nr_kscand_migadded",
+	"nr_kscand_migrated",
+	"nr_kscand_migrate_failed",
+	"nr_kscand_slowtier",
+	"nr_kscand_toptier",
+	"nr_kscand_idlepage",
+#endif
 #ifdef CONFIG_MIGRATION
 	"pgmigrate_success",
 	"pgmigrate_fail",
-- 
2.34.1