commit f4cb78af91e3 ("mm: add system wide stats items category") and
commit 9d8573111024 ("mm: don't account memmap per-node") renamed
NR_VM_WRITEBACK_STAT_ITEMS to NR_VM_STAT_ITEMS so that memmap and
memmap_boot pages could be tracked system-wide.

Extend that implementation so that any system-wide page statistic can
be tracked through a generic interface. This is in preparation for the
next patch, which adds a rarely modified system-wide vmstat item.

Note that this implementation uses plain global atomic counters with
no per-cpu optimizations, since the existing use case (memmap pages)
is likewise rarely modified.
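
As a rough usage sketch (NR_FOO_PAGES stands in for a hypothetical
vm_stat_item; it is not added by this patch):

	/* infrequent writer: a single atomic add, no per-cpu batching */
	mod_global_page_state(NR_FOO_PAGES, nr_pages);

	/* reader: negative transients are clamped to zero on CONFIG_SMP */
	nr = global_page_state(NR_FOO_PAGES);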
Signed-off-by: Kinsey Ho <kinseyho@google.com>
---
include/linux/vmstat.h | 8 ++++++++
mm/vmstat.c | 32 +++++++++++++++++++++++---------
2 files changed, 31 insertions(+), 9 deletions(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d2761bf8ff32..ac4d42c4fabd 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -145,6 +145,11 @@ extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
+/*
+ * Global page accounting (no per-cpu differentials).
+ */
+extern atomic_long_t vm_global_stat[NR_VM_STAT_ITEMS];
+
#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
enum numa_stat_item item)
@@ -491,6 +496,9 @@ static inline void node_stat_sub_folio(struct folio *folio,
mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}
+void mod_global_page_state(enum vm_stat_item item, long nr);
+unsigned long global_page_state(enum vm_stat_item item);
+
extern const char * const vmstat_text[];
static inline const char *zone_stat_name(enum zone_stat_item item)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 22a294556b58..e5a6dd5106c2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -161,9 +161,11 @@ void vm_events_fold_cpu(int cpu)
*/
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
+atomic_long_t vm_global_stat[NR_VM_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_node_stat);
+EXPORT_SYMBOL(vm_global_stat);
#ifdef CONFIG_NUMA
static void fold_vm_zone_numa_events(struct zone *zone)
@@ -1033,22 +1035,34 @@ unsigned long node_page_state(struct pglist_data *pgdat,
}
#endif
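+/*
+ * Helpers for system-wide page accounting. There are no per-cpu
+ * differentials: updates hit the global atomics directly, so these
+ * are only suitable for rarely modified counters.
+ */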
+void mod_global_page_state(enum vm_stat_item item, long nr)
+{
+ atomic_long_add(nr, &vm_global_stat[item]);
+}
+
+unsigned long global_page_state(enum vm_stat_item item)
+{
+ long x = atomic_long_read(&vm_global_stat[item]);
+#ifdef CONFIG_SMP
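+	/* match global_node_page_state(): never report a transient negative */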
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
/*
* Count number of pages "struct page" and "struct page_ext" consume.
- * nr_memmap_boot_pages: # of pages allocated by boot allocator
- * nr_memmap_pages: # of pages that were allocated by buddy allocator
+ * NR_MEMMAP_BOOT_PAGES: # of pages allocated by boot allocator
+ * NR_MEMMAP_PAGES: # of pages that were allocated by buddy allocator
*/
-static atomic_long_t nr_memmap_boot_pages = ATOMIC_LONG_INIT(0);
-static atomic_long_t nr_memmap_pages = ATOMIC_LONG_INIT(0);
-
void memmap_boot_pages_add(long delta)
{
- atomic_long_add(delta, &nr_memmap_boot_pages);
+ mod_global_page_state(NR_MEMMAP_BOOT_PAGES, delta);
}
void memmap_pages_add(long delta)
{
- atomic_long_add(delta, &nr_memmap_pages);
+ mod_global_page_state(NR_MEMMAP_PAGES, delta);
}
#ifdef CONFIG_COMPACTION
@@ -1880,8 +1894,8 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
v + NR_DIRTY_THRESHOLD);
- v[NR_MEMMAP_PAGES] = atomic_long_read(&nr_memmap_pages);
- v[NR_MEMMAP_BOOT_PAGES] = atomic_long_read(&nr_memmap_boot_pages);
+ for (int i = NR_MEMMAP_PAGES; i < NR_VM_STAT_ITEMS; i++)
+ v[i] = global_page_state(i);
v += NR_VM_STAT_ITEMS;
#ifdef CONFIG_VM_EVENT_COUNTERS
--
2.47.0.163.g1226f6d8fa-goog