The function memcg_rstat_updated() is used to track the memcg stats
updates for optimizing the flushes. At the moment, it is not re-entrant
safe and its callers disable irqs before calling it. However, to achieve
the goal of updating memcg stats without disabling irqs,
memcg_rstat_updated() needs to be re-entrant safe against irqs.

This patch makes memcg_rstat_updated() re-entrant safe using this_cpu_*
ops. On archs with CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS, this patch
also makes memcg_rstat_updated() nmi safe.
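
As an illustration (an editorial sketch, not part of the patch): the old
code did a two-step read-modify-write on the per-cpu counter, which an
irq can interleave with, while the new code folds it into a single
per-cpu op that this_cpu_* semantics guarantee to be irq safe:

	/*
	 * Old: non-atomic RMW. An irq arriving between the read and
	 * the write that also updates stats_updates gets its
	 * contribution overwritten by the WRITE_ONCE() below.
	 */
	stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
	WRITE_ONCE(statc->stats_updates, stats_updates);

	/*
	 * New: one per-cpu RMW. this_cpu_* ops are irq safe, and nmi
	 * safe on CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS archs.
	 */
	stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
					    abs(val));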
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
---
mm/memcontrol.c | 34 ++++++++++++++++++++--------------
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a713f160d669..cb10bcd1028d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -506,8 +506,8 @@ struct memcg_vmstats_percpu {
unsigned int stats_updates;
/* Cached pointers for fast iteration in memcg_rstat_updated() */
- struct memcg_vmstats_percpu *parent;
- struct memcg_vmstats *vmstats;
+ struct memcg_vmstats_percpu __percpu *parent_pcpu;
+ struct memcg_vmstats *vmstats;
/* The above should fit a single cacheline for memcg_rstat_updated() */
@@ -589,32 +589,38 @@ static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
- struct memcg_vmstats_percpu *statc;
- int cpu = smp_processor_id();
+ struct memcg_vmstats_percpu __percpu *statc_pcpu;
+ int cpu;
unsigned int stats_updates;
if (!val)
return;
+ /* Don't assume callers have preemption disabled. */
+ cpu = get_cpu();
+
css_rstat_updated(&memcg->css, cpu);
- statc = this_cpu_ptr(memcg->vmstats_percpu);
- for (; statc; statc = statc->parent) {
+ statc_pcpu = memcg->vmstats_percpu;
+ for (; statc_pcpu; statc_pcpu = this_cpu_ptr(statc_pcpu)->parent_pcpu) {
/*
* If @memcg is already flushable then all its ancestors are
* flushable as well and also there is no need to increase
* stats_updates.
*/
- if (memcg_vmstats_needs_flush(statc->vmstats))
+ if (memcg_vmstats_needs_flush(this_cpu_ptr(statc_pcpu)->vmstats))
break;
- stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
- WRITE_ONCE(statc->stats_updates, stats_updates);
+ stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
+ abs(val));
if (stats_updates < MEMCG_CHARGE_BATCH)
continue;
- atomic64_add(stats_updates, &statc->vmstats->stats_updates);
- WRITE_ONCE(statc->stats_updates, 0);
+ stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
+ if (stats_updates)
+ atomic64_add(stats_updates,
+ &this_cpu_ptr(statc_pcpu)->vmstats->stats_updates);
}
+ put_cpu();
}
static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
@@ -3716,7 +3722,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
{
- struct memcg_vmstats_percpu *statc, *pstatc;
+ struct memcg_vmstats_percpu *statc, __percpu *pstatc_pcpu;
struct mem_cgroup *memcg;
int node, cpu;
int __maybe_unused i;
@@ -3747,9 +3753,9 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
for_each_possible_cpu(cpu) {
if (parent)
- pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
+ pstatc_pcpu = parent->vmstats_percpu;
statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
- statc->parent = parent ? pstatc : NULL;
+ statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
statc->vmstats = memcg->vmstats;
}
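
(Editorial aside, a sketch rather than patch content: the cached parent
pointer must become __percpu because this_cpu_* ops take the unresolved
per-cpu address and locate the cpu-local slot themselves; applying them
through a pointer already resolved with this_cpu_ptr() would offset it
by the per-cpu base a second time:)

	struct memcg_vmstats_percpu __percpu *p = memcg->vmstats_percpu;
	struct memcg_vmstats_percpu *resolved;

	/* Correct: pass the __percpu pointer, the op picks this cpu's slot. */
	this_cpu_add_return(p->stats_updates, 1);

	resolved = this_cpu_ptr(p);
	/*
	 * this_cpu_add_return(resolved->stats_updates, 1) would be wrong:
	 * resolved already points into one cpu's copy, and sparse would
	 * warn about the missing __percpu annotation.
	 */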
--
2.47.1
On 5/14/25 07:08, Shakeel Butt wrote:
> The function memcg_rstat_updated() is used to track the memcg stats
> updates for optimizing the flushes. At the moment, it is not re-entrant
> safe and its callers disable irqs before calling it. However, to achieve
> the goal of updating memcg stats without disabling irqs,
> memcg_rstat_updated() needs to be re-entrant safe against irqs.
>
> This patch makes memcg_rstat_updated() re-entrant safe using this_cpu_*
> ops. On archs with CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS, this patch
> also makes memcg_rstat_updated() nmi safe.
>
> Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Some nits:
> static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
> {
> - struct memcg_vmstats_percpu *statc;
> - int cpu = smp_processor_id();
> + struct memcg_vmstats_percpu __percpu *statc_pcpu;
> + int cpu;
> unsigned int stats_updates;
>
> if (!val)
> return;
>
> + /* Don't assume callers have preemption disabled. */
> + cpu = get_cpu();
> +
> css_rstat_updated(&memcg->css, cpu);
> - statc = this_cpu_ptr(memcg->vmstats_percpu);
> - for (; statc; statc = statc->parent) {
> + statc_pcpu = memcg->vmstats_percpu;
Wonder if extracting the this_cpu_ptr() statc pointer would still make the
code a bit simpler when accessing parent_pcpu and vmstats later on.
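Something like the following, perhaps (a rough, untested sketch of that
suggestion, not code from the patch):

	struct memcg_vmstats_percpu *statc;

	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
		statc = this_cpu_ptr(statc_pcpu);

		if (memcg_vmstats_needs_flush(statc->vmstats))
			break;

		stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
						    abs(val));
		if (stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
		if (stats_updates)
			atomic64_add(stats_updates,
				     &statc->vmstats->stats_updates);
	}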
> + for (; statc_pcpu; statc_pcpu = this_cpu_ptr(statc_pcpu)->parent_pcpu) {
> /*
> * If @memcg is already flushable then all its ancestors are
> * flushable as well and also there is no need to increase
> * stats_updates.
> */
> - if (memcg_vmstats_needs_flush(statc->vmstats))
> + if (memcg_vmstats_needs_flush(this_cpu_ptr(statc_pcpu)->vmstats))
> break;
>
> - stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
> - WRITE_ONCE(statc->stats_updates, stats_updates);
> + stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
> + abs(val));
> if (stats_updates < MEMCG_CHARGE_BATCH)
> continue;
>
> - atomic64_add(stats_updates, &statc->vmstats->stats_updates);
> - WRITE_ONCE(statc->stats_updates, 0);
> + stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
> + if (stats_updates)
I think this is very likely to be true (given stats_updates >=
MEMCG_CHARGE_BATCH from above), only an irq can change it at this point? So
we could just do this unconditionally, and if we very rarely add a zero, it
doesn't matter?
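For illustration, the rare interleaving being referred to would look
like this (a hypothetical timeline, all on one cpu):

	task: this_cpu_add_return(statc_pcpu->stats_updates, abs(val))
	        -> returns >= MEMCG_CHARGE_BATCH
	 irq: memcg_rstat_updated() runs, crosses the batch itself, and
	        its own this_cpu_xchg() resets stats_updates to 0
	task: this_cpu_xchg(statc_pcpu->stats_updates, 0)
	        -> returns 0, so there is nothing to add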
> + atomic64_add(stats_updates,
> + &this_cpu_ptr(statc_pcpu)->vmstats->stats_updates);
> }
> + put_cpu();
> }
>
> static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
> @@ -3716,7 +3722,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
>
> static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
> {
> - struct memcg_vmstats_percpu *statc, *pstatc;
> + struct memcg_vmstats_percpu *statc, __percpu *pstatc_pcpu;
> struct mem_cgroup *memcg;
> int node, cpu;
> int __maybe_unused i;
> @@ -3747,9 +3753,9 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
>
> for_each_possible_cpu(cpu) {
> if (parent)
> - pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
> + pstatc_pcpu = parent->vmstats_percpu;
> statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
> - statc->parent = parent ? pstatc : NULL;
> + statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
> statc->vmstats = memcg->vmstats;
> }
>
On Wed, May 14, 2025 at 11:50:16AM +0200, Vlastimil Babka wrote:
> On 5/14/25 07:08, Shakeel Butt wrote:
> > The function memcg_rstat_updated() is used to track the memcg stats
> > updates for optimizing the flushes. At the moment, it is not re-entrant
> > safe and its callers disable irqs before calling it. However, to achieve
> > the goal of updating memcg stats without disabling irqs,
> > memcg_rstat_updated() needs to be re-entrant safe against irqs.
> >
> > This patch makes memcg_rstat_updated() re-entrant safe using this_cpu_*
> > ops. On archs with CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS, this patch
> > also makes memcg_rstat_updated() nmi safe.
> >
> > Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
>
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
>
> Some nits:

Thanks Vlastimil. I will address your comments in v2, and Andrew asked me
to rebase over mm-new instead of next, which I will do as well.