[RFC PATCH 6/7] memcg: objcg stock trylock without irq disabling

Posted by Shakeel Butt 7 months, 1 week ago
There is no need to disable irqs to use the objcg per-cpu stock, so let's
stop doing that. However, consume_obj_stock() and refill_obj_stock() will
need to use trylock instead to keep the per-cpu stock safe. One consequence
of this change is that a charge request from irq context may take the
slowpath more often, but that should be rare.

Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
---
 mm/memcontrol.c | 26 ++++++++++----------------
 1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index adf2f1922118..af7df675d733 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1918,18 +1918,17 @@ static void drain_local_memcg_stock(struct work_struct *dummy)
 static void drain_local_obj_stock(struct work_struct *dummy)
 {
 	struct obj_stock_pcp *stock;
-	unsigned long flags;
 
 	if (WARN_ONCE(!in_task(), "drain in non-task context"))
 		return;
 
-	local_lock_irqsave(&obj_stock.lock, flags);
+	local_lock(&obj_stock.lock);
 
 	stock = this_cpu_ptr(&obj_stock);
 	drain_obj_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(&obj_stock.lock, flags);
+	local_unlock(&obj_stock.lock);
 }
 
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -2062,14 +2061,13 @@ void drain_all_stock(struct mem_cgroup *root_memcg)
 static int memcg_hotplug_cpu_dead(unsigned int cpu)
 {
 	struct obj_stock_pcp *obj_st;
-	unsigned long flags;
 
 	obj_st = &per_cpu(obj_stock, cpu);
 
 	/* drain_obj_stock requires objstock.lock */
-	local_lock_irqsave(&obj_stock.lock, flags);
+	local_lock(&obj_stock.lock);
 	drain_obj_stock(obj_st);
-	local_unlock_irqrestore(&obj_stock.lock, flags);
+	local_unlock(&obj_stock.lock);
 
 	/* no need for the local lock */
 	drain_stock_fully(&per_cpu(memcg_stock, cpu));
@@ -2943,14 +2941,12 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 			      struct pglist_data *pgdat, enum node_stat_item idx)
 {
 	struct obj_stock_pcp *stock;
-	unsigned long flags;
 	bool ret = false;
 
-	if (unlikely(in_nmi()))
+	if (unlikely(in_nmi()) ||
+	    !local_trylock(&obj_stock.lock))
 		return ret;
 
-	local_lock_irqsave(&obj_stock.lock, flags);
-
 	stock = this_cpu_ptr(&obj_stock);
 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
 		stock->nr_bytes -= nr_bytes;
@@ -2960,7 +2956,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 			__account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
 	}
 
-	local_unlock_irqrestore(&obj_stock.lock, flags);
+	local_unlock(&obj_stock.lock);
 
 	return ret;
 }
@@ -3049,10 +3045,10 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		enum node_stat_item idx)
 {
 	struct obj_stock_pcp *stock;
-	unsigned long flags;
 	unsigned int nr_pages = 0;
 
-	if (unlikely(in_nmi())) {
+	if (unlikely(in_nmi()) ||
+	    !local_trylock(&obj_stock.lock)) {
 		if (pgdat)
 			mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes);
 		nr_pages = nr_bytes >> PAGE_SHIFT;
@@ -3061,8 +3057,6 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		goto out;
 	}
 
-	local_lock_irqsave(&obj_stock.lock, flags);
-
 	stock = this_cpu_ptr(&obj_stock);
 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
 		drain_obj_stock(stock);
@@ -3083,7 +3077,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		stock->nr_bytes &= (PAGE_SIZE - 1);
 	}
 
-	local_unlock_irqrestore(&obj_stock.lock, flags);
+	local_unlock(&obj_stock.lock);
 out:
 	if (nr_pages)
 		obj_cgroup_uncharge_pages(objcg, nr_pages);
-- 
2.47.1
Re: [RFC PATCH 6/7] memcg: objcg stock trylock without irq disabling
Posted by Vlastimil Babka 7 months, 1 week ago
On 5/13/25 05:13, Shakeel Butt wrote:
> There is no need to disable irqs to use the objcg per-cpu stock, so let's
> stop doing that. However, consume_obj_stock() and refill_obj_stock() will
> need to use trylock instead to keep the per-cpu stock safe. One consequence

I'd rather say "to avoid deadlock".

> of this change is that a charge request from irq context may take the
> slowpath more often, but that should be rare.
> 
> Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>

Acked-by: Vlastimil Babka <vbabka@suse.cz>
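
To make the changelog's trylock reasoning concrete, below is a rough
userspace sketch of the pattern, not kernel code and not from the patch;
every name in it (stock_lock, consume_stock, charge_slowpath) is an
invented stand-in for the kernel helpers. A caller that loses the trylock
simply skips the cached fast path and charges through the slow path, which
is the "may take the slowpath more often" consequence in the changelog and
the reason Vlastimil frames the trylock as deadlock avoidance.

/*
 * Userspace analogy of trylock-with-slowpath-fallback. All names are
 * invented stand-ins; this only illustrates the control flow.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t stock_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int stock_nr_bytes = 4096;	/* pre-charged byte cache */

/* Stand-in for the real charging slow path. */
static void charge_slowpath(unsigned int nr_bytes)
{
	printf("slowpath charge of %u bytes\n", nr_bytes);
}

/* Fast path: serve the request from the cached stock, if we get the lock. */
static bool consume_stock(unsigned int nr_bytes)
{
	bool ret = false;

	/* Mirrors local_trylock(): give up instead of waiting for the lock. */
	if (pthread_mutex_trylock(&stock_lock) != 0)
		return false;

	if (stock_nr_bytes >= nr_bytes) {
		stock_nr_bytes -= nr_bytes;
		ret = true;
	}
	pthread_mutex_unlock(&stock_lock);

	return ret;
}

static void charge(unsigned int nr_bytes)
{
	if (consume_stock(nr_bytes))
		printf("served %u bytes from the stock\n", nr_bytes);
	else
		charge_slowpath(nr_bytes);
}

int main(void)
{
	charge(512);	/* fits in the cached stock */
	charge(65536);	/* larger than the cache: falls back to the slow path */
	return 0;
}

Built with "cc -pthread", the first call is served from the cached stock
and the second falls back, mirroring how an irq-context charge that loses
the trylock in the patched kernel ends up on the slowpath.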