From: Shakeel Butt
Date: Wed, 14 May 2025 18:41:58 +0000 (-0700)
Subject: memcg: objcg stock trylock without irq disabling
X-Git-Url: http://git.monstr.eu/?a=commitdiff_plain;h=200577f69f29a58c90c67c83a0df6d12850e1d09;p=linux-2.6-microblaze.git

memcg: objcg stock trylock without irq disabling

There is no need to disable irqs to use the objcg per-cpu stock, so stop
doing that.  consume_obj_stock() and refill_obj_stock() must then use a
trylock instead, to avoid deadlocking against an irq.  One consequence of
this change is that a charge request from irq context may take the
slowpath more often, but that should be rare.

Link: https://lkml.kernel.org/r/20250514184158.3471331-8-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt
Acked-by: Vlastimil Babka
Cc: Alexei Starovoitov
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Muchun Song
Cc: Roman Gushchin
Cc: Sebastian Andrzej Siewior
Signed-off-by: Andrew Morton
---

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d8508b57d0fa..35db91fddd1f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1880,18 +1880,17 @@ static void drain_local_memcg_stock(struct work_struct *dummy)
 static void drain_local_obj_stock(struct work_struct *dummy)
 {
 	struct obj_stock_pcp *stock;
-	unsigned long flags;
 
 	if (WARN_ONCE(!in_task(), "drain in non-task context"))
 		return;
 
-	local_lock_irqsave(&obj_stock.lock, flags);
+	local_lock(&obj_stock.lock);
 
 	stock = this_cpu_ptr(&obj_stock);
 	drain_obj_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(&obj_stock.lock, flags);
+	local_unlock(&obj_stock.lock);
 }
 
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -2874,10 +2873,10 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 			      struct pglist_data *pgdat, enum node_stat_item idx)
 {
 	struct obj_stock_pcp *stock;
-	unsigned long flags;
 	bool ret = false;
 
-	local_lock_irqsave(&obj_stock.lock, flags);
+	if (!local_trylock(&obj_stock.lock))
+		return ret;
 
 	stock = this_cpu_ptr(&obj_stock);
 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
@@ -2888,7 +2887,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 			__account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
 	}
 
-	local_unlock_irqrestore(&obj_stock.lock, flags);
+	local_unlock(&obj_stock.lock);
 
 	return ret;
 }
@@ -2977,10 +2976,16 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 			     enum node_stat_item idx)
 {
 	struct obj_stock_pcp *stock;
-	unsigned long flags;
 	unsigned int nr_pages = 0;
 
-	local_lock_irqsave(&obj_stock.lock, flags);
+	if (!local_trylock(&obj_stock.lock)) {
+		if (pgdat)
+			mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes);
+		nr_pages = nr_bytes >> PAGE_SHIFT;
+		nr_bytes = nr_bytes & (PAGE_SIZE - 1);
+		atomic_add(nr_bytes, &objcg->nr_charged_bytes);
+		goto out;
+	}
 
 	stock = this_cpu_ptr(&obj_stock);
 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -3002,8 +3007,8 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		stock->nr_bytes &= (PAGE_SIZE - 1);
 	}
 
-	local_unlock_irqrestore(&obj_stock.lock, flags);
-
+	local_unlock(&obj_stock.lock);
+out:
	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);
 }
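
[Illustrative note, not part of the patch: a minimal sketch of the
consume-side pattern the patch adopts.  Only local_trylock(),
local_unlock() and the bail-to-slowpath shape come from the patch
itself; the function name and body below are hypothetical.]

/*
 * Sketch of the trylock fast path.  On a non-PREEMPT_RT kernel,
 * local_trylock() can only fail when the current context (for
 * example an irq handler) has interrupted the lock holder on the
 * same CPU, which is precisely the case where taking the lock
 * would deadlock.  Failure therefore just routes the request to
 * the slowpath instead of disabling irqs up front.
 */
static bool objcg_consume_sketch(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	bool ret = false;

	if (!local_trylock(&obj_stock.lock))
		return ret;	/* contended on this CPU: take slowpath */

	/* ... check the cached objcg and consume from the per-cpu stock ... */

	local_unlock(&obj_stock.lock);
	return ret;
}

[The refill side cannot simply bail out, since the bytes being
returned must not be lost; that is why the patch's refill_obj_stock()
fallback flushes the stats via mod_objcg_mlstate() and adds the
sub-page remainder to objcg->nr_charged_bytes before uncharging whole
pages.]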